# Copyright 2021, Blue Brain Project, EPFL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The fine merging of the annotation atlases."""
from collections import deque

import numpy as np

from .JSONread import RegionData


def explore_voxel(origin, data, count=-1):
"""Explore a given voxel.
Ask Dimitri for more details.
Parameters
----------
origin : sequence
A triplet with the (x, y, z) coordinates of the origin voxel.
data : np.ndarray
A 3D array with the volume data.
count : int
Maximal number of iterations.
Returns
-------
value : int
The value of some voxel in the data volume.
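
    Examples
    --------
    A minimal sketch on a toy volume (values chosen for illustration only):

    >>> vol = np.zeros((3, 3, 3), dtype=int)
    >>> vol[1, 1, 1] = 5
    >>> vol[2, 1, 1] = 7
    >>> int(explore_voxel([1, 1, 1], vol))
    7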
"""
    origin_value = data[origin[0], origin[1], origin[2]]
    explored = np.zeros(data.shape, dtype=bool)
    explored[origin[0], origin[1], origin[2]] = True
    # Breadth-first search over the 6-connected neighbourhood.
    to_explore = deque([tuple(origin)])
    max_x, max_y, max_z = data.shape
    while to_explore and count != 0:
        current_voxel = to_explore.popleft()
        current_value = data[current_voxel]
        if current_value != origin_value and current_value != 0:
            return current_value
        for dx, dy, dz in [
            (-1, 0, 0),
            (0, -1, 0),
            (1, 0, 0),
            (0, 1, 0),
            (0, 0, -1),
            (0, 0, 1),
        ]:
            new_vox = (
                current_voxel[0] + dx,
                current_voxel[1] + dy,
                current_voxel[2] + dz,
            )
            if (
                0 <= new_vox[0] < max_x
                and 0 <= new_vox[1] < max_y
                and 0 <= new_vox[2] < max_z
                and not explored[new_vox]
            ):
                explored[new_vox] = True
                to_explore.append(new_vox)
        count -= 1
    # No differing non-zero value was found within the iteration budget.
    return origin_value

def fine_merge(annotation, annotation2, brain_regions):
"""Perform the coarse atlas merging.
Parameters
----------
annotation : np.ndarray
The first atlas to merge, usually CCFv2
annotation2 : np.ndarray
The second atlas to merge, usually CCFv3
brain_regions : dict
The brain regions dictionary. Can be obtained from the "msg" key of
the `brain_regions.json` (`1.json`) file.
Returns
-------
ccfv2_corrected : np.ndarray
The merged CCFv2 atlas.
ccfv3_corrected : np.ndarray
The merged CCFv3 atlas.
"""
region_data = RegionData(brain_regions)
uniques = region_data.find_unique_regions(annotation, top_region_name="root")
children, _ = region_data.find_children(uniques)
uniques2 = region_data.find_unique_regions(annotation2, top_region_name="root")
children2, _ = region_data.find_children(uniques2)
ccfv2_corrected = np.copy(annotation)
ccfv3_corrected = np.copy(annotation2)
ids = np.unique(ccfv2_corrected)
ids2 = np.unique(ccfv3_corrected)
    ids_to_correct = ids[np.isin(ids, ids2, invert=True)]  # in CCFv2 but not in CCFv3
    for id_reg in ids_to_correct:
        allname = region_data.id_to_region_dictionary_ALLNAME[id_reg]
        name = region_data.id_to_region_dictionary[id_reg]
        is_leaf = region_data.is_leaf[allname]
        if is_leaf:
            parent_id = region_data.region_dictionary_to_id[
                region_data.region_dictionary_to_id_parent[name]
            ]
        # A leaf region missing from CCFv3 whose direct parent does exist
        # there is relabeled with the parent id.
        if is_leaf and id_reg not in ids2 and parent_id in ids2:
            ccfv2_corrected[ccfv2_corrected == id_reg] = parent_id
        # A few leaf regions are merged two levels up, into their grandparent.
        elif is_leaf and (
            "Medial amygdalar nucleus" in allname
            or "Subiculum" in allname
            or "Bed nuclei of the stria terminalis" in allname
        ):
            ccfv2_corrected[
                ccfv2_corrected == id_reg
            ] = region_data.region_dictionary_to_id[
                region_data.region_dictionary_to_id_parent[
                    region_data.region_dictionary_to_id_parent[name]
                ]
            ]
        elif "Paraventricular hypothalamic nucleus" in allname:
            ccfv2_corrected[ccfv2_corrected == id_reg] = 38
# Hippocampus Field CA2 is strongly different -> merge it with CA1
ccfv2_corrected[ccfv2_corrected == 423] = 382
ccfv3_corrected[ccfv3_corrected == 423] = 382
# Entorhinal area, lateral part
ccfv2_corrected[np.where(ccfv2_corrected == 60)] = 28 # L6b -> L6a
ccfv2_corrected[np.where(ccfv2_corrected == 999)] = 20 # L2/3 -> L2 # double check?
ccfv2_corrected[np.where(ccfv2_corrected == 715)] = 20 # L2a -> L2
ccfv2_corrected[np.where(ccfv2_corrected == 764)] = 20 # L2b -> L2
ccfv2_corrected[np.where(ccfv2_corrected == 92)] = 139 # L4 -> L5
ccfv2_corrected[np.where(ccfv2_corrected == 312)] = 139 # L4/5 -> L5
# Entorhinal area, medial part, dorsal zone
ccfv2_corrected[np.where(ccfv2_corrected == 468)] = 543 # L2a -> L2
ccfv2_corrected[np.where(ccfv2_corrected == 508)] = 543 # L2b -> L2
ccfv2_corrected[np.where(ccfv2_corrected == 712)] = 727 # L4 -> L5 # double check?
ccfv2_corrected[np.where(ccfv2_corrected == 195)] = 304 # L2 -> L2/3
ccfv2_corrected[np.where(ccfv2_corrected == 524)] = 582 # L2 -> L2/3
ccfv2_corrected[np.where(ccfv2_corrected == 606)] = 430 # L2 -> L2/3
ccfv2_corrected[np.where(ccfv2_corrected == 747)] = 556 # L2 -> L2/3
# subreg of Cochlear nuclei -> Cochlear nuclei
ccfv2_corrected[np.where(ccfv2_corrected == 96)] = 607
ccfv2_corrected[np.where(ccfv2_corrected == 101)] = 607
ccfv2_corrected[np.where(ccfv2_corrected == 112)] = 607
ccfv2_corrected[np.where(ccfv2_corrected == 560)] = 607
ccfv3_corrected[np.where(ccfv3_corrected == 96)] = 607
ccfv3_corrected[np.where(ccfv3_corrected == 101)] = 607
# subreg of Nucleus ambiguus -> Nucleus ambiguus
ccfv2_corrected[np.where(ccfv2_corrected == 143)] = 135
ccfv2_corrected[np.where(ccfv2_corrected == 939)] = 135
ccfv3_corrected[np.where(ccfv3_corrected == 143)] = 135
ccfv3_corrected[np.where(ccfv3_corrected == 939)] = 135
# subreg of Accessory olfactory bulb -> Accessory olfactory bulb
ccfv2_corrected[np.where(ccfv2_corrected == 188)] = 151
ccfv2_corrected[np.where(ccfv2_corrected == 196)] = 151
ccfv2_corrected[np.where(ccfv2_corrected == 204)] = 151
ccfv3_corrected[np.where(ccfv3_corrected == 188)] = 151
ccfv3_corrected[np.where(ccfv3_corrected == 196)] = 151
ccfv3_corrected[np.where(ccfv3_corrected == 204)] = 151
# subreg of Medial mammillary nucleus -> Medial mammillary nucleus
ccfv2_corrected[np.where(ccfv2_corrected == 798)] = 491
ccfv3_corrected[np.where(ccfv3_corrected == 798)] = 491
ccfv3_corrected[np.where(ccfv3_corrected == 606826647)] = 491
ccfv3_corrected[np.where(ccfv3_corrected == 606826651)] = 491
ccfv3_corrected[np.where(ccfv3_corrected == 606826655)] = 491
ccfv3_corrected[np.where(ccfv3_corrected == 606826659)] = 491
# Subreg to Dorsal part of the lateral geniculate complex
ccfv3_corrected[np.where(ccfv3_corrected == 496345664)] = 170
ccfv3_corrected[np.where(ccfv3_corrected == 496345668)] = 170
ccfv3_corrected[np.where(ccfv3_corrected == 496345672)] = 170
# Subreg to Lateral reticular nucleus
ccfv2_corrected[np.where(ccfv2_corrected == 955)] = 235
ccfv2_corrected[np.where(ccfv2_corrected == 963)] = 235
ccfv3_corrected[np.where(ccfv3_corrected == 955)] = 235
ccfv3_corrected[np.where(ccfv3_corrected == 963)] = 235
# subreg of Posterior parietal association areas combined layer by layer
ccfv3_corrected[np.where(ccfv3_corrected == 312782550)] = 532
ccfv3_corrected[np.where(ccfv3_corrected == 312782604)] = 532
ccfv3_corrected[np.where(ccfv3_corrected == 312782554)] = 241
ccfv3_corrected[np.where(ccfv3_corrected == 312782608)] = 241
ccfv3_corrected[np.where(ccfv3_corrected == 312782558)] = 635
ccfv3_corrected[np.where(ccfv3_corrected == 312782612)] = 635
ccfv3_corrected[np.where(ccfv3_corrected == 312782562)] = 683
ccfv3_corrected[np.where(ccfv3_corrected == 312782616)] = 683
ccfv3_corrected[np.where(ccfv3_corrected == 312782566)] = 308
ccfv3_corrected[np.where(ccfv3_corrected == 312782620)] = 308
ccfv3_corrected[np.where(ccfv3_corrected == 312782570)] = 340
ccfv3_corrected[np.where(ccfv3_corrected == 312782624)] = 340
# subreg to Parabrachial nucleus
ccfv2_corrected[np.where(ccfv2_corrected == 123)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 860)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 868)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 875)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 883)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 891)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 899)] = 867
ccfv2_corrected[np.where(ccfv2_corrected == 915)] = 867
ccfv3_corrected[np.where(ccfv3_corrected == 123)] = 867
    # Skip the first unique id, which is the background (id 0).
    for id_reg in np.unique(np.concatenate((ids, ids2)))[1:]:
allname = region_data.id_to_region_dictionary_ALLNAME[id_reg]
if "Visual areas" in allname:
if "ayer 1" in allname:
ccfv3_corrected[np.where(ccfv3_corrected == id_reg)] = 801
ccfv2_corrected[np.where(ccfv2_corrected == id_reg)] = 801
elif "ayer 2/3" in allname:
                ccfv3_corrected[np.where(ccfv3_corrected == id_reg)] = ...
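
# Hypothetical usage sketch (the loader, file names, and the JSON structure
# below are assumptions, not part of this module):
#
#     import json
#     import nrrd  # pynrrd
#     ccfv2, _ = nrrd.read("annotation_ccfv2.nrrd")
#     ccfv3, _ = nrrd.read("annotation_ccfv3.nrrd")
#     with open("1.json") as f:
#         brain_regions = json.load(f)["msg"][0]
#     ccfv2_corrected, ccfv3_corrected = fine_merge(ccfv2, ccfv3, brain_regions)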
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
    # Enumerate every pair of petal-length and petal-width intervals to use
    # as candidate discretisations.
    discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
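
# A minimal way to run these checks, assuming the file is collected by pytest
# (the file name below is an assumption):
#
#     pytest -q test_surrogates_overview.py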
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
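
# Keys of EXP_TAB follow the "<instance>&<class index>&<discretisation index>"
# pattern built in test_bilmey_tabular (e.g. 'setosa&0&0'); each value holds
# the expected importances of the two discretised features.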
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&184': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&185': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&186': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&187': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&188': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&189': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&190': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&191': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&192': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&193': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&194': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&195': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&196': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&197': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&198': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&199': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&200': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&201': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&202': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&203': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&204': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&205': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&206': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&207': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&208': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&209': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&210': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&211': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&212': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&213': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&214': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&215': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&216': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&217': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&218': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&219': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&220': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&221': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&222': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&223': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&224': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&225': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&226': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&227': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&228': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&229': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&230': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&231': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&232': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&233': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&234': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&235': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&236': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&237': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&238': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&239': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&240': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&241': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&242': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&243': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&244': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&245': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&246': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&247': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&248': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&249': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&250': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&251': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&252': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&253': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&254': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&255': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&256': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&257': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&258': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&259': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&260': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&261': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&262': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&263': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&264': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&265': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&266': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&267': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&268': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&269': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&270': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&271': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&272': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&273': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&274': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&275': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&276': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&277': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&278': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&279': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&280': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&281': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&282': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&283': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&284': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&285': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&286': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&287': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&288': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&289': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&290': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&291': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&292': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&293': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&294': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&295': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&296': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&297': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&298': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&299': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&300': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&301': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&302': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&303': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&304': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&305': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&306': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&307': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&308': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&309': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&310': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&311': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&312': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&313': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&314': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&2&0': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&1': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&2': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&3': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&4': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&5': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&6': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&7': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&8': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&9': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&10': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&11': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&12': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&13': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&14': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&15': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&16': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&17': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&18': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&19': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&20': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&21': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&22': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&23': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&24': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&25': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&26': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&27': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&28': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&29': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&30': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&31': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&32': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&33': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&34': np.array([-0.5354807894355184, -0.3418054346754283]),
'setosa&2&35': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&36': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&37': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&38': np.array([-0.5188517506916893, -0.036358567813067795]),
'setosa&2&39': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&40': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&41': np.array([-0.513193927394545, -0.041997482667908786]),
'setosa&2&42': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&43': np.array([-0.06285591932387405, -0.6914253444924359]),
'setosa&2&44': np.array([-0.34904320225465857, -0.6233384360811872]),
'setosa&2&45': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&46': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&47': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&48': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&49': np.array([-0.8735738195653328, -0.046438180466149094]),
'setosa&2&50': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&51': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&52': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&53': np.array([-0.8252668830593567, -0.11450866713130638]),
'setosa&2&54': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&55': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&56': np.array([-0.8211795643076093, -0.1186965077161071]),
'setosa&2&57': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&58': np.array([-0.6441664102689847, -0.3012046426099901]),
'setosa&2&59': np.array([-0.7640280271176497, -0.19364537761420375]),
'setosa&2&60': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&61': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&62': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&63': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&64': np.array([-0.6188410763351541, -0.22803625884668638]),
'setosa&2&65': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&66': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&67': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&68': np.array([-0.5227340800279542, -0.42092675740881474]),
'setosa&2&69': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&70': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&71': np.array([-0.5140708637198534, -0.43053612380573514]),
'setosa&2&72': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&73': np.array([-0.2661726847443776, -0.6902916602462779]),
'setosa&2&74': np.array([-0.2741128763380603, -0.7260889090887469]),
'setosa&2&75': np.array([0.0, -0.47562425924289314]),
'setosa&2&76': np.array([0.0, -0.48543689565931186]),
'setosa&2&77': np.array([0.0, -0.7348263896003956]),
'setosa&2&78': np.array([0.0, -0.7920887571493729]),
'setosa&2&79': np.array([0.0, -0.507614207038711]),
'setosa&2&80': np.array([0.0, -0.47562425924289314]),
'setosa&2&81': np.array([0.0, -0.47562425924289314]),
'setosa&2&82': np.array([0.0, -0.47562425924289314]),
'setosa&2&83': np.array([0.0, -0.47562425924289314]),
'setosa&2&84': np.array([0.0, -0.48543689565931186]),
'setosa&2&85': np.array([0.0, -0.48543689565931186]),
'setosa&2&86': np.array([0.0, -0.48543689565931186]),
'setosa&2&87': np.array([0.0, -0.7348263896003956]),
'setosa&2&88': np.array([0.0, -0.7348263896003956]),
'setosa&2&89': np.array([0.0, -0.7920887571493729]),
'setosa&2&90': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&91': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&92': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&93': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&94': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&95': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&96': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&97': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&98': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&99': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&100': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&101': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&102': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&103': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&104': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&105': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&106': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&107': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&108': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&109': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&110': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&111': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&112': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&113': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&114': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&115': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&116': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&117': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&118': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&119': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&120': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&121': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&122': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&123': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&124': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&125': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&126': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&127': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&128': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&129': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&130': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&131': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&132': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&133': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&134': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&135': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&136': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&137': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&138': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&139': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&140': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&141': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&142': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&143': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&144': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&145': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&146': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&147': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&148': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&149': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&150': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&151': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&152': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&153': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&154': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&155': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&156': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&157': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&158': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&159': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&160': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&161': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&162': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&163': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&164': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&165': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&166': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&167': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&168': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&169': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&170': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&171': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&172': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&173': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&174': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&175': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&176': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&177': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&178': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&179': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&180': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&181': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&182': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&183': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&184': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&185': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&186': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&187': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&188': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&189': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&190': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&191': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&192': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&193': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&194': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&195': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&196': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&197': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&198': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&199': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&200': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&201': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&202': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&203': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&204': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&205': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&206': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&207': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&208': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&209': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&210': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&211': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&212': np.array([-0.044492463210563125, -0.7096449459722027]),
'setosa&2&213': np.array([-0.29537842173874096, -0.6750352694420283]),
'setosa&2&214': np.array([-0.47415719445227245, -0.38726974144168774]),
'setosa&2&215': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&216': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&217': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&218': np.array([-0.3715769132100501, -0.12216227283618744]),
'setosa&2&219': np.array([-0.24630541996506924, -0.24630541996506994]),
'setosa&2&220': np.array([-0.24630541996506924, -0.24630541996506994]),
import numpy as np
import pandas as pd
import boto3
from io import BytesIO
import librosa
from botocore.exceptions import ClientError
def call_s3(s3_client, bucket_name, fname, folder='audio_train/'):
"""Call S3 instance to retrieve data from .wav file(or other format).
Assumes file is in folder name path"""
try:
path = folder + fname
except TypeError:
return None
try:
response = s3_client.get_object(Bucket=bucket_name, Key=path)
except ClientError as ex:
if ex.response['Error']['Code'] == 'NoSuchKey':
return dict()
data = BytesIO(response['Body'].read())
return data
def audio_vectorize(fname, data):
"""
Analyze audio data in order to extract features for model development.
Parameters:
fname: (str)
data: (_io.BytesIO)
Returns:
Feature dictionary
"""
try:
y, sr = librosa.load(data, mono=True, duration=10, offset = .5)
except RuntimeError:
return pd.Series()
chroma_stft = np.mean(librosa.feature.chroma_stft(y=y, sr=sr))
spec_cent = np.mean(librosa.feature.spectral_centroid(y=y, sr=sr))
spec_bw = np.mean(librosa.feature.spectral_bandwidth(y=y, sr=sr))
rolloff = np.mean(librosa.feature.spectral_rolloff(y=y, sr=sr))
zcr = np.mean(librosa.feature.zero_crossing_rate(y))
mfccs = librosa.feature.mfcc(y=y, sr=sr)
mfccs = np.mean(mfccs, axis=1)
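# Assumed completion of the truncated function: collect the computed
# statistics into a labelled pd.Series (the docstring promises a feature
# dictionary; the exact key names here are illustrative, not the author's).
features = {'fname': fname, 'chroma_stft': chroma_stft, 'spec_cent': spec_cent,
            'spec_bw': spec_bw, 'rolloff': rolloff, 'zcr': zcr}
for i, mfcc in enumerate(mfccs):
    features['mfcc{}'.format(i + 1)] = mfcc
return pd.Series(features)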
############################################################################################
### Functions to analyze and plot weight and activity distributions from simulation data ###
############################################################################################
### Copyright 2019-2021 <NAME>
### licensed under Apache-2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from utilityFunctions import *
import sys
import warnings
import os.path
import numpy as np
from pathlib import Path
from subprocess import call
# findOverallMinMax
# Determines the minimum and maximum values across all data files that are located somewhere in a given directory
# and that have the same readout time
# nppath: path to the directory to read the data from
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_readout: the time at which the weights shall be read out (as a string)
# h_0: the initial weight, and normalization factor for z
# return: two-dimensional array containing the minimum and maximum values for the four different data types
def findOverallMinMax(nppath, Nl_exc, time_for_readout, h_0):
sysmin, sysmax = -sys.float_info.max, sys.float_info.max # note: -max, not float_info.min (which is the smallest *positive* float)
(h_min, z_min, w_min, v_min) = (sysmax, sysmax, sysmax, sysmax) # initially, assign the largest possible value
(h_max, z_max, w_max, v_max) = (sysmin, sysmin, sysmin, sysmin) # initially, assign the smallest possible value
# recurseFind
# Function to recursively move through directories and look for data to find their minima/maxima
# path: the directory to iterate through
def recurseFindMinMax(path):
nonlocal h_min, z_min, w_min, v_min
nonlocal h_max, z_max, w_max, v_max
rawpaths = Path(path)
for x in rawpaths.iterdir():
if x.is_dir():
recurseFindMinMax(x) # if the found file is a directory, recurse into it
tmppath = str(x)
if ("_net_" + time_for_readout + ".txt") in tmppath: # file containing network simulation data found
# read data from file
try:
connections, h, z, v = readWeightMatrixData(tmppath, Nl_exc)
h = h[connections] # reduce h (leave out non-existent synapses)
z = h_0*z[connections] # reduce and normalize z
w = h + z # compute total synaptic weight
except ValueError:
raise
except OSError:
raise
# checkAndAdjust
# Compares two numbers and returns the larger/lower one, depending on the operator
# a: a floating point number
# b: a floating point number
# op [optional]: the operator to be used
# return: the larger/lower one of the two numbers
def checkAndAdjust(a, b, op=">"):
if b > a:
return b if op == ">" else a
else:
return a if op == ">" else b
# adjust maxima
h_max = checkAndAdjust(h_max, np.max(h), ">")
z_max = checkAndAdjust(z_max, np.max(z), ">")
w_max = checkAndAdjust(w_max, np.max(w), ">")
v_max = checkAndAdjust(v_max, np.max(v), ">")
# adjust minima
h_min = checkAndAdjust(h_min, np.min(h), "<")
z_min = checkAndAdjust(z_min, np.min(z), "<")
w_min = checkAndAdjust(w_min, np.min(w), "<")
v_min = checkAndAdjust(v_min, np.min(v), "<")
# iterate across files in the directory
recurseFindMinMax(nppath)
return np.array([[h_min, h_max], [z_min, z_max], [w_min, w_max], [v_min, v_max]])
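# Hypothetical usage (values illustrative, not from the original script):
# compute shared bins across all trials so distributions are comparable.
# minmax = findOverallMinMax("data/", 40, "28810.0", 0.420075)
# bins = [np.linspace(mn, mx, 101, endpoint=True) for (mn, mx) in minmax]
# plotDistributions("data/", "any", "", 40, "28810.0", np.arange(150), bins=bins)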
# plotDistributions
# Creates data and plot files of the weight and activity distribution at a given time
# nppath: path to the directory to read the data from
# timestamp: a string containing date and time (to access correct paths) OR equal to "any"
# add: additional descriptor
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_readout: the time at which the weights shall be read out (as a string)
# core: array of indices of the cell assembly (core) neurons
# h_0 [optional]: the initial weight, and normalization factor for z
# norm_all [optional]: specifies whether to normalize across all subpopulations (True) or across each subpop. individually (False)
# - the first is recommendable if samples of different subpopulations are compared against each other,
# the latter is recommendable if different samples of the same subpopulation are compared
# bins [optional]: list of four arrays, each containing the bins for one of the four quantities
def plotDistributions(nppath, timestamp, add, Nl_exc, time_for_readout, core, h_0=0.420075, norm_all=False, bins=None):
orgdir = os.getcwd() # store the current working directory
# "any" case: not looking for a specific timestamp, but for any data with a certain time_for_readout in the given directory
if timestamp == "any":
if bins is None:
warnings.warn("Warning: timestamp=\"any\": bins should be provided by the calling function to compare across trials.")
rawpaths = Path(nppath)
for x in rawpaths.iterdir():
tmppath = os.path.split(str(x))[1] # remove head from path
if ("_net_" + time_for_readout + ".txt") in tmppath:
timestamp = tmppath.split("_net_")[0]
plotDistributions(nppath, timestamp, add, Nl_exc, time_for_readout, core, h_0, norm_all, bins) # call this function again, now with specific timestamp
return
# read data from file [timestamp]_net_[time_for_readout].txt
os.chdir(nppath) # change to data directory
try:
connections, h, z, v = readWeightMatrixData(timestamp + "_net_" + time_for_readout + ".txt", Nl_exc)
z = h_0*z # normalize z
w = h + z # compute total synaptic weight
except ValueError:
raise
except OSError:
raise
# determine subpopulations
N_tot = Nl_exc**2 # total number of neurons
N_CA = len(core) # number of neurons in the cell assembly
N_control = N_tot - N_CA # number of neurons in the control subpopulation
all_neurons = np.arange(N_tot) # renamed from `all` to avoid shadowing the builtin
noncore = all_neurons[np.logical_not(np.in1d(all_neurons, core))] # array of indices of the neurons not in the cell assembly (core)
block_CA_within = np.ones((N_CA, N_CA), dtype=bool) # array of ones for the synapses within the cell assembly
block_CA_outgoing = np.ones((N_CA, N_control), dtype=bool) # array of ones for the synapses outgoing from the cell assembly
block_CA_incoming = np.ones((N_control, N_CA), dtype=bool) # array of ones for the synapses incoming to the cell assembly
block_control = np.ones((N_control, N_control), dtype=bool) # array of ones for the synapses within the control subpopulation
mask_CA_within = np.append(np.append(block_CA_within, np.logical_not(block_CA_outgoing), axis=1), \
np.logical_not(np.append(block_CA_incoming, block_control, axis=1)),
axis=0) # binary mask defining the synapses within the cell assembly
mask_CA_outgoing = np.append(np.append(np.logical_not(block_CA_within), block_CA_outgoing, axis=1), \
np.logical_not(np.append(block_CA_incoming, block_control, axis=1)),
axis=0) # binary mask defining the synapses outgoing from the cell assembly
mask_CA_incoming = np.append(np.logical_not(np.append(block_CA_within, block_CA_outgoing, axis=1)), \
np.append(block_CA_incoming, np.logical_not(block_control), axis=1),
axis=0) # binary mask defining the synapses incoming to the cell assembly
mask_control = np.append(np.logical_not(np.append(block_CA_within, block_CA_outgoing, axis=1)), \
np.append(np.logical_not(block_CA_incoming), block_control, axis=1),
axis=0) # binary mask defining the synapses within the control subpopulation
# early-phase weights
'''h_CA_within = h[mask_CA_within]
h_CA_outgoing = h[mask_CA_outgoing]
h_CA_incoming = h[mask_CA_incoming]
h_control = h[mask_control]'''
h_CA_within = h[np.logical_and(connections, mask_CA_within)]
h_CA_outgoing = h[np.logical_and(connections, mask_CA_outgoing)]
h_CA_incoming = h[np.logical_and(connections, mask_CA_incoming)]
h_control = h[np.logical_and(connections, mask_control)]
# late-phase weights
'''z_CA_within = z[mask_CA_within]
z_CA_outgoing = z[mask_CA_outgoing]
z_CA_incoming = z[mask_CA_incoming]
z_control = z[mask_control]'''
z_CA_within = z[np.logical_and(connections, mask_CA_within)]
z_CA_outgoing = z[np.logical_and(connections, mask_CA_outgoing)]
z_CA_incoming = z[np.logical_and(connections, mask_CA_incoming)]
z_control = z[np.logical_and(connections, mask_control)]
# total synaptic weights
w_CA_within = h_CA_within + z_CA_within
w_CA_outgoing = h_CA_outgoing + z_CA_outgoing
w_CA_incoming = h_CA_incoming + z_CA_incoming
w_control = h_control + z_control
# firing rates
v_CA = v.flatten()[np.in1d(all_neurons, core)]
v_control = v.flatten()[np.logical_not(np.in1d(all_neurons, core))]
# discretization of the distribution
if bins is None:
binh = np.linspace(np.min(h), np.max(h), 101, endpoint=True) # create range of bins for marginalProbDist(h...)
binz = np.linspace(np.min(z), np.max(z), 101, endpoint=True) # create range of bins for marginalProbDist(z...)
binw = np.linspace(np.min(w), np.max(w), 101, endpoint=True) # create range of bins for marginalProbDist(w...)
binv = np.linspace(np.min(v), np.max(v), 101, endpoint=True) # create range of bins for marginalProbDist(v...)
else:
[binh, binz, binw, binv] = bins # use pre-defined bins
hstep = binh[1]-binh[0]
zstep = binz[1]-binz[0]
wstep = binw[1]-binw[0]
vstep = binv[1]-binv[0]
valh = np.delete(binh, -1) + hstep/2 # use mean values instead of lower bounds of the bins as values
valz = np.delete(binz, -1) + zstep/2 # use mean values instead of lower bounds of the bins as values
valw = np.delete(binw, -1) + wstep/2 # use mean values instead of lower bounds of the bins as values
valv = np.delete(binv, -1) + vstep/2 # use mean values instead of lower bounds of the bins as values
# normalization of the distribution
if norm_all:
norm_value_w = np.sum(connections) # normalization factor for weights (number of all connections)
norm_value_v = N_CA + N_control # normalization factor for activities (number of all neurons)
else:
norm_value_w = None # use default (normalization across each subpopulation individually)
norm_value_v = None # use default (normalization across CA and control individually)
buf, ph_CA_within = marginalProbDist(h_CA_within, binning = True, bin_edges = binh, norm = norm_value_w)
buf, ph_CA_outgoing = marginalProbDist(h_CA_outgoing, binning = True, bin_edges = binh, norm = norm_value_w)
buf, ph_CA_incoming = marginalProbDist(h_CA_incoming, binning = True, bin_edges = binh, norm = norm_value_w)
buf, ph_control = marginalProbDist(h_control, binning = True, bin_edges = binh, norm = norm_value_w)
buf, pz_CA_within = marginalProbDist(z_CA_within, binning = True, bin_edges = binz, norm = norm_value_w)
buf, pz_CA_outgoing = marginalProbDist(z_CA_outgoing, binning = True, bin_edges = binz, norm = norm_value_w)
buf, pz_CA_incoming = marginalProbDist(z_CA_incoming, binning = True, bin_edges = binz, norm = norm_value_w)
buf, pz_control = marginalProbDist(z_control, binning = True, bin_edges = binz, norm = norm_value_w)
buf, pw_CA_within = marginalProbDist(w_CA_within, binning = True, bin_edges = binw, norm = norm_value_w)
buf, pw_CA_outgoing = marginalProbDist(w_CA_outgoing, binning = True, bin_edges = binw, norm = norm_value_w)
buf, pw_CA_incoming = marginalProbDist(w_CA_incoming, binning = True, bin_edges = binw, norm = norm_value_w)
buf, pw_control = marginalProbDist(w_control, binning = True, bin_edges = binw, norm = norm_value_w)
buf, pv_CA = marginalProbDist(v_CA, binning = True, bin_edges = binv, norm = norm_value_v)
buf, pv_control = marginalProbDist(v_control, binning = True, bin_edges = binv, norm = norm_value_v)
# write early-phase weight distribution to file
f = open(timestamp + "_eweight_dist_" + time_for_readout + add + ".txt", "w")
for i in range(len(valh)):
f.write(str(valh[i]) + "\t\t" + str(ph_CA_within[i]) + "\t\t" + str(ph_CA_outgoing[i]) + "\t\t" + \
str(ph_CA_incoming[i]) + "\t\t" + str(ph_control[i]) + "\n")
f.close()
# write late-phase weight distribution to file
f = open(timestamp + "_lweight_dist_" + time_for_readout + add + ".txt", "w")
for i in range(len(valz)):
f.write(str(valz[i]) + "\t\t" + str(pz_CA_within[i]) + "\t\t" + str(pz_CA_outgoing[i]) + "\t\t" + \
str(pz_CA_incoming[i]) + "\t\t" + str(pz_control[i]) + "\n")
f.close()
# write distribution of total synaptic weights to file
f = open(timestamp + "_totweight_dist_" + time_for_readout + add + ".txt", "w")
for i in range(len(valw)):
f.write(str(valw[i]) + "\t\t" + str(pw_CA_within[i]) + "\t\t" + str(pw_CA_outgoing[i]) + "\t\t" + \
str(pw_CA_incoming[i]) + "\t\t" + str(pw_control[i]) + "\n")
f.close()
# write activity distribution to file
f = open(timestamp + "_act_dist_" + time_for_readout + add + ".txt", "w")
for i in range(len(valv)):
f.write(str(valv[i]) + "\t\t" + str(pv_CA[i]) + "\t\t" + str(pv_control[i]) + "\n")
f.close()
# write gnuplot script
f = open(timestamp + "_plot_dist.gpl", "w")
f.write("### DO NOT EDIT THIS FILE! IT WILL BE OVERWRITTEN. ###\n\n" + \
"#set terminal png size 1024,640 enhanced\nset terminal pdf enhanced\n\n" + \
"#set style fill transparent solid 0.8 noborder # for 'boxes' style\n" + \
"#set style fill transparent pattern 4 bo # for 'boxes' style\n" + \
"set log y\nset format y \"%.0e\"\nset yrange [3e-06:1]\nset key outside\n\n" + \
"h_0 = " + str(h_0) + "\n" +\
"epsilon = " + str(epsilon) + "\n\n")
# plotting of early-phase weight distribution
f.write("set output \"" + timestamp + "_eweight_dist_" + time_for_readout + add + ".pdf\"\n")
f.write("set xrange [" + str(binh[0]-10*hstep) + "/h_0:" + str(binh[-1]+10*hstep) + "/h_0]\n")
f.write("set xlabel \"Early-phase weight / h_0\"\nset ylabel \"Relative frequency\"\n")
f.write("plot \"" + timestamp + "_eweight_dist_" + time_for_readout + add + ".txt\" using ($1/h_0):($2 > 0 ? $2 : epsilon) t \"CA\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($3 > 0 ? $3 : epsilon) t \"outgoing\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($4 > 0 ? $4 : epsilon) t \"incoming\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($5 > 0 ? $5 : epsilon) t \"control\" with histeps\n")
# plotting of late-phase weight distribution
f.write("\nset output \"" + timestamp + "_lweight_dist_" + time_for_readout + add + ".pdf\"\n")
f.write("set xrange [" + str(binz[0]-10*zstep) + "/h_0:" + str(binz[-1]+10*zstep) + "/h_0]\n")
f.write("set xlabel \"Late-phase weight / h_0\"\nset ylabel \"Relative frequency\"\nset format y \"%.0e\"\n")
f.write("plot \"" + timestamp + "_lweight_dist_" + time_for_readout + add + ".txt\" using ($1/h_0):($2 > 0 ? $2 : epsilon) t \"CA\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($3 > 0 ? $3 : epsilon) t \"outgoing\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($4 > 0 ? $4 : epsilon) t \"incoming\" with histeps, \\\n" + \
" \"\" using ($1/h_0):($5 > 0 ? $5 : epsilon) t \"control\" with histeps\n")
# plotting of total weight distribution
f.write("\nset output \"" + timestamp + "_totweight_dist_" + time_for_readout + add + ".pdf\"\n")
f.write("set xrange [" + str(binw[0]-10*wstep) + "/h_0*100:" + str(binw[-1]+10*wstep) + "/h_0*100]\n")
f.write("set xlabel \"Total synaptic weight (%)\"\nset ylabel \"Relative frequency\"\nset format y \"%.0e\"\n")
f.write("plot \"" + timestamp + "_totweight_dist_" + time_for_readout + add + ".txt\" using ($1/h_0*100):($2 > 0 ? $2 : epsilon) t \"CA\" with histeps, \\\n" + \
" \"\" using ($1/h_0*100):($3 > 0 ? $3 : epsilon) t \"outgoing\" with histeps, \\\n" + \
" \"\" using ($1/h_0*100):($4 > 0 ? $4 : epsilon) t \"incoming\" with histeps, \\\n" + \
" \"\" using ($1/h_0*100):($5 > 0 ? $5 : epsilon) t \"control\" with histeps\n")
# plotting of activity distribution
f.write("\nset output \"" + timestamp + "_act_dist_" + time_for_readout + add + ".pdf\"\n")
f.write("set xrange [" + str(binv[0]-10*vstep) + ":" + str(binv[-1]+10*vstep) + "]\n")
f.write("set xlabel \"Neuronal firing rate (Hz)\"\nset ylabel \"Relative frequency\"\nset format y \"%.0e\"\n")
f.write("plot \"" + timestamp + "_act_dist_" + time_for_readout + add + ".txt\" using 1:($2 > 0 ? $2 : epsilon) t \"CA\" with histeps, \\\n" + \
" \"\" using 1:($3 > 0 ? $3 : epsilon) t \"control\" with histeps\n\n")
f.close()
call(["gnuplot", timestamp + "_plot_dist.gpl"])
os.chdir(orgdir) # change back to original directory
# plotWeightDistributions3CAs
# Creates data and plot files of the weight distribution of a network with 2 or 3, possibly overlapping, assemblies at a given time
# nppath: path to the network_plots directory to read the data from
# timestamp: a string containing date and time (to access correct paths)
# add: additional descriptor
# Nl_exc: the number of excitatory neurons in one line of a quadratic grid
# time_for_readout: the time at which the weights shall be read out
# coreA: array of indices of the first cell assembly (core) neurons
# coreB [optional]: array of indices of the second cell assembly (core) neurons
# coreC [optional]: array of indices of the third cell assembly (core) neurons
# h_0 [optional]: the initial weight, and normalization factor for z
# bins [optional]: list of three arrays, each containing the bins for one of the four quantities
def plotWeightDistributions3CAs(nppath, timestamp, add, Nl_exc, time_for_readout, coreA, coreB = None, coreC = None, h_0 = 0.420075, bins = None):
orgdir = os.getcwd() # store the current working directory
# "any" case: not looking for a specific timestamp, but for any data with a certain time_for_readout in the given directory
if timestamp == "any":
rawpaths = Path(nppath)
for x in rawpaths.iterdir():
tmppath = os.path.split(str(x))[1] # remove head from path
if ("_net_" + time_for_readout + ".txt") in tmppath:
timestamp = tmppath.split("_net_")[0]
plotWeightDistributions3CAs(nppath, timestamp, add, Nl_exc, time_for_readout, coreA, coreB, coreC, h_0, bins) # call this function again, now with specific timestamp; bins should be provided by calling function
return
# read data from file [timestamp]_net_[time_for_readout].txt
os.chdir(nppath) # change to data directory
try:
connections, h, z, v = readWeightMatrixData(timestamp + "_net_" + time_for_readout + ".txt", Nl_exc)
z = h_0*z # normalize z
w = h + z # compute total synaptic weight
except ValueError:
raise
except OSError:
raise
# determine synapses within the cell assemblies
N_tot = Nl_exc**2 # total number of neurons
mask_coreA = np.zeros((N_tot, N_tot), dtype=bool)
for syn_pre in coreA:
for syn_post in coreA:
if connections[syn_pre,syn_post]: # NEW, TEST
mask_coreA[syn_pre,syn_post] = True
mask_coreB = np.zeros((N_tot, N_tot), dtype=bool)
if coreB is not None:
for syn_pre in coreB:
for syn_post in coreB:
if connections[syn_pre,syn_post]: # NEW, TEST
mask_coreB[syn_pre,syn_post] = True
mask_coreC = np.zeros((N_tot, N_tot), dtype=bool)
if coreC is not None:
for syn_pre in coreC:
for syn_post in coreC:
if connections[syn_pre,syn_post]: # NEW, TEST
mask_coreC[syn_pre,syn_post] = True
# find control synapses (all synapses that are not within a cell assembly)
mask_control = np.logical_and(connections, np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC))))
# synapses outgoing from A // TODO
#block_outgoing_A = np.ones((len(coreA), N_tot-len(coreA)), dtype=bool) # array of ones for the synapses outgoing from assembly A
#mask_A_to_B = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_A_to_C = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_A_to_ctrl = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
# synapses outgoing from B // TODO
#mask_B_to_A = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_B_to_C = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_B_to_ctrl = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
# synapses outgoing from C // TODO
#mask_C_to_A = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_C_to_B = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
#mask_C_to_ctrl = np.logical_not(np.logical_or(mask_coreA, np.logical_or(mask_coreB, mask_coreC)))
# synapses incoming... // TODO
# find exclusive intersections
mask_I_AB = np.logical_and( np.logical_and(mask_coreA, mask_coreB), np.logical_not(mask_coreC) )
mask_I_AC = np.logical_and( np.logical_and(mask_coreA, mask_coreC), np.logical_not(mask_coreB) )
mask_I_BC = np.logical_and( np.logical_and(mask_coreB, mask_coreC), np.logical_not(mask_coreA) )
mask_I_ABC = np.logical_and( mask_coreA, np.logical_and(mask_coreB, mask_coreC) )
# remove intersections from exclusive cores
mask_coreA = np.logical_and(mask_coreA, \
np.logical_and(np.logical_not(mask_I_AB), \
np.logical_and(np.logical_not(mask_I_AC), np.logical_not(mask_I_ABC))))
mask_coreB = np.logical_and(mask_coreB, \
np.logical_and(np.logical_not(mask_I_AB), \
np.logical_and(np.logical_not(mask_I_BC), np.logical_not(mask_I_ABC))))
mask_coreC = np.logical_and(mask_coreC, \
np.logical_and(np.logical_not(mask_I_AC), \
np.logical_and(np.logical_not(mask_I_BC), np.logical_not(mask_I_ABC))))
# tests (each should yield true)
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_coreB)))
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_coreC)))
#print("Test:", not np.any(np.logical_and(mask_coreB, mask_coreC)))
#print("Test:", not np.any(np.logical_and(mask_I_AB, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_I_AB, mask_I_AC)))
#print("Test:", not np.any(np.logical_and(mask_I_AB, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_I_AC, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_I_AC, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_I_BC, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_coreA)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_coreB)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_coreC)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_I_AB)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_I_AC)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_control, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_I_AB)))
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_I_AC)))
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_coreA, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_coreB, mask_I_AB)))
#print("Test:", not np.any(np.logical_and(mask_coreB, mask_I_AC)))
#print("Test:", not np.any(np.logical_and(mask_coreB, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_coreB, mask_I_ABC)))
#print("Test:", not np.any(np.logical_and(mask_coreC, mask_I_AB)))
#print("Test:", not np.any(np.logical_and(mask_coreC, mask_I_AC)))
#print("Test:", not np.any(np.logical_and(mask_coreC, mask_I_BC)))
#print("Test:", not np.any(np.logical_and(mask_coreC, mask_I_ABC)))
# early-phase weights
h_coreA = h[mask_coreA]
h_coreB = h[mask_coreB]
h_coreC = h[mask_coreC]
h_I_AB = h[mask_I_AB]
h_I_AC = h[mask_I_AC]
h_I_BC = h[mask_I_BC]
h_I_ABC = h[mask_I_ABC]
h_control = h[mask_control]
# late-phase weights
z_coreA = z[mask_coreA]
z_coreB = z[mask_coreB]
z_coreC = z[mask_coreC]
z_I_AB = z[mask_I_AB]
z_I_AC = z[mask_I_AC]
z_I_BC = z[mask_I_BC]
z_I_ABC = z[mask_I_ABC]
z_control = z[mask_control]
# total synaptic weights
w_coreA = h_coreA + z_coreA
w_coreB = h_coreB + z_coreB
w_coreC = h_coreC + z_coreC
w_I_AB = h_I_AB + z_I_AB
w_I_AC = h_I_AC + z_I_AC
w_I_BC = h_I_BC + z_I_BC
w_I_ABC = h_I_ABC + z_I_ABC
w_control = h_control + z_control
# mean and standard deviation of the subpopulations (to compare to values from adjacencyFunctionsAttractors.py)
#mean_z_coreA = np.mean(z_coreA)
#mean_z_coreB = np.mean(z_coreB)
#mean_z_coreC = np.mean(z_coreC)
#mean_z_I_AB = np.mean(z_I_AB)
#mean_z_I_AC = np.mean(z_I_AC)
#mean_z_I_BC = np.mean(z_I_BC)
#mean_z_I_ABC = np.mean(z_I_ABC)
#mean_z_control = np.mean(z_control)
#sd_z_coreA = np.std(z_coreA)
#sd_z_coreB = np.std(z_coreB)
#sd_z_coreC = np.std(z_coreC)
#sd_z_I_AB = np.std(z_I_AB)
#sd_z_I_AC = np.std(z_I_AC)
#sd_z_I_BC = np.std(z_I_BC)
#sd_z_I_ABC = np.std(z_I_ABC)
#sd_z_control = np.std(z_control)
# discretization of the distribution
if bins is None:
binh = np.linspace(np.min(h[connections]), np.max(h), 101, endpoint=True) # create range of bins for marginalProbDist(h...)
binz = np.linspace(np.min(z[connections]), np.max(z), 101, endpoint=True) # create range of bins for marginalProbDist(z...)
from __future__ import print_function, division
#
# File:
# nio01.py
#
# Synopsis:
# Creates a NetCDF with scalar and array versions of all permissible types and then reads it
# printing typecodes and other info
#
# Category:
# Processing.
#
# Author:
# <NAME> (modelled after an example of <NAME>).
#
import numpy
import Nio
import time
import os
import pwd
#
# Function to retrieve the user's name.
#
def getUserName():
pwd_entry = pwd.getpwuid(os.getuid())
raw_name = pwd_entry[4]
name = raw_name.split(",")[0].strip()
if name == '':
name = pwd_entry[0]
return name
#
# Creating a NetCDF file named "test-types.nc". If there is already
# a file with that name, delete it first.
#
if (os.path.exists("test-types.nc")):
os.system("/bin/rm -f test-types.nc")
#
# Specify a global history attribute and open a NetCDF file
# for writing.
#
hatt = "Created " + time.ctime(time.time()) + " by " + getUserName()
file = Nio.open_file("test-types.nc", "w", None, hatt)
#
# Create some global attributes.
#
file.title = "Nio test NetCDF file"
file.series = [1, 2, 3, 4, 5, 6]
file.version = 45
#
# Create some dimensions.
#
file.create_dimension("array", 3)
#file.create_dimension("strlen", 6)
file.create_dimension("strlen", 10)
file.create_dimension("dim1", 2)
file.create_dimension("dim2", 1)
file.create_dimension("dim3",4)
#
# Create some variables.
#
print("creating and assigning scalar double")
v1 = file.create_variable("v1", 'd', ())
v1.assign_value(42.0)
print("creating and assigning scalar float")
v2 = file.create_variable("v2", 'f', ())
v2.assign_value(52.0)
print("creating and assigning scalar integer")
v3 = file.create_variable("v3", 'i', ())
v3.assign_value(42)
print("creating and assigning scalar long")
v4 = file.create_variable("v4", 'l', ())
v4.assign_value(42)
print("creating and assigning scalar short")
v5 = file.create_variable("v5", 'h', ())
v5.assign_value(42)
print("creating and assigning scalar byte")
v6 = file.create_variable("v6", 'b', ())
v6.assign_value(42)
print("creating and assigning scalar char")
v7 = file.create_variable("v7", 'S1', ())
v7.assign_value('x')
print("creating and assigning array double")
v11 = file.create_variable("v11", 'd', ('array',))
v11.assign_value([42.0,43.0,44.0])
print("creating and assigning array float")
v22 = file.create_variable("v22", 'f', ('array',))
v22.assign_value([52.0,53.0,54.0])
print("creating and assigning array integer")
v33 = file.create_variable("v33", 'i', ('array',))
v33.assign_value([42,43,44])
print("creating and assigning array long")
v44 = file.create_variable("v44", 'l', ('array',))
a = numpy.array([42,43,44],'l')
v44.assign_value(a)
print("creating and assigning array short")
v55 = file.create_variable("v55", 'h', ('array',))
v55.assign_value([42,43,44])
print("creating and assigning array byte")
v66 = file.create_variable("v66", 'b', ('array',))
v66.assign_value([42,43,44])
print("creating and assigning array char")
v77 = file.create_variable("v77", 'S1', ('array','strlen'))
v77.assign_value(['bcdef','uvwxyz','ijklmnopqr'])
#v77.assign_value(['ab','uv','ij'])
#v77.assign_value(['a','u','i'])
#v77[1] = v77[1,::-1]
print(v77[:])
v_single = file.create_variable("v_single",'f',("dim1","dim2","dim3"))
print(v_single)
# type mismatch (double created then assigned to float variable)
a = numpy.array([1.0,2,3,4,5,6,7,8])
a.shape = (2,1,4)
print(a)
try:
v_single.assign_value(a)
print(v_single[:])
except:
print("type mismatch in assignment")
# now do it right
a = numpy.array([1.0,2,3,4,5,6,7,8],'f')
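# Assumed continuation (the source is truncated here), mirroring the
# mismatch block above but now with the matching 'f' dtype so the
# assignment succeeds:
a.shape = (2,1,4)
v_single.assign_value(a)
print(v_single[:])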
"""
Utilities for running inference
@author: <NAME>
"""
import os
from os import path as op
import multiprocessing
from functools import partial
import subprocess
import warnings
from itertools import zip_longest, repeat
import logging
import ogr
import numpy as np
from tqdm import tqdm
import skimage.io as sio
from skimage.transform import downscale_local_mean
from skimage.measure import regionprops, find_contours
from rasterio.windows import Window
def iter_grouper(iterable, n, fillvalue=None):
"Itertool recipe to collect data into fixed-length chunks or blocks"
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
args = [iter(iterable)] * n
return zip_longest(*args, fillvalue=fillvalue)
def calculate_region_grad(image, masked_pix):
"""Calculate the vertical and horizontal gradient using numpy's gradient.
Parameters
----------
image: array-like
Image pixels.
masked_pix: array-like
Boolean mask for pixels to include when returning the mean gradient.
For example, pass the binary crater mask to calculate only the gradient
for pixels within the crater.
Returns
-------
mean_h: float
Mean horizontal gradient of included pixels. Positive is to the right.
mean_v: float
Mean vertical gradient of included pixels. Positive is downward.
"""
# TODO: could improve function to take a list of good_inds and avoid
# recomputing gradient repeatedly
if masked_pix.dtype != np.bool_: # np.bool was removed in recent NumPy; compare against np.bool_
raise ValueError('`masked_inds` must be of type bool')
if masked_pix.shape[:2] != image.shape[:2]:
raise ValueError('Height and width of `masked_inds` must match `image`')
# np.ma treats True mask entries as *excluded*, so invert `masked_pix`
# (which marks the pixels to include) before taking the mean
mean_h = np.ma.masked_array(np.gradient(image, axis=0), mask=~masked_pix).mean()
mean_v = np.ma.masked_array(np.gradient(image, axis=1), mask=~masked_pix).mean()
return mean_h, mean_v
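# Hypothetical usage: mean gradients over the pixels of a binary region mask
# grad_h, grad_v = calculate_region_grad(image, crater_mask.astype(bool))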
"""
csalt_models.py
Usage:
- import modules
Outputs:
- various
"""
import os, sys
import numpy as np
from astropy.io import fits
from vis_sample import vis_sample
from scipy.ndimage import convolve1d
from scipy.interpolate import interp1d
from vis_sample.classes import *
from simple_disk import simple_disk
import const as const
import matplotlib.pyplot as plt
def cube_parser(pars, FOV=8, Npix=128, dist=150, r_min=0, r_max=500, r0=10,
RA=240, DEC=-40, restfreq=230.538e9, Vsys=0, vel=None,
datafile=None, outfile=None):
### Generate a model disk
disk = simple_disk(pars[0], pars[1], x0=0, y0=0, dist=dist, mstar=pars[2],
r_min=r_min, r_max=r_max, r0=r0, r_l=pars[3],
z0=pars[4], zpsi=pars[5], zphi=np.inf,
Tb0=pars[6], Tbq=pars[7], Tbeps=np.inf, Tbmax=1000,
Tbmax_b=pars[8], tau0=1000, tauq=0, taueta=np.inf,
taumax=5000, dV0=pars[9], dVq=0.5*pars[7], dVmax=1000,
FOV=FOV, Npix=Npix)
### Set velocities for cube (either use the channels in an already-existing
### cube from a .FITS file, or use the provided values)
if datafile is not None:
hd = fits.open(datafile)[0].header
f0, ix, nf, df = hd['CRVAL4'], hd['CRPIX4'], hd['NAXIS4'], hd['CDELT4']
freqs = f0 + (np.arange(nf) - ix + 1) * df
vel = const.c_ * (1 - freqs / restfreq)
else:
freqs = restfreq * (1 - vel / const.c_)
# adjust for systemic velocity
vlsr = vel - Vsys
### Generate the spectral line cube
cube = disk.get_cube(vlsr)
# convert from brightness temperatures to Jy / pixel
pixel_area = (disk.cell_sky * np.pi / (180 * 3600))**2
for i in range(len(freqs)):
cube[i,:,:] *= 1e26 * pixel_area * 2 * freqs[i]**2 * \
const.k_ / const.c_**2
### Prepare the output: either into the specified .FITS file or into a
### vis_sample "SKY OBJECT".
if outfile is not None:
hdu = fits.PrimaryHDU(cube[:,::-1,:])
header = hdu.header
# basic header inputs
header['EPOCH'] = 2000.
header['EQUINOX'] = 2000.
header['LATPOLE'] = -1.436915713634E+01
header['LONPOLE'] = 180.
# spatial coordinates
header['CTYPE1'] = 'RA---SIN'
header['CUNIT1'] = 'DEG'
header['CDELT1'] = -disk.cell_sky / 3600.
header['CRPIX1'] = 0.5 * disk.Npix + 0.5
header['CRVAL1'] = RA
header['CTYPE2'] = 'DEC--SIN'
header['CUNIT2'] = 'DEG'
header['CDELT2'] = disk.cell_sky / 3600.
header['CRPIX2'] = 0.5 * disk.Npix + 0.5
header['CRVAL2'] = DEC
# frequency coordinates
header['CTYPE3'] = 'FREQ'
header['CUNIT3'] = 'Hz'
header['CRPIX3'] = 1.
header['CDELT3'] = freqs[1]-freqs[0]
header['CRVAL3'] = freqs[0]
header['SPECSYS'] = 'LSRK'
header['VELREF'] = 257
# intensity units
header['BSCALE'] = 1.
header['BZERO'] = 0.
header['BUNIT'] = 'JY/PIXEL'
header['BTYPE'] = 'Intensity'
# output FITS
hdu.writeto(outfile, overwrite=True)
return cube[:,::-1,:]
# otherwise, return a vis_sample SkyObject
else:
# adjust cube formatting
mod_data = np.rollaxis(cube[:,::-1,:], 0, 3)
# spatial coordinates
npix_ra = disk.Npix
mid_pix_ra = 0.5 * disk.Npix + 0.5
delt_ra = -disk.cell_sky / 3600
if (delt_ra < 0):
mod_data = np.fliplr(mod_data)
mod_ra = (np.arange(npix_ra) - (mid_pix_ra-0.5))*np.abs(delt_ra)*3600
npix_dec = disk.Npix
mid_pix_dec = 0.5 * disk.Npix + 0.5
delt_dec = disk.cell_sky / 3600
if (delt_dec < 0):
mod_data = np.flipud(mod_data)
mod_dec = (np.arange(npix_dec)-(mid_pix_dec-0.5))*np.abs(delt_dec)*3600
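# Assumed continuation of the truncated branch: wrap the cube into a
# vis_sample SkyImage ("SKY OBJECT"), as promised in the comment above.
# The exact constructor arguments are an assumption about vis_sample.classes.
return SkyImage(mod_data, mod_ra, mod_dec, freqs, None)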
import itertools
import numpy as np
from ..normalizations import linear_normalization
from ..distance_metrics import euclidean
from .mcda_method import MCDA_method
class CODAS(MCDA_method):
def __init__(self, normalization_method = linear_normalization, distance_metric = euclidean, tau = 0.02):
"""
Create the CODAS method object and select the normalization method `normalization_method`
(default for CODAS: `linear_normalization`), the distance metric `distance_metric`
(chosen from `distance_metrics`, `euclidean` by default), and the threshold parameter `tau`,
which is set to 0.02 by default.
Parameters
-----------
normalization_method : function
method for decision matrix normalization chosen from `normalizations`
distance_metric : functions
method for calculating the distance between two vectors
tau : float
the threshold parameter, between 0.01 and 0.05. If the difference between the
Euclidean (or other selected) distances of two alternatives is less than tau,
the two alternatives are also compared by the Taxicab distance
"""
self.normalization_method = normalization_method
self.distance_metric = distance_metric
self.tau = tau
def __call__(self, matrix, weights, types):
"""
Score alternatives provided in decision matrix `matrix` with m alternatives and n criteria
using criteria `weights` and criteria `types`.
Parameters
-----------
matrix : ndarray
Decision matrix with m alternatives in rows and n criteria in columns.
weights: ndarray
Vector with criteria weights. Sum of weights must be equal to 1.
types: ndarray
Vector with criteria types. Profit criteria are represented by 1 and cost by -1.
Returns
--------
ndarray
Vector with preference values of each alternative. The best alternative has the highest preference value.
Examples
----------
>>> codas = CODAS(normalization_method = linear_normalization, distance_metric = euclidean, tau = 0.02)
>>> pref = codas(matrix, weights, types)
>>> rank = rank_preferences(pref, reverse = True)
"""
CODAS._verify_input_data(matrix, weights, types)
return self._codas(matrix, weights, types, self.normalization_method, self.distance_metric)
# tau (threshold parameter): recommended range is 0.01 - 0.05; _psi is the indicator used below
def _psi(self, x):
return 1 if np.abs(x) >= self.tau else 0
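# e.g. with the default tau = 0.02: self._psi(0.05) -> 1, self._psi(0.01) -> 0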
def _codas(self, matrix, weights, types, normalization_method, distance_metric):
# Normalize matrix using linear normalization
norm_matrix = normalization_method(matrix, types)
# Multiply all rows of normalized matrix by weights
weighted_matrix = norm_matrix * weights
m, n = weighted_matrix.shape
# Calculate NIS vector (anti-ideal solution)
nis = np.min(weighted_matrix, axis=0)
# Calculate chosen distance (for example Euclidean) and Taxicab distance from anti-ideal solution
E = np.array([distance_metric(x, nis) for x in weighted_matrix])
# Calculate Taxicab (Manhattan) distance
T = np.sum(np.abs(weighted_matrix - nis), axis=1)
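# The source file is truncated here. A minimal sketch of the remaining CODAS
# steps, assuming the standard formulation (relative assessment matrix H built
# from the Euclidean and Taxicab distances, then row sums as preferences):
# H = np.zeros((m, m))
# for i in range(m):
#     for j in range(m):
#         H[i, j] = (E[i] - E[j]) + (self._psi(E[i] - E[j]) * (T[i] - T[j]))
# return np.sum(H, axis=1)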
from sklearn.datasets import fetch_20newsgroups
import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.model_selection import train_test_split
from datetime import datetime
import time
import os
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
class newsgroup_data:
# Prepare the data
# select the top 20000 features from the vector of tokens
NGRAM_RANGE = (1, 2)
TOP_K = 20000
TOKEN_MODE = 'word'
MIN_DOC_FREQ = 2
@staticmethod
def getData():
def ngram_vectorize(texts, labels):
kwargs = {
'ngram_range' : newsgroup_data.NGRAM_RANGE,
'dtype' : 'int32',
'strip_accents' : 'unicode',
'decode_error' : 'replace',
'analyzer' : newsgroup_data.TOKEN_MODE,
'min_df' : newsgroup_data.MIN_DOC_FREQ,
}
tfidf_vectorizer = TfidfVectorizer(**kwargs)
transformed_texts = tfidf_vectorizer.fit_transform(texts)
# Select best k features, with feature importance measured by f_classif
selector = SelectKBest(f_classif, k=min(newsgroup_data.TOP_K, transformed_texts.shape[1]))
selector.fit(transformed_texts, labels)
transformed_texts = selector.transform(transformed_texts).astype('float32')
return transformed_texts
# Get the training and testing datasets
training_set = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
testing_set = fetch_20newsgroups(subset='test', remove=('headers','footers','quotes'))
training_data = training_set.data
training_target = list(training_set.target)
testing_data = testing_set.data
testing_target = list(testing_set.target)
# Temporarily combine the two datasets (albeit in a way that we can separate them after)
training_length = len(training_data)
training_data.extend(testing_data)
training_target.extend(testing_target)
all_data = training_data
all_target = training_target
# Vectorize the full dataset
vectorized_all_data = ngram_vectorize(all_data,all_target)
print("\nVectorized all data shape: ", vectorized_all_data.shape )
# Reseparate the datasets
training_data = vectorized_all_data[:training_length]
training_target = all_target[:training_length]
testing_data = vectorized_all_data[training_length:]
testing_target = all_target[training_length:]
print("\nVectorized training data shape: ",training_data.shape)
print("\nVectorized training data shape: ",testing_data.shape)
#Formalize the datasets
X_train = training_data.toarray()
y_train = np.array(training_target)
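# The source file is truncated here. Presumably the test split is finalized the
# same way; the return structure below is an assumption, not the original code:
X_test = testing_data.toarray()
y_test = np.array(testing_target)
return (X_train, y_train), (X_test, y_test)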
import numpy as np
import librosa
from scipy import interpolate
import pywt
from matplotlib.image import imsave
from scipy.signal import butter, lfilter, freqz
from matplotlib import pyplot as plt
from imageProcessingUtil import ImageProcessing
import SimpleITK as sitk
class AudioProcessing(object):
def __init__(self):
pass
@staticmethod
def read(absFilePath,sr=None):
"""
Reading audio
:param absFilePath: Absolute File Path
:param sr: Sampling rate of audio to be read (If None, original sampling rate is considered)
:return: audio samples and sampling rate
"""
data,fs = librosa.load(absFilePath,sr=sr)
return data,fs
@staticmethod
def writeAsWav(data,sr,filename):
"""
Write .wav files
:param data: audio data
:param sr: sampling rate
:param filename: filename to be saved
:return: None
"""
if filename is None or sr is None or data is None :
return "Please provid arguements as writeAsWav(data,sr,filename)"
if "wav" not in filename:
return "Only wav files!"
filename_split = filename.rsplit(".",1)
filename = filename_split[0]
filetype = filename_split[1].lower()
data = AudioProcessing.rescaleAmplitude(data)
librosa.output.write_wav("{}.{}".format(filename,filetype),data,sr)
@staticmethod
def generateSineWave(amp,f,phi,fs):
"""
Generating a simple sine wave
:param amp: Amplitude
:param f: Frequency
:param phi: Phase
:param fs: Frequency sampling rate
:return: Sine wave signal
"""
# generate 10 periods of the sinusoid
t = np.arange(0,10.0/f,1.0/fs)
x = amp*np.cos(2*np.pi*f*t + phi)
return(t,x)
@staticmethod
def convert_to_mono(x):
"""
Convert multi channel sounds to mono channel
:param x: audio data
:return: mono channel (audio data)
"""
if x.ndim > 1:
return librosa.to_mono(x)
return x
@staticmethod
def DFT(data,N,fs,start_time = 0.0):
"""
calculating N point DFT
:param data: audio data
:param N: N point DFT
:param fs: sampling frequency
:return:
"""
data = AudioProcessing.convert_to_mono(data)
size = data.size
new_data = np.zeros(N)
if size < N:
diff = N - size
new_data[:size] = data
else:
new_data = data[start_time*fs:start_time*fs+N]
hanning = np.hanning(N)
new_data = new_data*hanning
print("Calculating DFT for {} ms window with start time {} sec".format(N*1000/float(fs),start_time))
nv = np.arange(-N/2.0,N/2.0)
kv = np.arange(-N/2.0,N/2.0)
X = np.array([])
# Calculating the DFT of the cropped signal
for k in kv:
s = np.exp(1j*2*np.pi*k/N*nv)
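# The source file is truncated here. A standard DFT accumulation for the rest
# of the loop body would be (assumed, kept as comments):
# X = np.append(X, np.sum(new_data * np.conjugate(s)))
# followed by e.g. `return kv * fs / N, X` to yield the frequency axis and spectrum.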
import numpy as np
from typing import List, Tuple
from numba import njit, prange
from gym_rubiks_cube.envs.functions import RotationMatrix3D, getQuadrant
import gym_rubiks_cube.envs.objects3D as o3
@njit
def rasterizeBottomFlatTriangle(v1, v2, v3, width, height):
"""assumes v1.y < v2.y = v3.y"""
object_map = np.ones((width, height), dtype=np.float32) * np.inf
v1[:2], v2[:2], v3[:2] = np.floor(v1[:2]), np.floor(v2[:2]), np.floor(v3[:2])
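# The source file is truncated here. A minimal scanline sketch for a bottom-flat
# triangle (an assumption about the original body, kept as comments):
# invslope1 = (v2[0] - v1[0]) / (v2[1] - v1[1])
# invslope2 = (v3[0] - v1[0]) / (v3[1] - v1[1])
# cur1 = cur2 = v1[0]
# for y in range(int(v1[1]), int(v2[1]) + 1):
#     for x in range(int(min(cur1, cur2)), int(max(cur1, cur2)) + 1):
#         if 0 <= x < width and 0 <= y < height:
#             object_map[x, y] = min(object_map[x, y], depth_at(x, y))  # depth_at is hypothetical
#     cur1 += invslope1
#     cur2 += invslope2
# return object_map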
# -*- coding: utf-8 -*-
"""
Created 2020.09.28.
Script for doing group level analysis of anatomical precision rates.
@author: rouhinen
"""
import numpy as np
import os
import glob
import matplotlib.pyplot as plt
from fidelityOpMinimal import (make_series_paired, make_series, collapse_operator,
source_fid_to_weights)
"""Load source identities, forward and inverse operators. """
subjectsPath = 'C:\\temp\\fWeighting\\fwSubjects_p\\'
sourceIdPattern = '\\sourceIdentities_parc2018yeo7_200.npy' # For collapsing forward and inverse operators.
sourceFidPattern = '\\sourceFidelities_MEEG_parc2018yeo7_200.npy'
forwardPattern = '\\*forward*MEEG.npy' # Should be at source space
inversePattern = '\\inverseOperatorMEEG.npy' # Should be at source space.
n_iterations = 100
n_samples = 5000
n_cut_samples = 40
widths = np.arange(5, 6)
# Source fidelity to weights settings
exponent = 2
normalize = True
flips = False
def get_precision_rates(cp_PLV, truth_matrix):
# Set thresholds from the data. Get about 200 thresholds.
maxVal = np.max(cp_PLV)
thresholds = np.sort(np.ravel(cp_PLV))
distance = int(len(thresholds) // 200) + (len(thresholds) % 200 > 0) # To int, round up.
thresholds = thresholds[0:-1:distance]
thresholds = np.append(thresholds, maxVal)
precisions = np.zeros([cp_PLV.shape[0], len(thresholds)], dtype=float)
for i, threshold in enumerate(thresholds):
estTrueMat = cp_PLV > threshold
tPos = np.sum(estTrueMat * truth_matrix, axis=1)
fPos = np.sum(estTrueMat * np.logical_not(truth_matrix), axis=1)
precisions[:, i] = tPos/(tPos + fPos)
return precisions, thresholds
def find_nearest_index(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def get_n_parcels(identities):
idSet = set(identities) # Get unique IDs
idSet = [item for item in idSet if item >= 0] # Remove negative values (should have only -1 if any)
n_parcels = len(idSet)
return n_parcels
# def get_nearest_tp_semi_bin(binArray, tpRate, fpRate):
# nearestTP = np.zeros(len(binArray))
# for i, fpval in enumerate(binArray):
# index = find_nearest_index(fpRate, fpval)
# nearestTP[i] = tpRate[index]
# return nearestTP
def delete_diagonal(symm_matrix):
symm_matrix = symm_matrix[~np.eye(symm_matrix.shape[0],dtype=bool)].reshape(
symm_matrix.shape[0],-1)
return symm_matrix
def fidelity_estimation_collapsed(fwd, inv, n_samples = 5000, parcel_series=np.asarray([])):
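"""Body truncated in the source. A hedged sketch of the usual approach, stated
as an assumption only: simulate uncorrelated parcel time series, project them
through inv @ fwd (the collapsed operators), and correlate each parcel's
estimate with its simulated series to obtain per-parcel fidelities.
"""
pass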
import numpy as np
import torch
from torch.utils.data import Dataset
import os
import time
import collections
import random
from DSB2017.layers import iou, nms
from scipy.ndimage import zoom
import warnings
from scipy.ndimage.interpolation import rotate
import pandas
class DataBowl3Classifier(Dataset):
def __init__(self, split, config, phase='train'):
assert (phase == 'train' or phase == 'val' or phase == 'test')
self.random_sample = config['random_sample']
self.T = config['T']
self.topk = config['topk']
self.crop_size = config['crop_size']
self.stride = config['stride']
self.augtype = config['augtype']
self.filling_value = config['filling_value']
# self.labels = np.array(pandas.read_csv(config['labelfile']))
datadir = config['datadir']
bboxpath = config['bboxpath']
self.phase = phase
self.candidate_box = []
self.pbb_label = []
idcs = split
self.filenames = []
for idx in idcs:
file_name = os.path.splitext(idx)[0] + '_clean.npy'
self.filenames.append(os.path.join(datadir, file_name))
if self.phase != 'test':
self.yset = 1 - np.array([f.split('-')[1][2] for f in idcs]).astype('int')
for idx in idcs:
file_name = os.path.splitext(idx)[0]
pbb = np.load(os.path.join(bboxpath, file_name + '_pbb.npy'))
pbb = pbb[pbb[:, 0] > config['conf_th']]
pbb = nms(pbb, config['nms_th'])
lbb = np.load(os.path.join(bboxpath, file_name + '_lbb.npy'))
pbb_label = []
for p in pbb:
isnod = False
for l in lbb:
score = iou(p[1:5], l)
if score > config['detect_th']:
isnod = True
break
pbb_label.append(isnod)
# if idx.startswith()
self.candidate_box.append(pbb)
self.pbb_label.append(np.array(pbb_label))
self.crop = simpleCrop(config, phase)
def __getitem__(self, idx, split=None):
t = time.time()
np.random.seed(int(str(t % 1)[2:7])) # seed according to time
pbb = self.candidate_box[idx]
pbb_label = self.pbb_label[idx]
conf_list = pbb[:, 0]
T = self.T
topk = self.topk
img = np.load(self.filenames[idx])
if self.random_sample and self.phase == 'train':
chosenid = sample(conf_list, topk, T=T)
# chosenid = conf_list.argsort()[::-1][:topk]
else:
chosenid = conf_list.argsort()[::-1][:topk]
croplist = np.zeros([topk, 1, self.crop_size[0], self.crop_size[1], self.crop_size[2]]).astype('float32')
coordlist = np.zeros([topk, 3, self.crop_size[0] // self.stride, self.crop_size[1] // self.stride,
self.crop_size[2] // self.stride]).astype('float32')
padmask = np.concatenate([np.ones(len(chosenid)), np.zeros(self.topk - len(chosenid))])
isnodlist = np.zeros([topk])
for i, id in enumerate(chosenid):
target = pbb[id, 1:]
isnod = pbb_label[id]
crop, coord = self.crop(img, target)
if self.phase == 'train':
crop, coord = augment(crop, coord,
ifflip=self.augtype['flip'], ifrotate=self.augtype['rotate'],
ifswap=self.augtype['swap'], filling_value=self.filling_value)
crop = crop.astype(np.float32)
croplist[i] = crop
coordlist[i] = coord
isnodlist[i] = isnod
if self.phase != 'test':
y = np.array([self.yset[idx]])
return torch.from_numpy(croplist).float(), torch.from_numpy(coordlist).float(), torch.from_numpy(
isnodlist).int(), torch.from_numpy(y)
else:
return torch.from_numpy(croplist).float(), torch.from_numpy(coordlist).float()
def __len__(self):
if self.phase != 'test':
return len(self.candidate_box)
else:
return len(self.candidate_box)
class simpleCrop():
def __init__(self, config, phase):
self.crop_size = config['crop_size']
self.scaleLim = config['scaleLim']
self.radiusLim = config['radiusLim']
self.jitter_range = config['jitter_range']
self.isScale = config['augtype']['scale'] and phase == 'train'
self.stride = config['stride']
self.filling_value = config['filling_value']
self.phase = phase
def __call__(self, imgs, target):
if self.isScale:
radiusLim = self.radiusLim
scaleLim = self.scaleLim
scaleRange = [np.min([np.max([(radiusLim[0] / target[3]), scaleLim[0]]), 1])
, np.max([np.min([(radiusLim[1] / target[3]), scaleLim[1]]), 1])]
scale = np.random.rand() * (scaleRange[1] - scaleRange[0]) + scaleRange[0]
crop_size = (np.array(self.crop_size).astype('float') / scale).astype('int')  # rounding assumed, by analogy with the reference DSB2017 code
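# The source file is truncated here. The usual DSB2017-style continuation
# (assumed, kept as comments): compute `start = (target[:3] - crop_size / 2)`,
# jitter each axis within self.jitter_range, pad with self.filling_value where
# the crop leaves the volume, zoom the crop back to self.crop_size, and build
# the coordinate grid with self.stride.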
#from ...pybids import BIDSLayout
import os
import numpy as np
import pandas as pd
import nibabel as nb
from nibabel.processing import smooth_image
from scipy.stats import gmean
from nipype import logging
from nipype.utils.filemanip import fname_presuffix,split_filename,copyfiles
from nipype.interfaces.base import (
traits, TraitedSpec, BaseInterfaceInputSpec, SimpleInterface,
File, InputMultiPath, OutputMultiPath, isdefined,Undefined)
from nipype.interfaces.fsl.base import (FSLCommand, FSLCommandInputSpec, Info)
from nipype.interfaces import fsl
from nipype.interfaces.ants import ApplyTransforms
LOGGER = logging.getLogger('nipype.interface')
class _refinemaskInputSpec(BaseInterfaceInputSpec):
in_t1mask=File(exists=True,mandatory=True,desc='t1 mask')
in_boldmask=File(exists=True,mandatory=True,desc='bold mask')
transforms=File(exists=True,mandatory=True,desc='transfom')
out_mask=File(exists=False,mandatory=False,desc='output mask')
out_tmp=File(exists=False,mandatory=False,desc='tmp mask')
class _refinemaskOutputSpec(TraitedSpec):
out_mask=File(exists=False,desc='output mask')
out_tmp=File(exists=False,desc='tmp mask')
class refinemask(SimpleInterface):
input_spec = _refinemaskInputSpec
output_spec =_refinemaskOutputSpec
def _run_interface(self, runtime):
self._results['out_tmp'] = fname_presuffix(self.inputs.in_boldmask,
suffix='_tempmask', newpath=runtime.cwd)
self._results['out_mask'] = fname_presuffix(self.inputs.in_boldmask,
suffix='_refinemask', newpath=runtime.cwd)
b1=ApplyTransforms()
b1.inputs.dimension=3
b1.inputs.float=True
b1.inputs.input_image=self.inputs.in_t1mask
b1.inputs.interpolation='NearestNeighbor'
b1.inputs.reference_image=self.inputs.in_boldmask
b1.inputs.transforms=self.inputs.transforms
b1.inputs.input_image_type=3
b1.inputs.output_image=self._results['out_tmp']
b1.run()
from nipype.interfaces.fsl import MultiImageMaths
mat1 = MultiImageMaths()
mat1.inputs.in_file =self._results['out_tmp']
mat1.inputs.op_string = " -mul %s -bin"
mat1.inputs.operand_files=self.inputs.in_boldmask
mat1.inputs.out_file = self._results['out_mask']
mat1.run()
self.inputs.out_mask=os.path.abspath(self._results['out_mask'])
return runtime
class _extractCBFInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True,
desc='preprocessed file')
in_ASLcontext = File(exists=True, mandatory=True,
desc='ASL context tsv file with label and control')
out_file=File(exists=False,mandatory=False,desc='cbf timeries data')
out_avg=File(exists=False,mandatory=False,desc='average control')
class _extractCBFOutputSpec(TraitedSpec):
out_file = File(exists=False, desc='cbf timeries data')
out_avg = File(exists=False, desc='average control')
class extractCBF(SimpleInterface):
"""
extract CBF timeseries
by subtracting label from control,
or vice versa
"""
input_spec = _extractCBFInputSpec
output_spec =_extractCBFOutputSpec
def _run_interface(self, runtime):
aslcontext=pd.read_csv(self.inputs.in_ASLcontext,header=None)
idasl=aslcontext[0].tolist()
controllist= [ i for i in range(len(idasl)) if idasl[i] == 'Control' ]
labellist=[ i for i in range(len(idasl)) if idasl[i] == 'Label' ]
# read the nifti image
allasl=nb.load(self.inputs.in_file)
dataasl=allasl.get_fdata()
if len(dataasl.shape) == 5:
raise RuntimeError('Input image (%s) is 5D.' % self.inputs.in_file)
control_img=dataasl[:,:,:,controllist]
label_img=dataasl[:,:,:,labellist]
cbf_data=np.subtract(control_img,label_img)
avg_control=np.mean(control_img,axis=3)
self._results['out_file'] = fname_presuffix(self.inputs.in_file,
suffix='_cbftimeseries', newpath=runtime.cwd)
self._results['out_avg'] = fname_presuffix(self.inputs.in_file,
suffix='_avg_control', newpath=runtime.cwd)
nb.Nifti1Image(
cbf_data, allasl.affine, allasl.header).to_filename(
self._results['out_file'])
nb.Nifti1Image(
avg_control, allasl.affine, allasl.header).to_filename(
self._results['out_avg'])
self.inputs.out_file=os.path.abspath(self._results['out_file'])
self.inputs.out_avg=os.path.abspath(self._results['out_avg'])
return runtime
class _computeCBFInputSpec(BaseInterfaceInputSpec):
#in_file = File(exists=True, mandatory=True,
#desc='asl raw')
in_cbf = File(exists=True,mandatory=True,desc= 'cbf nifti')
in_metadata = traits.Dict(exists=True, mandatory=True,
desc='metadata for CBF ')
in_m0file = File(exists=True, mandatory=False,
desc='M0 nifti file')
in_mask = File(exists=True, mandatory=False,
desc='mask')
out_cbf=File(exists=False,mandatory=False,desc='cbf timeries data')
out_mean=File(exists=False,mandatory=False,desc='average control')
out_att=File(exists=False,mandatory=False,desc='Arterial Transit Time')
class _computeCBFOutputSpec(TraitedSpec):
out_cbf=File(exists=False,desc='cbf timeries data')
out_mean=File(exists=False,desc='average control')
out_att=File(exists=False,desc='Arterial Transit Time')
class computeCBF(SimpleInterface):
"""
compute cbf pASL or pCASL
"""
input_spec = _computeCBFInputSpec
output_spec =_computeCBFOutputSpec
def _run_interface(self, runtime):
labeltype=self.inputs.in_metadata['LabelingType']
tau=self.inputs.in_metadata['LabelingDuration']
plds=np.array(self.inputs.in_metadata['InitialPostLabelDelay'])
m0scale=self.inputs.in_metadata['M0']
magstrength=self.inputs.in_metadata['MagneticFieldStrength']
mask=nb.load(self.inputs.in_mask).get_fdata()
t1blood=(110*int(magstrength[:-1])+1316)/1000
inversion_time=np.add(tau,plds)
if self.inputs.in_metadata['LabelingEfficiency']:
labeleff=self.inputs.in_metadata['LabelingEfficiency']
elif 'CASL' in labeltype:
labeleff=0.72
elif 'PASL' in labeltype:
labeleff=0.8
else:
print('no labeling efficiency specified')
part_coeff=0.9 # brain partition coefficient
if 'CASL' in labeltype:
pf1=(6000*part_coeff)/(2*labeleff*t1blood*(1-np.exp(-(tau/t1blood))))
perfusion_factor=pf1*np.exp(plds/t1blood)
elif 'PASL' in labeltype:
pf1=(6000*part_coeff)/(2*labeleff)
perfusion_factor=(pf1*np.exp(inversion_time/t1blood))/inversion_time
perfusion_factor=np.array([perfusion_factor])
# get control now
avg_control=[]
mzero=nb.load(self.inputs.in_m0file).get_fdata()
if len(mzero.shape) > 3:
avg_control=np.multiply(mask,np.mean(mzero,axis=3))
else:
avg_control=np.multiply(mzero,mask)
if not m0scale:
m0scale=1
cbf_data=nb.load(self.inputs.in_cbf).get_fdata()
cbf1=np.zeros(cbf_data.shape)
for i in range(cbf_data.shape[3]):
cbf1[:,:,:,i]=mask*(np.divide(cbf_data[:,:,:,i],(m0scale*avg_control)))
#m1=m0scale*m0_data
#cbf1=np.divide(cbf_data,m1)
# for compute cbf for each PLD and TI
att=None
if len(perfusion_factor) > 1:
cbf_data_ts=np.zeros(cbf1.shape+(len(perfusion_factor),))
dm1factor=(2*labeleff*1.5*(1-np.exp(-tau/1.5)))*avg_control
deltaM=np.zeros(avg_control.shape+(len(perfusion_factor),))
for i in range(len(perfusion_factor)):
cbf_data_ts[:,:,:,:,i]=cbf1*perfusion_factor[i]
deltaM[:,:,:,i]=dm1factor*(np.exp(-plds[i]/t1blood))
cbf=np.mean(cbf_data_ts,axis=4)
# compute arterial transit time
deltaM2=np.zeros(avg_control.shape+(len(perfusion_factor),))
for i in range(len(perfusion_factor)):
deltaM2[:,:,:,i]=deltaM[:,:,:,i]*plds[i]
att=mask*(np.sum(deltaM2,axis=3)/np.sum(deltaM,axis=3))
else:
cbf=cbf1*perfusion_factor
## cbf is timeseries
meancbf=mask*(np.mean(cbf,axis=3))
self._results['out_cbf'] = fname_presuffix(self.inputs.in_cbf,
suffix='_cbf', newpath=runtime.cwd)
self._results['out_mean'] = fname_presuffix(self.inputs.in_cbf,
suffix='_meancbf', newpath=runtime.cwd)
samplecbf=nb.load(self.inputs.in_cbf)
nb.Nifti1Image(
cbf, samplecbf.affine, samplecbf.header).to_filename(
self._results['out_cbf'])
nb.Nifti1Image(
meancbf, samplecbf.affine, samplecbf.header).to_filename(
self._results['out_mean'])
if att is not None:
self._results['out_att'] = fname_presuffix(self.inputs.in_cbf,
suffix='_att', newpath=runtime.cwd)
nb.Nifti1Image(
att, samplecbf.affine, samplecbf.header).to_filename(
self._results['out_att'])
self.inputs.out_att=os.path.abspath(self._results['out_att'])
self.inputs.out_cbf=os.path.abspath(self._results['out_cbf'])
self.inputs.out_mean=os.path.abspath(self._results['out_mean'])
return runtime
#score and scrub
class _scorescrubCBFInputSpec(BaseInterfaceInputSpec):
in_file = File(exists=True, mandatory=True,
desc='computed CBF from computeCBF')
in_greyM = File(exists=True, mandatory=True,desc='grey matter')
in_whiteM = File(exists=True, mandatory=True,desc='white matter')
in_mask = File(exists=True, mandatory=True,desc='mask')
in_csf = File(exists=True, mandatory=True,desc='csf')
in_thresh=traits.Float(default_value=0.7,exists=True,mandatory=False,desc='threshold for the probability maps')
in_wfun=traits.Str(exists=True,mandatory=False,default_value='huber',
option=['bisquare','andrews','cauchy','fair','logistic','ols','talwar','welsch'],
desc='weight function for robust fitting')
out_score=File(exists=False,desc='score timeseries data')
out_avgscore=File(exists=False,desc='average score')
out_scrub=File(exists=False,desc='average scrub')
out_scoreindex=File(exists=False,desc='index of volume remove or leave by score')
class _scorescrubCBFOutputSpec(TraitedSpec):
out_score=File(exists=True,mandatory=True,desc='score timeseries data')
out_avgscore=File(exists=True,mandatory=True,desc='average score')
out_scrub=File(exists=True,mandatory=True,desc='average scrub')
out_scoreindex=File(exists=True,mandatory=True,desc='index of volume remove or leave by score')
class scorescrubCBF(SimpleInterface):
"""
compute score and scrub
"""
input_spec = _scorescrubCBFInputSpec
output_spec =_scorescrubCBFOutputSpec
def _run_interface(self, runtime):
cbf_ts=nb.load(self.inputs.in_file).get_fdata()
mask=nb.load(self.inputs.in_mask).get_fdata()
greym=nb.load(self.inputs.in_greyM).get_fdata()
whitem=nb.load(self.inputs.in_whiteM).get_fdata()
csf=nb.load(self.inputs.in_csf).get_fdata()
cbf_scorets,index_score=_getcbfscore(cbfts=cbf_ts,wm=whitem,gm=greym,csf=csf,
thresh=self.inputs.in_thresh)
cbfscrub=_scrubcbf(cbf_ts=cbf_scorets,gm=greym,wm=whitem,csf=csf,mask=mask,
wfun=self.inputs.in_wfun,thresh=self.inputs.in_thresh)
self._results['out_score'] = fname_presuffix(self.inputs.in_file,
suffix='_cbfscorets', newpath=runtime.cwd)
self._results['out_avgscore'] = fname_presuffix(self.inputs.in_file,
suffix='_meancbfscore', newpath=runtime.cwd)
self._results['out_scrub'] = fname_presuffix(self.inputs.in_file,
suffix='_cbfscrub', newpath=runtime.cwd)
self._results['out_scoreindex'] =fname_presuffix(self.inputs.in_file,suffix='_scoreindex.txt',
newpath=runtime.cwd,use_ext=False)
samplecbf=nb.load(self.inputs.in_mask)
nb.Nifti1Image(
cbf_scorets, samplecbf.affine, samplecbf.header).to_filename(
self._results['out_score'])
nb.Nifti1Image(
np.mean(cbf_scorets,axis=3), samplecbf.affine, samplecbf.header).to_filename(
self._results['out_avgscore'])
nb.Nifti1Image(
cbfscrub, samplecbf.affine, samplecbf.header).to_filename(
self._results['out_scrub'])
np.savetxt(self._results['out_scoreindex'],index_score, delimiter=',')
self.inputs.out_score=os.path.abspath(self._results['out_score'])
self.inputs.out_avgscore=os.path.abspath(self._results['out_avgscore'])
self.inputs.out_scrub=os.path.abspath(self._results['out_scrub'])
self.inputs.out_scoreindex=os.path.abspath(self._results['out_scoreindex'])
return runtime
def _weightfun(x,wfun='huber'):
""""
get weight fun and tuner
"""
if wfun == 'andrews':
tuner=1.339
weight=(np.abs(x)<np.pi)*np.sin(x)
elif wfun== 'bisquare':
tuner=4.685
weight=(np.abs(x)<1)*np.power((1-np.power(x,2)),2)
elif wfun == 'cauchy':
tuner=2.385
weight=1/(1+np.power(x,2))
elif wfun == 'logistic':
tuner=1.205
weight = np.tanh(x)/x
elif wfun == 'ols':
tuner=1
weight=np.repeat(1,len(x))
elif wfun== 'talwar':
tuner=2.795
weight=1*(np.abs(x)<1)
elif wfun == 'welsch':
tuner=2.985
weight=np.exp(-(np.power(x,2)))
else:
tuner=1.345
weight=1/np.abs(x)
return weight,tuner
def _tune(wfun='huber'):
""""
get weight fun and tuner
"""
if wfun == 'andrews':
tuner=1.339
elif wfun== 'bisquare':
tuner=4.685
elif wfun == 'cauchy':
tuner=2.385
elif wfun == 'logistic':
tuner=1.205
elif wfun == 'ols':
tuner=1
elif wfun== 'talwar':
tuner=2.795
elif wfun == 'welsch':
tuner=2.985
else:
tuner=1.345
return tuner
def _getchisquare(n):
a=[0.000000, 15.484663, 8.886835, 7.224733, 5.901333, 5.126189, 4.683238, 4.272937, 4.079918,
3.731612, 3.515615, 3.459711, 3.280471, 3.078046, 3.037280, 2.990761, 2.837119, 2.795526, 2.785189,
2.649955, 2.637642, 2.532700, 2.505253, 2.469810, 2.496135, 2.342210, 2.384975, 2.275019, 2.244482,
2.249109, 2.271968, 2.210340, 2.179537, 2.133762, 2.174928, 2.150072, 2.142526, 2.071512, 2.091061,
2.039329, 2.053183, 2.066396, 1.998564, 1.993568, 1.991905, 1.981837, 1.950225, 1.938580, 1.937753,
1.882911, 1.892665, 1.960767, 1.915530, 1.847124, 1.947374, 1.872383, 1.852023, 1.861169, 1.843109,
1.823870, 1.809643, 1.815038, 1.848064, 1.791687, 1.768343, 1.778231, 1.779046, 1.759597, 1.774383,
1.774876, 1.751232, 1.755293, 1.757028, 1.751388, 1.739384, 1.716395, 1.730631, 1.718389, 1.693839,
1.696862, 1.691245, 1.682541, 1.702515, 1.700991, 1.674607, 1.669986, 1.688864, 1.653713, 1.641309,
1.648462, 1.630380, 1.634156, 1.660821, 1.625298, 1.643779, 1.631554, 1.643987, 1.624604, 1.606314,
1.609462]
b=[0, 2.177715, 1.446966, 1.272340, 1.190646, 1.151953, 1.122953, 1.103451, 1.089395, 1.079783,
1.071751, 1.063096, 1.058524, 1.054137, 1.049783, 1.046265, 1.043192, 1.039536, 1.038500, 1.037296,
1.033765, 1.032317, 1.031334, 1.029551, 1.028829, 1.027734, 1.024896, 1.024860, 1.025207, 1.024154,
1.022032, 1.021962, 1.021514, 1.020388, 1.019238, 1.020381, 1.019068, 1.018729, 1.018395, 1.017134,
1.016539, 1.015676, 1.015641, 1.015398, 1.015481, 1.015566, 1.014620, 1.014342, 1.013901, 1.013867,
1.013838, 1.013602, 1.013322, 1.012083, 1.013168, 1.012667, 1.011087, 1.011959, 1.011670, 1.011494,
1.010463, 1.010269, 1.010393, 1.010004, 1.010775, 1.009399, 1.011000, 1.010364, 1.009831, 1.009563,
1.010085, 1.009149, 1.008444, 1.009455, 1.009705, 1.008597, 1.008644, 1.008051, 1.008085, 1.008550,
1.008265, 1.009141, 1.008235, 1.008002, 1.008007, 1.007660, 1.007993, 1.007184, 1.008093, 1.007816,
1.007770, 1.007932, 1.007819, 1.007063, 1.006712, 1.006752, 1.006703, 1.006650, 1.006743, 1.007087]
return a[n-1],b[n-1]
def _getcbfscore(cbfts,wm,gm,csf,thresh=0.7):
gm[gm<thresh]=0; gm[gm>0]=1
wm[wm<thresh]=0; wm[wm>0]=1
csf[csf<thresh]=0;csf[csf>0]=1
# get the total number of voxels within gm, wm and csf
nogm =np.sum(gm==1)-1; nowm=np.sum(wm==1)-1; nocf=np.sum(csf==1)-1
mask=gm+wm+csf
#msk=sum(mask>0)
# mean of times series cbf within greymatter
mgmts=np.squeeze(np.mean(cbfts[gm==1,:],axis=0))
# robust mean and median
medmngm = np.median(mgmts); sdmngm=np.mean(np.abs(mgmts - np.mean(mgmts)))/0.675
indx=1*(np.abs(mgmts-medmngm)>(2.5*sdmngm))
R=np.mean(cbfts[:,:,:,indx==0],axis=3)
V=nogm*np.var(R[gm==1]) + nowm*np.var(R[wm==1]) + nocf*np.var(R[csf==1])
V1=V+1
while V < V1:
V1=V;CC =(-2*np.repeat(1,cbfts.shape[3]))*1
for s in range(cbfts.shape[3]):
if indx[s] != 0 :
continue
else:
tmp1 = cbfts[:,:,:,s]
CC[s]=np.corrcoef(R[mask>0],tmp1[mask>0])[0][1]
inx=np.argmax(CC); indx[inx]=2
R=np.mean(cbfts[:,:,:,indx==0],axis=3)
V=nogm*np.var(R[gm==1]) + nowm*np.var(R[wm==1]) + nocf*np.var(R[csf==1])
cbfts_recon=cbfts[:,:,:,indx==0]
return cbfts_recon,indx
def _robustfit(Y,mu,Globalprior,modrobprior,lmd=0,localprior=0,wfun='huber',tune=1.345,flagstd=1,flagmodrobust=1,flagprior=1,thresh=0.7):
dimcbf=Y.shape
priow=np.ones([dimcbf[0],dimcbf[1]]);sw=1
X=priow
b=(np.sum(X*Y,axis=0)+mu*Globalprior+lmd*localprior)/(np.sum(X*X,axis=0)+mu+lmd)
b0=np.repeat(0,len(b))
h1=X/np.power(np.tile(np.sqrt(np.sum(X*X,axis=0)),(dimcbf[0],1)),2)
h0=0.9999*np.ones([dimcbf[0],dimcbf[1]])
h=np.minimum(h0,h1)
adjfactor=1/(np.sqrt(1-h/priow))
tiny_s=(1e-6)*(np.std(h,axis=0));tiny_s[tiny_s==0]=1
D=np.sqrt(np.finfo(float).eps)
iter =0; interlim=10
while iter<interlim:
print('iteration ', iter,"\n")
iter=iter + 1
check1=np.subtract(np.abs(b-b0),(D*np.maximum(np.abs(b),np.abs(b0))))
check1[check1>0]=0
if any(check1):
print(' \n converged after ', iter,"iterations\n")
break
r = Y - X*(np.tile(b,(dimcbf[0],1)))
radj = r * adjfactor/sw
if flagstd == 1 :
s=np.sqrt(np.mean(np.power(radj,2),axis=0))
else:
rs=np.sort(np.abs(radj),axis=0); s=np.median(rs,axis=0)/0.6745
r1=radj*(1-flagmodrobust*np.exp(-np.tile(modrobprior,(dimcbf[0],1))))/np.tile(np.maximum(s,tiny_s)*tune,(dimcbf[0],1))
w,_=_weightfun(r1,wfun)
b0=b; z=np.sqrt(w); x = X*z; yz = Y*z
b=(np.sum(x*yz,axis=0)+mu*Globalprior+lmd*localprior)/(np.sum(x*x,axis=0)+mu+lmd)
return b
def _scrubcbf(cbf_ts,gm,wm,csf,mask,wfun='huber',thresh=0.7):
gm=mask*gm;wm=mask*wm; csf=csf*mask
gmidx=gm[mask==1]; gmidx[gmidx<thresh]=0; gmidx[gmidx>0] = 1
wmidx=wm[mask==1]; wmidx[wmidx<thresh]=0; wmidx[wmidx>0] = 1
csfidx = csf[mask==1]; csfidx[csfidx<thresh] = 0; csfidx[csfidx>0] =1
#midx = mask[mask==1]
meancbf=np.mean(cbf_ts,axis=3)
y=np.transpose(cbf_ts[mask==1,:,])
VV=np.var(y,axis=0)
thresh1,thresh3=_getchisquare(y.shape[0])
mu1=VV/(np.median(VV[gmidx==1])*thresh3)
mu =((mu1>thresh1)&(mu1<10*thresh1))*(mu1-thresh1) +(mu1 >=10*thresh1)*(1/(2*thresh1*10)*np.power(mu1,2))+(thresh1*10/2 - thresh1)
M=meancbf*mask; M[mask==1]=mu; modrobprior = mu/10
gmidx2 = 1*([gm.flatten()>thresh] and [M.flatten()==0] and [wm.flatten() > csf.flatten()])[0]
wmidx2 =1*([wm.flatten()>thresh] and [M.flatten()==0] and [gm.flatten() > csf.flatten()])[0]
if np.sum(gmidx2)==0 or np.sum(wmidx2)==0:
gmidx2 =1*(gm.flatten()>thresh); wmidx2 = 1*(wm.flatten()>thresh)
idxx =gmidx2 + wmidx2; idxx[idxx>0]=1
X = np.zeros([len(idxx),2])
X[:,0] = gm.flatten()[gm.flatten()>=(0)]*idxx
X[:,1] = wm.flatten()[wm.flatten()>=(0)]*idxx
A=(meancbf.flatten()[idxx >= 0])*idxx
c=np.linalg.lstsq(X,A)[0]
Globalpriorfull=c[0]*gm.flatten() +c[1]*wm.flatten()
Globalprior =Globalpriorfull[mask.flatten()==1]
localprior=0;lmd=0
tune=_tune(wfun=wfun)
bb=_robustfit(Y=y,mu=mu,Globalprior=Globalprior,modrobprior=modrobprior,lmd=lmd,
localprior=localprior,wfun=wfun,tune=tune,flagstd=1,flagmodrobust=1,flagprior=1,thresh=0.7)
newcbf=meancbf*mask
newcbf[mask==1]=bb
return newcbf
# basil and pvcorr
class _BASILCBFInputSpec(FSLCommandInputSpec):
# We use position args here as list indices - so a negative number
# will put something on the end
in_file = File(
exists=True,
desc="input file cbf after substracting tag-control or control-tag",
argstr=" -i %s",
position=0,
mandatory=True,
)
mask= File(exists=True,argstr=" -m %s ",desc="mask in the same space as in_infile",mandatory=True,)
mzero=File(exists=True,argstr=" -c %s ",desc='m0 scan',mandatory=True,)
m0scale=traits.Float(desc='calibration of asl',argstr=" --cgain %.2f ",mandatory=True,)
m0tr=traits.Float(desc='Mzero TR',argstr=" --tr %.2f ",mandatory=True,)
tis=traits.Float(desc='inversion recovery time = plds + bolus',argstr=" --tis %.2f ",mandatory=True,)
pcasl=traits.Bool(desc='label type: default is PASL',argstr=" --casl ",mandatory=False,default_value=False)
bolus=traits.Float(desc='bolus or tau: label duration',argstr=" --bolus %.2f ",mandatory=True,)
pvc=traits.Bool(desc='partial volume correction',mandatory=False,argstr=" --pvcorr ",default_value=True)
pvgm=File(exists=True,mandatory=False,desc='grey matter probability map',argstr=" --pvgm %s ",)
pvwm=File(exists=True,mandatory=False,desc='white matter probability map',argstr=" --pvwm %s ",)
out_basename=File(desc="base name of output files", argstr=" -o %s ",mandatory=True)
out_cbfb=File(exists=False,desc='cbf with spatial regularization (BASIL)')
out_cbfpv=File(exists=False,desc='partial-volume-corrected cbf (BASIL)')
out_attb=File(exists=False,desc='arterial transit time')
#environ=traits.Str('FSLOUTPUTTYPE': 'NIFTI_GZ'}
class _BASILCBFOutputSpec(TraitedSpec):
out_cbfb=File(exists=False,desc='cbf with spatial regularization (BASIL)')
out_cbfpv=File(exists=False,desc='partial-volume-corrected cbf (BASIL)')
out_attb=File(exists=False,desc='arterial transit time')
class BASILCBF(FSLCommand):
_cmd = " oxford_asl "
input_spec = _BASILCBFInputSpec
output_spec = _BASILCBFOutputSpec
def _run_interface(self, runtime):
import shutil
#if os.path.isdir(self.inputs.out_basename+'/native_space'):
#shutil.rmtree(self.inputs.out_basename+'/native_space')
#shutil.rmtree(self.inputs.out_basename+'/calib')
runtime = super(BASILCBF, self)._run_interface(runtime)
return runtime
def _gen_outfilename(self,suffix):
if isdefined(self.inputs.in_file):
out_file = self._gen_fname(self.inputs.in_file, suffix=suffix)
return os.path.abspath(out_file)
def _list_outputs(self):
outputs = self.output_spec().get()
#outputs["out_cbfb"]=self.inputs.out_basename+'/basilcbf.nii.gz'
outputs["out_cbfb"]=fname_presuffix(self.inputs.in_file,suffix='_cbfbasil')
from shutil import copyfile
copyfile(self.inputs.out_basename+'/native_space/perfusion_calib.nii.gz',outputs["out_cbfb"])
if len(np.array([self.inputs.tis])) > 1:
#outputs["out_att"]=self.inputs.out_basename+'/arrivaltime.nii.gz'
outputs["out_att"]=fname_presuffix(self.inputs.in_file,suffix='_arrivaltime')
copyfile(self.inputs.out_basename+'/native_space/arrival.nii.gz',outputs["out_att"])
self.inputs.out_att=os.path.abspath(outputs["out_att"])
#outputs["out_cbfpv"]=self.inputs.out_basename+'/basilcbfpv.nii.gz'
outputs["out_cbfpv"]=fname_presuffix(self.inputs.in_file,suffix='_cbfbasilpv')
copyfile(self.inputs.out_basename+'/native_space/pvcorr/perfusion_calib.nii.gz',outputs["out_cbfpv"])
self.inputs.out_cbfb=os.path.abspath(outputs["out_cbfb"])
self.inputs.out_cbfpv=os.path.abspath(outputs["out_cbfpv"])
return outputs
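# Hedged usage sketch for the BASILCBF interface above (all values illustrative,
# file names hypothetical):
# basil = BASILCBF(in_file='cbf_ts.nii.gz', mask='mask.nii.gz', mzero='m0.nii.gz',
#                  m0scale=1.0, m0tr=4.0, tis=3.6, bolus=1.8, pcasl=True,
#                  pvgm='gm.nii.gz', pvwm='wm.nii.gz', out_basename='basil')
# res = basil.run()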
class _qccbfInputSpec(BaseInterfaceInputSpec):
in_file=File(exists=True,mandatory=True,desc='original asl_file')
in_meancbf=File(exists=True,mandatory=True,desc='cbf img')
in_avgscore=File(exists=True,mandatory=True,desc='cbf img')
in_scrub=File(exists=True,mandatory=True,desc='cbf img')
in_basil=File(exists=True,mandatory=True,desc='cbf img')
in_pvc=File(exists=True,mandatory=True,desc='cbf img')
in_greyM = File(exists=True, mandatory=True,desc='grey matter')
in_whiteM = File(exists=True, mandatory=True,desc='white matter')
in_csf = File(exists=True, mandatory=True,desc='csf')
in_confmat=File(exists=True,mandatory=True,desc='confound matrix')
in_boldmask=File(exists=True,mandatory=True,desc='bold mask in native space')
in_t1mask=File(exists=True,mandatory=True,desc='t1wmask in native space ')
in_boldmaskstd=File(exists=True,mandatory=False,desc='bold mask in native space')
in_templatemask=File(exists=True,mandatory=False,desc='template mask or image')
qc_file=File(exists=False,mandatory=False,desc='qc file ')
class _qccbfOutputSpec(TraitedSpec):
qc_file=File(exists=False,desc='qc file ')
class qccbf(SimpleInterface):
input_spec = _qccbfInputSpec
output_spec = _qccbfOutputSpec
def _run_interface(self, runtime):
time1=pd.read_csv(self.inputs.in_confmat,sep='\t')
time1.fillna(0,inplace=True)
fd=np.mean(time1['framewise_displacement'])
rms=time1[['rot_x','rot_y','rot_z']];rms1=rms.pow(2)
rms=np.mean(np.sqrt(rms1.sum(axis=1)/3))
regDC=dc(self.inputs.in_boldmask,self.inputs.in_t1mask)
regJC=jc(self.inputs.in_boldmask,self.inputs.in_t1mask)
regCC=crosscorr(self.inputs.in_boldmask,self.inputs.in_t1mask)
regCov=coverage(self.inputs.in_boldmask,self.inputs.in_t1mask)
if self.inputs.in_boldmaskstd and self.inputs.in_templatemask:
normDC=dc(self.inputs.in_boldmaskstd,self.inputs.in_templatemask)
normJC=jc(self.inputs.in_boldmaskstd,self.inputs.in_templatemask)
normCC=crosscorr(self.inputs.in_boldmaskstd,self.inputs.in_templatemask)
normCov=coverage(self.inputs.in_boldmaskstd,self.inputs.in_templatemask)
meancbf_qei=cbf_qei(gm=self.inputs.in_greyM,wm=self.inputs.in_whiteM,
csf=self.inputs.in_csf,img=self.inputs.in_meancbf,thresh=0.7)
scorecbf_qei=cbf_qei(gm=self.inputs.in_greyM,wm=self.inputs.in_whiteM,
csf=self.inputs.in_csf,img=self.inputs.in_avgscore,thresh=0.7)
basilcbf_qei=cbf_qei(gm=self.inputs.in_greyM,wm=self.inputs.in_whiteM,
csf=self.inputs.in_csf,img=self.inputs.in_basil,thresh=0.7)
pvcbf_qei=cbf_qei(gm=self.inputs.in_greyM,wm=self.inputs.in_whiteM,
csf=self.inputs.in_csf,img=self.inputs.in_pvc,thresh=0.7)
scrub_qei=cbf_qei(gm=self.inputs.in_greyM,wm=self.inputs.in_whiteM,
csf=self.inputs.in_csf,img=self.inputs.in_scrub,thresh=0.7)
if self.inputs.in_boldmaskstd and self.inputs.in_templatemask:
dict1 = {'fd':[fd],'rel_rms':[rms],'coregDC':[regDC],'coregJC':[regJC],'coregCC':[regCC],'coregCOV':[regCov],
'normDC':[normDC],'normJC':[normJC],'normCC':[normCC],'normCOV':[normCov],
'cbfQei':[meancbf_qei],'scoreQei':[scorecbf_qei],'scrubQei':[scrub_qei],
'basilQei':[basilcbf_qei],'pvcQei':[pvcbf_qei] }
else:
dict1 = {'fd':[fd],'rel_rms':[rms],'regDC':[regDC],'regJC':[regJC],'coregCC':[regCC],'coregCOV':[regCov],
'cbfQei':[meancbf_qei],'scoreQei':[scorecbf_qei],'scrubQei':[scrub_qei],
'basilQei':[basilcbf_qei],'pvcQei':[pvcbf_qei] }
_,file1=os.path.split(self.inputs.in_file)
bb=file1.split('_')
dict2={}
for i in range(len(bb)-1):
dict2.update({bb[i].split('-')[0]:bb[i].split('-')[1]})
dict2.update(dict1)
df = pd.DataFrame(dict2)
self._results['qc_file'] =fname_presuffix(self.inputs.in_meancbf,suffix='qc_cbf.csv',
newpath=runtime.cwd,use_ext=False)
df.to_csv (self._results['qc_file'], index = False, header=True)
self.inputs.qc_file=os.path.abspath(self._results['qc_file'])
return runtime
def dc(input1, input2):
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
input1 : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2 : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```input1``` and the
object(s) in ```input2```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
"""
input1=nb.load(input1).get_fdata()
input2=nb.load(input2).get_fdata()
input1 =np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intersection = np.count_nonzero(input1 & input2)
size_i1 = np.count_nonzero(input1)
size_i2 = np.count_nonzero(input2)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc
def jc(input1, input2):
r"""
Jaccard coefficient
Computes the Jaccard coefficient between the binary objects in two images.
Parameters
----------
input1: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
input2: array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
jc: float
The Jaccard coefficient between the object(s) in `input1` and the
object(s) in `input2`. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric.
"""
input1=nb.load(input1).get_fdata()
input2=nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intersection = np.count_nonzero(input1 & input2)
union = np.count_nonzero(input1 | input2)
jc = float(intersection) / float(union)
return jc
def crosscorr(input1,input2):
"""
cross correlation
compute the cross-correlation between the two input masks
"""
input1=nb.load(input1).get_fdata()
input2=nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool)).flatten()
input2 = np.atleast_1d(input2.astype(np.bool)).flatten()
cc=np.corrcoef(input1,input2)[0][1]
return cc
def coverage(input1,input2):
"""
estimate the coverage between two masks
"""
input1=nb.load(input1).get_fdata()
input2=nb.load(input2).get_fdata()
input1 = np.atleast_1d(input1.astype(np.bool))
input2 = np.atleast_1d(input2.astype(np.bool))
intsec=np.count_nonzero(input1 & input2)
if np.sum(input1)> np.sum(input2):
smallv=np.sum(input2)
else:
smallv=np.sum(input1)
cov=float(intsec)/float(smallv)
return cov
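# Hedged usage sketch for the overlap metrics above (file names hypothetical):
# regDC  = dc('boldmask.nii.gz', 't1mask.nii.gz')        # Dice, 0..1
# regJC  = jc('boldmask.nii.gz', 't1mask.nii.gz')        # Jaccard, 0..1
# regCC  = crosscorr('boldmask.nii.gz', 't1mask.nii.gz') # Pearson r of binarized masks
# regCOV = coverage('boldmask.nii.gz', 't1mask.nii.gz')  # overlap / smaller mask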
def cbf_qei(gm,wm,csf,img,thresh=0.7):
def fun1(x,xdata):
d1=np.exp(-(x[0])*np.power(xdata,x[1]))
return(d1)
def fun2(x,xdata):
d1=1-np.exp(-(x[0])*np.power(xdata,x[1]))
return(d1)
x1 = [0.054,0.9272]; x2 = [2.8478,0.5196]; x4 = [3.0126, 2.4419]
scbf=smooth_image(nb.load(img),fwhm=5).get_fdata()# smooth the image
if len(scbf.shape) > 3:
scbf=scbf[:,:,:,0]
#load prob maps
gmm=nb.load(gm).get_fdata(); wmm=nb.load(wm).get_fdata(); ccf=nb.load(csf).get_fdata()
if len(gmm.shape) > 3:
gmm=gmm[:,:,:,0]
wmm=gmm[:,:,:,0]
ccf=ccf[:,:,:,0]
pbcf=2.5*gmm+wmm # gmm is 2.5 times wm
msk=np.array((scbf != 0)&(~np.isnan(scbf))&(~np.isnan(pbcf))).astype(int)
gm1=np.array(gmm>thresh)
wm1=np.array(wmm>thresh)
cc1=np.array(ccf>thresh)
r1=np.array([0,np.corrcoef(scbf[msk==1],pbcf[msk==1])[1,0]]).max()
V=((np.sum(gm1)-1)*np.var(scbf[gm1>0])+(np.sum(wm1)-1)*np.var(scbf[wm1>0])+(np.sum(cc1)-1) \
*np.var(scbf[cc1>0]))/( | np.sum(gm1>0) | numpy.sum |
from junctiontree import computation as comp
import numpy as np
from .util import assert_potentials_equal
def get_arrays_and_vars(tree, node_list, potentials):
"""Get all arrays and their variables as a flat list
Output: [array1, vars1, ..., arrayN, varsN]
"""
return list([potentials[tree[0]],node_list[tree[0]]]) + sum(
[
get_arrays_and_vars(child_tree, node_list, potentials)
for child_tree in tree[1:]
],
[]
)
def brute_force_sum_product(tree, node_list, potentials):
"""Compute brute force sum-product with einsum """
# Function to compute the sum-product with brute force einsum
arrays_vars = get_arrays_and_vars(tree, node_list, potentials)
f = lambda output_vars: np.einsum(*(arrays_vars + [output_vars]))
def __run(tree, node_list, p, f, res=[]):
res.append(f(node_list[tree[0]]))
for child_tree in tree[1:]:
__run(child_tree, node_list, p, f, res)
return res
return __run(tree, node_list, potentials, f)
def assert_sum_product(tree, node_order, potentials, variables):
""" Test shafer-shenoy vs brute force sum-product """
# node_order represents the order nodes are traversed
# in get_arrays_and_vars function
assert_potentials_equal(
brute_force_sum_product(
tree,
[variables[idx] for idx in node_order],
[potentials[idx] for idx in node_order]
),
comp.compute_beliefs(tree, potentials, variables)
)
def test_one_scalar_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(),
],
[[]] # no variables for scalar
)
def test_one_matrix_node():
assert_sum_product(
[
0,
],
[0],
[
np.random.randn(2, 3),
],
[
[3,5]
]
)
def test_one_child_node_with_all_variables_shared():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 2),
np.ones((3, 2)),
],
[
[3,5],
[5,3],
[5,3]
]
)
def test_one_child_node_with_one_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.ones((3,)),
],
[
[3,5],
[5,9],
[5]
]
)
def test_one_child_node_with_no_common_variable():
assert_sum_product(
[
0,
(
2,
[
1,
]
)
],
[0,2,1],
[
np.random.randn(2),
np.random.randn(3),
np.ones(()),
],
[
[3],
[9],
[]
]
)
def test_one_grand_child_node_with_no_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(4, 5),
np.ones((3,)),
np.ones((4,)),
],
[
[3, 5],
[5, 9],
[9, 1],
[5],
[9]
]
)
def test_one_grand_child_node_with_variable_shared_with_grand_parent():
assert_sum_product(
[
0,
(
3,
[
1,
(
4,
[
2,
]
)
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(6, 3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[1, 5],
[5],
[5]
]
)
def test_two_children_with_no_variable_shared():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(2, 5),
np.ones((3,)),
np.ones((2,)),
],
[
[3, 5],
[5, 9],
[3, 1],
[5],
[3]
]
)
def test_two_child_with_shared_variable():
assert_sum_product(
[
0,
(
3,
[
1,
]
),
(
4,
[
2,
]
)
],
[0,2,4,1,3],
[
np.random.randn(2, 3),
np.random.randn(3, 4),
np.random.randn(3),
np.ones((3,)),
np.ones((3,)),
],
[
[3, 5],
[5, 9],
[5],
[5],
[5]
]
)
#Copyright (c) 2020 Ocado. All Rights Reserved.
import sys, os
import numpy as np
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
from amrrt.metrics import GeodesicMetric
def test_distance_geodesic(maze_environment_info, maze_distance_matrix):
space = maze_environment_info["space"]
shortest_paths = [74.61747343047412, 84.61575781480384, 64.40995862204083, 98.07522464033143, 127.19815498031583, 95.70905438785839]
waypoints = [[63.0, 51.0], [7.2, 6.6], [28.0, 81.0], [35.0, 37.0], [93.0, 91.0], [89.2, 7.0], [16.2, 37.2]]
geodesic_metric = GeodesicMetric(maze_environment_info["space"], distance_matrix=maze_distance_matrix)
for i in range(6):
a = space.create_state(np.array(waypoints[i]))
b = space.create_state(np.array(waypoints[i+1]))
assert geodesic_metric.distance(a, b) == geodesic_metric.distance(b, a) >= np.linalg.norm(a.pos - b.pos)
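# The test is truncated here; presumably it also checks the precomputed values,
# e.g. (would require `import pytest`):
# assert geodesic_metric.distance(a, b) == pytest.approx(shortest_paths[i])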
########################################################################
#
# License: BSD
# Created: September 1, 2010
# Author: <NAME> - <EMAIL>
#
########################################################################
import sys
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
import blaze.carray as ca
from common import MayBeDiskTest
class createTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing ctable creation from a tuple of carrays"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00b(self):
"""Testing ctable creation from a tuple of lists"""
t = ca.ctable(([1,2,3],[4,5,6]), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([[1,2,3],[4,5,6]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable creation from a tuple of carrays (single column)"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
self.assertRaises(ValueError, ca.ctable, a, 'f0', rootdir=self.rootdir)
def test01(self):
"""Testing ctable creation from a tuple of numpy arrays"""
N = 1e1
a = np.arange(N, dtype='i4')
b = np.arange(N, dtype='f8')+1
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
#print "t->", `t`
ra = np.rec.fromarrays([a,b]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test02(self):
"""Testing ctable creation from an structured array"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03a(self):
"""Testing ctable creation from large iterator"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8',
count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03b(self):
"""Testing ctable creation from large iterator (with a hint)"""
N = 10*1000
ra = np.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N)
t = ca.fromiter(((i, i*2.) for i in xrange(N)),
dtype='i4,f8', count=N, rootdir=self.rootdir)
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
class createDiskTest(createTest, TestCase):
disk = True
class persistentTest(MayBeDiskTest, TestCase):
disk = True
def test00a(self):
"""Testing ctable opening in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='r')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
self.assertRaises(RuntimeError, t.__setitem__, 1, (0, 0.0))
self.assertRaises(RuntimeError, t.append, (0, 0.0))
def test00b(self):
"""Testing ctable opening in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='w')
#print "t->", `t`
N = 0
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((0, 0.0))
t.append((0, 0.0))
t[1] = (1, 2.0)
ra = np.rec.fromarrays([(0,1),(0.0, 2.0)], 'i4,f8').view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00c(self):
"""Testing ctable opening in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Open t
t = ca.open(rootdir=self.rootdir, mode='a')
#print "t->", `t`
# Check values
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[-1] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing ctable creation in "r" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='r')
def test01b(self):
"""Testing ctable creation in "w" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir, mode='w')
#print "t->", `t`
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
# Now check some accesses
t.append((10, 11.0))
t.append((10, 11.0))
t[11] = (11, 12.0)
# Check values
N = 12
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
ra = np.rec.fromarrays([a[:],b[:]]).view(np.ndarray)
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01c(self):
"""Testing ctable creation in "a" mode"""
N = 1e1
a = ca.carray(np.arange(N, dtype='i4'))
b = ca.carray(np.arange(N, dtype='f8')+1)
t = ca.ctable((a, b), ('f0', 'f1'), rootdir=self.rootdir)
# Overwrite the last ctable
self.assertRaises(RuntimeError, ca.ctable, (a, b), ('f0', 'f1'),
rootdir=self.rootdir, mode='a')
class add_del_colTest(MayBeDiskTest, TestCase):
def test00a(self):
"""Testing adding a new column (list flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c.tolist(), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test00(self):
"""Testing adding a new column (carray flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c), 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01a(self):
"""Testing adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test01b(self):
"""Testing cparams when adding a new column (numpy flavor)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, cparams=ca.cparams(1), rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, 'f2')
self.assert_(t['f2'].cparams.clevel == 1, "Incorrect clevel")
def test02(self):
"""Testing adding a new column (default naming)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(ca.carray(c))
ra = np.fromiter(((i, i*2., i*3) for i in xrange(N)), dtype='i4,f8,i8')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test03(self):
"""Testing inserting a new column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=0)
ra = np.fromiter(((i*3, i, i*2.) for i in xrange(N)), dtype='i8,i4,f8')
ra.dtype.names = ('c0', 'f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test04(self):
"""Testing inserting a new column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
c = np.arange(N, dtype='i8')*3
t.addcol(c, name='c0', pos=1)
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
ra.dtype.names = ('f0', 'c0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test05(self):
"""Testing removing an existing column (at the beginning)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=0)
# The next gives a segfault. See:
# http://projects.scipy.org/numpy/ticket/1598
#ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype='i8,f8')
#ra.dtype.names = ('f1', 'f2')
dt = np.dtype([('f1', 'i8'), ('f2', 'f8')])
ra = np.fromiter(((i*3, i*2) for i in xrange(N)), dtype=dt)
#print "t->", `t`
#print "ra", ra
#assert_array_equal(t[:], ra, "ctable values are not correct")
def test06(self):
"""Testing removing an existing column (at the end)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=2)
ra = np.fromiter(((i, i*3) for i in xrange(N)), dtype='i4,i8')
ra.dtype.names = ('f0', 'f1')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test07(self):
"""Testing removing an existing column (in the middle)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol(pos=1)
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
def test08(self):
"""Testing removing an existing column (by name)"""
N = 10
ra = np.fromiter(((i, i*3, i*2.) for i in xrange(N)), dtype='i4,i8,f8')
t = ca.ctable(ra, rootdir=self.rootdir)
t.delcol('f1')
ra = np.fromiter(((i, i*2.) for i in xrange(N)), dtype='i4,f8')
ra.dtype.names = ('f0', 'f2')
#print "t->", `t`
#print "ra[:]", ra[:]
assert_array_equal(t[:], ra, "ctable values are not correct")
import pytest
import sys
import os
import numpy as np
from anytree import RenderTree
from pvtrace.geometry.box import Box
class TestBox:
def test_init(self):
assert type(Box(size=(1,1,1))) == Box
def test_is_on_surface(self):
b = Box(size=(1,1,1))
assert b.is_on_surface((0.5, 0.0, 0.0)) == True
assert b.is_on_surface((0.0, 0.5, 0.0)) == True
assert b.is_on_surface((0.0, 0.0, 0.5)) == True
assert b.is_on_surface((-0.5, 0.0, 0.0)) == True
assert b.is_on_surface((0.0, -0.5, 0.0)) == True
assert b.is_on_surface((0.0, 0.0, -0.5)) == True
assert b.is_on_surface((0.0, 0.0, 0.0)) == False
assert b.is_on_surface((0.501, 0.0, 0.0)) == False
def test_is_on_surface_bad(self):
b = Box(size=(1.0, 1.0, 0.02))
bad_pos = (0.06608370507653762, 0.5, -0.007798573829629238)
bad_dir = (0.2108918904984852, 0.8577010754312269, 0.4689066812555477)
ray_pos = np.array(bad_pos) - 0.0001 * np.array(bad_dir)
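# Hedged completion (the original test appears truncated here): a point nudged
# just inside the box along the ray direction should no longer register as
# being on the surface.
assert b.is_on_surface(tuple(ray_pos)) == False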
import math
from mpl_toolkits.mplot3d import Axes3D, axes3d
import warnings
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
import itertools
from src import constants
from src.my_utils.constant_class import *
from src.my_utils.my_math.line import distance_btw_two_point
class PotentialType:
Repulsive_potential = "Repulsive_potential"
Attractive_potential = "Attractive_potential"
Camera_potential_steps = "camera_potential_steps"
Camera_potential_quadratic = "camera_potential_quadratic"
class PotentialShape:
Circular = "circular"
Angle = "angle"
Linear_X_direction = "Linear X unbound"
Linear_X_direction_bound = "Linear X bound"
Linear_Y_direction = "Linear Y unbound"
Linear_Y_direction_bound = "Linear Y bound"
class HeatMaps:
"""
Maps are composed of multiple points
[(x,y,data_saved-controller-xi,rho_0,PotentialType),...]
x and y are the coordinate of the point where we set the potential function
data_saved-controller-xi is a factor between 0 and 1, to give more or less importance to the point
rho_0 is the radius in which the potential is determined
PotentialType is the function we want to use
If the potential are set correctly and are not to close from each other than the map gives always
a value between 0 and 1.
"""
"""All cam field"""
def HEAT_MAP_INSIDE_OF_FIELD():
return [(0,0,0,1,1,1,1000,PotentialType.Camera_potential_steps)]
"""For one target"""
def HEAT_MAP_ONE_TARGET_CENTER(field_depth):
return [(constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth,0, 0,1,1,1,1,PotentialType.Camera_potential_quadratic)]
"""For two targets"""
def HEAT_MAP_TWO_TARGET_CENTER(field_depth,beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET*field_depth * math.cos(beta / 4)
y = 1.5*constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y,0,1,2,1,2,PotentialType.Camera_potential_quadratic), (x,-y,0,1,2,1,2,PotentialType.Camera_potential_quadratic)]
def HEAT_MAP_TWO_TARGET_FAR(field_depth,beta,side=1):
return [(0.8*field_depth*math.cos(beta/ 4),side*0.3*field_depth * math.sin(beta / 4), side*0, 2, 1, 1, 1.5,PotentialType.Camera_potential_quadratic),
(0.3*field_depth*math.cos(beta/ 4),side*-0.1*field_depth * math.sin(beta/ 4),side*55, 1, 15, 1, 0.75,PotentialType.Camera_potential_quadratic)]
"""For three targets"""
def HEAT_MAP_THREE_TARGET(field_depth,beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET*field_depth * math.cos(beta / 4)
y = 1.5*constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y,0,1,1,1,0.5,PotentialType.Camera_potential_steps),
(x,-y,0,1,1,1,0.5,PotentialType.Camera_potential_steps),
(x+2, 0, 0, 1, 1, 1, 0.5, PotentialType.Camera_potential_steps)]
def HEAT_MAP_TWO_TARGET_OVERLAP(field_depth, beta):
x = constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.cos(beta / 4)
y = 1.5 * constants.DISTANCE_TO_KEEP_FROM_TARGET * field_depth * math.sin(beta / 4)
return [(x, y, 0, 1, 2, 1, 2.5, PotentialType.Camera_potential_quadratic),
(x, -y, 0, 1, 2, 1, 2.5, PotentialType.Camera_potential_quadratic)]
def rotate_vector_field_angle(angle, X, Y):
norm = np.float_power(np.square(X) + np.square(Y), 0.5)
old_angle = np.arctan2(Y, X)
X = norm * np.cos(angle + old_angle)
Y = norm * np.sin(angle + old_angle)
return X, Y
def rotate_map_from_angle_alpha(angle, x, y, x_mean, y_mean):
x_offset = x - x_mean
y_offset = y - y_mean
x_rotate = math.cos(angle) * x_offset + math.sin(angle) * y_offset
y_rotate = -math.sin(angle) * x_offset + math.cos(angle) * y_offset
return x_rotate, y_rotate
def unrotate_map_from_angle_alpha(angle, x, y, x_mean, y_mean):
x_rotate = math.cos(angle) * x + math.sin(angle) * y
y_rotate = -math.sin(angle) * x + math.cos(angle) * y
x = x_rotate + x_mean
y = y_rotate + y_mean
return x, y
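# Hedged usage sketch (illustrative values, not from the original source):
# rotate_map_from_angle_alpha rotates a point about (x_mean, y_mean); applying
# unrotate_map_from_angle_alpha with the opposite angle recovers the point.
#
#     xr, yr = rotate_map_from_angle_alpha(math.pi / 6, 3.0, 4.0, 1.0, 2.0)
#     xb, yb = unrotate_map_from_angle_alpha(-math.pi / 6, xr, yr, 1.0, 2.0)
#     # (xb, yb) ~= (3.0, 4.0)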
def define_potential_shape(shape, X=None, mean_x=None, var_x=None, Y=None, mean_y=None, var_y=None, X_min=None,
X_max=None, Y_min=None, Y_max=None, angle_min=None, angle_max=None):
if shape == PotentialShape.Circular and X is not None and Y is not None:
distances = np.power(np.square(X - mean_x) / var_x + np.square(Y - mean_y) / var_y, 0.5)
angle = np.arctan2(Y - mean_y, X - mean_x)
elif shape == PotentialShape.Angle and X is not None and Y is not None and angle_min is not None and angle_max is not None:
distances = np.power(np.square(X - mean_x) / var_x + np.square(Y - mean_y) / var_y, 0.5)
angle = np.arctan2(Y - mean_y, X - mean_x)
distances = np.where(angle < angle_max, distances, -1)
distances = np.where(angle > angle_min, distances, -1)
elif shape == PotentialShape.Linear_X_direction and Y is not None:
distances = np.square((Y - mean_y) / var_y)
angle = np.arctan2(Y - mean_y, 0)
elif shape == PotentialShape.Linear_Y_direction and X is not None:
distances = np.square((X - mean_x) / var_x)
angle = np.arctan2(0, X - mean_x)
else:
print("define_potential_shape : choice not found or values not set correctly")
if X is not None:
distances = np.zeros(np.shape(X))
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 26 19:50:38 2019
@author: admin
"""
import numpy as np
import pandas as pd
### pandas data processing ###
# Data preparation, data transformation, data aggregation
## Data preparation ##
# Loading; assembling: merging, concatenating, combining; reshaping (pivoting); deleting
# Merging #
# The merge() function
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'price':[12.33,11.44,33.21,13.23,33.62]})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'color':['white','red','red','black']})
frame1
frame2
# Merge frame1 with frame2
pd.merge(frame1, frame2)
# Specify which column to merge on via the on option
frame1 = pd.DataFrame({'id':['ball','pencil','pen','mug','ashtray'],
'color': ['white','red','red','black','green'],
'brand':['OMG','ABC','ABC','POD','POD']})
frame2 = pd.DataFrame({'id':['pencil','pencil','ball','pen'],
'brand':['OMG','POD','ABC','POD']})
frame1
frame2
# frame1 and frame2 share two column names; merging them directly yields an empty DataFrame
pd.merge(frame1, frame2)
# Specify the merge key explicitly
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='brand')
# Use left_on and right_on to specify the key columns of frame1 and frame2, i.e. merge on id in frame1 and sid in frame2
frame2.columns = ['brand','sid']
pd.merge(frame1, frame2, left_on='id', right_on='brand')
# merge() performs an inner join by default; the how option selects the join type
frame2.columns = ['id','brand']
pd.merge(frame1, frame2, on='id')
pd.merge(frame1, frame2, on='id', how='outer')
pd.merge(frame1, frame2, on='id', how='left')
pd.merge(frame1, frame2, on='id', how='right')
# Merge on multiple keys
pd.merge(frame1, frame2, on=['id','brand'], how='left')
# Merge on the index
# Setting left_index and right_index to True merges on the indexes rather than on key columns
pd.merge(frame1,frame2,left_index=True,right_index=True)
# The DataFrame join() method is better suited to index-based merging; it can combine DataFrames with identical indexes even when their columns differ
frame2.columns = ['brand2','id2']
frame1.join(frame2)
## Concatenating ##
# NumPy's concatenate() function
array1 = np.arange(9).reshape((3,3))
array2 = array1 + 6
np.concatenate([array1, array2])
np.concatenate([array1, array2], axis=1)
# pandas' concat() function concatenates along an axis
ser1 = pd.Series(np.random.rand(4), index=[1,2,3,4])
ser2 = pd.Series(np.random.rand(4), index=[5,6,7,8])
# By default the data are concatenated along axis=0
pd.concat([ser1, ser2])
# The result contains no overlapping data; this is effectively an outer join
ser3 = pd.concat([ser1,ser2], axis=1)
pd.concat([ser1,ser3], axis=1, join='inner')
# The keys option creates a hierarchical index on the concatenation axis
pd.concat([ser1,ser2], keys=[1,2])
# With axis=1 the specified keys become the column names of the DataFrame
pd.concat([ser1,ser2], axis=1, keys=[1,2])
frame1 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[1,2,3],
columns=['A','B','C'])
frame2 = pd.DataFrame(np.random.rand(9).reshape(3,3), index=[4,5,6],
columns=['A','B','C'])
pd.concat([frame1, frame2])
pd.concat([frame1,frame2], axis=1)
# Combining #
# The combine_first function combines Series objects while aligning their data
ser1 = pd.Series(np.random.rand(5), index=[1,2,3,4,5])
ser2 = pd.Series(np.random.rand(4), index=[2,4,5,6])
ser1
ser2
# At shared indexes the values come from ser1
ser1.combine_first(ser2)
# At shared indexes the values come from ser2
ser2.combine_first(ser1)
# Partial combination: indexes 1, 2, 3 and 4 all take their values from ser1
ser1[:4].combine_first(ser2[:4])
# Pivoting #
# Pivoting has two basic operations: stacking, which rotates columns into rows, and unstacking, which rotates rows into columns
# Pivoting by hierarchical indexing
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','red','black'],
columns=['ball','pen','pencil'])
# Stacking the columns into rows yields a Series
ser1 = frame1.stack()
ser1.unstack()
# Unstacking can be applied to a specific level by passing its number or name to unstack
ser1.unstack(0)
ser1.unstack(1)
# pivot() converts long format to wide format, taking one or more key columns as arguments
# Long format: every column holds data items, values in a column often repeat those above them, and the data consist of row after row
longframe = pd.DataFrame({'color':['white','white','white','red','red','red','black','black','black'],
'item':['ball','pen','mug','ball','pen','mug','ball','pen','mug'],
'value':np.random.rand(9)})
longframe
longframe.pivot('color','item')
longframe.pivot('item','color')
# Deleting #
frame1 = pd.DataFrame(np.arange(9).reshape(3,3), index=['white','red','black'], columns=['ball','pen','pencil'])
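# Hedged completion (the original tutorial is truncated here; the frame layout
# above and the calls below are assumptions): delete a column in place with
# del, and drop rows by label with drop()
del frame1['ball']
frame1.drop('white')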
############################################################################
# Project: Electrical Pre-Conditioning of Convective Clouds,
# Title: Plotting Radiosonde Data
# Author: <NAME>,
# Email: <EMAIL>.
# Version: 1.9
# Date: 10/01/2019
# Status: Stable
# Change: Major overhaul of datastreams
############################################################################
from __future__ import absolute_import, division, print_function
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
import os, sys, time, warnings, glob, argparse
from datetime import datetime, timedelta
sys.path.insert(0, '/home/users/th863480/PhD/Global_Functions')
#User Processing Modules
import Gilly_Utilities as gu
#Data Set-up Modules
from Data_Importer import EPCC_Importer
from Data_Quality import Radiosonde_Checks_v2 as Radiosonde_Checks
from Data_Output import SPRadiosonde
#Import Global Variables
import PhD_Config as PhD_Global
#Import Tephigram Plotter
from Extras.Tephigram import Tephigram as SPTephigram
#Import WC3 Extras (for GPS2UTC)
from Extras.WC3_Extras import GPS2UTC, CloudDrift, Radiosonde_Launch
class Radiosonde(EPCC_Importer, Radiosonde_Checks, SPRadiosonde, SPTephigram):
"""This class will process all the data aquired from a radiosonde output the data in various forms,
including, height plot, tepihgram and indicies.
Parameters
----------
EPCC_Importer : class
Used to import other datasets other than the actual radiosonde
Radiosonde_Checks : class
Used to quality control the radiosonde data
SPRadiosonde : class
Used to plot the radiosonde data
SPTephigram : class
Used to plot the tephigram of the data
"""
def __init__(self, Sensor_Package=None, Height_Range=None, Calibrate='Counts', verbose=False):
"""Set-up radiosonde data"""
############################################################################
"""Prerequisites"""
#Time Controls
t_begin = time.time()
#Storage Locations
self.Storage_Path = PhD_Global.Storage_Path_WC3
self.Processed_Data_Path = 'Processed_Data/Radiosonde/'
self.Raw_Data_Path = 'Raw_Data/'
self.Radiosonde_Plots_Path = 'Plots/Radiosonde/'
self.Tephigram_Plots_Path = 'Plots/Tephigram/'
#Bound classes
self.importer = EPCC_Importer()
self.sensor_package = Sensor_Package
self.height_range = Height_Range
self.calibrate = Calibrate
self.verbose = verbose
#Real name for all Pandora Channels for each radiosonde launch
self.RawChannelList = {0 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
1 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
2 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
3 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
4 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
5 : ['Lin', 'Log', 'Cyan/PLL', 'IR', 'Parity'],
6 : ['Lin', 'Log/Turbulence', 'Cyan', 'IR/Parity'], #Not Launched Yet
7 : ['Lin', 'Log/Turbulence', 'Cyan', 'IR/Parity'], #Not Launched Yet
8 : ['Lin', 'Log/Turbulence', 'Cyan', 'IR/Parity'], #Not Launched Yet
9 : ['Lin', 'Log', 'Cyan', 'IR', 'Turbulence'],
10 : ['Lin', 'Log', 'Cyan', 'IR', 'Turbulence']}
#Number of bits (2^n)
self.NumofBits = {0 : 12,
1 : 12,
2 : 12,
3 : 12,
4 : 12,
5 : 12,
6 : 16, #Not Launched Yet
7 : 16, #Not Launched Yet
8 : 16, #Not Launched Yet
9 : 12,
10 : 12}
############################################################################
#Import Radiosonde Data
self.Radiosonde_Data, self.Launch_Datetime = self._RadiosondeImporter(self.sensor_package)
#Identify clouds within data
self.Clouds_ID, self.LayerType = self._CloudIdentifier(self.height_range)
#Calculate the space charge density using the log charge sensor
#self.Calibration_Log = self._ChargeCalibrator(self.calibrate, self.sensor_package, self.Clouds_ID, self.LayerType) if np.any(np.in1d(self.calibrate, ['Volts', 'Units'])) else None
def _RadiosondeImporter(self, Sensor_Package=None):
"""Check and Import Data"""
#Error check that either Radiosonde_File or Sensor_Package has been specified
if Sensor_Package is None: sys.exit("[Error] You must specify either the Sensor_Package number")
#Attempt to find the radiosonde file either directly or from glob
self.Radiosonde_File = glob.glob(self.Storage_Path + self.Processed_Data_Path + 'Radiosonde_Flight_No.' + str(Sensor_Package).rjust(2,'0') + '_*/Radiosonde_Flight_PhD_James_No.' + str(Sensor_Package) + '*a.txt')
#If no radiosonde file was found we end program
if len(self.Radiosonde_File) == 0: sys.exit("[Error] Radiosonde package No.%s does not exist. Has the radiosonde been launched yet or has the data been misplaced?" % (Sensor_Package))
#If the radiosonde file was found via glob we need to convert to str from list
if isinstance(self.Radiosonde_File, list): self.Radiosonde_File = self.Radiosonde_File[0]
#Once the radiosonde file is found we can attempt to find the GPS file in the raw file section
self.GPS_File = glob.glob(self.Storage_Path + self.Raw_Data_Path + 'Radiosonde_Flight_No.' + str(Sensor_Package).rjust(2,'0') + '_*/GPSDCC_RESULT*.tsv')
#Import all the data
Radiosonde_Data = pd.read_csv(self.Radiosonde_File, sep=r"\s*", header=None, engine='python',
names=('time',
'height',
'P',
'Tdry',
'RH',
self.RawChannelList[Sensor_Package][0],
self.RawChannelList[Sensor_Package][1],
self.RawChannelList[Sensor_Package][2],
self.RawChannelList[Sensor_Package][3],
self.RawChannelList[Sensor_Package][4],
'long',
'lat',
'range',
'bearing',
'Tdew',
'u',
'v',
'MR'),
dtype={'time': np.float64,
'height': np.float64,
'P': np.float64,
'Tdry': np.float64,
'RH': np.float64,
self.RawChannelList[Sensor_Package][0]: np.float64,
self.RawChannelList[Sensor_Package][1]: np.float64,
self.RawChannelList[Sensor_Package][2]: np.float64,
self.RawChannelList[Sensor_Package][3]: np.float64,
self.RawChannelList[Sensor_Package][4]: np.float64,
'long': np.float64,
'lat': np.float64,
'range': np.float64,
'bearing': np.float64,
'Tdew': np.float64,
'u': np.float64,
'v': np.float64,
'MR': np.float64},
na_values=-32768, comment='#', index_col=False).to_records(index=False)
GPS_Data = pd.read_csv(self.GPS_File[0], sep="\t", skiprows=51, header=None, usecols=(1,2,4),
names=('GPS_Week',
'GPS_Second',
'SondeX'),
dtype={'GPS_Week': np.int32,
'GPS_Second': np.float64,
'SondeX': np.float64},
na_values=-32768, comment='#', index_col=False).to_records(index=False) if len(self.GPS_File) != 0 else None
#Fix np.recarray issue
Radiosonde_Data = gu.fix_recarray(Radiosonde_Data)
GPS_Data = gu.fix_recarray(GPS_Data)
#Estimate the launch time from the data
if self.GPS_File is not None:
GPS_Data = GPS_Data[~np.isnan(GPS_Data['SondeX'])]
Launch_Datetime = GPS2UTC(GPS_Data['GPS_Week'][0], GPS_Data['GPS_Second'][0])
#Calibrate Height, Temperature and Convert PANDORA channels from counts to volts if required.
Radiosonde_Cal_Counts = Radiosonde_Checks(Radiosonde_Data.copy(), None, self.sensor_package, self.height_range, check=1111, verbose=self.verbose)
Radiosonde_Cal_Volts = Radiosonde_Checks(Radiosonde_Data.copy(), 'Volts', self.sensor_package, self.height_range, check=1111, verbose=self.verbose)
Radiosonde_Cal_Units = Radiosonde_Checks(Radiosonde_Data.copy(), 'Units', self.sensor_package, self.height_range, check=1111, verbose=self.verbose)
#Calibrate RH
Radiosonde_Cal_Counts.RH()
Radiosonde_Cal_Volts.RH()
Radiosonde_Cal_Units.RH()
#Calibrate Cloud Sensor
Radiosonde_Cal_Counts.Cloud(method='offset')
Radiosonde_Cal_Volts.Cloud(method='offset')
Radiosonde_Cal_Units.Cloud(method='offset')
#Calibrate Charge
Radiosonde_Cal_Volts.Charge()
Radiosonde_Cal_Units.Charge(lab_calibration=True)
#Calibrate Vibrating Wire
Radiosonde_Cal_Volts.PLL()
Radiosonde_Cal_Units.PLL()
#Nest Radiosonde_Cal into Radiosonde_Data
Radiosonde_Data = {'Raw' : Radiosonde_Data,
'Counts' : Radiosonde_Cal_Counts.finalise(),
'Volts' : Radiosonde_Cal_Volts.finalise(),
'Units' : Radiosonde_Cal_Units.finalise()}
return Radiosonde_Data, Launch_Datetime
def _CloudIdentifier(self, Height_Range=None):
"""This function will identify the cloud layers within a radiosonde ascent by using the cloud sensor and
relative humidity measurements.
Reference
---------
<NAME>., <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME> (2010). Analysis of cloud layer structure
in Shouxian, China using RS92 radiosonde aided by 95 GHz cloud radar. J. Geophys. Res., 115, D00K30,
doi: 10.1029/2010JD014030.
WMO, 2017. Clouds. In: International Cloud Atlas: Manual on the Observation of Clouds and Other Meteors.
Hong Kong: WMO, Section 2.2.1.2.
"""
if self.verbose is True: gu.cprint("[INFO] You are running Radiosonde_CloudIdentifier from the STABLE release", type='bold')
############################################################################
"""[METHOD 1]: Relative Humidity (Zhang et al. 2010)"""
#Define data into new variables
Z = self.Radiosonde_Data['Counts']['height'].copy()
RH = self.Radiosonde_Data['Counts']['RHice'].copy()
#Create Height-Resolving RH Thresholds (see Table 1 in Zhang et al. (2010))
#N.B. use np.interp(val, RH_Thresholds['altitude'], RH_Thresholds['*RH']) where val is the height range you want the RH Threshold
RH_Thresholds = {'minRH' : [0.92, 0.90, 0.88, 0.75, 0.75],
'maxRH' : [0.95, 0.93, 0.90, 0.80, 0.80],
'interRH' : [0.84, 0.82, 0.78, 0.70, 0.70],
'altitude' : [0, 2, 6, 12, 20]}
#Define the cloud height levels as defined by WMO (2017).
Z_Levels = {'low' : [0,2], 'middle' : [2,7], 'high' : [5,13]}
#Define the types of layers that can be detected.
Cloud_Types = {0 : 'Clear Air', 1 : 'Moist (Not Cloud)', 2 : 'Cloud'}
#Define the min, max and interRH for all measure altitudes
minRH = np.interp(Z, RH_Thresholds['altitude'], RH_Thresholds['minRH'], left=np.nan, right=np.nan)*100
maxRH = np.interp(Z, RH_Thresholds['altitude'], RH_Thresholds['maxRH'], left=np.nan, right=np.nan)*100
interRH = np.interp(Z, RH_Thresholds['altitude'], RH_Thresholds['interRH'], left=np.nan, right=np.nan)*100
#[Step 1]: The base of the lowest moist layer is determined as the level when RH exceeds the min-RH corresponding to this level
minRH_mask = (RH > minRH)
#[Step 2 and 3]: Above the base of the moist layer, contiguous levels with RH over the corresponding min-RH are treated as the same layer
Z[~minRH_mask] = np.nan
Clouds_ID = gu.contiguous(Z, 1)
#[Step 4]: Moist layers with bases lower than 120m and thicknesses less than 400m are discarded
for Cloud in np.unique(Clouds_ID)[1:]:
if Z[Clouds_ID == Cloud][0] < 0.12:
if Z[Clouds_ID == Cloud][-1] - Z[Clouds_ID == Cloud][0] < 0.4:
Clouds_ID[Clouds_ID == Cloud] = 0
#[Step 5]: The moist layer is classified as a cloud layer if the maximum RH within this layer is greater than the corresponding max-RH at the base of this moist layer
LayerType = np.zeros(Z.size, dtype=int) #0: Clear Air, 1: Moist Layer, 2: Cloud Layer
for Cloud in np.unique(Clouds_ID)[1:]:
if np.any(RH[Clouds_ID == Cloud] > maxRH[Clouds_ID == Cloud][0]):
LayerType[Clouds_ID == Cloud] = 2
else:
LayerType[Clouds_ID == Cloud] = 1
#[Step 6]: The base of the cloud layers is set to 280m AGL, and cloud layers are discarded if their tops are lower than 280m
for Cloud in np.unique(Clouds_ID)[1:]:
if Z[Clouds_ID == Cloud][-1] < 0.280:
Clouds_ID[Clouds_ID == Cloud] = 0
LayerType[Clouds_ID == Cloud] = 0
#[Step 7]: Two contiguous layers are considered a one-layer cloud if the distance between these two layers is less than 300m or the minimum RH within this distance is more than the maximum inter-RH value within this distance
for Cloud_Below, Cloud_Above in zip(np.unique(Clouds_ID)[1:-1], np.unique(Clouds_ID)[2:]):
#Define the index between clouds of interest
Air_Between = np.arange(gu.bool2int(Clouds_ID == Cloud_Below)[-1], gu.bool2int(Clouds_ID == Cloud_Above)[0])
if ((Z[Clouds_ID == Cloud_Above][0] - Z[Clouds_ID == Cloud_Below][-1]) < 0.3) or (np.nanmin(RH[Air_Between]) > np.nanmax(interRH[Air_Between])):
Joined_Cloud_Mask = np.arange(gu.bool2int(Clouds_ID == Cloud_Below)[0], gu.bool2int(Clouds_ID == Cloud_Above)[-1])
#Update the cloud ID array as the Cloud_Below and Cloud_Above are not distinct clouds
Clouds_ID[Joined_Cloud_Mask] = Cloud_Below
#Update the LayerType to reflect the new cloud merging
if np.any(LayerType[Clouds_ID == Cloud_Below] == 2) or np.any(LayerType[Clouds_ID == Cloud_Above] == 2):
LayerType[Joined_Cloud_Mask] = 2
else:
LayerType[Joined_Cloud_Mask] = 1
#[Step 8] Clouds are discarded if their thicknesses are less than 30.5m for low clouds and 61m for middle/high clouds
for Cloud in np.unique(Clouds_ID)[1:]:
if Z[Clouds_ID == Cloud][0] < Z_Levels['low'][1]:
if Z[Clouds_ID == Cloud][-1] - Z[Clouds_ID == Cloud][0] < 0.0305:
Clouds_ID[Clouds_ID == Cloud] = 0
LayerType[Clouds_ID == Cloud] = 0
else:
if Z[Clouds_ID == Cloud][-1] - Z[Clouds_ID == Cloud][0] < 0.0610:
Clouds_ID[Clouds_ID == Cloud] = 0
LayerType[Clouds_ID == Cloud] = 0
#Re-update numbering of each cloud identified
Clouds_ID = gu.contiguous(Clouds_ID, invalid=0)
#Output verbose to screen
if self.verbose is True:
print("Detected Clouds and Moist Layers\n--------------------------------")
for Cloud in np.unique(Clouds_ID)[1:]:
print("Cloud %s. Cloud Base = %.2fkm, Cloud Top = %.2fkm, Layer Type: %s" % (Cloud, Z[Clouds_ID == Cloud][0], Z[Clouds_ID == Cloud][-1], Cloud_Types[LayerType[Clouds_ID == Cloud][0]]))
return Clouds_ID, LayerType
def _ChargeCalibrator(self, Calibrate=None, Sensor_Package=None, Clouds_ID=None, LayerType=None):
if self.verbose is True: gu.cprint("[INFO] You are running Radiosonde_ChargeCalibrator from the DEV release", type='bold')
############################################################################
"""Prerequisites"""
#Time Controls
t_begin = time.time()
#Plotting requirements
import matplotlib.pyplot as plt
plt.style.use('classic') #necessary if Matplotlib version is >= 2.0.0
#Calibration boundaries
Height_Boundaries = {0 : [],
1 : [],
2 : [],
3 : [],
4 : [10.0,12.0],
5 : [10.5,12.0],
6 : [],
7 : [],
8 : [],
9 : [6,12.0],
10 : [12,18.0]}
############################################################################
"""[Step 1] Calibrate bespoke sensors"""
Radiosonde_Data = self.Radiosonde_Data['Units'].copy()
Linear = gu.moving_average(Radiosonde_Data['Lin_Current'], 11)
Log = gu.moving_average(Radiosonde_Data['Log_Current'], 11)
PosMask = Linear >= 0
NegMask = Linear < 0
LinearPos = np.log10(Linear[PosMask])
LogPos = Log[PosMask]
LinearNeg = -np.log10(-Linear[NegMask])
LogNeg = Log[NegMask]
#Calculate Linear Regressions
slope_all, intercept_all, r_value_all, p_value_all, std_err_all = sp.stats.linregress(Log, Linear)
slope_pos, intercept_pos, r_value_pos, p_value_pos, std_err_pos = sp.stats.linregress(LogPos, LinearPos)
try:
slope_neg, intercept_neg, r_value_neg, p_value_neg, std_err_neg = sp.stats.linregress(LogNeg, LinearNeg)
except:
slope_neg, intercept_neg, r_value_neg, p_value_neg, std_err_neg = (0,0,0,0,0)
if self.verbose is True: print(slope_all, intercept_all, r_value_all, p_value_all, std_err_all)
if self.verbose is True: print(slope_pos, intercept_pos, r_value_pos, p_value_pos, std_err_pos)
if self.verbose is True: print(slope_neg, intercept_neg, r_value_neg, p_value_neg, std_err_neg)
############################################################################
"""[Step 2] Plot the calibration values for positive and negative linear currents"""
plt.clf()
plt.close()
f, ax = plt.subplots(1,3)
ax[0].plot(Log, Linear , 'p', ms=1, marker='o', markeredgecolor='None', markerfacecolor='black', alpha=1, label="Clouds")
ax[1].plot(LogPos, LinearPos , 'p', ms=1, marker='o', markeredgecolor='None', markerfacecolor='black', alpha=1, label="Clouds")
ax[2].plot(LogNeg, LinearNeg , 'p', ms=1, marker='o', markeredgecolor='None', markerfacecolor='black', alpha=1, label="Clouds")
ax[0].plot(Log, slope_all*Log+intercept_all, lw=0.5, c='red')
ax[1].plot(LogPos, slope_pos*LogPos+intercept_pos, lw=0.5, c='red')
ax[2].plot(LogNeg, slope_neg*LogNeg+intercept_neg, lw=0.5, c='red')
ax[0].set_ylabel("Linear Sensor Current (A)", fontsize=8)
ax[1].set_ylabel("Linear Sensor Current (log10(pA))", fontsize=8)
ax[2].set_ylabel("Linear Sensor Current (-log10(-pA))", fontsize=8)
for subplot in ax: subplot.minorticks_on()
for subplot in ax: subplot.set_xlabel("Log Sensor Current (Counts)", fontsize=8)
for subplot in ax: subplot.grid(which='major',axis='both',c='grey')
for subplot in ax: subplot.tick_params(axis='both', which='major', labelsize=8)
for subplot in ax: subplot.tick_params(axis='both', which='minor', labelsize=8)
f.suptitle("Linear and Log Charge Sensors for Radiosonde Flight No.5", y=0.90)
ax[0].get_xaxis().get_major_formatter().labelOnlyBase = False
for subplot in ax:
x0, x1 = subplot.get_xlim()
y0, y1 = subplot.get_ylim()
subplot.set_aspect(np.abs((x1-x0)/(y1-y0)))
ax[0].annotate("All Data", xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=8)
ax[1].annotate("Positive Linear Current", xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=8)
ax[2].annotate("Negative Linear Current", xy=(0, 1), xycoords='axes fraction', xytext=(20, -20), textcoords='offset pixels', horizontalalignment='left', verticalalignment='top', fontsize=8)
ax[0].annotate("$R^{2}$ = %.4f\n$Counts$ = %.0f" % (r_value_all**2, Log.size), xy=(1, 1), xycoords='axes fraction', fontsize=8, xytext=(-3, -3), textcoords='offset points', ha='right', va='top')
ax[1].annotate("$R^{2}$ = %.4f\n$Counts$ = %.0f" % (r_value_pos**2, LogPos.size), xy=(1, 1), xycoords='axes fraction', fontsize=8, xytext=(-3, -3), textcoords='offset points', ha='right', va='top')
ax[2].annotate("$R^{2}$ = %.4f\n$Counts$ = %.0f" % (r_value_neg**2, LogNeg.size), xy=(1, 1), xycoords='axes fraction', fontsize=8, xytext=(-3, -3), textcoords='offset points', ha='right', va='top')
f.set_size_inches(11.7, 4.3)
############################################################################
"""[Step 3] Save plot to file"""
#Specify the directory the plots are stored in
path = os.path.dirname(self.Radiosonde_File).replace(self.Storage_Path + self.Processed_Data_Path,"")
#Find any other plots stored in this directory
previous_plots = glob.glob(self.Storage_Path + self.Radiosonde_Plots_Path + path + "/*")
#Find the biggest 'v' number in plots
plot_version = []
for plots in previous_plots:
try:
plot_version.append(int(os.path.basename(plots)[34:37]))
except ValueError:
plot_version.append(int(os.path.basename(plots)[34:36]))
plot_version = str(np.max(plot_version)+1) if len(plot_version) != 0 else '1'
#Create full directory and file name
Save_Location = self.Storage_Path + self.Radiosonde_Plots_Path + path + '/' + path + '_v' + plot_version.rjust(2,'0') + '_ChargeCalibrator.png'
#Ensure the directory exists on file system and save to that location
gu.ensure_dir(os.path.dirname(Save_Location))
plt.savefig(Save_Location, bbox_inches='tight', pad_inches=0.1, dpi=300)
#Return regression of positive current, regression of negative current and the boundary for counts
return (slope_all, intercept_all), (slope_pos, intercept_pos), (slope_neg, intercept_neg), (PosMask, NegMask)
def Superplotter(self):
"""This function will plot the data from a single radiosonde flight
Parameters
----------
Clouds_ID :
LayerType :
Calibration_Log : 2x2 tuple or array, optional
Used to calculate the space charge density using the log sensor. Due to
the temperature drift of the log sensor, but the wide range of measurements,
the log sensor needs to be calibrated with the linear sensor first. Use the
output from Radiosonde_ChargeCalibrator to populate this parameter.
"""
if self.verbose is True: gu.cprint("[INFO] You are running Superplotter from the DEV release", type='bold')
############################################################################
"""Prerequisites"""
t_begin = time.time()
############################################################################
"""[Step 1] Plot radiosonde data"""
Title = 'Radiosonde Flight No.' + str(self.sensor_package) + ' (' + self.Launch_Datetime.strftime("%d/%m/%Y %H%MUTC") + ')' if self.GPS_File is not None else 'Radiosonde Flight (N/A)'
if self.sensor_package < 8:
Superplotter = SPRadiosonde(8, Title, self.height_range, self.Radiosonde_Data[self.calibrate], calibrate=self.calibrate) if self.calibrate == "Units" else SPRadiosonde(7, Title, self.height_range, self.Radiosonde_Data[self.calibrate], calibrate=self.calibrate)
else:
Superplotter = SPRadiosonde(7, Title, self.height_range, self.Radiosonde_Data[self.calibrate], calibrate=self.calibrate)
if self.calibrate in ['Counts', 'Volts']:
Superplotter.Charge(type='counts')
else:
Superplotter.Charge(type='space_charge')
if self.sensor_package < 8:
Superplotter.Cloud(mask_cyan=(self.Radiosonde_Data[self.calibrate]['Parity'] == 1111))
Superplotter.PLL(type='freq', mask_pll=(self.Radiosonde_Data[self.calibrate]['Parity'] == 1112), point=False) if self.sensor_package < 3 else Superplotter.PLL(type='freq', mask_pll=(self.Radiosonde_Data[self.calibrate]['Parity'] == 1112), point=True)
if self.calibrate in ['Units']: Superplotter.PLL(type='slwc', mask_pll=(self.Radiosonde_Data[self.calibrate]['Parity'] == 1112), point=False) if self.sensor_package < 3 else Superplotter.PLL(type='slwc', mask_pll=(self.Radiosonde_Data[self.calibrate]['Parity'] == 1112), point=True)
else:
Superplotter.Cloud()
Superplotter.Turbulence()
#Plot the processed PLL data
if (self.calibrate == "units") & (self.sensor_package < 8): Superplotter.ch(14, 'SLWC $(g$ $m^{-3})$', 'Supercooled Liquid\nWater Concentration', check=1112, point=True)
#Plot the cloud boundaries if specified
if self.Clouds_ID is not None: Superplotter.Cloud_Boundaries(self.Clouds_ID, self.LayerType, CloudOnly=True)
############################################################################
"""[Step 2] Save plot and return"""
#Specify the directory the plots are stored in
path = os.path.dirname(self.Radiosonde_File).replace(self.Storage_Path + self.Processed_Data_Path,"")
#Find any other plots stored in this directory
previous_plots = glob.glob(self.Storage_Path + self.Radiosonde_Plots_Path + path + "/*")
#Find the biggest 'v' number in plots
plot_version = []
for plots in previous_plots:
try:
plot_version.append(int(os.path.basename(plots)[34:37]))
except ValueError:
plot_version.append(int(os.path.basename(plots)[34:36]))
plot_version = str(np.max(plot_version)+1) if len(plot_version) != 0 else '1'
#Create full directory and file name
Save_Location = self.Storage_Path + self.Radiosonde_Plots_Path + path + '/' + path + '_v' + plot_version.rjust(2,'0') + '_' + str(self.height_range[0]).rjust(2,'0') + 'km_to_' + str(self.height_range[1]).rjust(2,'0') + 'km.png'
#Ensure the directory exists on file system and save to that location
gu.ensure_dir(os.path.dirname(Save_Location))
Superplotter.savefig(Save_Location)
if self.verbose is True: print("[INFO] Superplotter completed successfully (In %.2fs)" % (time.time()-t_begin))
def Tephigram(self, plot_tephigram=False, plot_camborne=False):
"""The Radiosonde_Tephigram function will plot a tephigram from the dry bulb temperature,
T_dry and the Dew point Temperature, T_dew for pressure values, P at each corresponding
height.
Certain tephigram outputs are available from this function including:
1) Lower Condensation Level (LCL) in m
2) Level of Free Convection (LFC) in m
3) Environmental Level (EL) in m
4) Convective Available Potential Energy (CAPE) in J/kg
5) Convective INhibition (CIN) in J/kg
Parameters
----------
plot_tephigram : bool, optional, default is False
Specify True to plot a tephigram of the sounding data. Otherwise
just calculate the sounding indices
plot_camborne : bool, optional, default is False
Specify True to add the sounding from Camborne at the closest time
to the launch time. Only used if plot_tephigram is True.
Outputs
-------
References
----------
<NAME>., 2010. Water in the Atmosphere. In: Thermal Physics of the Atmosphere. Oxford: Wiley & Sons, pp. 93-109
<NAME>. 2018. Tephigram. Original Matlab code found in Matlab_Code directory
<NAME>. 2018. Tephigram. Original Python code found in the same directory.
"""
if self.verbose is True: gu.cprint("[INFO] You are running Radiosonde_Tephigram from the STABLE release", type='bold')
############################################################################
"""Prerequisites"""
#Time Controls
t_begin = time.time()
#Set-up data importer
EPCC_Data = EPCC_Importer()
############################################################################
"""[Step 1] Calibrate bespoke sensors"""
#Return Data (make local to function only. i.e. DON'T use self.Radiosonde_Data)
Radiosonde_Data = self.Radiosonde_Data['Counts'].copy()
Z = Radiosonde_Data['height'][1:]
Tdry = Radiosonde_Data['Tdry'][1:]
Tdew = Radiosonde_Data['Tdew'][1:]
Pres = Radiosonde_Data['P'][1:]
RH = Radiosonde_Data['RH'][1:]/100; RH -= np.max(RH) - 0.01
Wind_Mag = (Radiosonde_Data['u'][1:]**2 + Radiosonde_Data['v'][1:]**2)**0.5
Wind_Dir = np.arctan2(Radiosonde_Data['u'][1:], Radiosonde_Data['v'][1:]) * 180 / np.pi
############################################################################
"""[Step 2] Create Tephigram"""
if plot_tephigram is True:
print("[INFO] Plotting Tephigram...")
#Unpack variables
Z_Plot = Radiosonde_Data['height']
Tdry_Plot = Radiosonde_Data['Tdry']
Tdew_Plot = Radiosonde_Data['Tdew']
Pres_Plot = Radiosonde_Data['P']
#Subset the tephigram to specified location
locator = gu.argneararray(Z_Plot, np.array(self.height_range))
"""
.. module:: reporters
:platform: Unix, Windows
:synopsis: a module for defining OpenMM reporter classes.
.. moduleauthor:: <NAME> <<EMAIL>>
.. _pandas.DataFrame: https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html
.. _StateDataReporter: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.app.statedatareporter.StateDataReporter.html
.. _CustomIntegrator: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomIntegrator.html
.. _CustomCVForce: http://docs.openmm.org/latest/api-python/generated/simtk.openmm.openmm.CustomCVForce.html
"""
import sys
import numpy as np
import pandas as pd
from simtk import openmm
from simtk import unit
from simtk.openmm import app
from .computers import PressureComputer
from .computers import _MoleculeTotalizer
from .utils import InputError
class _MultiStream:
def __init__(self, outputs):
self._outputs = list()
for output in outputs:
self._outputs.append(open(output, 'w') if isinstance(output, str) else output)
def __del__(self):
for output in self._outputs:
if output != sys.stdout and output != sys.stderr:
output.close()
def write(self, message):
for output in self._outputs:
output.write(message)
def flush(self):
for output in self._outputs:
output.flush()
class _AtomsMM_Reporter():
"""
Base class for reporters.
"""
def __init__(self, file, reportInterval, **kwargs):
self._reportInterval = reportInterval
self._requiresInitialization = True
self._needsPositions = False
self._needsVelocities = False
self._needsForces = False
self._needEnergy = False
extraFile = kwargs.pop('extraFile', None)
if extraFile is None:
self._out = open(file, 'w') if isinstance(file, str) else file
else:
self._out = _MultiStream([file, extraFile])
self._separator = kwargs.pop('separator', ',')
def _initialize(self, simulation, state):
pass
def _generateReport(self, simulation, state):
pass
def describeNextReport(self, simulation):
"""
Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
A five element tuple. The first element is the number of steps
until the next report. The remaining elements specify whether
that report will require positions, velocities, forces, and
energies respectively.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, self._needsPositions, self._needsVelocities, self._needsForces, self._needEnergy)
def report(self, simulation, state):
"""
Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
if self._requiresInitialization:
self._initialize(simulation, state)
self._requiresInitialization = False
self._generateReport(simulation, state)
class ExtendedStateDataReporter(app.StateDataReporter):
"""
An extension of OpenMM's StateDataReporter_ class, which outputs information about a simulation,
such as energy and temperature, to a file.
All original functionalities of StateDataReporter_ are preserved and the following ones are
included:
1. Report the Coulomb contribution of the potential energy (keyword: `coulombEnergy`):
This contribution includes both real- and reciprocal-space terms.
2. Report the atomic virial of a fully-flexible system (keyword: `atomicVirial`):
Considering full scaling of atomic coordinates in a box volume change (i.e. without any
distance constraints), the internal virial of the system is given by
.. math::
W = -\\sum_{i,j} r_{ij} E^\\prime(r_{ij}),
where :math:`E^\\prime(r)` is the derivative of the pairwise interaction potential as a
function of the distance between to atoms. Such interaction includes van der Waals, Coulomb,
and bond-stretching contributions. Bond-bending and dihedral angles are not considered
because they are invariant to full volume-scaling of atomic coordinates.
3. Report the nonbonded contribution of the atomic virial (keyword: `nonbondedVirial`):
The nonbonded virial is given by
.. math::
W_\\mathrm{nb} = -\\sum_{i,j} r_{ij} E_\\mathrm{nb}^\\prime(r_{ij}),
where :math:`E_\\mathrm{nb}^\\prime(r)` is the derivative of the nonbonded pairwise
potential, which comprises van der Waals and Coulomb interactions only.
4. Report the atomic pressure of a fully-flexible system (keyword: `atomicPressure`):
.. math::
P = \\frac{2 K + W}{3 V},
where :math:`K` is the kinetic energy sum for all atoms in the system. If keyword
`bathTemperature` is employed (see below), the instantaneous kinetic energy is substituted
by its equipartition-theorem average
:math:`\\left\\langle K \\right\\rangle = 3 N_\\mathrm{atoms} k_B T/2`,
where :math:`T` is the heat-bath temperature.
5. Report the molecular virial of a system (keyword: `molecularVirial`):
To compute the molecular virial, only the center-of-mass coordinates of the molecules are
considered to scale in a box volume change, while the internal molecular structure is kept
unaltered. The molecular virial is computed from the nonbonded part of the atomic virial by
using the formulation of Ref. :cite:`Hunenberger_2002`:
.. math::
W_\\mathrm{mol} = W - \\sum_{i} (\\mathbf{r}_i - \\mathbf{r}_i^\\mathrm{cm}) \\cdot \\mathbf{F}_i,
where :math:`\\mathbf{r}_i` is the coordinate of atom i, :math:`\\mathbf{F}_i` is the
resultant pairwise force acting on it (excluding bond-bending and dihedral angles), and
:math:`\\mathbf{r}_i^\\mathrm{cm}` is the center-of-mass coordinate of its containing
molecule.
6. Report the molecular pressure of a system (keyword: `molecularPressure`):
.. math::
P = \\frac{2 K_\\mathrm{mol} + W_\\mathrm{mol}}{3 V},
where :math:`K_\\mathrm{mol}` is the center-of-mass kinetic energy summed for all molecules
in the system. If keyword `bathTemperature` is employed (see below), the instantaneous
kinetic energy is substituted by its equipartition-theorem average
:math:`\\left\\langle K_\\mathrm{mol} \\right\\rangle = 3 N_\\mathrm{mols} k_B T/2`,
where :math:`T` is the heat-bath temperature.
7. Report the center-of-mass kinetic energy (keyword: `molecularKineticEnergy`):
.. math::
K_\\mathrm{mol} = \\frac{1}{2} \\sum_{i=1}^{N_\\mathrm{mol}} M_i v_{\\mathrm{cm}, i}^2,
where :math:`N_\\mathrm{mol}` is the number of molecules in the system, :math:`M_i` is the
total mass of molecule `i`, and :math:`v_{\\mathrm{cm}, i}` is the center-of-mass velocity
of molecule `i`.
8. Report potential energies at multiple global parameter states (keyword: `globalParameterStates`):
Computes and reports the potential energy of the system at a number of provided global
parameter states.
9. Report global parameter values (keyword: `globalParameters`):
Reports the values of specified global parameters.
10. Report derivatives of energy with respect to global parameters (keyword: `energyDerivatives`):
Computes and reports derivatives of the potential energy of the system at the current
state with respect to specified global parameters.
11. Report values of collective variables (keyword: `collectiveVariables`)
Report the values of a set of collective variables.
12. Allow specification of an extra file for reporting (keyword: `extraFile`).
This can be used for replicating a report simultaneously to `sys.stdout` and to a file
using a unique reporter.
Keyword Args
------------
coulombEnergy : bool, optional, default=False
Whether to write the Coulomb contribution of the potential energy to the file.
atomicVirial : bool, optional, default=False
Whether to write the total atomic virial to the file.
nonbondedVirial : bool, optional, default=False
Whether to write the nonbonded contribution to the atomic virial to the file.
atomicPressure : bool, optional, default=False
Whether to write the internal atomic pressure to the file.
molecularVirial : bool, optional, default=False
Whether to write the molecular virial to the file.
molecularPressure : bool, optional, default=False
Whether to write the internal molecular pressure to the file.
molecularKineticEnergy : bool, optional, default=False
Whether to write the molecular center-of-mass kinetic energy to the file.
globalParameterStates : pandas.DataFrame_, optional, default=None
A DataFrame containing context global parameters (column names) and sets of values
thereof. If it is provided, then the potential energy will be reported for every state
these parameters define.
globalParameters : list(str), optional, default=None
A list of global parameter names. If it is provided, then the values of these parameters
will be reported.
energyDerivatives : list(str), optional, default=None
A list of global parameter names. If it is provided, then the derivatives of the
total potential energy with respect to these parameters will be reported. It is
necessary that the calculation of these derivatives has been activated beforehand
(see, for instance, CustomIntegrator_).
collectiveVariables : list(openmm.CustomCVForce), optional, default=None
A list of CustomCVForce_ objects. If it is provided, then the values of all collective
variables associated with these objects will be reported.
pressureComputer : :class:`~atomsmm.computers.PressureComputer`, optional, default=None
A computer designed to determine pressures and virials. This is mandatory if any keyword
related to virial or pressure is set as `True`.
extraFile : str or file, optional, default=None
Extra file to write to, specified as a file name or a file object.
"""
def __init__(self, file, reportInterval, **kwargs):
self._coulombEnergy = kwargs.pop('coulombEnergy', False)
self._atomicVirial = kwargs.pop('atomicVirial', False)
self._nonbondedVirial = kwargs.pop('nonbondedVirial', False)
self._atomicPressure = kwargs.pop('atomicPressure', False)
self._molecularVirial = kwargs.pop('molecularVirial', False)
self._molecularPressure = kwargs.pop('molecularPressure', False)
self._molecularKineticEnergy = kwargs.pop('molecularKineticEnergy', False)
self._globalParameterStates = kwargs.pop('globalParameterStates', None)
self._globalParameters = kwargs.pop('globalParameters', None)
self._energyDerivatives = kwargs.pop('energyDerivatives', None)
self._collectiveVariables = kwargs.pop('collectiveVariables', None)
self._pressureComputer = kwargs.pop('pressureComputer', None)
extra = kwargs.pop('extraFile', None)
if extra is None:
super().__init__(file, reportInterval, **kwargs)
else:
super().__init__(_MultiStream([file, extra]), reportInterval, **kwargs)
self._computing = any([self._coulombEnergy,
self._atomicVirial,
self._nonbondedVirial,
self._atomicPressure,
self._molecularVirial,
self._molecularPressure,
self._molecularKineticEnergy])
if self._computing:
if self._pressureComputer is not None and not isinstance(self._pressureComputer, PressureComputer):
raise InputError('keyword "pressureComputer" requires a PressureComputer instance')
self._needsPositions = True
self._needsForces = any([self._needsForces,
self._molecularVirial,
self._molecularPressure])
self._needsVelocities = any([self._needsVelocities,
self._molecularPressure,
self._atomicPressure,
self._molecularKineticEnergy])
self._backSteps = -sum([self._speed, self._elapsedTime, self._remainingTime])
def _add_item(self, lst, item):
if self._backSteps == 0:
lst.append(item)
else:
lst.insert(self._backSteps, item)
def _constructHeaders(self):
headers = super()._constructHeaders()
if self._coulombEnergy:
self._add_item(headers, 'Coulomb Energy (kJ/mole)')
if self._atomicVirial:
self._add_item(headers, 'Atomic Virial (kJ/mole)')
if self._nonbondedVirial:
self._add_item(headers, 'Nonbonded Virial (kJ/mole)')
if self._atomicPressure:
self._add_item(headers, 'Atomic Pressure (atm)')
if self._molecularVirial:
self._add_item(headers, 'Molecular Virial (kJ/mole)')
if self._molecularPressure:
self._add_item(headers, 'Molecular Pressure (atm)')
if self._molecularKineticEnergy:
self._add_item(headers, 'Molecular Kinetic Energy (kJ/mole)')
if self._globalParameterStates is not None:
for index in self._globalParameterStates.index:
self._add_item(headers, 'Energy[{}] (kJ/mole)'.format(index))
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(headers, name)
if self._energyDerivatives is not None:
for name in self._energyDerivatives:
self._add_item(headers, 'diff(E,{})'.format(name))
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for index in range(force.getNumCollectiveVariables()):
name = force.getCollectiveVariableName(index)
self._add_item(headers, name)
return headers
def _constructReportValues(self, simulation, state):
values = super()._constructReportValues(simulation, state)
if self._computing:
computer = self._pressureComputer
computer.import_configuration(state)
atomicVirial = computer.get_atomic_virial().value_in_unit(unit.kilojoules_per_mole)
if self._coulombEnergy:
coulombVirial = computer.get_coulomb_virial()
self._add_item(values, coulombVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicVirial:
self._add_item(values, atomicVirial)
if self._nonbondedVirial:
nonbondedVirial = computer.get_dispersion_virial() + computer.get_coulomb_virial()
self._add_item(values, nonbondedVirial.value_in_unit(unit.kilojoules_per_mole))
if self._atomicPressure:
atomicPressure = computer.get_atomic_pressure()
self._add_item(values, atomicPressure.value_in_unit(unit.atmospheres))
if self._molecularVirial or self._molecularPressure:
forces = state.getForces(asNumpy=True)
if self._molecularVirial:
molecularVirial = computer.get_molecular_virial(forces)
self._add_item(values, molecularVirial.value_in_unit(unit.kilojoules_per_mole))
if self._molecularPressure:
molecularPressure = computer.get_molecular_pressure(forces)
self._add_item(values, molecularPressure.value_in_unit(unit.atmospheres))
if self._molecularKineticEnergy:
molKinEng = computer.get_molecular_kinetic_energy()
self._add_item(values, molKinEng.value_in_unit(unit.kilojoules_per_mole))
if self._globalParameterStates is not None:
original = dict()
for name in self._globalParameterStates.columns:
original[name] = simulation.context.getParameter(name)
latest = original.copy()
for index, row in self._globalParameterStates.iterrows():
for name, value in row.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
latest[name] = value
energy = simulation.context.getState(getEnergy=True).getPotentialEnergy()
self._add_item(values, energy.value_in_unit(unit.kilojoules_per_mole))
for name, value in original.items():
if value != latest[name]:
simulation.context.setParameter(name, value)
if self._globalParameters is not None:
for name in self._globalParameters:
self._add_item(values, simulation.context.getParameter(name))
if self._energyDerivatives is not None:
mystate = simulation.context.getState(getParameterDerivatives=True)
derivative = mystate.getEnergyParameterDerivatives()
for name in self._energyDerivatives:
self._add_item(values, derivative[name])
if self._collectiveVariables is not None:
for force in self._collectiveVariables:
for cv in force.getCollectiveVariableValues(simulation.context):
self._add_item(values, cv)
return values
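# Hedged usage sketch (object names are illustrative and the PressureComputer
# constructor signature is an assumption): attach the reporter to an OpenMM
# Simulation to log pressure-related quantities alongside the standard ones.
#
#     computer = PressureComputer(simulation.system, simulation.topology, temperature)
#     simulation.reporters.append(
#         ExtendedStateDataReporter(sys.stdout, 100, step=True,
#                                   potentialEnergy=True, atomicPressure=True,
#                                   pressureComputer=computer))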
class XYZReporter(_AtomsMM_Reporter):
"""
Outputs to an XYZ-format file a series of frames containing the coordinates, velocities,
momenta, or forces on all atoms in a Simulation.
.. note::
Coordinates are expressed in nanometers, velocities in nanometer/picosecond, momenta in
dalton*nanometer/picosecond, and forces in dalton*nanometer/picosecond^2.
To use this reporter, create an XYZReporter object and append it to the Simulation's list of
reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def __init__(self, file, reportInterval, **kwargs):
self._output = kwargs.get('output', 'positions')
self._groups = kwargs.get('groups', None)
if self._output == 'positions':
self._unit = unit.angstroms
elif self._output == 'velocities':
self._unit = unit.angstroms/unit.picoseconds
elif self._output == 'momenta':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds
elif self._output == 'forces':
self._unit = unit.dalton*unit.angstroms/unit.picoseconds**2
else:
raise InputError('Unrecognizable keyword value')
super().__init__(file, reportInterval, **kwargs)
self._needsPositions = self._output == 'positions'
self._needsVelocities = self._output in ['velocities', 'momenta']
self._needsForces = self._output == 'forces'
def _initialize(self, simulation, state):
self._symbols = [atom.element.symbol for atom in simulation.topology.atoms()]
sys = simulation.system
self._N = sys.getNumParticles()
if self._output == 'momenta':
mass = [sys.getParticleMass(i).value_in_unit(unit.dalton) for i in range(self._N)]
self._mass = np.vstack([mass, mass, mass]).transpose()*unit.dalton
def _get_values(self, simulation, state):
if self._output == 'positions':
values = state.getPositions(asNumpy=True)
elif self._output == 'velocities':
values = state.getVelocities(asNumpy=True)
elif self._output == 'momenta':
values = self._mass*state.getVelocities(asNumpy=True)
elif self._groups is None:
values = state.getForces(asNumpy=True)
else:
new_state = simulation.context.getState(getForces=True, groups=self._groups)
values = new_state.getForces(asNumpy=True)
return values.value_in_unit(self._unit)
def _write(self, step, N, names, values):
print(N, file=self._out)
pd.DataFrame(index=names, data=values).to_csv(
self._out,
sep='\t',
header=[f'{self._output} in {self._unit} at time step {step}', '', ''],
)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
self._write(simulation.currentStep, self._N, self._symbols, values)
class CenterOfMassReporter(XYZReporter):
"""
Outputs to an XYZ-format file a series of frames containing the center-of-mass coordinates,
center-of-mass velocities, total momenta, or resultant forces on all molecules in a Simulation.
.. note::
Coordinates are expressed in nanometers, velocities in nanometer/picosecond, momenta in
dalton*nanometer/picosecond, and forces in dalton*nanometer/picosecond^2.
To use this reporter, create an CenterOfMassReporter object and append it to the Simulation's
list of reporters.
Keyword Args
------------
output : str, default='positions'
Which kind of info to report. Valid options are 'positions', 'velocities', 'momenta' and
'forces'.
groups : set(int), default=None
Which force groups to consider in the force calculations. If this is `None`, then all
force groups will be evaluated.
"""
def _initialize(self, simulation, state):
super()._initialize(simulation, state)
self._mols = _MoleculeTotalizer(simulation.context, simulation.topology)
def _generateReport(self, simulation, state):
values = self._get_values(simulation, state)
if self._output in ['positions', 'velocities']:
cm_values = self._mols.massFrac.dot(values)
else:
cm_values = self._mols.selection.dot(values)
self._write(simulation.currentStep, self._mols.nmols, self._mols.residues, cm_values)
class CustomIntegratorReporter(_AtomsMM_Reporter):
"""
Outputs global and per-DoF variables of a CustomIntegrator instance.
Keyword Args
------------
describeOnly : bool, optional, default=True
Whether to output only descriptive statistics that summarize the activated per-Dof
variables.
"""
def __init__(self, file, reportInterval, **kwargs):
super().__init__(file, reportInterval, **kwargs)
self._describeOnly = kwargs.pop('describeOnly', True)
self._variables = []
for key, value in kwargs.items():
if value is True:
self._variables.append(key)
if not self._variables:
raise InputError("No global or perDof variables have been passed")
def _initialize(self, simulation, state):
integrator = self._integrator = simulation.integrator
if not isinstance(integrator, openmm.CustomIntegrator):
raise Exception("simulation.integrator is not a CustomIntegrator")
self._globals = {}
for index in range(integrator.getNumGlobalVariables()):
variable = integrator.getGlobalVariableName(index)
if variable in self._variables:
self._globals[variable] = index
self._perDof = {}
for index in range(integrator.getNumPerDofVariables()):
variable = integrator.getPerDofVariableName(index)
if variable in self._variables:
self._perDof[variable] = index
if set(self._variables) != set(self._globals) | set(self._perDof):
raise InputError("Unknown variables have been passed")
def _generateReport(self, simulation, state):
for variable, index in self._globals.items():
value = self._integrator.getGlobalVariable(index)
print('{}\n{}'.format(variable, value), file=self._out)
for variable, index in self._perDof.items():
values = self._integrator.getPerDofVariable(index)
titles = ['{}.{}'.format(variable, dir) for dir in ['x', 'y', 'z']]
df = pd.DataFrame(data=np.array(values), columns=titles)
if self._describeOnly:
print(df.describe(), file=self._out)
else:
df.to_csv(self._out, sep='\t')
class ExpandedEnsembleReporter(_AtomsMM_Reporter):
"""
Performs an Expanded Ensemble simulation and reports the energies of multiple states.
Parameters
----------
states : pandas.DataFrame_
A DataFrame containing context global parameters (column names) and sets of values
thereof. The potential energy will be reported for every state these parameters define.
If one of the variables is named as `weight`, then its set of values will be assigned
to every state as an importance sampling weight. Otherwise, all states will have
identical weights. States that are supposed to only have their energies reported, with
no actual visits, can have their weights set to `-inf`.
temperature : unit.Quantity
The system temperature.
Keyword Args
------------
reportsPerExchange : int, optional, default=1
The number of reports between attempts to exchange the global parameter state, that is,
the exchange interval measured in units of report intervals.
"""
def __init__(self, file, reportInterval, states, temperature, **kwargs):
self._parameter_states = states.copy()
self._nstates = len(states.index)
self._reports_per_exchange = kwargs.pop('reportsPerExchange', 1)
super().__init__(file, reportInterval, **kwargs)
if 'weight' in states:
self._weights = self._parameter_states.pop('weight').values
finite = np.where(np.isfinite(self._weights))[0]
self._first_state = finite[0]
self._last_state = finite[-1]
else:
self._weights = np.zeros(self._nstates)
self._first_state = 0
self._last_state = self._nstates - 1
kT = (unit.MOLAR_GAS_CONSTANT_R*temperature).value_in_unit(unit.kilojoules_per_mole)
self._beta = 1.0/kT
self._nreports = 0
self._overall_visits = np.zeros(self._nstates, dtype=int)
import numpy as np
import matplotlib.pyplot as plt
from io import StringIO
import matplotlib.pylab as pylab
import pandas as pd
from operator import itemgetter
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import PolynomialFeatures#calling the polynomial feature that will calculate the powers of our features
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix
import seaborn as sns
from sklearn.metrics import accuracy_score
def make_summary_tables( res ):
""" takes a summary from statsmodel fitting results and turn it into 2 dataFrame.
- result_general_df : contains general info and fit quality metrics
- result_fit_df : coefficient values and confidence intervals
"""
# transform second table to csv and read this as a dataFrame
result_fit_df = pd.read_csv(StringIO( res.tables[1].as_csv() ), sep=",",index_col=0)
result_fit_df.columns = [i.strip() for i in result_fit_df.columns]
result_fit_df.index = [i.strip() for i in result_fit_df.index]
# the first table is trickier because the data is spread across columns, and there is a title line
L = res.tables[0].as_html().split('\n')
L.pop(1) # get rid of the title
tmp = pd.read_html('\n'.join(L) , header=None)[0] # read as a dataframe, but with 4 columns
names = list(tmp[0]) + list(tmp[2])[:-2] # columns 0 and 2 are metric names
values = list(tmp[1]) + list(tmp[3])[:-2] # columns 1 and 3 are the corresponding values
# NB : I exclude the last 2 elements which are empty
result_general_df = pd.DataFrame( {'Name': names , 'Value' : values}, index = names , columns=['Value'] )
return result_general_df , result_fit_df
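# Usage sketch (assumes `res` is the Summary object of a fitted statsmodels
# model, e.g. res = smf.ols('y ~ x', data=df).fit().summary()):
#     result_general_df, result_fit_df = make_summary_tables(res)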
def poly_fit(X,y):
poly = PolynomialFeatures(degree=3)#here we settle for a third degree polynomial object
X_poly=poly.fit_transform(X)#do the actual fit and transformation of data
print(X_poly[0,1])
lr=LinearRegression()
lr.fit(X_poly,y)
y_predict=lr.predict(X_poly)
R2=r2_score(y,y_predict)
MSE=mean_squared_error(y,y_predict)
fig, ax = plt.subplots(1, 1,figsize=(5,5))
ax.plot(X[:,0],y,'ko',label='Data')
ax.plot(X[:,0],y_predict,'r-.',label='Predicted')
ax.legend(loc='best',fontsize=10)
ax.set_title('R2={0:.2f}, MSE={1:.2f}'.format(R2,MSE),fontsize=13)
ax.set_xlabel("Number of pedestrians per ha per min",fontsize=13)
ax.set_ylabel("Breeding density(individuals per ha)",fontsize=13)
#plt.show()
print('fit param',lr.coef_[1:],lr.intercept_)
def poly_fit_train_test(X,y,seed,deg, ax = None):
"""
Takes:
- X : covariable matrix
- y : dependent variable matrix
- seed : random seed to determine train and test set
- deg : degree of the polynomial to fit
- ax = None : matplotlib ax to plot the fit (will not be plotted if None)
Returns:
( float , float ) : R-squared on the train and the test set
"""
poly = PolynomialFeatures(degree=deg)#polynomial features of the requested degree
X_poly=poly.fit_transform(X)#do the actual fit and transformation of data
# we split X and y into a test set and train set
# the train set will be used to fit
# the test set will be used to evaluate the fit
X_train, X_test, y_train, y_test = train_test_split(X_poly, y,
random_state=seed,test_size=0.5)
#print(X_poly)
lr=LinearRegression()
lr.fit(X_train,y_train)
# R2 with train set
y_train_predict=lr.predict(X_train)
R2_train=r2_score(y_train,y_train_predict)
MSE_train=mean_squared_error(y_train,y_train_predict)
# R2 with test set
y_test_predict=lr.predict(X_test)
R2=r2_score(y_test,y_test_predict)
MSE=mean_squared_error(y_test,y_test_predict)
if not ax is None :
# horrible code to sort the points
y_predict = lr.predict(X_poly)
xx , yy = zip( * sorted([[u,v] for u,v in zip(X_poly[:,1],y_predict)],key=itemgetter(0)) )
ax.plot( X_train[:,1], y_train , marker = 'o' , linestyle='None' , color = 'teal' , label = 'train' )
ax.plot( X_test[:,1], y_test , marker = 'o' , linestyle='None' , color = 'orange' , label = 'test' )
ax.plot(xx , yy ,'r--' , label='predicted')
ax.set_title('train : R2={0:.2f}, MSE={1:.2f}\n test : R2={2:.2f}, MSE={3:.2f}'.format(R2_train,MSE_train,
R2,MSE),
fontsize=13)
ax.legend()
return R2_train, R2
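# Usage sketch (X is a 2D covariate array and y the response, as in poly_fit above):
#     fig, ax = plt.subplots(figsize=(5, 5))
#     r2_train, r2_test = poly_fit_train_test(X, y, seed=0, deg=3, ax=ax)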
def make_meshgrid(x, y, h=.02):
"""Create a mesh of points to plot in
Parameters
----------
x: data to base x-axis meshgrid on
y: data to base y-axis meshgrid on
h: stepsize for meshgrid, optional
Returns
-------
xx, yy : ndarray
"""
x_min, x_max = x.min() - 1, x.max() + 1
y_min, y_max = y.min() - 1, y.max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
"""Plot the decision boundaries for a classifier.
Parameters
----------
ax: matplotlib axes object
clf: a classifier
xx: meshgrid ndarray
yy: meshgrid ndarray
params: dictionary of params to pass to contourf, optional
"""
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
out = ax.contourf(xx, yy, Z, **params)
return out
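# Usage sketch combining the two helpers above (assumes a fitted 2-feature
# classifier `clf` and a 2-column feature matrix X):
#     xx, yy = make_meshgrid(X[:, 0], X[:, 1])
#     fig, ax = plt.subplots()
#     plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)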
def countour_lr_kypho(X,y,df,p='l2',c=10**8):#(feature matrix, labels, source dataframe, penalty type, inverse regularization strength)
models = LogisticRegression(penalty = p,C=c,class_weight='balanced')
models = models.fit(X, y)
# title for the plots
titles = 'GLM Bernoulli'
# Set-up 2x2 grid for plotting.
fig, ax = plt.subplots(1, 1,figsize=(5,5))
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
y_pred_c=models.predict(X)
plot_contours(ax, models, xx, yy,
cmap=plt.cm.coolwarm, alpha=0.3)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=40, edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(titles+' accuracy= '+str(accuracy_score(y,
y_pred_c)))
ax.set_xlabel("age")
ax.set_ylabel("number")
plt.show()
print([[w,list(df.columns)[i]]for i,w in enumerate(models.coef_[0])]+['intercept',models.intercept_])
def countour_lr_kypho_train_test(df,y,seed,p='l2',c=10**8,plot=True):#(feature dataframe, labels, random seed, penalty type, inverse regularization strength, plot flag)
X_train, X_test, y_train, y_test = train_test_split(df, y,
random_state=seed)
scaler1 = StandardScaler()
scaler1.fit(df)
X_1=scaler1.transform(df)
scaler = StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
models = LogisticRegression(penalty = p,C=c,class_weight='balanced',solver='liblinear')
models = models.fit(X_train, y_train)
super_xx,super_yy=make_meshgrid(X_1[:, 0], X_1[:, 1])
# title for the plots
titles = 'GLM Bernoulli'
y_pred_train_c=models.predict(X_train)
y_pred_test_c=models.predict(X_test)
if plot==True:
# Set-up 2x2 grid for plotting.
fig, ax = plt.subplots(1, 2,figsize=(14,7))
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
X0, X1 = X_train[:, 0], X_train[:, 1]
xx, yy = make_meshgrid(X0, X1)
titles = 'GLM Bernoulli known'
y_pred_train_c=models.predict(X_train)
plot_contours(ax[0], models, super_xx, super_yy,
cmap=plt.cm.coolwarm, alpha=0.3)
ax[0].scatter(X0, X1, c=y_train, cmap=plt.cm.coolwarm, s=40, edgecolors='k')
ax[0].set_xlim(super_xx.min(), super_xx.max())
ax[0].set_ylim(super_yy.min(), super_yy.max())
ax[0].set_xticks(())
ax[0].set_yticks(())
ax[0].set_title(titles+' accuracy= '+str(accuracy_score(y_train,
y_pred_train_c)))
ax[0].set_xlabel("age")
ax[0].set_ylabel("number")
#y_pred_train_c=models.predict(X_train)
#annot_kws = {"ha": 'center',"va": 'center'}
#confusion_mc_c = confusion_matrix(y_train, y_pred_train_c)
#df_cm_c = pd.DataFrame(confusion_mc_c,
#index = ['Absent','Present'], columns = ['Absent','Present'])
#sns.heatmap(df_cm_c, annot=True,ax=ax[1,0],annot_kws=annot_kws)
#ax[1,0].set_ylabel("True label")
#ax[1,0].set_xlabel("Predicted label")
titles = 'GLM Bernoulli new'
X0, X1 = X_test[:, 0], X_test[:, 1]
xx, yy = make_meshgrid(X0, X1)
y_pred_test_c=models.predict(X_test)
plot_contours(ax[1], models, super_xx, super_yy,
cmap=plt.cm.coolwarm, alpha=0.3)
ax[1].scatter(X0, X1, c=y_test, cmap=plt.cm.coolwarm, s=40, edgecolors='k')
ax[1].set_xlim(super_xx.min(), super_xx.max())
ax[1].set_ylim(super_yy.min(), super_yy.max())
ax[1].set_xticks(())
ax[1].set_yticks(())
ax[1].set_title(titles+' accuracy= '+str(accuracy_score(y_test,
y_pred_test_c)))
ax[1].set_xlabel("age")
ax[1].set_ylabel("number")
#confusion_mc_c2 = confusion_matrix(y_test, y_pred_test_c)
#df_cm_c2 = pd.DataFrame(confusion_mc_c2,
#index = ['Absent','Present'], columns = ['Absent','Present'])
#sns.heatmap(df_cm_c2,ax=ax[1,1],annot=True,annot_kws=annot_kws)
#ax[1,1].set_ylabel("True label")
#ax[1,1].set_xlabel("Predicted label")
plt.tight_layout()
plt.show()
print([[w,list(df.columns)[i]]for i,w in enumerate(models.coef_[0])]+['intercept',models.intercept_])
return accuracy_score(y_train, y_pred_train_c),accuracy_score(y_test, y_pred_test_c)
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from scipy import interp
from itertools import cycle
from sklearn.preprocessing import StandardScaler
def countour_lr2(p,X,y,c,mult):
models = LogisticRegression(penalty = p,C=c, multi_class=mult)# Create the logistic regression object (with 3 main hyperparameters!!)
# penalty is either l1 or l2, C is how much weight we put on the regularization, multi_class is how we proceed with multiple classes
scaler=StandardScaler()
scaler.fit(X)
X=scaler.transform(X)
models = models.fit(X, y)
dico_color={0:'blue',1:'white',2:'red'}
titles = 'Logistic regression penalty='+str(p)+' C='+str(c)+'\n1./C=$\\alpha$='+str(1./c)
fig1, ax1 = plt.subplots(1,1,figsize=(10,5))
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
#plt.subplot(1,2,1)
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax1, models, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
ax1.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
interc=models.intercept_
wei=models.coef_
for i in range(len(interc)):
ax1.plot([xx.min(),xx.max()],[-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]],
color=dico_color[i],ls='--')
ax1.set_xlim(xx.min(), xx.max())
ax1.set_ylim(yy.min(), yy.max())
ax1.set_xticks(())
ax1.set_yticks(())
ax1.set_title(titles)
#plt.savefig('C:\\Users\\sebas\\Desktop\\cours_scikit-learn\\Iris_example_knn_1_'+str(i)+'.pdf')
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
#plt.subplot(1,2,1)
#X0, X1 = X_test[:, 0],X_test[:, 1]
#xx, yy = make_meshgrid(X0, X1)
X0, X1 = X[:, 0], X[:, 1]
xx = np.linspace(np.min(X0)-0.1, np.max(X0)+0.1, 100)
yy = np.linspace(np.min(X1)-0.1, np.max(X1)+0.1, 100).T
xx, yy = np.meshgrid(xx, yy)
Xfull = np.c_[xx.ravel(), yy.ravel()]
y_pred = models.predict(X)
accuracy = accuracy_score(y, y_pred)
#print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))
# View probabilities:
probas = models.predict_proba(Xfull)
n_classes = np.unique(y).size
fig,ax=plt.subplots(1,n_classes,figsize=(10,10*n_classes))
for k in range(n_classes):
#ax.subplot(1, n_classes, k + 1)
#plt.title("Class %d" % k)
#print(k,min(probas[:, k]))
if k == 0:
ax[k].set_ylabel('LogiReg')
imshow_handle = ax[k].imshow(probas[:, k].reshape((100, 100)),extent=(np.min(X0)-0.1, np.max(X0)+0.1, np.min(X1)-0.1, np.max(X1)+0.1), origin='lower',cmap='plasma')
ax[k].set_xticks(())
ax[k].set_xlim([np.min(X0)-0.1, np.max(X0)+0.1])
ax[k].set_ylim([np.min(X1)-0.1, np.max(X1)+0.1])
ax[k].set_yticks(())
ax[k].set_title('Class '+str(k))
for i in range(len(interc)):
ax[k].plot([np.min(X0)-0.1,np.max(X0)+0.1],[-(interc[i]+wei[i][0]*(np.min(X0)-0.1))/wei[i][1],-(interc[i]+wei[i][0]*(np.max(X0)+0.1))/wei[i][1]],
color=dico_color[i],ls='--')
idx = (y_pred == k)
if idx.any():
ax[k].scatter(X[idx, 0], X[idx, 1], marker='o', c=[dico_color[h] for h in y[idx]], edgecolor='k')
else:
ax[k].set_visible(False)
ax0 = plt.axes([0.15, 0.35, 0.7, 0.01])
plt.title("Probability")
plt.colorbar(imshow_handle, cax=ax0, orientation='horizontal')
plt.show()
def countour_lr(p,X,y,c,mult):
models = LogisticRegression(penalty = p,C=c, multi_class=mult)# Create the logistic regression object (with 3 main hyperparameters!!)
# penalty is either l1 or l2, C is how much weight we put on the regularization, multi_class is how we proceed with multiple classes
X_train, X_test, y_train, y_test = train_test_split(X, y,
random_state=0,stratify=y)
scaler=StandardScaler()
scaler.fit(X_train)
X_train=scaler.transform(X_train)
X_test=scaler.transform(X_test)
models = models.fit(X_train, y_train)
dico_color={0:'blue',1:'white',2:'red'}
titles = 'Logistic regression penalty='+str(p)+' C='+str(c)+'\n1./C=$\\alpha$='+str(1./c)
fig1, ax1 = plt.subplots(1,2,figsize=(10,5))
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
#plt.subplot(1,2,1)
X0, X1 = X_train[:, 0], X_train[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax1[0], models, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
ax1[0].scatter(X0, X1, c=y_train, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
interc=models.intercept_
wei=models.coef_
for i in range(len(interc)):
ax1[0].plot([xx.min(),xx.max()],[-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]],
color=dico_color[i],ls='--')
ax1[0].set_xlim(xx.min(), xx.max())
ax1[0].set_ylim(yy.min(), yy.max())
ax1[0].set_xticks(())
ax1[0].set_yticks(())
ax1[0].set_title(titles)
#plt.savefig('C:\\Users\\sebas\\Desktop\\cours_scikit-learn\\Iris_example_knn_1_'+str(i)+'.pdf')
#plt.subplots_adjust(wspace=0.4, hspace=0.4)
#plt.subplot(1,2,1)
#X0, X1 = X_test[:, 0],X_test[:, 1]
#xx, yy = make_meshgrid(X0, X1)
plot_contours(ax1[1], models, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
ax1[1].scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
interc=models.intercept_
wei=models.coef_
for i in range(len(interc)):
#print([-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]])
ax1[1].plot([xx.min(),xx.max()],[-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]],
color=dico_color[i],ls='--')
ax1[1].set_xlim(xx.min(), xx.max())
ax1[1].set_ylim(yy.min(), yy.max())
ax1[1].set_xticks(())
ax1[1].set_yticks(())
ax1[1].set_title(titles)
plt.show()
X=scaler.transform(X)
X0, X1 = X[:, 0], X[:, 1]
xx = np.linspace(np.min(X0)-0.1, np.max(X0)+0.1, 100)
import unittest.mock as mock
import numpy as np
import pytest
from smqtk_detection import DetectImageObjects
from xaitk_saliency.interfaces.gen_object_detector_blackbox_sal import GenerateObjectDetectorBlackboxSaliency
from xaitk_saliency.exceptions import ShapeMismatchError
def test_generate_checks_success() -> None:
"""
Tests successful passage though the wrapper method.
"""
m_impl = mock.Mock(spec=GenerateObjectDetectorBlackboxSaliency)
# mock implementation result, number of maps should match number of input detections
m_impl._generate.return_value = np.empty((5, 256, 256))
m_detector = mock.Mock(spec=DetectImageObjects)
# test reference detection inputs with matching lengths
test_bboxes = np.ones((5, 4), dtype=float)
test_scores = np.ones((5, 3), dtype=float)
test_objectness = np.ones((5,), dtype=float)
# a 2-dimensional image (just HxW) should work
test_image = np.ones((256, 256), dtype=np.uint8)
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
test_bboxes,
test_scores,
m_detector,
)
m_impl._generate.assert_called_with(
test_image,
test_bboxes,
test_scores,
m_detector,
None # no objectness passed
)
# a multi-channel image should work with any channel dimension size
test_image = np.ones((256, 256, 7), dtype=np.uint8)
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
test_bboxes,
test_scores,
m_detector,
)
m_impl._generate.assert_called_with(
test_image,
test_bboxes,
test_scores,
m_detector,
None # no objectness passed
)
# With objectness
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
test_bboxes,
test_scores,
m_detector,
test_objectness
)
m_impl._generate.assert_called_with(
test_image,
test_bboxes,
test_scores,
m_detector,
test_objectness
)
def test_generate_checks_image_shape() -> None:
"""
Test that the input image shape conforms to our assumption.
"""
m_impl = mock.Mock(spec=GenerateObjectDetectorBlackboxSaliency)
m_detector = mock.Mock(spec=DetectImageObjects)
m_bboxes = mock.Mock(spec=np.ndarray)
m_scores = mock.Mock(spec=np.ndarray)
# a single vector is not considered an image
test_image = np.ones((256,), dtype=np.uint8)
with pytest.raises(
ValueError,
match=r"^Input image matrix has an unexpected number of dimensions: 1$"
):
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
m_bboxes,
m_scores,
m_detector,
)
# an image with more than 3 dimensions
test_image = np.ones((256, 256, 3, 2), dtype=np.uint8)
with pytest.raises(
ValueError,
match=r"^Input image matrix has an unexpected number of dimensions: 4$"
):
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
m_bboxes,
m_scores,
m_detector,
)
def test_generate_checks_detection_inputs_length() -> None:
"""
Test that the reference detection inputs must all have the same length.
"""
m_impl = mock.Mock(spec=GenerateObjectDetectorBlackboxSaliency)
m_detector = mock.Mock(spec=DetectImageObjects)
test_image = np.ones((64, 64))
# Mismatched number of bboxes and scores, without objectness
test_bboxes = np.ones((4, 4), dtype=float)
test_scores = np.ones((5, 3), dtype=float)
with pytest.raises(
ValueError,
match=r"^Number of input bounding boxes and scores do not match: "
r"\(bboxes\) 4 != 5 \(scores\)$"
):
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
test_bboxes,
test_scores,
m_detector
)
# Mismatched number of bboxes and scores, with objectness
test_bboxes = np.ones((5, 4), dtype=float)
test_scores = np.ones((4, 3), dtype=float)
test_objectness = np.ones((5,), dtype=float)
with pytest.raises(
ValueError,
match=r"^Number of input bounding boxes, scores, and objectness "
r"scores do not match: \(bboxes\) 5 != 4 \(scores\) and/or "
r"\(bboxes\) 5 != 5 \(objectness\)$"
):
GenerateObjectDetectorBlackboxSaliency.generate(
m_impl,
test_image,
test_bboxes,
test_scores,
m_detector,
test_objectness
)
# Different number of objectness scores
test_bboxes = np.ones((5, 4), dtype=float)
#!/usr/bin/env python
# coding: utf-8
# # Recommendations with IBM
#
# In this notebook, you will be putting your recommendation skills to use on real data from the IBM Watson Studio platform.
#
#
# You may either submit your notebook through the workspace here, or you may work from your local machine and submit through the next page. Either way, ensure that your code passes the project [RUBRIC](https://review.udacity.com/#!/rubrics/2322/view). **Please save regularly.**
#
# By following the table of contents, you will build out a number of different methods for making recommendations that can be used for different situations.
#
#
# ## Table of Contents
#
# I. [Exploratory Data Analysis](#Exploratory-Data-Analysis)<br>
# II. [Rank Based Recommendations](#Rank)<br>
# III. [User-User Based Collaborative Filtering](#User-User)<br>
# IV. [Content Based Recommendations (EXTRA - NOT REQUIRED)](#Content-Recs)<br>
# V. [Matrix Factorization](#Matrix-Fact)<br>
# VI. [Extras & Concluding](#conclusions)
#
# At the end of the notebook, you will find directions for how to submit your work. Let's get started by importing the necessary libraries and reading in the data.
# In[56]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import project_tests as t
import pickle
get_ipython().run_line_magic('matplotlib', 'inline')
df = pd.read_csv('data/user-item-interactions.csv')
df_content = pd.read_csv('data/articles_community.csv')
del df['Unnamed: 0']
del df_content['Unnamed: 0']
# Show df to get an idea of the data
df.head()
# In[57]:
df.shape
# In[58]:
df.describe()
# In[59]:
df.info()
# In[60]:
# Show df_content to get an idea of the data
df_content.head()
# In[61]:
df_content.shape
# In[62]:
df.isnull().sum()
# In[63]:
df_content.describe()
# In[64]:
df_content.info()
# In[65]:
df_content.isnull().sum()
# ### <a class="anchor" id="Exploratory-Data-Analysis">Part I : Exploratory Data Analysis</a>
#
# Use the dictionary and cells below to provide some insight into the descriptive statistics of the data.
#
# `1.` What is the distribution of how many articles a user interacts with in the dataset? Provide a visual and descriptive statistics to assist with giving a look at the number of times each user interacts with an article.
# In[66]:
user_interaction = df.groupby('email').count()['article_id'].sort_values(ascending=False)
user_interaction
# In[67]:
# Visualization of User-articleS interaction
user_interaction.hist()
plt.title('Distribution of Article Interactions')
# In[68]:
# descriptive stats
user_interaction.describe()
# In[69]:
user_interaction.median()
# In[70]:
# Fill in the median and maximum number of user_article interactios below
median_val = 3 # 50% of individuals interact with ____ number of articles or fewer.
max_views_by_user = 364 # The maximum number of user-article interactions by any 1 user is 364.
# `2.` Explore and remove duplicate articles from the **df_content** dataframe.
# In[71]:
# Find and explore duplicate articles
df_content.nunique()
# In[72]:
df_content.duplicated("article_id").sum()
# In[73]:
# Remove any rows that have the same article_id - only keep the first
df_content.drop_duplicates(subset='article_id', keep='first', inplace=True)
# In[74]:
df_content.duplicated("article_id").sum()
# In[75]:
df_content.shape
# `3.` Use the cells below to find:
#
# **a.** The number of unique articles that have an interaction with a user.
# **b.** The number of unique articles in the dataset (whether they have any interactions or not).<br>
# **c.** The number of unique users in the dataset. (excluding null values) <br>
# **d.** The number of user-article interactions in the dataset.
# In[76]:
df.article_id.nunique()
# In[77]:
df_content.info()
# In[78]:
df.email.nunique()
# In[79]:
df.shape
# In[80]:
unique_articles = 714 # The number of unique articles that have at least one interaction
total_articles = 1051 # The number of unique articles on the IBM platform
unique_users = 5148 # The number of unique users
user_article_interactions = 45993 # The number of user-article interactions
# `4.` Use the cells below to find the most viewed **article_id**, as well as how often it was viewed. After talking to the company leaders, the `email_mapper` function was deemed a reasonable way to map users to ids. There were a small number of null values, and it was found that all of these null values likely belonged to a single user (which is how they are stored using the function below).
# In[81]:
df.groupby(["article_id"])["email"].count().sort_values(ascending=False).head()
# In[82]:
most_viewed_article_id = "1429.0" # The most viewed article in the dataset as a string with one value following the decimal
max_views = 937 # The most viewed article in the dataset was viewed how many times?
# In[83]:
df.head()
# In[84]:
## No need to change the code here - this will be helpful for later parts of the notebook
# Run this cell to map the user email to a user_id column and remove the email column
def email_mapper():
coded_dict = dict()
cter = 1
email_encoded = []
for val in df['email']:
if val not in coded_dict:
coded_dict[val] = cter
cter+=1
email_encoded.append(coded_dict[val])
return email_encoded
email_encoded = email_mapper()
del df['email']
df['user_id'] = email_encoded
# show header
df.head()
# In[85]:
## If you stored all your results in the variable names above,
## you shouldn't need to change anything in this cell
sol_1_dict = {
'`50% of individuals have _____ or fewer interactions.`': median_val,
'`The total number of user-article interactions in the dataset is ______.`': user_article_interactions,
'`The maximum number of user-article interactions by any 1 user is ______.`': max_views_by_user,
'`The most viewed article in the dataset was viewed _____ times.`': max_views,
'`The article_id of the most viewed article is ______.`': most_viewed_article_id,
'`The number of unique articles that have at least 1 rating ______.`': unique_articles,
'`The number of unique users in the dataset is ______`': unique_users,
'`The number of unique articles on the IBM platform`': total_articles
}
# Test your dictionary against the solution
t.sol_1_test(sol_1_dict)
# ### <a class="anchor" id="Rank">Part II: Rank-Based Recommendations</a>
#
# Unlike in the earlier lessons, we don't actually have ratings for whether a user liked an article or not. We only know that a user has interacted with an article. In these cases, the popularity of an article can really only be based on how often an article was interacted with.
#
# `1.` Fill in the function below to return the **n** top articles ordered with most interactions as the top. Test your function using the tests below.
# In[86]:
def get_top_articles(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article titles
'''
# Your code here
top_articles = list(df.groupby(['title'])['article_id'].count().sort_values(ascending=False).head(n).index)
return top_articles # Return the top article titles from df (not df_content)
def get_top_article_ids(n, df=df):
'''
INPUT:
n - (int) the number of top articles to return
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
top_articles - (list) A list of the top 'n' article ids
'''
# Your code here
top_articles = list(df['article_id'].value_counts().head(n).index)
return top_articles # Return the top article ids
# In[87]:
print(get_top_articles(10))
print(get_top_article_ids(10))
# In[88]:
# Test your function by returning the top 5, 10, and 20 articles
top_5 = get_top_articles(5)
top_10 = get_top_articles(10)
top_20 = get_top_articles(20)
# Test each of your three lists from above
t.sol_2_test(get_top_articles)
# ### <a class="anchor" id="User-User">Part III: User-User Based Collaborative Filtering</a>
#
#
# `1.` Use the function below to reformat the **df** dataframe to be shaped with users as the rows and articles as the columns.
#
# * Each **user** should only appear in each **row** once.
#
#
# * Each **article** should only show up in one **column**.
#
#
# * **If a user has interacted with an article, then place a 1 where the user-row meets for that article-column**. It does not matter how many times a user has interacted with the article, all entries where a user has interacted with an article should be a 1.
#
#
# * **If a user has not interacted with an item, then place a zero where the user-row meets for that article-column**.
#
# Use the tests to make sure the basic structure of your matrix matches what is expected by the solution.
# In[89]:
# create the user-article matrix with 1's and 0's
def create_user_item_matrix(df):
'''
INPUT:
df - pandas dataframe with article_id, title, user_id columns
OUTPUT:
user_item - user item matrix
Description:
Return a matrix with user ids as rows and article ids on the columns with 1 values where a user interacted with
an article and a 0 otherwise
'''
# Fill in the function here
user_item=df.groupby(by=['user_id', 'article_id']).agg(lambda x: 1).unstack().fillna(0)
return user_item # return the user_item matrix
user_item = create_user_item_matrix(df)
# In[90]:
## Tests: You should just need to run this cell. Don't change the code.
assert user_item.shape[0] == 5149, "Oops! The number of users in the user-article matrix doesn't look right."
assert user_item.shape[1] == 714, "Oops! The number of articles in the user-article matrix doesn't look right."
assert user_item.sum(axis=1)[1] == 36, "Oops! The number of articles seen by user 1 doesn't look right."
print("You have passed our quick tests! Please proceed!")
# In[91]:
user_item.head()
# `2.` Complete the function below which should take a user_id and provide an ordered list of the most similar users to that user (from most similar to least similar). The returned result should not contain the provided user_id, as we know that each user is similar to him/herself. Because the results for each user here are binary, it (perhaps) makes sense to compute similarity as the dot product of two users.
#
# Use the tests to test your function.
# In[97]:
def find_similar_users(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user_id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
similar_users - (list) an ordered list where the closest users (largest dot product users)
are listed first
Description:
Computes the similarity of every pair of users based on the dot product
Returns an ordered list of user ids, from most to least similar
'''
# compute similarity of each user to the provided user
similarity = {}
for uid in user_item.index:
similarity[uid] = np.dot(user_item.loc[user_id, :], user_item.loc[uid, :])
# sort by similarity
similarity_sort = sorted(similarity.items(), key=lambda kv: kv[1], reverse=True)
# create list of just the ids
most_similar_users = [key for (key, value) in similarity_sort]
# remove the own user's id
most_similar_users.remove(user_id)
return most_similar_users # return a list of the users in order from most to least similar
# In[98]:
# Do a spot check of your function
print("The 10 most similar users to user 1 are: {}".format(find_similar_users(1)[:10]))
print("The 5 most similar users to user 3933 are: {}".format(find_similar_users(3933)[:5]))
print("The 3 most similar users to user 46 are: {}".format(find_similar_users(46)[:3]))
# `3.` Now that you have a function that provides the most similar users to each user, you will want to use these users to find articles you can recommend. Complete the functions below to return the articles you would recommend to each user.
# In[99]:
def get_article_names(article_ids, df=df):
'''
INPUT:
article_ids - (list) a list of article ids
df - (pandas dataframe) df as defined at the top of the notebook
OUTPUT:
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the title column)
'''
# Your code here
#article_names = df[df['article_id'].isin(article_ids)]['title'].unique().tolist()
article_names = df[df['article_id'].isin(article_ids)]['title'].drop_duplicates().values.tolist()
return article_names # Return the article names associated with list of article ids
def get_user_articles(user_id, user_item=user_item):
'''
INPUT:
user_id - (int) a user id
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
article_ids - (list) a list of the article ids seen by the user
article_names - (list) a list of article names associated with the list of article ids
(this is identified by the doc_full_name column in df_content)
Description:
Provides a list of the article_ids and article titles that have been seen by a user
'''
# Your code here
article_ids = [str(id) for id in list(user_item.loc[user_id][user_item.loc[user_id]==1].title.index)]
article_names = get_article_names(article_ids)
return article_ids, article_names # return the ids and names
def user_user_recs(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
Users who are the same closeness are chosen arbitrarily as the 'next' user
For the user where the number of recommended articles starts below m
and ends exceeding m, the last items are chosen arbitrarily
'''
# Your code here
recs = []
most_similar_users = find_similar_users(user_id)
viewed_article_ids_self, viewed_article_names_self = get_user_articles(user_id)
for user in most_similar_users:
article_ids, article_names = get_user_articles(user)
for article_id in article_ids:
if article_id not in viewed_article_ids_self:
if article_id not in recs and len(recs) < m:
recs.append(article_id)
if len(recs) >= m:
break
if len(recs) >= m:
break
return recs # return your recommendations for this user_id
# In[100]:
# Check Results
get_article_names(user_user_recs(1, 10)) # Return 10 recommendations for user 1
# In[101]:
# Test your functions here - No need to change this code - just run this cell
assert set(get_article_names(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_article_names(['1320.0', '232.0', '844.0'])) == set(['housing (2015): united states demographic measures','self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook']), "Oops! Your the get_article_names function doesn't work quite how we expect."
assert set(get_user_articles(20)[0]) == set(['1320.0', '232.0', '844.0'])
assert set(get_user_articles(20)[1]) == set(['housing (2015): united states demographic measures', 'self-service data preparation with ibm data refinery','use the cloudant-spark connector in python notebook'])
assert set(get_user_articles(2)[0]) == set(['1024.0', '1176.0', '1305.0', '1314.0', '1422.0', '1427.0'])
assert set(get_user_articles(2)[1]) == set(['using deep learning to reconstruct high-resolution audio', 'build a python app on the streaming analytics service', 'gosales transactions for naive bayes model', 'healthcare python streaming application demo', 'use r dataframes & ibm watson natural language understanding', 'use xgboost, scikit-learn & ibm watson machine learning apis'])
print("If this is all you see, you passed all of our tests! Nice job!")
# `4.` Now we are going to improve the consistency of the **user_user_recs** function from above.
#
# * Instead of choosing arbitrarily among users who are all equally close to a given user - choose the users that have the most total article interactions before choosing those with fewer article interactions.
#
#
# * Instead of arbitrarily choosing articles from the user where the number of recommended articles starts below m and ends exceeding m, choose the articles with the most total interactions before choosing those with fewer total interactions. This ranking should be what would be obtained from the **top_articles** function you wrote earlier.
# In[102]:
def get_top_sorted_users(user_id, df=df, user_item=user_item):
'''
INPUT:
user_id - (int)
df - (pandas dataframe) df as defined at the top of the notebook
user_item - (pandas dataframe) matrix of users by articles:
1's when a user has interacted with an article, 0 otherwise
OUTPUT:
neighbors_df - (pandas dataframe) a dataframe with:
neighbor_id - is a neighbor user_id
similarity - measure of the similarity of each user to the provided user_id
num_interactions - the number of articles viewed by the user
Other Details - sort the neighbors_df by the similarity and then by number of interactions where
highest of each is higher in the dataframe
'''
# Your code here
neighbors_df = pd.DataFrame(columns=['neighbor_id', 'similarity', 'num_interactions'])
for user in user_item.index:
if user == user_id:
continue
neighbors_df.loc[user] = [user, np.dot(user_item.loc[user_id, :], user_item.loc[user, :]),
df[df['user_id']==user]['article_id'].count()]
neighbors_df.sort_values(by=['similarity', 'num_interactions'], ascending=False, inplace=True)
return neighbors_df # Return the dataframe specified in the doc_string
def user_user_recs_part2(user_id, m=10):
'''
INPUT:
user_id - (int) a user id
m - (int) the number of recommendations you want for the user
OUTPUT:
recs - (list) a list of recommendations for the user by article id
rec_names - (list) a list of recommendations for the user by article title
Description:
Loops through the users based on closeness to the input user_id
For each user - finds articles the user hasn't seen before and provides them as recs
Does this until m recommendations are found
Notes:
* Choose the users that have the most total article interactions
before choosing those with fewer article interactions.
* Choose the articles with the most total interactions
before choosing those with fewer total interactions.
'''
# Your code here
recs = []
neighbors_df = get_top_sorted_users(user_id)
the_user_articles, the_article_names = get_user_articles(user_id)
for user in neighbors_df['neighbor_id']:
article_ids, article_names = get_user_articles(user)
for id in article_ids:
if id not in the_user_articles:
recs.append(id)
if len(recs) >= m:
break
if len(recs) >= m:
break
if len(recs) < m:
for id in [str(id) for id in get_top_article_ids(100)]:
if id not in the_user_articles:
recs.append(id)
if len(recs) >= m:
break
rec_names = get_article_names(recs)
return recs, rec_names
# In[103]:
# Quick spot check - don't change this code - just use it to test your functions
rec_ids, rec_names = user_user_recs_part2(20, 10)
print("The top 10 recommendations for user 20 are the following article ids:")
print(rec_ids)
print()
print("The top 10 recommendations for user 20 are the following article names:")
print(rec_names)
# `5.` Use your functions from above to correctly fill in the solutions to the dictionary below. Then test your dictionary against the solution. Provide the code you need to answer each following the comments below.
# In[105]:
get_top_sorted_users(1).iloc[0]
# In[107]:
get_top_sorted_users(1).neighbor_id.values[0]
# In[106]:
get_top_sorted_users(131).iloc[9]
# In[110]:
### Tests with a dictionary of results
user1_most_sim = 3933 # Find the user that is most similar to user 1
user131_10th_sim = 242 # Find the 10th most similar user to user 131
# In[111]:
## Dictionary Test Here
sol_5_dict = {
'The user that is most similar to user 1.': user1_most_sim,
'The user that is the 10th most similar to user 131': user131_10th_sim,
}
t.sol_5_test(sol_5_dict)
# `6.` If we were given a new user, which of the above functions would you be able to use to make recommendations? Explain. Can you think of a better way we might make recommendations? Use the cell below to explain a better method for new users.
# **Provide your response here.**
# `7.` Using your existing functions, provide the top 10 recommended articles you would provide for the a new user below. You can test your function against our thoughts to make sure we are all on the same page with how we might make a recommendation.
# In[114]:
new_user = '0.0'
# What would your recommendations be for this new user '0.0'? As a new user, they have no observed articles.
# Provide a list of the top 10 article ids you would give to a brand new user
new_user_recs = [str(ids) for ids in get_top_article_ids(10)] # Your recommendations here
# In[115]:
assert set(new_user_recs) == set(['1314.0','1429.0','1293.0','1427.0','1162.0','1364.0','1304.0','1170.0','1431.0','1330.0']), "Oops! It makes sense that in this case we would want to recommend the most popular articles, because we don't know anything about these users."
print("That's right! Nice job!")
# ### <a class="anchor" id="Content-Recs">Part IV: Content Based Recommendations (EXTRA - NOT REQUIRED)</a>
#
# Another method we might use to make recommendations is to perform a ranking of the highest ranked articles associated with some term. You might consider content to be the **doc_body**, **doc_description**, or **doc_full_name**. There isn't one way to create a content based recommendation, especially considering that each of these columns hold content related information.
#
# `1.` Use the function body below to create a content based recommender. Since there isn't one right answer for this recommendation tactic, no test functions are provided. Feel free to change the function inputs if you decide you want to try a method that requires more input values. The input values are currently set with one idea in mind that you may use to make content based recommendations. One additional idea is that you might want to choose the most popular recommendations that meet your 'content criteria', but again, there is a lot of flexibility in how you might make these recommendations.
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# In[ ]:
def make_content_recs():
'''
INPUT:
OUTPUT:
'''
# `2.` Now that you have put together your content-based recommendation system, use the cell below to write a summary explaining how your content based recommender works. Do you see any possible improvements that could be made to your function? Is there anything novel about your content based recommender?
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# **Write an explanation of your content based recommendation system here.**
# `3.` Use your content-recommendation system to make recommendations for the below scenarios based on the comments. Again no tests are provided here, because there isn't one right answer that could be used to find these content based recommendations.
#
# ### This part is NOT REQUIRED to pass this project. However, you may choose to take this on as an extra way to show off your skills.
# In[ ]:
# make recommendations for a brand new user
# make recommendations for a user who has only interacted with article id '1427.0'
# ### <a class="anchor" id="Matrix-Fact">Part V: Matrix Factorization</a>
#
# In this part of the notebook, you will use matrix factorization to make article recommendations to the users on the IBM Watson Studio platform.
#
# `1.` You should have already created a **user_item** matrix above in **question 1** of **Part III** above. This first question here will just require that you run the cells to get things set up for the rest of **Part V** of the notebook.
# In[116]:
# Load the matrix here
user_item_matrix = pd.read_pickle('user_item_matrix.p')
# In[117]:
# quick look at the matrix
user_item_matrix.head()
# `2.` In this situation, you can use Singular Value Decomposition from [numpy](https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.linalg.svd.html) on the user-item matrix. Use the cell to perform SVD, and explain why this is different than in the lesson.
# In[118]:
# Perform SVD on the User-Item Matrix Here
u, s, vt = np.linalg.svd(user_item_matrix) # use the built in to get the three matrices
# **Provide your response here.**
# `3.` Now for the tricky part, how do we choose the number of latent features to use? Running the below cell, you can see that as the number of latent features increases, we obtain a lower error rate on making predictions for the 1 and 0 values in the user-item matrix. Run the cell below to get an idea of how the accuracy improves as we increase the number of latent features.
# In[119]:
num_latent_feats = np.arange(10,700+10,20)
sum_errs = []
for k in num_latent_feats:
# restructure with k latent features
s_new, u_new, vt_new = np.diag(s[:k]), u[:, :k], vt[:k, :]
# take dot product
user_item_est = np.around(np.dot(np.dot(u_new, s_new), vt_new))
# compute error for each prediction to actual value
diffs = np.subtract(user_item_matrix, user_item_est)
# total errors and keep track of them
err = np.sum(np.sum(np.abs(diffs)))
sum_errs.append(err)
plt.plot(num_latent_feats, 1 - np.array(sum_errs)/df.shape[0]);
plt.xlabel('Number of Latent Features');
plt.ylabel('Accuracy');
plt.title('Accuracy vs. Number of Latent Features');
# `4.` From the above, we can't really be sure how many features to use, because simply having a better way to predict the 1's and 0's of the matrix doesn't exactly give us an indication of if we are able to make good recommendations. Instead, we might split our dataset into a training and test set of data, as shown in the cell below.
#
# Use the code from question 3 to understand the impact on accuracy of the training and test sets of data with different numbers of latent features. Using the split below:
#
# * How many users can we make predictions for in the test set?
# * How many users are we not able to make predictions for because of the cold start problem?
# * How many articles can we make predictions for in the test set?
# * How many articles are we not able to make predictions for because of the cold start problem?
# In[120]:
df_train = df.head(40000)
df_test = df.tail(5993)
def create_test_and_train_user_item(df_train, df_test):
'''
INPUT:
df_train - training dataframe
df_test - test dataframe
OUTPUT:
user_item_train - a user-item matrix of the training dataframe
(unique users for each row and unique articles for each column)
user_item_test - a user-item matrix of the testing dataframe
(unique users for each row and unique articles for each column)
test_idx - all of the test user ids
test_arts - all of the test article ids
'''
# Your code here
user_item_train=create_user_item_matrix(df_train)
user_item_test=create_user_item_matrix(df_test)
test_idx=user_item_test.index
test_arts=user_item_test.columns
return user_item_train, user_item_test, test_idx, test_arts
user_item_train, user_item_test, test_idx, test_arts = create_test_and_train_user_item(df_train, df_test)
# In[122]:
user_item_train.head(5)
# In[124]:
print(u.shape, s.shape, vt.shape)
# In[123]:
# Number of users in both sets
len(user_item_test.index.intersection(user_item_train.index))
# In[137]:
# users in the test set we are not able to make predictions for (cold start)
len(df_test.user_id.unique()) - len(np.intersect1d(df_train.user_id.unique(),df_test.user_id.unique()))
# In[138]:
# articles we can make predictions for in the test set
len(np.intersect1d(df_train.article_id.unique(),df_test.article_id.unique()))
# In[139]:
# articles in the test set we are not able to make predictions for (cold start)
len(df_test.article_id.unique()) - len(np.intersect1d(df_train.article_id.unique(),df_test.article_id.unique()))
# In[136]:
# Replace the values in the dictionary below
a = 662
b = 574
c = 20
d = 0
sol_4_dict = {
'How many users can we make predictions for in the test set?':c, # letter here,
'How many users in the test set are we not able to make predictions for because of the cold start problem?': a , # letter here,
'How many movies can we make predictions for in the test set?': b, # letter here,
'How many movies in the test set are we not able to make predictions for because of the cold start problem?': d # letter here
}
t.sol_4_test(sol_4_dict)
# `5.` Now use the **user_item_train** dataset from above to find U, S, and V transpose using SVD. Then find the subset of rows in the **user_item_test** dataset that you can predict using this matrix decomposition with different numbers of latent features to see how many features makes sense to keep based on the accuracy on the test data. This will require combining what was done in questions `2` - `4`.
#
# Use the cells below to explore how well SVD works towards making predictions for recommendations on the test data.
# In[140]:
# fit SVD on the user_item_train matrix
u_train, s_train, vt_train = np.linalg.svd(user_item_train) # fit svd similar to above then use the cells below
# In[141]:
u_train.shape, s_train.shape, vt_train.shape
# In[ ]:
# Use these cells to see how well you can use the training
# decomposition to predict on test data
# In[149]:
num_latent_feats = np.arange(10,700+10,20)
sum_errs_train = []
sum_errs_test = []
user_item_test = user_item_test.loc[user_item_test.index.isin(user_item_train.index), user_item_test.columns.isin(user_item_train.columns)]
u_test = u_train[user_item_train.index.isin(user_item_test.index), :]
vt_test = vt_train[:, user_item_train.columns.isin(test_arts)]
for k in num_latent_feats:
# restructure with k latent features
s_new_train, u_new_train, vt_new_train = np.diag(s_train[:k]), u_train[:, :k], vt_train[:k, :]
import numpy as np
import sys
sys.path.append('..')
from scipy.stats import norm, poisson, uniform
class ToyPoissonLoader:
def __init__(self, mean_instrumental=110, std_instrumental=15, low_int=0, high_int=20, true_param=10.0,
out_dir='toy_poisson/', background_val=100, marginal=False, size_marginal=1000,
empirical_marginal=True):
self.mean_instrumental = mean_instrumental
self.std_instrumental = std_instrumental
self.low_int = low_int
self.high_int = high_int
self.background_val = background_val
self.g_distribution = norm(loc=self.mean_instrumental, scale=self.std_instrumental)
self.regen_flag = False
self.out_directory = out_dir
self.d = 1
self.d_obs = 1
self.num_grid = 51
self.grid = np.linspace(start=self.low_int + 0.001, stop=self.high_int, num=self.num_grid)
self.num_pred_grid = 41
self.pred_grid = np.linspace(start=self.low_int, stop=self.high_int, num=self.num_pred_grid)
self.true_param = true_param
self.empirical_marginal = empirical_marginal
if marginal:
self.compute_marginal_reference(size_marginal)
def compute_marginal_reference(self, size_marginal):
theta_vec_marg = self.sample_param_values(sample_size=size_marginal)
marginal_sample = np.random.poisson(lam=self.background_val + theta_vec_marg, size=size_marginal)
mean_mle = np.average(marginal_sample)
std_mle = np.std(marginal_sample)
self.mean_instrumental = mean_mle
self.std_instrumental = std_mle
self.g_distribution = norm(loc=mean_mle, scale=std_mle)
def sample_empirical_marginal(self, sample_size):
theta_vec_marg = self.sample_param_values(sample_size=sample_size)
return np.apply_along_axis(arr=theta_vec_marg.reshape(-1, self.d), axis=1,
func1d=lambda row: self.sample_sim(
sample_size=1, true_param=row)).reshape(-1, self.d_obs)
def sample_sim(self, sample_size, true_param):
return np.random.poisson(lam=self.background_val + true_param, size=sample_size)
def sample_param_values(self, sample_size):
return np.random.uniform(low=self.low_int, high=self.high_int, size=sample_size)
def generate_sample(self, sample_size, p=0.5, **kwargs):
theta_vec = self.sample_param_values(sample_size=sample_size)
bern_vec = np.random.binomial(n=1, p=p, size=sample_size)
concat_mat = np.hstack((theta_vec.reshape(-1, self.d), bern_vec.reshape(-1, 1)))
if self.empirical_marginal:
sample = np.apply_along_axis(arr=concat_mat, axis=1,
func1d=lambda row: self.sample_sim(
sample_size=1, true_param=row[:self.d]) if row[self.d]
else self.sample_empirical_marginal(sample_size=1))
else:
sample = np.apply_along_axis(arr=concat_mat, axis=1,
func1d=lambda row: self.sample_sim(
sample_size=1, true_param=row[0]) if row[1]
else int(self.g_distribution.rvs(size=1)))
return np.hstack((concat_mat, sample.reshape(-1, self.d_obs)))
def sample_msnh_algo5(self, b_prime, sample_size):
theta_mat = self.sample_param_values(sample_size=b_prime).reshape(-1, 1)
assert theta_mat.shape == (b_prime, 1)
sample_mat = np.apply_along_axis(arr=theta_mat, axis=1,
func1d=lambda row: self.sample_sim(sample_size=sample_size, true_param=row))
return theta_mat, sample_mat
def compute_exact_or(self, t0, t1, x_obs):
f0_val = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + t0)
f1_val = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + t1)
return f0_val / f1_val
def compute_exact_prob(self, theta_vec, x_vec, p=0.5):
f_val = poisson.pmf(k=x_vec.reshape(-1, ), mu=self.background_val + theta_vec.reshape(-1, ))
g_val = self.g_distribution.pdf(x=x_vec.reshape(-1, ))
return (f_val * p) / (f_val * p + g_val * (1 - p))
def compute_exact_odds(self, theta_vec, x_vec, p=0.5):
f_val = poisson.pmf(k=x_vec.reshape(-1, ), mu=self.background_val + theta_vec.reshape(-1, ))
g_val = self.g_distribution.pdf(x=x_vec.reshape(-1, ))
return (f_val * p) / (g_val * (1 - p))
def compute_exact_likelihood(self, x_obs, true_param):
return poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + true_param)
def compute_exact_lr_simplevsimple(self, x_obs, t0, t1):
'''
Compute the exact log-likelihood ratio for the Poisson case.
'''
ll_t0 = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + t0)
ll_t1 = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + t1)
return np.sum(np.log(ll_t0) - np.log(ll_t1))
@staticmethod
def compute_mle(x_obs):
return np.average(x_obs.reshape(-1, ))
def compute_exact_lr_simplevcomp(self, x_obs, t0, mle):
'''
Compute the exact log-likelihood ratio for the Poisson case.
'''
ll_t0 = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + t0)
ll_mle = poisson.pmf(k=x_obs.reshape(-1, ), mu=self.background_val + mle)
return np.sum(np.log(ll_t0) - np.log(ll_mle))
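# Usage sketch (assumes the default constructor arguments):
#     loader = ToyPoissonLoader()
#     labeled = loader.generate_sample(sample_size=1000)   # columns: theta, label, x
#     theta_mat, x_mat = loader.sample_msnh_algo5(b_prime=100, sample_size=10)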
from operator import itemgetter
from src.rrt.heuristics import cost_to_go
from src.rrt.heuristics import segment_cost, path_cost
from src.rrt.rrt import RRT
import numpy as np
class InformedRRTStar(RRT):
def __init__(self, X, Q, x_init, x_goal, max_samples,
r, prc=0.01, rewire_count=None):
"""
Informed RRT* Search
:param X: Search Space
:param Q: list of lengths of edges added to tree
:param x_init: tuple, initial location
:param x_goal: tuple, goal location
:param max_samples: max number of samples to take
:param r: resolution of points to sample along edge
when checking for collisions
:param prc: probability of checking whether there is a solution
:param rewire_count: number of nearby vertices to rewire
"""
super().__init__(X, Q, x_init, x_goal, max_samples, r, prc)
self.rewire_count = rewire_count  # keep None so current_rewire_count() can fall back to all vertices
self.c_best = float('inf') # length of best solution thus far
self.RotationMatrix = self.RotationToWorldFrame(
self.x_init, self.x_goal,
np.linalg.norm(np.array(x_goal) - np.array(x_init))
)
def get_nearby_vertices(self, tree, x_init, x_new):
"""
Get nearby vertices to new vertex and their associated path costs
from the root of tree
as if new vertex is connected to each one separately.
:param tree: tree in which to search
:param x_init: starting vertex used to calculate path cost
:param x_new: vertex around which to find nearby vertices
:return: list of nearby vertices and their costs, sorted
in ascending order by cost
"""
X_near = self.nearby(tree, x_new, self.current_rewire_count(tree))
L_near = [
(path_cost(self.trees[tree].E, x_init, x_near) +
segment_cost(x_near, x_new), x_near) for x_near in X_near]
# noinspection PyTypeChecker
L_near.sort(key=itemgetter(0))
return L_near
def RotationToWorldFrame(self, x_start, x_goal, L):
dim = 2
# Transverse axis of the ellipsoid in the world frame
E1 = (np.array(x_goal) - np.array(x_start)) / L
# first basis vector of the world frame [1,0,0,...]
W1 = [1]+[0]*(dim - 1)
# outer product of E1 and W1
M = np.outer(E1, W1)
# SVD decomposition of the outer product
U, S, V = np.linalg.svd(M)
# Calculate the middle diagonal matrix
middleM = np.eye(dim)
middleM[-1, -1] = np.linalg.det(U)*np.linalg.det(V)
# calculate the rotation matrix
C = U@[email protected]
return C
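# Sanity-check sketch: C should be a proper rotation (orthogonal, det = +1)
# that maps the world x-axis onto the unit start->goal direction, e.g. in 2D:
#     C = self.RotationToWorldFrame((0, 0), (3, 4), 5.0)
#     assert np.allclose(C @ C.T, np.eye(2)) and np.isclose(np.linalg.det(C), 1.0)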
def rewire(self, tree, x_new, L_near):
"""
Rewire tree to shorten edges if possible
Only rewires vertices according to rewire count
:param tree: int, tree to rewire
:param x_new: tuple, newly added vertex
:param L_near: list of nearby vertices used to rewire
:return:
"""
for c_near, x_near in L_near:
curr_cost = path_cost(self.trees[tree].E, self.x_init, x_near)
tent_cost = path_cost(
self.trees[tree].E, self.x_init, x_new
) + segment_cost(x_new, x_near)
if tent_cost < curr_cost and self.X.collision_free(
x_near, x_new, self.r):
self.trees[tree].E[x_near] = x_new
def connect_shortest_valid(self, tree, x_new, L_near):
"""
Connect to nearest vertex that has an unobstructed path
:param tree: int, tree being added to
:param x_new: tuple, vertex being added
:param L_near: list of nearby vertices
"""
# check nearby vertices for total cost and connect shortest valid edge
for c_near, x_near in L_near:
if c_near + cost_to_go(x_near, self.x_goal) < self.c_best\
and self.connect_to_point(tree, x_near, x_new):
break
def current_rewire_count(self, tree):
"""
Return rewire count
:param tree: tree being rewired
:return: rewire count
"""
        # if no rewire count was specified (stored as 0 in __init__), rewire all vertices
        if not self.rewire_count:
            return self.trees[tree].V_count
# max valid rewire count
return min(self.trees[tree].V_count, self.rewire_count)
def InGoalRegion(self, x_new, q):
dist = np.linalg.norm(np.array(self.x_goal) - np.array(x_new))
if (dist < q).all():
return True
return False
def findCost(self, X_soln):
if len(X_soln) == 0:
return float("inf")
minimum_cost = float("inf")
minimum_path = []
for solution in X_soln:
path_cost = 0.0
for i in range(len(solution)-1):
                node1 = np.array(solution[i])
                node2 = np.array(solution[i + 1])
                path_cost += np.linalg.norm(node2 - node1)
            if path_cost < minimum_cost:
                minimum_cost = path_cost
                minimum_path = solution
        return minimum_cost, minimum_path
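# --- Illustrative check (added sketch; not part of the original module) ---
# RotationToWorldFrame should rotate the first world basis vector onto the
# unit start->goal direction. This standalone repetition of its construction
# verifies that property without building a full planner or search space.
def _check_rotation_to_world_frame():
    start, goal = np.array([0., 0.]), np.array([3., 4.])
    L = np.linalg.norm(goal - start)
    E1 = (goal - start) / L
    M = np.outer(E1, [1, 0])
    U, S, V = np.linalg.svd(M)
    middleM = np.eye(2)
    middleM[-1, -1] = np.linalg.det(U) * np.linalg.det(V)
    C = U @ middleM @ V.T
    # the rotation maps [1, 0] onto the transverse axis of the ellipsoid
    np.testing.assert_allclose(C @ np.array([1., 0.]), E1, atol=1e-12)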
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""This module contains algorithms for the empirical interpolation of |Operators|.
The main work for generating the necessary interpolation data is handled by
the :func:`ei_greedy` method. The objects returned by this method can be used
to instantiate an |EmpiricalInterpolatedOperator|.
As a convenience, the :func:`interpolate_operators` method allows one to perform
the empirical interpolation of the |Operators| of a given model with
a single function call.
"""
import numpy as np
from scipy.linalg import solve
from pymor.core.logger import getLogger
from pymor.algorithms.pod import pod as pod_alg
from pymor.operators.ei import EmpiricalInterpolatedOperator
from pymor.parallel.dummy import dummy_pool
from pymor.parallel.interfaces import RemoteObjectInterface
from pymor.parallel.manager import RemoteObjectManager
from pymor.vectorarrays.interfaces import VectorArrayInterface
def ei_greedy(U, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=None,
copy=True, pool=dummy_pool):
"""Generate data for empirical interpolation using EI-Greedy algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=True`).
The interpolation data is generated by a greedy search algorithm, where in each
loop iteration the worst approximated vector in `U` is added to the collateral basis.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
error_norm
Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
is used.
atol
Stop the greedy search if the largest approximation error is below this threshold.
rtol
Stop the greedy search if the largest relative approximation error is below this threshold.
max_interpolation_dofs
Stop the greedy search if the number of interpolation DOF (= dimension of the collateral
basis) reaches this value.
copy
    If `False`, `U` will be modified during execution of the algorithm.
pool
If not `None`, the |WorkerPool| to use for parallelization.
Returns
-------
interpolation_dofs
|NumPy array| of the DOFs at which the vectors are evaluated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:errors: Sequence of maximum approximation errors during
greedy search.
        :triangularity_errors: Sequence of maximum absolute values of interpolation
matrix coefficients in the upper triangle (should
be near zero).
"""
    if pool:  # dispatch to parallel implementation
assert isinstance(U, (VectorArrayInterface, RemoteObjectInterface))
with RemoteObjectManager() as rom:
if isinstance(U, VectorArrayInterface):
U = rom.manage(pool.scatter_array(U))
return _parallel_ei_greedy(U, error_norm=error_norm, atol=atol, rtol=rtol,
max_interpolation_dofs=max_interpolation_dofs, copy=copy, pool=pool)
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
interpolation_dofs = np.zeros((0,), dtype=np.int32)
collateral_basis = U.empty()
max_errs = []
triangularity_errs = []
if copy:
U = U.copy()
ERR = U
errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
max_err_ind = np.argmax(errs)
initial_max_err = max_err = errs[max_err_ind]
# main loop
while True:
if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
            logger.info(f'Final maximum interpolation error with '
                        f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
break
logger.info(f'Maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
if atol is not None and max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
break
if rtol is not None and max_err / initial_max_err <= rtol:
logger.info('Relative error tolerance reached! Stopping extension loop.')
break
# compute new interpolation dof and collateral basis vector
new_vec = U[max_err_ind].copy()
new_dof = new_vec.amax()[0][0]
if new_dof in interpolation_dofs:
            logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info(f'DOF {new_dof} selected for interpolation has zero maximum error! Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
collateral_basis.append(new_vec)
max_errs.append(max_err)
# update U and ERR
new_dof_values = U.dofs([new_dof])
U.axpy(-new_dof_values[:, 0], new_vec)
errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
max_err_ind = np.argmax(errs)
max_err = errs[max_err_ind]
interpolation_matrix = collateral_basis.dofs(interpolation_dofs).T
triangularity_errors = np.abs(interpolation_matrix - np.tril(interpolation_matrix))
for d in range(1, len(interpolation_matrix) + 1):
triangularity_errs.append(np.max(triangularity_errors[:d, :d]))
if len(triangularity_errs) > 0:
logger.info(f'Interpolation matrix is not lower triangular with maximum error of {triangularity_errs[-1]}')
data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}
return interpolation_dofs, collateral_basis, data
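# --- Standalone illustration (added sketch; a plain-NumPy restatement, not
# part of the pyMOR API). The EI-Greedy loop above, for a dense
# (n_dofs, n_vectors) snapshot matrix whose columns play the role of the
# vectors in the |VectorArray|. ---
def _ei_greedy_dense(U, rtol=1e-10, max_dofs=None):
    U = U.copy()
    dofs, basis_cols = [], []
    errs = np.linalg.norm(U, axis=0)
    initial_max_err = errs.max()
    while ((max_dofs is None or len(dofs) < max_dofs)
           and errs.max() > rtol * initial_max_err):
        worst = U[:, np.argmax(errs)].copy()   # worst approximated vector
        dof = int(np.argmax(np.abs(worst)))    # its largest-magnitude DOF
        if dof in dofs or worst[dof] == 0.:
            break
        worst /= worst[dof]                    # normalize so basis[dof] == 1
        dofs.append(dof)
        basis_cols.append(worst)
        U -= np.outer(worst, U[dof, :])        # interpolate away the new DOF
        errs = np.linalg.norm(U, axis=0)
    if not basis_cols:
        return np.array(dofs, dtype=int), np.zeros((U.shape[0], 0))
    return np.array(dofs, dtype=int), np.column_stack(basis_cols)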
def deim(U, modes=None, pod=True, atol=None, rtol=None, product=None, pod_options={}):
"""Generate data for empirical interpolation using DEIM algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=False`).
The collateral basis is determined by the first :func:`~pymor.algorithms.pod.pod` modes of `U`.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
modes
        Dimension of the collateral basis, i.e., the number of POD modes of the vectors in `U`.
pod
If `True`, perform a POD of `U` to obtain the collateral basis. If `False`, `U`
is used as collateral basis.
atol
Absolute POD tolerance.
rtol
Relative POD tolerance.
product
Inner product |Operator| used for the POD.
pod_options
Dictionary of additional options to pass to the :func:`~pymor.algorithms.pod.pod` algorithm.
Returns
-------
interpolation_dofs
|NumPy array| of the DOFs at which the vectors are interpolated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:svals: POD singular values.
"""
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.deim')
logger.info('Generating Interpolation Data ...')
data = {}
if pod:
collateral_basis, svals = pod_alg(U, modes=modes, atol=atol, rtol=rtol, product=product, **pod_options)
data['svals'] = svals
else:
collateral_basis = U
interpolation_dofs = np.zeros((0,), dtype=np.int32)
interpolation_matrix = np.zeros((0, 0))
for i in range(len(collateral_basis)):
logger.info(f'Choosing interpolation point for basis vector {i}.')
if len(interpolation_dofs) > 0:
coefficients = solve(interpolation_matrix,
collateral_basis[i].dofs(interpolation_dofs).T).T
U_interpolated = collateral_basis[:len(interpolation_dofs)].lincomb(coefficients)
ERR = collateral_basis[i] - U_interpolated
else:
ERR = collateral_basis[i]
# compute new interpolation dof and collateral basis vector
new_dof = ERR.amax()[0][0]
if new_dof in interpolation_dofs:
            logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
interpolation_matrix = collateral_basis[:len(interpolation_dofs)].dofs(interpolation_dofs).T
if len(interpolation_dofs) < len(collateral_basis):
del collateral_basis[len(interpolation_dofs):len(collateral_basis)]
logger.info('Finished.')
return interpolation_dofs, collateral_basis, data
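# --- Standalone illustration (added sketch; a plain-NumPy restatement, not
# part of the pyMOR API). The DEIM point selection above, for a dense
# (n_dofs, n_modes) basis matrix: each new DOF is where the interpolation
# residual of the next basis vector is largest in magnitude. ---
def _deim_points_dense(basis_matrix):
    n_modes = basis_matrix.shape[1]
    dofs = [int(np.argmax(np.abs(basis_matrix[:, 0])))]
    for i in range(1, n_modes):
        # coefficients that interpolate column i at the already-selected DOFs
        coefficients = solve(basis_matrix[dofs, :i], basis_matrix[dofs, i])
        residual = basis_matrix[:, i] - basis_matrix[:, :i] @ coefficients
        dofs.append(int(np.argmax(np.abs(residual))))
    return np.array(dofs)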
def interpolate_operators(fom, operator_names, parameter_sample, error_norm=None,
product=None, atol=None, rtol=None, max_interpolation_dofs=None,
pod_options={}, alg='ei_greedy', pool=dummy_pool):
"""Empirical operator interpolation using the EI-Greedy/DEIM algorithm.
This is a convenience method to facilitate the use of :func:`ei_greedy` or :func:`deim`.
Given a |Model|, names of |Operators|, and a sample of |Parameters|, first
the operators are evaluated on the solution snapshots of the model for the
provided parameters. These evaluations are then used as input for
:func:`ei_greedy`/:func:`deim`. Finally the resulting interpolation data is used to
create |EmpiricalInterpolatedOperators| and a new model with the interpolated
operators is returned.
Note that this implementation creates *one* common collateral basis for all specified
operators, which might not be what you want.
Parameters
----------
fom
The |Model| whose |Operators| will be interpolated.
operator_names
List of keys in the `operators` dict of the model. The corresponding
|Operators| will be interpolated.
parameter_sample
A list of |Parameters| for which solution snapshots are calculated.
error_norm
See :func:`ei_greedy`.
Has no effect if `alg == 'deim'`.
product
Inner product for POD computation in :func:`deim`.
Has no effect if `alg == 'ei_greedy'`.
atol
See :func:`ei_greedy`.
rtol
See :func:`ei_greedy`.
max_interpolation_dofs
See :func:`ei_greedy`.
pod_options
Further options for :func:`~pymor.algorithms.pod.pod` algorithm.
Has no effect if `alg == 'ei_greedy'`.
alg
Either `ei_greedy` or `deim`.
pool
If not `None`, the |WorkerPool| to use for parallelization.
Returns
-------
eim
|Model| with |Operators| given by `operator_names` replaced by
|EmpiricalInterpolatedOperators|.
data
Dict containing the following fields:
:dofs: |NumPy array| of the DOFs at which the |Operators| have to be evaluated.
:basis: |VectorArray| containing the generated collateral basis.
In addition, `data` contains the fields of the `data` `dict` returned by
:func:`ei_greedy`/:func:`deim`.
"""
assert alg in ('ei_greedy', 'deim')
logger = getLogger('pymor.algorithms.ei.interpolate_operators')
with RemoteObjectManager() as rom:
operators = [getattr(fom, operator_name) for operator_name in operator_names]
with logger.block('Computing operator evaluations on solution snapshots ...'):
if pool:
logger.info(f'Using pool of {len(pool)} workers for parallel evaluation')
evaluations = rom.manage(pool.push(fom.solution_space.empty()))
pool.map(_interpolate_operators_build_evaluations, parameter_sample,
fom=fom, operators=operators, evaluations=evaluations)
else:
evaluations = operators[0].range.empty()
for mu in parameter_sample:
U = fom.solve(mu)
for op in operators:
evaluations.append(op.apply(U, mu=mu))
if alg == 'ei_greedy':
with logger.block('Performing EI-Greedy:'):
dofs, basis, data = ei_greedy(evaluations, error_norm, atol=atol, rtol=rtol,
max_interpolation_dofs=max_interpolation_dofs,
copy=False, pool=pool)
elif alg == 'deim':
            if pool is not dummy_pool:
logger.warn('DEIM algorithm not parallel. Collecting operator evaluations.')
evaluations = pool.apply(_identity, x=evaluations)
evs = evaluations[0]
for e in evaluations[1:]:
evs.append(e, remove_from_other=True)
evaluations = evs
with logger.block('Executing DEIM algorithm:'):
dofs, basis, data = deim(evaluations, modes=max_interpolation_dofs,
atol=atol, rtol=rtol, pod_options=pod_options, product=product)
else:
assert False
ei_operators = {name: EmpiricalInterpolatedOperator(operator, dofs, basis, triangular=(alg == 'ei_greedy'))
for name, operator in zip(operator_names, operators)}
eim = fom.with_(name=f'{fom.name}_ei', **ei_operators)
data.update({'dofs': dofs, 'basis': basis})
return eim, data
def _interpolate_operators_build_evaluations(mu, fom=None, operators=None, evaluations=None):
U = fom.solve(mu)
for op in operators:
evaluations.append(op.apply(U, mu=mu))
def _parallel_ei_greedy(U, pool, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=None, copy=True):
assert isinstance(U, RemoteObjectInterface)
logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
logger.info(f'Using pool of {len(pool)} workers for parallel greedy search')
interpolation_dofs = np.zeros((0,), dtype=np.int32)
collateral_basis = pool.apply_only(_parallel_ei_greedy_get_empty, 0, U=U)
max_errs = []
triangularity_errs = []
with pool.push({}) as distributed_data:
errs = pool.apply(_parallel_ei_greedy_initialize,
U=U, error_norm=error_norm, copy=copy, data=distributed_data)
max_err_ind = np.argmax(errs)
initial_max_err = max_err = errs[max_err_ind]
# main loop
while True:
if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
logger.info(f'Final maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
break
logger.info(f'Maximum interpolation error with {len(interpolation_dofs)} interpolation DOFs: {max_err}')
if atol is not None and max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
break
if rtol is not None and max_err / initial_max_err <= rtol:
logger.info('Relative error tolerance reached! Stopping extension loop.')
break
# compute new interpolation dof and collateral basis vector
new_vec = pool.apply_only(_parallel_ei_greedy_get_vector, max_err_ind, data=distributed_data)
new_dof = new_vec.amax()[0][0]
if new_dof in interpolation_dofs:
logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info(f'DOF {new_dof} selected for interpolation has zero maximum error! '
f'Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
collateral_basis.append(new_vec)
max_errs.append(max_err)
errs = pool.apply(_parallel_ei_greedy_update, new_vec=new_vec, new_dof=new_dof, data=distributed_data)
            max_err_ind = np.argmax(errs)
            max_err = errs[max_err_ind]
import os
import pickle
import unittest
import cv2
import numpy as np
import numpy.testing as npt
import torch
from PIL import Image
from ...common_util.global_vars import PROJ_DIR
from ...common_util.image import image_path_to_numpy
from ...devil import compute_optical_flow, get_spaced_index_list, hausdorff_distance, compute_object_instances
from ...devil.config import get_default_config, namespace_to_dict
from ...devil.scoring import NormalizedImageNormScorer, AffineTransformComputer, AffineScoreScorer, \
LowCameraMotionSegmentsComputer
TEST_DATA_ROOT = os.path.join(PROJ_DIR, 'test-data')
class DevilTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
frames = []
masks = []
for i in range(6):
img = np.array(Image.open(os.path.join(TEST_DATA_ROOT, 'bear', '0000' + str(i) + '.jpg'))).astype(np.uint8)
mask = cv2.imread(os.path.join(TEST_DATA_ROOT, 'mask', '0000' + str(i) + '.png'), 0)
mask = np.where(mask > 0, True, False)
frames.append(img)
masks.append(mask)
cls.frames = frames
cls.masks = masks
cls.default_config_dict = namespace_to_dict(get_default_config())
cls.normalized_image_norm_scorer = NormalizedImageNormScorer(
**cls.default_config_dict['NormalizedImageNormScorer'])
cls.affine_transform_computer = AffineTransformComputer(**cls.default_config_dict['AffineTransformComputer'])
cls.affine_score_scorer = AffineScoreScorer(**cls.default_config_dict['AffineScoreScorer'])
cls.low_camera_motion_segments_computer = LowCameraMotionSegmentsComputer(
**cls.default_config_dict['LowCameraMotionSegmentsComputer'])
def assert_similar_affine_alignment(self, target_path, input_path):
"""Check that compute_affine_transform finds a good affine alignment between the images located at the two
given image paths.
:param target_path: File path to the target image that the aligned input image should resemble
:param input_path: File path to the input image that should resemble the target image when aligned
"""
image_target = cv2.imread(target_path)
image_input = cv2.imread(input_path)
# Make masks
mask_target = np.where(cv2.cvtColor(image_target, cv2.COLOR_BGR2GRAY) > 0, True, False)
mask_input = np.where(cv2.cvtColor(image_input, cv2.COLOR_BGR2GRAY) > 0, True, False)
# Compute the affine matrix
A = self.affine_transform_computer.compute_affine_transform(image_target, image_input, mask_target, mask_input)
# Warp the image
warped = cv2.warpAffine(image_target, A, (image_target.shape[1], image_target.shape[0]))
# Compare with the target image
        # cast to float before differencing to avoid uint8 wrap-around
        e_move = np.mean((image_input.astype(np.float64) - warped.astype(np.float64)) ** 2)
self.assertTrue(e_move < 1)
def assert_torch_equal(self, a, b):
self.assertTrue(torch.equal(a, b))
def test_compute_affine_transform_identity(self):
"""Check that compute_affine_transform finds a good affine alignment between an image and itself."""
self.assert_similar_affine_alignment(os.path.join(TEST_DATA_ROOT, 'affine/original.png'),
os.path.join(TEST_DATA_ROOT, 'affine/original.png'))
def test_compute_affine_transform_move(self):
"""Check that compute_affine_transform finds a good affine alignment between an image and version that has
been translated to the right."""
self.assert_similar_affine_alignment(os.path.join(TEST_DATA_ROOT, 'affine/original.png'),
os.path.join(TEST_DATA_ROOT, 'affine/move.png'))
def test_compute_affine_transform_rot(self):
"""Check that compute_affine_transform finds a good affine alignment between an image and version that has
been rotated."""
self.assert_similar_affine_alignment(os.path.join(TEST_DATA_ROOT, 'affine/original.png'),
os.path.join(TEST_DATA_ROOT, 'affine/rot.png'))
def test_compute_affine_transform_rot_move(self):
"""Check that compute_affine_transform finds a good affine alignment between an image and version that has
been rotated AND translated to the right."""
self.assert_similar_affine_alignment(os.path.join(TEST_DATA_ROOT, 'affine/original.png'),
os.path.join(TEST_DATA_ROOT, 'affine/rot_move.png'))
def test_compute_affine_transform_no_keypoints(self):
"""Check that compute_affine_transform returns None when there are no keypoints in one of the images."""
img = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/original.png'))
# Create a black background where no keypoints can be captured
bad_img = np.zeros_like(img)
A = self.affine_transform_computer.compute_affine_transform(img, bad_img)
self.assertEqual(A, None)
def test_compute_affine_transform_too_few_keypoints(self):
"""Check that compute_affine_transform returns None when there are not enough sufficiently matched keypoints
between the two images."""
img1 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/original.png'))
# Input a picture where few keypoints can be extracted
img2 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/bad.png'))
mask1 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/original_mask.png'), 0)
mask2 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/bad_mask.png'), 0)
object_mask1 = np.where(mask1 == 0, False, True)
object_mask2 = np.where(mask2 == 0, False, True)
object_mask2[329, 641] = False
A = self.affine_transform_computer.compute_affine_transform(img1, img2, object_mask1, object_mask2)
self.assertIsNone(A)
def test_compute_normalized_image_norm(self):
img_a = np.zeros((5, 5, 3))
img_b = 5 * np.ones((5, 5, 3))
self.assertEqual(self.normalized_image_norm_scorer.score_normalized_image_norm(img_a, img_b),
np.linalg.norm([5, 5, 5], ord=1))
def test_compute_normalized_image_norm_masked(self):
img_a = np.zeros((6, 6, 3))
img_b = np.empty((6, 6, 3))
# Fill image with 1-4 in top-left, top-right, bottom-left, and bottom-right quadrants
img_b[:3, :3] = 1
img_b[:3, 3:] = 2
img_b[3:, :3] = 3
img_b[3:, 3:] = 4
# Create masks whose unmasked areas intersect over 1 TL pixel, 2 TR pixels, 1 BL pixel, and 2 BR pixels
        mask_a = np.zeros((6, 6), dtype=bool)
mask_a[2:4, 2:] = True
        mask_b = np.zeros((6, 6), dtype=bool)
mask_b[1:5, 1:5] = True
result = self.normalized_image_norm_scorer.score_normalized_image_norm(
img_a, img_b, mask_a=mask_a, mask_b=mask_b)
# Take average norm over expected unmasked values
expected = (1 * np.linalg.norm([1, 1, 1], ord=1)
+ 2 * np.linalg.norm([2, 2, 2], ord=1)
+ 1 * np.linalg.norm([3, 3, 3], ord=1)
+ 2 * np.linalg.norm([4, 4, 4], ord=1)) / 6
self.assertEqual(result, expected)
def test_compute_affine_score_identity(self):
img1 = self.frames[0].copy()
# Check case with no mask input
score = self.affine_score_scorer.score_affine_score(img1, img1)
self.assertEqual(score, 0.0)
def test_compute_affine_score_no_keypoints(self):
img1 = self.frames[0].copy()
black = np.zeros_like(img1)
# Check case with no mask input
score = self.affine_score_scorer.score_affine_score(img1, black)
self.assertEqual(score, np.inf)
def test_compute_affine_score_too_few_keypoints(self):
img1 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/original.png'))
img2 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/bad.png'))
mask1 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/original_mask.png'), 0)
mask2 = cv2.imread(os.path.join(TEST_DATA_ROOT, 'affine/bad_mask.png'), 0)
# Focus on object
object_mask1 = np.where(mask1 == 0, False, True)
object_mask2 = np.where(mask2 == 0, False, True)
object_mask2[329, 641] = False
score = self.affine_score_scorer.score_affine_score(img1, img2, object_mask1, object_mask2)
self.assertEqual(score, np.inf)
def test_compute_affine_score(self):
img1, img2 = self.frames[0].copy(), self.frames[1].copy()
mask1, mask2 = self.masks[0].copy(), self.masks[1].copy()
# Check case with no mask input
score = self.affine_score_scorer.score_affine_score(img1, img2)
self.assertIsInstance(score, float)
# Check case with both mask inputs
score = self.affine_score_scorer.score_affine_score(img1, img2, mask1, mask2)
self.assertIsInstance(score, float)
# Check case with one mask input
score = self.affine_score_scorer.score_affine_score(img1, img2, mask_input=mask1)
self.assertIsInstance(score, float)
score = self.affine_score_scorer.score_affine_score(img1, img2, mask_target=mask2)
self.assertIsInstance(score, float)
def test_compute_optical_flow(self):
self.assertTrue(torch.cuda.is_available(), 'This test requires a GPU to run')
# check the flow generated by demo.py (written by RAFT's author) and the flow from __init__.py
for i in range(5):
img1 = self.frames[i].copy()
img2 = self.frames[i+1].copy()
# Compute flow from our wrapper function, and load flow from reference implementation
flo_ours = compute_optical_flow(img1, img2)
with open(os.path.join(TEST_DATA_ROOT, 'flow', 'flow0' + str(i) + '.npy'), 'rb') as f:
flo_ref = np.load(f)
# Convert reference optical flow grid to sampling grid
H, W, _ = flo_ours.shape
uu, vv = np.meshgrid(np.arange(W), np.arange(H))
sample_loc_u_ref = uu + flo_ref[:, :, 0]
sample_loc_v_ref = vv + flo_ref[:, :, 1]
# Determine valid sampling locations for horiz and vert components separately, then combine
valid_u_ref = np.greater(sample_loc_u_ref, 0) * np.less(sample_loc_u_ref, W-1)
            valid_v_ref = np.greater(sample_loc_v_ref, 0) * np.less(sample_loc_v_ref, H-1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 17:18:58 2018
@author: chrelli
"""
# Demo getting the KRLS-t to work!
#%%
import time, os, sys, shutil
# for math and plotting
import pandas as pd
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
#import math
# small utilities
#import csv
#from colour import Color
from itertools import compress # for list selection with logical
from tqdm import tqdm
# for image manipulation
#import cv2
# for recording and connecting to the intel realsense library
#import pyrealsense as pyrs
#import multiprocessing
from multiprocessing import Process
# for cloud handling
#from pyntcloud import PyntCloud
# import handy Functions
#from utils.common_utils import *
#from utils.recording_utils import *
#from utils.cloud_utils import *
from utils.fitting_utils import *
#from merge_and_filter_clouds import filter_and_downsample_cloud
# h5py for acessing data
#import h5py
# ALSO JIT STUFF
from numba import jit, njit
tracking_holder = np.load("utils/raw_tracking_no_bounds_full.npy")
# call the fitted values for X (is N body dimension x M time steps)
#%% Try to generate an estimate! Just xy for now!
xx = tracking_holder[-3,:]
yy = tracking_holder[-2,:]
zz = tracking_holder[-1,:]
#response variable is the next value!
plt.figure()
plt.plot(xx,yy)
plt.show()
plt.figure()
plt.plot(xx)
#%% Now, try generating the time embedded data!
#%% Generate training data by time embedding!
N_train = 2000
embedding = 5
def time_embedding(X,embedding):
# X is a column vector!
N = X.shape[0]
X_embedded = np.zeros((N,embedding))
for i in range(embedding):
X_embedded[i:,i] = X[:(N-i)]
return X_embedded
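# quick sanity check (added sketch): column k of the embedding is the input
# delayed by k steps, zero-padded at the start
_X_demo = time_embedding(np.arange(5.), 3)
assert np.array_equal(_X_demo[:, 1], np.array([0., 0., 1., 2., 3.]))
assert np.array_equal(_X_demo[:, 2], np.array([0., 0., 0., 1., 2.]))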
X = time_embedding(xx[:N_train],embedding)
Y = xx[1:(N_train+1)]
# add extra time dimension to the start for Xt
Xt = np.column_stack((np.arange(X.shape[0]),X))
#%% from matlab we have
#sigma_est,reg_est,lambda_est = 0.1631, 1.1680e-08,1.0000
#sigma_est,reg_est,lambda_est = 0.3775, 2.4780e-08,.9999
#sigma_est,reg_est,lambda_est = 14, 2.4780e-04,.999
#sigma_est = 0.2215
#reg_est = 4.449468e-09
#lambda_est = 1.0000
sigma_est = 0.1902
reg_est = 0.7567e-07
lambda_est = 0.9999
# Now make the kernel function!
from utils.gaussian import Gaussian
from utils.krlst import krlst
# make the kernel function with the appropriate sigma!
kern = Gaussian(sigma = sigma_est)
# make the regressor!
reg = krlst(kern)
reg.Lambda = lambda_est
#reg.Lambda = 0.99
reg.sn2 = reg_est
#%% Loop over the data and predict!
y_max = []
loops = np.linspace(100,len(Y)-100,num = 20)
for loop_from in loops:
y_pred = [0]
# loop_from = 200
# at 400, we stop adding 'real' data, and just recursively add predicted data!
for i,y in tqdm(enumerate(Y)):
if i < loop_from:
# train with real data!
reg.train(X[i,:],y)
X_train = X[i,:]
if i>0:
y_guess = float(reg.evaluate(X[i,:])[0])
y_pred.append(y_guess)
# get this ready for the prediction!
# initialize X_train for the next!
X_train = X[i+1,:]
else:
# estimate the guess
y_guess = float(reg.evaluate(X_train)[0])
# add to list
y_pred.append(y_guess)
# and update X_train
# now, just do it recursively!
#train here?
# reg.train(X_train,y_guess)
if i == loop_from + 20:
continue
X_train = np.hstack((y_guess,X_train[:-1]))
y_max.append(y_pred)
#%%
plt.close('all')
plt.figure()
plt.plot(Y)
for y_pred in y_max:
plt.plot(y_pred)
for loop_from in loops:
plt.axvline(x=loop_from-1)
#plt.xlim([loop_from-100,loop_from+100])
plt.show()
#%% Super naive linear regression
from sklearn import linear_model
regr = linear_model.LinearRegression()
y_pred = [0]
y_pred2 = [0,0]
y_pred3 = [0,0,0]
loop_from = 2000
# at 400, we stop adding 'real' data, and just recursively add predicted data!
for i,y in enumerate(Y):
regr = linear_model.LinearRegression()
regr.fit(np.arange(embedding).reshape(-1,1),X[i,:],0.9**np.arange(embedding))
y_pred.append(regr.predict(np.array([-1]).reshape(-1,1)))
y_pred2.append(regr.predict(np.array([-2]).reshape(-1,1)))
y_pred3.append(regr.predict(np.array([-3]).reshape(-1,1)))
#%%
plt.close('all')
plt.figure()
plt.plot(Y)
plt.plot(y_pred)
plt.plot(y_pred2)
plt.plot(y_pred3)
plt.axvline(x=loop_from)
plt.show()
#%% Try just with KRLS
from utils.krlst import KRLS
#%%
def compute_RBF(mat1, mat2, sigma = 0.016):
trnorms1 = np.mat([(v * v.T)[0, 0] for v in mat1]).T
trnorms2 = np.mat([(v * v.T)[0, 0] for v in mat2]).T
k1 = trnorms1 * np.mat(np.ones((mat2.shape[0], 1), dtype=np.float64)).T
k2 = np.mat(np.ones((mat1.shape[0], 1), dtype=np.float64)) * trnorms2.T
k = k1 + k2
    k -= 2 * np.mat(mat1 * mat2.T)
    # k now holds squared Euclidean distances; map through the Gaussian kernel
    return np.exp(-k / (2 * sigma ** 2))
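# Quick sanity check (added sketch, local demo names): the diagonal of an RBF
# Gram matrix of a point set with itself is exactly 1, since k(x, x) = exp(0).
_Xa = np.mat(np.random.randn(5, 3))
_K = compute_RBF(_Xa, _Xa, sigma=1.0)
assert np.allclose(np.diag(_K), 1.0)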
'''Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new process methods, etc...
'''
#Code from https://github.com/oeway/keras/blob/extendImageDataGenerator/keras/preprocessing/image.py
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import re
from scipy import linalg
import scipy.ndimage as ndi
from six.moves import range
import os
import sys
import threading
import copy
import inspect
import types
import keras.backend as K
from keras.utils.generic_utils import Progbar
def random_rotation(x, rg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shift(x, wrg, hrg, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
h, w = x.shape[row_index], x.shape[col_index]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_shear(x, intensity, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_zoom(x, zoom_range, row_index=1, col_index=2, channel_index=0,
fill_mode='nearest', cval=0.):
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
return x
def random_barrel_transform(x, intensity):
# TODO
pass
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
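# Sanity check (added sketch): offset-centering makes the image center a fixed
# point of the transform, so rotating about it leaves the center unmoved.
def _check_center_fixed():
    theta = np.pi / 6
    rotation = np.array([[np.cos(theta), -np.sin(theta), 0],
                         [np.sin(theta), np.cos(theta), 0],
                         [0, 0, 1]])
    h, w = 11, 21
    transform = transform_matrix_offset_center(rotation, h, w)
    center = np.array([h / 2 + 0.5, w / 2 + 0.5, 1.])
    assert np.allclose(transform @ center, center)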
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [ndi.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def array_to_img(x, dim_ordering=K.image_dim_ordering(), mode=None, scale=True):
from PIL import Image
x = x.copy()
if dim_ordering == 'th':
x = x.transpose(1, 2, 0)
if scale:
x += max(-np.min(x), 0)
x /= np.max(x)
x *= 255
if x.shape[2] == 3 and mode == 'RGB':
return Image.fromarray(x.astype('uint8'), mode)
elif x.shape[2] == 1 and mode == 'L':
return Image.fromarray(x[:, :, 0].astype('uint8'), mode)
elif mode:
return Image.fromarray(x, mode)
else:
raise Exception('Unsupported array shape: ', x.shape)
def img_to_array(img, dim_ordering=K.image_dim_ordering()):
if dim_ordering not in ['th', 'tf']:
raise Exception('Unknown dim_ordering: ', dim_ordering)
# image has dim_ordering (height, width, channel)
x = np.asarray(img, dtype='float32')
if len(x.shape) == 3:
if dim_ordering == 'th':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if dim_ordering == 'th':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise Exception('Unsupported image shape: ', x.shape)
return x
def load_img(path, target_mode=None, target_size=None):
from PIL import Image
img = Image.open(path)
if target_mode:
img = img.convert(target_mode)
if target_size:
img = img.resize((target_size[1], target_size[0]))
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png'):
return [os.path.join(directory, f) for f in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, f)) and re.match(r'([\w]+\.(?:' + ext + '))', f)]
def pil_image_reader(filepath, target_mode=None, target_size=None, dim_ordering=K.image_dim_ordering(), **kwargs):
img = load_img(filepath, target_mode=target_mode, target_size=target_size)
return img_to_array(img, dim_ordering=dim_ordering)
def standardize(x,
dim_ordering='th',
rescale=False,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
mean=None, std=None,
samplewise_std_normalization=False,
zca_whitening=False, principal_components=None,
featurewise_standardize_axis=None,
samplewise_standardize_axis=None,
fitting=False,
verbose=0,
config={},
**kwargs):
'''
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization.
samplewise_standardize_axis: axis along which to to perform sample-wise center and std normalization.
zca_whitening: apply ZCA whitening.
'''
if fitting:
        if '_X' in config:
# add data to _X array
config['_X'][config['_iX']] = x
config['_iX'] +=1
            if verbose and '_fit_progressbar' in config:
config['_fit_progressbar'].update(config['_iX'], force=(config['_iX']==fitting))
# the array (_X) is ready to fit
if config['_iX'] >= fitting:
X = config['_X'].astype('float32')
del config['_X']
del config['_iX']
if featurewise_center or featurewise_std_normalization:
featurewise_standardize_axis = featurewise_standardize_axis or 0
if type(featurewise_standardize_axis) is int:
featurewise_standardize_axis = (featurewise_standardize_axis, )
assert 0 in featurewise_standardize_axis, 'feature-wise standardize axis should include 0'
if featurewise_center:
mean = np.mean(X, axis=featurewise_standardize_axis, keepdims=True)
config['mean'] = np.squeeze(mean, axis=0)
X -= mean
if featurewise_std_normalization:
std = np.std(X, axis=featurewise_standardize_axis, keepdims=True)
config['std'] = np.squeeze(std, axis=0)
X /= (std + 1e-7)
if zca_whitening:
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
sigma = np.dot(flatX.T, flatX) / flatX.shape[1]
U, S, V = linalg.svd(sigma)
config['principal_components'] = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
if verbose:
del config['_fit_progressbar']
else:
# start a new fitting, fitting = total sample number
config['_X'] = np.zeros((fitting,)+x.shape)
config['_iX'] = 0
config['_X'][config['_iX']] = x
config['_iX'] +=1
if verbose:
config['_fit_progressbar'] = Progbar(target=fitting, verbose=verbose)
return x
if rescale:
x *= rescale
# x is a single image, so it doesn't have image number at index 0
if dim_ordering == 'th':
channel_index = 0
if dim_ordering == 'tf':
channel_index = 2
samplewise_standardize_axis = samplewise_standardize_axis or channel_index
if type(samplewise_standardize_axis) is int:
samplewise_standardize_axis = (samplewise_standardize_axis, )
if samplewise_center:
x -= np.mean(x, axis=samplewise_standardize_axis, keepdims=True)
if samplewise_std_normalization:
x /= (np.std(x, axis=samplewise_standardize_axis, keepdims=True) + 1e-7)
if verbose:
if (featurewise_center and mean is None) or (featurewise_std_normalization and std is None) or (zca_whitening and principal_components is None):
print('WARNING: feature-wise standardization and zca whitening will be disabled, please run "fit" first.')
if featurewise_center:
if mean is not None:
x -= mean
if featurewise_std_normalization:
if std is not None:
x /= (std + 1e-7)
if zca_whitening:
if principal_components is not None:
flatx = np.reshape(x, (x.size))
whitex = np.dot(flatx, principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
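# Smoke test (added sketch): sample-wise standardization alone, without the
# feature-wise fitting machinery; the array shape and names are illustrative.
def _standardize_samplewise_demo():
    x = np.random.rand(3, 8, 8).astype('float32')
    y = standardize(x.copy(), dim_ordering='th',
                    samplewise_center=True, samplewise_std_normalization=True)
    # per-pixel centering across channels leaves a (near-)zero overall mean
    assert y.shape == x.shape and abs(float(np.mean(y))) < 1e-3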
def center_crop(x, center_crop_size, **kwargs):
centerw, centerh = x.shape[1]//2, x.shape[2]//2
halfw, halfh = center_crop_size[0]//2, center_crop_size[1]//2
return x[:, centerw-halfw:centerw+halfw,centerh-halfh:centerh+halfh]
def random_crop(x, random_crop_size, sync_seed=None, **kwargs):
np.random.seed(sync_seed)
w, h = x.shape[1], x.shape[2]
rangew = (w - random_crop_size[0]) // 2
rangeh = (h - random_crop_size[1]) // 2
offsetw = 0 if rangew == 0 else np.random.randint(rangew)
offseth = 0 if rangeh == 0 else np.random.randint(rangeh)
return x[:, offsetw:offsetw+random_crop_size[0], offseth:offseth+random_crop_size[1]]
def random_transform(x,
dim_ordering='th',
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
sync_seed=None,
**kwargs):
'''
# Arguments
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
'''
np.random.seed(sync_seed)
x = x.astype('float32')
# x is a single image, so it doesn't have image number at index 0
if dim_ordering == 'th':
img_channel_index = 0
img_row_index = 1
img_col_index = 2
if dim_ordering == 'tf':
img_channel_index = 2
img_row_index = 0
img_col_index = 1
# use composition of homographies to generate final transform that needs to be applied
if rotation_range:
theta = np.pi / 180 * np.random.uniform(-rotation_range, rotation_range)
else:
theta = 0
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
if height_shift_range:
tx = np.random.uniform(-height_shift_range, height_shift_range) * x.shape[img_row_index]
else:
tx = 0
if width_shift_range:
ty = np.random.uniform(-width_shift_range, width_shift_range) * x.shape[img_col_index]
else:
ty = 0
translation_matrix = np.array([[1, 0, tx],
[0, 1, ty],
[0, 0, 1]])
if shear_range:
shear = np.random.uniform(-shear_range, shear_range)
else:
shear = 0
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
if np.isscalar(zoom_range):
zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise Exception('zoom_range should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
transform_matrix = np.dot(np.dot(np.dot(rotation_matrix, translation_matrix), shear_matrix), zoom_matrix)
h, w = x.shape[img_row_index], x.shape[img_col_index]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(x, transform_matrix, img_channel_index,
fill_mode=fill_mode, cval=cval)
if channel_shift_range != 0:
x = random_channel_shift(x, channel_shift_range, img_channel_index)
if horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_index)
if vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_index)
# TODO:
# barrel/fisheye
np.random.seed()
return x
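# Smoke test (added sketch): a random transform preserves the array shape and
# is reproducible for a fixed sync_seed. All names here are local to the demo.
def _random_transform_smoke_test():
    x = np.random.rand(3, 32, 32).astype('float32')
    y1 = random_transform(x.copy(), dim_ordering='th', rotation_range=20.,
                          width_shift_range=0.1, zoom_range=0.1, sync_seed=42)
    y2 = random_transform(x.copy(), dim_ordering='th', rotation_range=20.,
                          width_shift_range=0.1, zoom_range=0.1, sync_seed=42)
    assert y1.shape == x.shape and np.allclose(y1, y2)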
class ImageDataGenerator(object):
'''Generate minibatches with
real-time data augmentation.
# Arguments
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
featurewise_standardize_axis: axis along which to perform feature-wise center and std normalization.
samplewise_standardize_axis: axis along which to to perform sample-wise center and std normalization.
zca_whitening: apply ZCA whitening.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channels.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided (before applying
any other transformation).
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
(the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "th".
seed: random seed for reproducible pipeline processing. If not None, it will also be used by `flow` or
        `flow_from_directory` to generate the shuffle index in case no seed is set.
'''
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
featurewise_standardize_axis=None,
samplewise_standardize_axis=None,
zca_whitening=False,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
dim_ordering=K.image_dim_ordering(),
seed=None,
verbose=1):
self.config = copy.deepcopy(locals())
self.config['config'] = self.config
self.config['mean'] = None
self.config['std'] = None
self.config['principal_components'] = None
self.config['rescale'] = rescale
if dim_ordering not in {'tf', 'th'}:
raise Exception('dim_ordering should be "tf" (channel after row and '
'column) or "th" (channel before row and column). '
'Received arg: ', dim_ordering)
self.__sync_seed = self.config['seed'] or np.random.randint(0, np.iinfo(np.int32).max)
self.default_pipeline = []
self.default_pipeline.append(random_transform)
self.default_pipeline.append(standardize)
self.set_pipeline(self.default_pipeline)
self.__fitting = False
self.fit_lock = threading.Lock()
@property
def sync_seed(self):
return self.__sync_seed
@property
def fitting(self):
return self.__fitting
@property
def pipeline(self):
return self.__pipeline
def sync(self, image_data_generator):
self.__sync_seed = image_data_generator.sync_seed
return (self, image_data_generator)
def set_pipeline(self, p):
if p is None:
self.__pipeline = self.default_pipeline
elif type(p) is list:
self.__pipeline = p
else:
raise Exception('invalid pipeline.')
def flow(self, X, y=None, batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='', save_mode=None, save_format='jpeg'):
return NumpyArrayIterator(
X, y, self,
batch_size=batch_size, shuffle=shuffle, seed=seed,
dim_ordering=self.config['dim_ordering'],
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def flow_from_directory(self, directory,
color_mode=None, target_size=None,
image_reader='pil', reader_config=None,
read_formats=None,
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
if reader_config is None:
reader_config={'target_mode':'RGB', 'target_size':(256,256)}
if read_formats is None:
read_formats={'png','jpg','jpeg','bmp'}
return DirectoryIterator(
directory, self,
color_mode=color_mode, target_size=target_size,
image_reader=image_reader, reader_config=reader_config,
read_formats=read_formats,
classes=classes, class_mode=class_mode,
dim_ordering=self.config['dim_ordering'],
batch_size=batch_size, shuffle=shuffle, seed=seed,
save_to_dir=save_to_dir, save_prefix=save_prefix,
save_mode=save_mode, save_format=save_format)
def process(self, x):
# get next sync_seed
np.random.seed(self.__sync_seed)
self.__sync_seed = np.random.randint(0, np.iinfo(np.int32).max)
self.config['fitting'] = self.__fitting
self.config['sync_seed'] = self.__sync_seed
for p in self.__pipeline:
x = p(x, **self.config)
return x
def fit_generator(self, generator, nb_iter):
'''Fit a generator
# Arguments
generator: Iterator, generate data for fitting.
nb_iter: Int, number of iteration to fit.
'''
with self.fit_lock:
try:
self.__fitting = nb_iter*generator.batch_size
for i in range(nb_iter):
next(generator)
finally:
self.__fitting = False
def fit(self, X, rounds=1):
'''Fit the pipeline on a numpy array
# Arguments
X: Numpy array, the data to fit on.
rounds: how many rounds of fit to do over the data
'''
X = np.copy(X)
with self.fit_lock:
try:
self.__fitting = rounds*X.shape[0]
for r in range(rounds):
for i in range(X.shape[0]):
self.process(X[i])
finally:
self.__fitting = False
class Iterator(object):
def __init__(self, N, batch_size, shuffle, seed):
self.N = N
self.batch_size = batch_size
self.shuffle = shuffle
self.seed = seed
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_generator = self._flow_index(N, batch_size, shuffle, seed)
def reset(self):
self.batch_index = 0
def _flow_index(self, N, batch_size=32, shuffle=False, seed=None):
# ensure self.batch_index is 0
self.reset()
while 1:
if self.batch_index == 0:
self.index_array = np.arange(N)
if shuffle:
if seed is not None:
np.random.seed(seed + self.total_batches_seen)
self.index_array = np.random.permutation(N)
if seed is not None:
np.random.seed()
current_index = (self.batch_index * batch_size) % N
if N >= current_index + batch_size:
current_batch_size = batch_size
self.batch_index += 1
else:
current_batch_size = N - current_index
self.batch_index = 0
self.total_batches_seen += 1
yield (self.index_array[current_index: current_index + current_batch_size],
current_index, current_batch_size)
def __add__(self, it):
assert self.N == it.N
assert self.batch_size == it.batch_size
assert self.shuffle == it.shuffle
seed = self.seed or np.random.randint(0, np.iinfo(np.int32).max)
it.total_batches_seen = self.total_batches_seen
self.index_generator = self._flow_index(self.N, self.batch_size, self.shuffle, seed)
it.index_generator = it._flow_index(it.N, it.batch_size, it.shuffle, seed)
if (sys.version_info > (3, 0)):
iter_zip = zip
else:
from itertools import izip
iter_zip = izip
return iter_zip(self, it)
def __iter__(self):
# needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
class NumpyArrayIterator(Iterator):
def __init__(self, X, y, image_data_generator,
batch_size=32, shuffle=False, seed=None,
dim_ordering=K.image_dim_ordering(),
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
if y is not None and len(X) != len(y):
raise Exception('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' % (np.asarray(X).shape, np.asarray(y).shape))
self.X = X
self.y = y
self.image_data_generator = image_data_generator
self.dim_ordering = dim_ordering
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_mode = save_mode
self.save_format = save_format
seed = seed or image_data_generator.config['seed']
super(NumpyArrayIterator, self).__init__(X.shape[0], batch_size, shuffle, seed)
def __add__(self, it):
if isinstance(it, NumpyArrayIterator):
assert self.X.shape[0] == it.X.shape[0]
if isinstance(it, DirectoryIterator):
assert self.X.shape[0] == it.nb_sample
it.image_data_generator.sync(self.image_data_generator)
return super(NumpyArrayIterator, self).__add__(it)
def next(self):
# for python 2.x.
# Keeps under lock only the mechanism which advances
# the indexing of each batch
# see http://anandology.com/blog/using-iterators-and-generators/
with self.lock:
index_array, current_index, current_batch_size = next(self.index_generator)
# The transformation of images is not under thread lock so it can be done in parallel
batch_x = None
for i, j in enumerate(index_array):
x = self.X[j]
x = self.image_data_generator.process(x)
if i == 0:
batch_x = np.zeros((current_batch_size,) + x.shape)
batch_x[i] = x
if self.save_to_dir:
for i in range(current_batch_size):
img = array_to_img(batch_x[i], self.dim_ordering, mode=self.save_mode, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(prefix=self.save_prefix,
index=current_index + i,
hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
class DirectoryIterator(Iterator):
def __init__(self, directory, image_data_generator,
color_mode=None, target_size=None,
image_reader="pil", read_formats=None,
reader_config=None,
                 dim_ordering=K.image_dim_ordering(),
classes=None, class_mode='categorical',
batch_size=32, shuffle=True, seed=None,
save_to_dir=None, save_prefix='',
save_mode=None, save_format='jpeg'):
self.directory = directory
self.image_data_generator = image_data_generator
self.image_reader = image_reader
if self.image_reader == 'pil':
self.image_reader = pil_image_reader
if read_formats is None:
read_formats = {'png','jpg','jpeg','bmp'}
if reader_config is None:
reader_config = {'target_mode': 'RGB', 'target_size':None}
self.reader_config = reader_config
# TODO: move color_mode and target_size to reader_config
if color_mode == 'rgb':
self.reader_config['target_mode'] = 'RGB'
elif color_mode == 'grayscale':
self.reader_config['target_mode'] = 'L'
if target_size:
self.reader_config['target_size'] = target_size
self.dim_ordering = dim_ordering
self.reader_config['dim_ordering'] = dim_ordering
if class_mode not in {'categorical', 'binary', 'sparse', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_mode = save_mode
self.save_format = save_format
seed = seed or image_data_generator.config['seed']
# first, count the number of samples and classes
self.nb_sample = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
# if no class is found, add '' for scanning the root folder
if class_mode is None and len(classes) == 0:
classes.append('')
self.nb_class = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in read_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.nb_sample += 1
print('Found %d images belonging to %d classes.' % (self.nb_sample, self.nb_class))
# second, build an index of the images in the different class subfolders
self.filenames = []
self.classes = np.zeros((self.nb_sample,), dtype='int32')
i = 0
for subdir in classes:
subpath = os.path.join(directory, subdir)
for fname in os.listdir(subpath):
is_valid = False
for extension in read_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
self.classes[i] = self.class_indices[subdir]
self.filenames.append(os.path.join(subdir, fname))
i += 1
assert len(self.filenames)>0, 'No valid file is found in the target directory.'
self.reader_config['class_mode'] = self.class_mode
self.reader_config['classes'] = self.classes
self.reader_config['filenames'] = self.filenames
self.reader_config['directory'] = self.directory
self.reader_config['nb_sample'] = self.nb_sample
self.reader_config['seed'] = seed
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
super(DirectoryIterator, self).__init__(self.nb_sample, batch_size, shuffle, seed)
if inspect.isgeneratorfunction(self.image_reader):
self._reader_generator_mode = True
self._reader_generator = []
# set index batch_size to 1
self.index_generator = self._flow_index(self.N, 1 , self.shuffle, seed)
else:
self._reader_generator_mode = False
def __add__(self, it):
if isinstance(it, DirectoryIterator):
assert self.nb_sample == it.nb_sample
assert len(self.filenames) == len(it.filenames)
assert np.alltrue(self.classes == it.classes)
assert self.image_reader == it.image_reader
if inspect.isgeneratorfunction(self.image_reader):
self._reader_generator = []
it._reader_generator = []
if isinstance(it, NumpyArrayIterator):
            assert self.nb_sample == it.X.shape[0]
it.image_data_generator.sync(self.image_data_generator)
return super(DirectoryIterator, self).__add__(it)
def next(self):
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
if self._reader_generator_mode:
sampleCount = 0
batch_x = None
_new_generator_flag = False
while sampleCount<self.batch_size:
for x in self._reader_generator:
_new_generator_flag = False
if x.ndim == 2:
x = np.expand_dims(x, axis=0)
x = self.image_data_generator.process(x)
self.reader_config['sync_seed'] = self.image_data_generator.sync_seed
if sampleCount == 0:
                        batch_x = np.zeros((self.batch_size,) + x.shape)
                    batch_x[sampleCount] = x
                    sampleCount += 1
                    if sampleCount >= self.batch_size:
                        break
import numpy as np
class TransportationChecker(object):
"""
    TransportationChecker computes the reduced costs (check numbers) and
    determines whether the current solution is optimal.
"""
def __init__(self, supply: list, demand: list, costs: list):
super().__init__()
self.supply = [i[1] for i in supply]
self.demand = [i[1] for i in demand]
self.costs = np.array(costs)
        self.transportation = np.array([])
import gym
import numpy as np
from .utils import ImgProcessor
from .base import BaseEnv
COMMON_VERSION = "Deterministic-v4"
class _Atari(BaseEnv):
"""Atari environment.
Args:
name (str): name of environment in Atari games.
        render (bool): whether to render the environment.
        gray_img (bool): whether to use a grayscale image input.
        img_width (int): width of image input.
        img_height (int): height of image input.
        stack_frame (int): number of frames stacked into a single state.
        life_key (str): key of the life-count entry in the emulator's info dict.
        no_op (bool): whether to take no-op actions during the first `no_op_max` (30) steps.
        reward_clip (bool): whether to use reward clipping.
        reward_scale (float): reward normalization denominator.
        dead_penalty (bool): whether to apply a penalty when the agent dies.
"""
def __init__(
self,
name,
render=False,
gray_img=True,
img_width=84,
img_height=84,
stack_frame=4,
life_key="lives",
no_op=False,
reward_clip=False,
reward_scale=None,
dead_penalty=False,
**kwargs,
):
self.render = render
self.gray_img = gray_img
self.img_width = img_width
self.img_height = img_height
self.img_processor = ImgProcessor(gray_img, img_width, img_height)
self.stack_frame = stack_frame
self.num_channel = 1 if self.gray_img else 3
self.stacked_state = np.zeros(
[self.num_channel * stack_frame, img_height, img_width]
)
self.env = gym.make(name)
self.state_size = [stack_frame, img_height, img_width]
self.action_size = self.env.action_space.n
self.action_type = "discrete"
self.score = 0
self.life = 0
self.life_key = life_key
self.no_op = no_op
self.no_op_max = 30
self.reward_clip = reward_clip
self.reward_scale = reward_scale
self.dead_penalty = dead_penalty
print(f"{name} Start!")
print(f"state size: {self.state_size}")
print(f"action size: {self.action_size}")
def reset(self):
self.env.reset()
state, reward, _, info = self.env.step(1)
self.score = reward
self.life = info[self.life_key]
if self.no_op:
for _ in range(np.random.randint(0, self.no_op_max)):
state, reward, _, info = self.env.step(0)
self.score += reward
if self.life != info[self.life_key]:
if self.life > info[self.life_key]:
state, reward, _, _ = self.env.step(1)
self.score += reward
self.life = info[self.life_key]
state = self.img_processor.convert_img(state)
self.stacked_state = np.tile(state, (self.stack_frame, 1, 1))
state = np.expand_dims(self.stacked_state, 0)
return state
def step(self, action):
if self.render:
self.env.render()
next_state, reward, done, info = self.env.step(np.asscalar(action))
self.score += reward
dead = False
if self.life != info[self.life_key] and not done:
if self.life > info[self.life_key]:
state, _reward, _, _ = self.env.step(1)
self.score += _reward
dead = True
self.life = info[self.life_key]
next_state = self.img_processor.convert_img(next_state)
self.stacked_state = np.concatenate(
(self.stacked_state[self.num_channel :], next_state), axis=0
)
if self.reward_clip:
reward = (
reward / self.reward_scale if self.reward_scale else np.tanh(reward)
)
if dead and self.dead_penalty:
reward = -1
        next_state, reward, done = map(
            # assumed completion: mirror reset(), which returns the expanded stacked state
            lambda x: np.expand_dims(x, 0),
            [self.stacked_state, [reward], [done]],
        )
        return (next_state, reward, done)
import configparser
from datetime import datetime
from math import cos
from skimage import filters
from skimage import measure
from math import radians
from scipy.interpolate import splprep, splev
import numpy as np
import pandas as pd
import scipy.ndimage as img
""" Tools to manipulate and analyze data """
def canopy_cover(data, radius):
"""Computes leaf area index: ratio of leaf to ground for a certain area.
    Performs a convolution with an arbitrary radius to count how many
    nonzero values are present within the neighborhood of each point.
:param data: 2D or 3D numpy array of height data
:param radius: radius of the region (in number of 0.1 m squares)
:return: leaf area index computed for each point
"""
c = np.zeros_like(data, int)
kernel = np.ones((radius * 2 + 1, radius * 2 + 1))
for x in range(data.shape[2]):
d = data[:, :, x]
d[d > 0] = 1
conv = img.filters.convolve(d, kernel)
c[:, :, x] = conv
return c
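# A usage sketch for canopy_cover (shapes and values are assumptions, not from
# the original module). Note that canopy_cover binarizes `data` in place via
# d[d > 0] = 1, so pass a copy if the raw heights are still needed:
#
#   heights = np.random.rand(100, 100, 3)            # (rows, cols, dates)
#   lai = canopy_cover(np.copy(heights), radius=4)   # nonzero counts per 9x9 window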
def create_path_splines(points, data_shape, converter):
"""Create managable path splines from the thousands of datapoints"""
# Unpack rows and columns from data shape
rows, cols, _ = data_shape
# Convert the gps information and store it into a new DataFrame
path_pts = []
for p in points.itertuples():
# This gives us the data coordinates from the gps location
data_y = int(converter.lat_to_y(p[1]))
data_x = int(converter.lng_to_x(p[2]))
path_pts.append([data_x, data_y])
# Remove any duplicate points from the path,
# keeping the original array order
path_vals, ind = np.unique(path_pts, axis=0, return_index=True)
ind = sorted(ind)
path_pts = [path_pts[i] for i in ind]
# Create a spline from the remaining path points
# noinspection PyTupleAssignmentBalance
tck, u = splprep(np.transpose(path_pts), u=None, s=0.0)
return tck
def create_stress_map(height, canopy, rad, threshold):
"""Create map showing frequently stressed areas of plot.
Sums the stress masks for each individual date to determine which
areas are frequently coming up stressed.
:return - The 2D map of stressed locations
"""
# Deprecated create_suggestion_mask_v1 code
###########################################################################
# # Suggestion mask data (Normalized Height + Normalized Canopy)
# normh = np.divide(height, np.amax(height, axis=(0, 1))) * 50
# normc = np.divide(canopy, np.amax(canopy, axis=(0, 1))) * 50
#
# # Process data to create stress mask for each snapshot
# comb_data = np.add(normh, normc)
# stress_dates = create_suggestion_mask_v1(comb_data, rad, threshold)
###########################################################################
# Create the suggestion mask for each snapshot
stress_dates = create_suggestion_mask_v2(height, canopy, rad, threshold)
# Create stress map based on all data up to current date
stress_map = np.zeros_like(stress_dates)
for i in range(stress_dates.shape[2]):
stress_map[:, :, i] = np.sum(stress_dates[:, :, 0:(i+1)], axis=2)
stress_map[:, :, i] = np.divide(stress_map[:, :, i], (i+1))
return stress_map
def create_suggestion_mask_v1(d, rad, threshold):
"""Keep this here for a little while, then delete it. Suggestion mask v1
Uses statistical methods to determine outliers below the general
population of the data, and uses image processing techniques to discount
the edges of the plot from skewing the results.
:param d: The input data to create the mask
:param rad: The radius to average and desample the data
:param threshold: The percentile above which points will be filtered
:return: The mask from which suggested points are chosen
"""
# Create a new copy of the data to work on
data = np.copy(d)
# filter out data less than zero
data[data < 0] = 0
# Calculates each point as sum of nearby values within radius r
c = np.zeros_like(data, float)
kernel = np.ones((rad * 2 + 1, rad * 2 + 1))
for x in range(data.shape[2]):
conv = img.filters.convolve(data[:, :, x], kernel)
c[:, :, x] = conv
# Downsample array into pixels with same size as convolve
c = c[::rad * 2 + 1, ::rad * 2 + 1, :]
fullmask = np.zeros_like(d)
for i in range(c.shape[2]):
# Extract the ith layer of data
mask = c[:, :, i]
# Use image processing morphology to smooth out data
mask = img.grey_closing(mask, structure=np.ones((3, 3)))
# Use Sobel edge detection to decrease weight of edges
gx = img.sobel(mask, axis=0)
gy = img.sobel(mask, axis=1)
grad = np.hypot(gx, gy)
grad = (np.divide(grad, np.amax(grad))) * 100
mask = (np.divide(mask, np.amax(mask))) * 100
mask -= grad
# Calculate the threshold percentile, ignoring zeros
mask[mask <= 0] = np.nan
percent = np.nanpercentile(mask, threshold)
mask = np.nan_to_num(mask)
# Filter out data and create mask
mask[mask > percent] = 0
mask[mask > 0] = 1
# Perform binary opening to remove small regions
mask = img.binary_opening(mask)
# Rescale mask to fit data size
scale = np.divide(fullmask[:, :, 0].shape, mask.shape)
fullmask[:, :, i] = img.zoom(mask, scale, order=0)
return fullmask
def create_suggestion_mask_v2(height, canopy, rad=4, threshold=(20, 40)):
# Copy the data
height_data = np.copy(height)
# Silence isolated points (low canopy)
height_data[canopy < 5] = 0
# Downscale dataset to 0.5m squares, taking the max within each
height_data = downscale_max(height_data, rad)
# Place points into stress levels
stress_data = np.zeros_like(height_data)
for x in range(stress_data.shape[2]):
stress_layer = stress_data[:, :, x]
height_layer = height_data[:, :, x]
high_med_stress = np.percentile(height_layer[np.nonzero(height_layer)],
threshold[0])
med_low_stress = np.percentile(height_layer[np.nonzero(height_layer)],
threshold[1])
stress_layer[height_layer >= med_low_stress] = 0.01 # Low
height_layer[stress_layer > 0] = 0 # silence low points
stress_layer[height_layer >= high_med_stress] = 0.5 # Medium
height_layer[stress_layer > 0] = 0 # silence med points
stress_layer[0 < height_layer] = 0.99 # High
stress_data[:, :, x] = stress_layer
stress_data = rescale_like(stress_data, height)
return stress_data
def define_regions(data, rad):
"""Identify regions of high stress areas and """
region_map = np.copy(data)
region_map = region_map[::rad * 2 + 1, ::rad * 2 + 1]
val = filters.threshold_otsu(region_map)
mask = region_map > val
mask = img.binary_opening(mask, iterations=2)
scale = np.divide(data.shape, mask.shape)
mask = img.zoom(mask, scale, order=0)
labels = measure.label(mask, background=0)
regions = img.find_objects(labels)
small_regions = []
for i in range(len(regions)):
if np.nonzero(labels == i + 1)[0].size <= 500:
labels[regions[i]] = 0
small_regions.append(i)
for i in small_regions[::-1]:
del regions[i]
return StressMapWrapper(labels, regions)
def downscale_avg(data, radius):
# Calculates each point as sum of nearby values within radius r
diam = 2 * radius + 1
kernel = np.ones((diam, diam))
fullmap = np.zeros_like(data, float)
for x in range(data.shape[2]):
conv = img.filters.convolve(data[:, :, x], kernel)
fullmap[:, :, x] = conv
# Downsample array into pixels with same size as convolve
fullmap = fullmap[::diam, ::diam, :]
return fullmap
def downscale_max(data, radius):
# Turn radius into diameter centered at original point
diam = 2 * radius + 1
fullmap = np.zeros_like(data[::diam, ::diam, :], float)
for x in range(data.shape[2]):
layer = np.zeros_like(data[::diam, ::diam, 0])
for r in range((int(data.shape[0] / diam)) - 1):
for c in range((int(data.shape[1] / diam)) - 1):
selection = data[(r*diam):(r*diam + diam),
(c*diam):(c*diam + diam), x]
                max_val = np.amax(selection)
                # assumed completion: keep the block maximum, then store the layer
                layer[r, c] = max_val
        fullmap[:, :, x] = layer
    return fullmap
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,too-many-instance-attributes, too-many-arguments
"""
Copyright 2019 <NAME>
Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from copy import deepcopy
from math import log, exp, sqrt
import sys
import numpy as np
from numpy import dot, outer, eye, zeros, ones, diag
import scipy.linalg as linalg
from filterpy.stats import logpdf
from filterpy.common import pretty_str, reshape_z
'''
UDU decomposition:
P = U * diag(D) * U^T
'''
def udu(p):
if 2 != len(p.shape):
return None
if p.shape[0] != p.shape[1]:
return None
n = p.shape[0]
u = zeros((n, n))
d = zeros((n))
d[n-1] = p[n-1,n-1]
u[:,n-1] = p[:,n-1] / d[n-1]
for j in range(n-2, -1, -1):
dd = d[j+1:]
c = dd * u[j,j+1:] #dd is meant to be diag(d[j+1:])
d[j] = p[j,j] - np.dot(u[j,j+1:].T, c)
if d[j] == 0:
return None
for i in range(j, -1, -1):
c = dd * u[j,j+1:]
u[i,j] = (p[i,j] - np.dot(u[i,j+1:].T, c))/d[j]
return u, d
'''
MWGS update:
    U * diag(D) * U^T = w * diag(d) * w^T
Params:
    w - n*k float matrix of full rank
    d - length-k float vector of weights
    where k > n
Return:
    u - n*n float upper triangular matrix with unit diagonal
    D - length-n float vector
'''
def mwgs(w,d):
if 1 != len(d.shape):
return None
if 2 != len(w.shape):
return None
if w.shape[1] != d.shape[0]:
return None
if w.shape[0] >= d.shape[0]:
return None
n = w.shape[0]
u = np.eye(n)
D = np.zeros((n))
for i in range(n-1, -1, -1):
c = w[i,:] * d
D[i] = np.dot(w[i,:], c)
if D[i] <= 0:
            # How about partial reset heuristics here?
return None
dd = c/D[i]
for j in range(0, i):
u[j,i] = np.dot(dd, w[j,:])
w[j,:] -= u[j,i] * w[i,:]
return u, D
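# A minimal numerical check of the udu/mwgs pair (a sketch, not part of the
# original library): udu factors an SPD matrix, and mwgs recompresses a widened
# square-root pair, which is exactly how predict() below propagates U and D.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A = rng.standard_normal((4, 4))
    P = A @ A.T + 4 * np.eye(4)                      # symmetric positive definite
    U, D = udu(P)
    assert np.allclose(U @ np.diag(D) @ U.T, P)
    W = np.concatenate((U, np.eye(4)), axis=1)       # widen the pair: k = 8 > n = 4
    d = np.concatenate((D, np.ones(4)))
    U2, D2 = mwgs(W, d)                              # note: mwgs modifies W in place
    assert np.allclose(U2 @ np.diag(D2) @ U2.T, P + np.eye(4))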
class UDExtendedKalmanFilter(object):
""" Implements an UD modification of extended Kalman filter (EKF).
You are responsible for setting the various state variables to
reasonable values; the defaults will not give you a functional filter.
You will have to set the following attributes after constructing this
object for the filter to perform properly. Please note that there are
various checks in place to ensure that you have made everything the
'correct' size. However, it is possible to provide incorrectly sized
arrays such that the linear algebra can not perform an operation.
It can also fail silently - you can end up with matrices of a size that
allows the linear algebra to work, but are the wrong shape for the problem
you are trying to solve.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
Number of of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
Attributes
----------
x : numpy.array(dim_x, 1)
State estimate vector
P : numpy.array(dim_x, dim_x)
Covariance matrix
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
are for convienence; they store the prior and posterior of the
current epoch. Read Only.
P_prior : numpy.array(dim_x, dim_x)
Prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_post : numpy.array(dim_x, dim_x)
Posterior (updated) state covariance matrix. Read Only.
R : numpy.array(dim_z, dim_z)
Measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
F : numpy.array()
State Transition matrix
    H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
        System uncertainty projected to measurement space. Read only.
z : ndarray
Last measurement used in update(). Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
likelihood : float
        likelihood of last measurement. Read only.
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
mahalanobis : float
mahalanobis distance of the innovation. E.g. 3 means measurement
was 3 standard deviations away from the predicted value.
Read only.
"""
def __init__(self, dim_x, dim_z, dim_u=0):
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
# uncertainty covariance
self.U = eye(dim_x)
self.D = ones((dim_x))
self.B = 0 # control transition matrix
self.F = np.eye(dim_x) # state transition matrix
# state uncertainty
self.Dm = eye(dim_z) #Decorrelation matrix
self.Ur = eye(dim_z) #Decorrelation matrix
self.Dr = ones((dim_z))
# process uncertainty
self.Uq = eye(dim_x)
self.Dq = ones((dim_x))
z = np.array([None]*self.dim_z)
self.z = reshape_z(z, self.dim_z, self.x.ndim)
# residual is computed during the innovation step. We
# save them so that in case you want to inspect it for various
# purposes
self.y = zeros((dim_z, 1)) # residual
self.S = np.zeros((dim_z, dim_z)) # system uncertainty
self.SI = np.zeros((dim_z, dim_z)) # inverse system uncertainty
self._log_likelihood = log(sys.float_info.min)
self._likelihood = sys.float_info.min
self._mahalanobis = None
# these will always be a copy of x,P after predict() is called
self.x_prior = self.x.copy()
self.U_prior = self.U.copy()
self.D_prior = self.D.copy()
# these will always be a copy of x,P after update() is called
self.x_post = self.x.copy()
self.U_post = self.U.copy()
self.D_post = self.D.copy()
@property
def Q(self):
""" Process uncertainty"""
return dot(self.Uq, dot(diag(self.Dq), self.Uq.T))
@Q.setter
def Q(self, value):
""" Process uncertainty"""
self.Uq, self.Dq = udu(value)
@property
def P(self):
""" covariance matrix"""
return dot(self.U, dot(diag(self.D), self.U.T))
@property
def P_prior(self):
""" covariance matrix of the prior"""
return dot(self.U_prior, dot(diag(self.D_prior), self.U_prior.T))
@property
def P_post(self):
""" covariance matrix of the posterior"""
return dot(self.U_post, dot(diag(self.D_post), self.U_post.T))
@P.setter
def P(self, value):
""" covariance matrix"""
        self.U, self.D = udu(value)
@property
def R(self):
""" measurement uncertainty"""
return dot(self.Ur, dot(diag(self.Dr), self.Ur.T))
@R.setter
def R(self, value):
""" measurement uncertainty"""
self.Ur, self.Dr = udu(value)
self.Dm = linalg.inv(self.Ur)
def predict_x(self, u=0):
"""
Predicts the next state of X. If you need to
compute the next state yourself, override this function. You would
need to do this, for example, if the usual Taylor expansion to
generate F is not providing accurate results for you.
"""
self.x = dot(self.F, self.x) + dot(self.B, u)
def predict(self, u=0):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
self.predict_x(u)
W = np.concatenate((dot(self.F, self.U), self.Uq), axis=1)
        D = np.concatenate((self.D, self.Dq))
        # assumed completion of the UD time update: recompress the widened
        # square-root pair with MWGS and keep prior copies, mirroring __init__
        self.U, self.D = mwgs(W, D)
        self.x_prior = np.copy(self.x)
        self.U_prior = np.copy(self.U)
        self.D_prior = np.copy(self.D)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
from numpy.fft import ifft
"""
Example of a closure-function. See also partial from ...
def force(A, f, ndof, fdof):
# Closure function. See also functools.partial
# fext = self.force(dt, tf=T)
def wrapped_func(dt, t0=0, tf=1):
ns = round((tf-t0)/dt)
fs = 1/dt
u,_ = sineForce(A, f=f, fs=fs, ns=ns, phi_f=0)
fext = toMDOF(u, ndof, fdof)
return fext
return wrapped_func
"""
def sinesweep(amp, fs, f1, f2, vsweep, nrep=1, inctype='lin', t0=0):
"""Do a linear or logarithmic sinus sweep excitation.
For a reverse sweep, swap f1 and f2 and set a negative sweep rate.
Parameters
----------
amp : float
Amplitude in N
fs : float
Sampling frequency
f1 : float
Starting frequency in Hz
f2 : float
Ending frequency in Hz
vsweep : float
Sweep rate in Hz/min
nrep : int
Number of times the signal is repeated
inctype : str (optional)
Type of increment. Linear or logarithmic: lin/log
t0 : float (optional)
Staring time, default t0=0
Notes
-----
See scipy.signal.chirp, which does the same
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.chirp.html
"""
dt = 1/fs
if inctype == 'log':
tend = np.log2(f2 / f1) * (60/vsweep) + t0
else:
tend = (f2 - f1) / vsweep * 60 + t0
# Because we want to enforce the given fs, arange is used instead of
# linspace. This means that we might not include tend in t (which would be
# the case with linspace), but for that we get the desired fs.
ns = np.floor((tend-t0)*fs)
t = np.arange(0,ns+1)/fs
# t = np.linspace(t0, tend, ns +1)
# Instantaneous frequency
if inctype == 'log':
finst = f1 * 2**(vsweep*((t - t0)/60))
else:
finst = f1 + vsweep/60*(t-t0)
if inctype == 'log':
psi = (2*np.pi * f1*60/(np.log(2)*vsweep)) * (2**(vsweep*((t-t0)/60)) - 1)
else:
psi = 2*np.pi * f1*(t-t0) + 2*np.pi*vsweep/60*(t-t0)**2 / 2
u = amp * np.sin(psi)
if nrep > 1:
# repeat signal: 1 2 3 -> 1 2 3 1 2 3 1 2 3
u = np.tile(u, nrep)
# prevent the first number from reoccurring: 1 2 3 -> 1 2 3 2 3 2 3
idx = np.arange(1,nrep) * (ns+1)
u = np.delete(u, idx)
t = np.arange(0, ns*nrep+1) / fs
return u, t, finst
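# Usage sketch (parameter values are assumptions): a 10 s linear sweep from
# 10 Hz to 100 Hz sampled at 1 kHz needs a rate of (100 - 10) / 10 * 60 = 540 Hz/min:
#
#   u, t, finst = sinesweep(amp=1.0, fs=1000, f1=10, f2=100, vsweep=540)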
def multisine(f1=0, f2=None, N=1024, fs=None, R=1, P=1, lines='full',rms=1, ngroup=4):
"""Random periodic excitation
Generates R realizations of a zero-mean random phase multisine with
specified rms(amplitude). Random phase multisine signal is a periodic
random signal with a user-controlled amplitude spectrum and a random phase
spectrum drawn from a uniform distribution. If an integer number of periods
is measured, the amplitude spectrum is perfectly realized, unlike classical
    Gaussian noise. Another advantage is that the periodic nature can help
separate signal from noise.
The amplitude spectrum is flat between f1 and f2.
Parameters
----------
f1 : float, optional
Starting frequency in Hz. Default 0 Hz
f2 : float, optional
Ending frequency in Hz. Default 0.9* `nyquist frequency`
N : int, optional
Number of points per period. default = 1024
fs : float, optional
Sample frequency. Default fs=N
P : int, optional
Number of periods. default = 1
R : int, optional
Number of realizations. default = 1
lines : array_like or str: {'full', 'odd', 'oddrandom'}, optional
For characterization of NLs, only selected lines are excited.
rms : float, optional
rms(amplitude) of the generated signals. default = 1. Note that since
the signal is zero-mean, the std and rms is equal.
ngroup : int, optional
In case of ftype = oddrandom, 1 out of ngroup odd lines is discarded.
Returns
-------
u: RxNP record of the generated signals
lines: excited frequency lines -> 1 = dc, 2 = fs/N
freq: frequency vector
Examples
--------
Generate two realizations of a full multisine with 1000 samples and
excitation up to one third of the Nyquist frequency.
The two realizations have the same amplitude spectrum, but different phase
realizations (uniformly distributed between [-π,π))
>>> N = 1000 # One thousand samples
>>> kind = 'full' # Full multisine
>>> f2 = round(N//6) # Excitation up to one sixth of the sample frequency
>>> R = 2 # Two phase realizations
>>> u, lines, freq = multisine(f2=f2,N=N,lines=kind,R=R)
Generate a random odd multisine where the excited odd lines are split in
groups of three consecutive lines and where one line is randomly chosen in
each group to be a detection line (i.e. without excitation)
>>> kind = 'oddrandom'
>>> u1,lines, freq = multisine(f2=f2,N=N,lines=kind,R=1,ngroup=3)
Generate another phase realization of this multisine with the same excited
lines and detection lines
>>> u2,*_ = multisine(N=N,lines=lines,R=1)
Notes
-----
J.Schoukens, <NAME>, and <NAME>:
Linear System Identification in a Nonlinear Setting:
Nonparametric Analysis of the Nonlinear Distortions and Their Impact on the
Best Linear Approximation. https://arxiv.org/pdf/1804.09587.pdf
"""
if fs is None:
fs = N
if f2 is None:
f2 = np.floor(0.9*N/2)
if not fs >= 2*f2:
raise AssertionError(f"fs should be {fs} >= {2*f2}")
if not N >= 2*f2:
raise AssertionError('N should be higher than Nyquist freq, '
'N >= 2*f2. N={}, f2={}'.format(N,f2))
VALID_LINES = {'full', 'odd', 'oddrandom'}
if isinstance(lines, str) and lines.lower() in VALID_LINES:
lines = lines.lower()
# frequency resolution
f0 = fs/N
# lines selection - select which frequencies to excite
lines_min = np.ceil(f1/f0).astype('int')
lines_max = np.floor(f2/f0).astype('int')
_lines = np.arange(lines_min, lines_max, dtype=int)
elif isinstance(lines, (np.ndarray, list)): # user specified lines
_lines = np.array(lines)
else:
raise ValueError(f"Invalid lines-type. Should be one of {VALID_LINES}"
f" or array of frequency lines. Is {lines}")
# remove dc
if _lines[0] == 0:
_lines = _lines[1:]
if isinstance(lines, str):
if lines == 'full':
pass # do nothing
elif lines == 'odd':
# remove even lines
            if np.remainder(_lines[0], 2):  # lines[0] is odd
_lines = _lines[::2]
else:
_lines = _lines[1::2]
elif lines == 'oddrandom':
if np.remainder(_lines[0],2):
_lines = _lines[::2]
else:
_lines = _lines[1::2]
# remove 1 out of ngroup lines
nlines = len(_lines)
nremove = np.floor(nlines/ngroup).astype('int')
idx = np.random.randint(ngroup, size=nremove)
idx = idx + ngroup*np.arange(nremove)
_lines = np.delete(_lines, idx)
nlines = len(_lines)
# multisine generation - frequency domain implementation
U = np.zeros((R,N),dtype=complex)
# excite the selected frequencies
U[:,_lines] = np.exp(2j*np.pi*np.random.rand(R,nlines))
u = 2*np.real(ifft(U,axis=1)) # go to time domain
u = rms*u / np.std(u[0]) # rescale to obtain desired rms/std
# Because the ifft is for [0,2*pi[, there is no need to remove any point
# when the generated signal is repeated.
    u = np.tile(u, (1, P))  # shape (R, N*P)
    # assumed completion, matching the docstring's return values
    freq = np.arange(N) / N * fs
    return u, _lines, freq
#! /usr/bin/env python3
import math
import numpy as np
import open3d as o3d
import sensor_msgs.point_cloud2 as pc2
class Utils(object):
@staticmethod
def convert_pointcloud2_msg_to_array(cloud_msg):
points_list = []
for data in pc2.read_points(cloud_msg, skip_nans=True):
points_list.append([data[0], data[1], data[2], data[3]])
return np.array(points_list)
@staticmethod
def convert_pose_stamped_msg_to_array(pose_msg):
position = np.array([pose_msg.pose.position.x, pose_msg.pose.position.y, pose_msg.pose.position.z])
orientation = np.array([pose_msg.pose.orientation.w, pose_msg.pose.orientation.x, pose_msg.pose.orientation.y, pose_msg.pose.orientation.z])
return position, orientation
@staticmethod
def convert_pos_quat_to_transformation(pos, quat):
R = o3d.geometry.get_rotation_matrix_from_quaternion(quat)
T = np.empty((4, 4))
T[0:3, 0:3] = R
T[0:3, 3] = pos
T[3, :] = [0, 0, 0, 1]
return T
@staticmethod
def convert_pointcloud2_msg_to_array(cloud_msg):
points_list = []
for data in pc2.read_points(cloud_msg, skip_nans=True):
points_list.append([data[0], data[1], data[2], data[3]])
return np.array(points_list)
@staticmethod
def transform_pointcloud(cloud, T):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(cloud[:, 0:3])
pcd.transform(T)
dst = np.asarray(pcd.points)
return np.column_stack((dst, cloud[:, 3]))
@staticmethod
def downsample_pointcloud(cloud, voxel_size=0.15):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(cloud[:, 0:3])
pcd = pcd.voxel_down_sample(voxel_size=voxel_size)
dst = np.asarray(pcd.points)
# TODO(lbern): fix for intensity
return dst
@staticmethod
def fix_nn_output(n_neighbors, idx, nn_dists, nn_indices):
self_idx = np.where(nn_indices == idx)[0][0]
nn_dists = np.delete(nn_dists, [self_idx])
        nn_indices = np.delete(nn_indices, [self_idx])
        # assumed completion: return the neighbor arrays with the query point removed
        return nn_dists, nn_indices
"""
File: continuous.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/ComeBertrand
Description: Classical continuous functions for performance evaluation of
metaheuristics. All theses functions were taken from the following website :
https://www.sfu.ca/~ssurjano/optimization.html
"""
import numpy as np
from ...models import Problem
from ...common.representation import RealEncoding, Boundaries
from ...common.fitness import Objective
from ...operators.neighborhood import NeighborhoodOperator, move_distance_continuous, ContinuousLogMoveRange
class ContinuousProblem(Problem):
"""Problems that are defined by a continuous function.
# TODO: Do it in a more abstract way and move it in abstract
Args:
n_dim (int): Number of dimensions.
min_vals (np.array): Minimum values for each dimension.
max_vals (np.array): Maximum values for each dimension.
move_range (MoveRange): Range of the move step.
known_min (float): Minimum of the continuous function. None means that
the minimum is not known.
"""
def __init__(self, n_dim, min_vals, max_vals, move_range, known_min):
nb_neighbors = n_dim * 100 # TODO: shall be an argument of the object
neighborhood = NeighborhoodOperator(move_distance_continuous, move_range, nb_neighbors)
boundaries = Boundaries(min_vals, max_vals, np.float)
encoding = RealEncoding(boundaries)
objective = Objective(self._eval_func)
super().__init__(objective, encoding, neighborhood=neighborhood, known_min=known_min)
def _eval_func(self, solution):
"""Actual evaluation of a solution by the continuous function.
Args:
solution (Solution): Solution to be evaluated.
Returns:
float: function value of the solution.
"""
raise NotImplementedError("Abstract Class")
# --------------------------------------------------------------------------- #
# Functions with many local minima #
# --------------------------------------------------------------------------- #
class Ackleys(ContinuousProblem):
"""Ackley's function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-32.768] * n_dim, np.float)
max_vals = np.array([32.768] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
part1 = -0.2 * np.sqrt(1/n * np.sum(solution * solution))
part2 = 1/n * np.sum(np.cos(2 * np.pi * solution))
return 20 - 20 * np.exp(part1) + np.e - np.exp(part2)
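# Sanity-check sketch (not part of the original module): Ackley's global
# minimum is 0 at the origin, so for example
#   assert abs(Ackleys(n_dim=2)._eval_func(np.zeros(2))) < 1e-12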
class Bukin6(ContinuousProblem):
"""Bukin funtion N.6."""
def __init__(self):
n_dim = 2
min_vals = np.array([-15.0, -3.0], np.float)
max_vals = np.array([-5.0, 3.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(solution[1] - 0.01 * solution[0] * solution[0])
part2 = np.abs(solution[0] + 10)
return 100 * np.sqrt(part1) + 0.01 * part2
class CrossInTray(ContinuousProblem):
"""Cross-in-tray function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -2.06261
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(100 - np.sqrt(np.sum(solution * solution)) / np.pi)
part2 = np.sin(solution[0]) * np.sin(solution[1])
final = np.abs(part2 * np.exp(part1)) + 1.0
return -0.0001 * np.power(final, 0.1)
class DropWave(ContinuousProblem):
"""Drop-Wave function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12, -5.12], np.float)
max_vals = np.array([5.12, 5.12], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -1.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sol_sq = np.sum(solution * solution)
part1 = 1.0 + np.cos(12 * np.sqrt(sum_sol_sq))
part2 = 0.5 * sum_sol_sq + 2.0
return -1.0 * (part1 / part2)
class Eggholder(ContinuousProblem):
"""Eggholder function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-512, -512], np.float)
max_vals = np.array([512, 512], np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = -959.6407
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(np.sqrt(np.abs(solution[1] + (solution[0]/2.) + 47)))
part2 = np.sin(np.sqrt(np.abs(solution[0] - (solution[1] + 47))))
        # standard Eggholder: the second term is scaled by x1
        return -1.0 * (solution[1] + 47) * part1 - 1.0 * solution[0] * part2
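# With the x1 scaling in place, the known minimum is reproduced approximately:
#   assert abs(Eggholder()._eval_func(np.array([512.0, 404.2319])) + 959.6407) < 0.1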
class GramacyLee(ContinuousProblem):
"""Gramacy & Lee function."""
def __init__(self):
n_dim = 1
min_vals = np.array([0.5], np.float)
max_vals = np.array([2.5], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(10 * np.pi * solution[0]) / (2 * solution[0])
part2 = np.power(solution[0] - 1.0, 4)
return part1 + part2
class Griewank(ContinuousProblem):
"""Griewank function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-600] * n_dim, np.float)
max_vals = np.array([600] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sum((solution * solution) / 4000.0)
        # sqrt(i) for i = 1..n; starting the range at 0 would divide by zero
        sqrt = np.array([np.sqrt(i) for i in range(1, len(solution) + 1)], np.float)
part2 = np.prod(np.cos(solution / sqrt))
return part1 - 1.0 * part2 + 1.0
class HolderTable(ContinuousProblem):
"""Holder Table function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -19.2085
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sqrt_sq = np.sqrt(np.sum(solution * solution))
part1 = np.exp(np.abs(1.0 - (sum_sqrt_sq / np.pi)))
part2 = np.sin(solution[0]) * np.cos(solution[1])
return -1.0 * np.abs(part2 * part1)
class Langermann(ContinuousProblem):
"""Langermann function."""
def __init__(self):
n_dim = 2
min_vals = np.array([0.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = np.array([[3, 5],
[5, 2],
[2, 1],
[1, 4],
[7, 9]], np.float)
c = np.array([1, 2, 5, 2, 3], np.float)
part_sum = np.sum((solution - A) * (solution - A), axis=1)
part1 = np.cos(np.pi * part_sum)
part2 = np.exp((-1.0 / np.pi) * part_sum)
return np.sum(c * part1 * part2)
class Levy(ContinuousProblem):
"""Levy function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
w = 1.0 + (solution - 1.0) / 4.0
part_w = w[:-1]
part1 = np.power(np.sin(np.pi * w[0]), 2)
part2 = np.sum((part_w - 1) * (part_w - 1) *
(1 + 10 * np.power(np.sin(np.pi * part_w + 1), 2)))
part3 = ((w[-1] - 1) * (w[-1] - 1) * (1 + np.power(np.sin(2 * np.pi *
w[-1]), 2)))
return part1 + part2 + part3
class Levy13(ContinuousProblem):
"""Levy function N.13."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0] * n_dim, np.float)
max_vals = np.array([10.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
arg1, arg2 = solution
part1 = np.power(np.sin(3 * np.pi * arg1), 2)
part2 = (arg1 - 1) * (arg1 - 1) * (1 + np.power(np.sin(3 * np.pi * arg2), 2))
part3 = (arg2 - 1) * (arg2 - 1) * (1 + np.power(np.sin(2 * np.pi * arg2), 2))
return part1 + part2 + part3
class Rastrigin(ContinuousProblem):
"""Rastrigin function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = 10
n = len(solution)
part1 = A * np.cos(2 * np.pi * solution)
part2 = solution * solution
return A*n + np.sum(part2 - part1)
class Schaffer2(ContinuousProblem):
"""Schaffer function N.2."""
def __init__(self):
n_dim = 2
min_vals = np.array([-100.0] * n_dim, np.float)
max_vals = np.array([100.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.power(np.sin((x1 * x1) - (x2 * x2)), 2)
part2 = np.power(1.0 + 0.001 * ((x1 * x1) + (x2 * x2)), 2)
return 0.5 + (part1 - 0.5) / part2
class Schaffer4(ContinuousProblem):
"""Schaffer function N.4."""
def __init__(self):
n_dim = 2
min_vals = np.array([-100.0] * n_dim, np.float)
max_vals = np.array([100.0] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
        part1 = np.power(np.cos(np.sin(np.abs((x1 * x1) - (x2 * x2)))), 2)  # cos^2 per Schaffer N.4
part2 = np.power(1.0 + 0.001 * ((x1 * x1) + (x2 * x2)), 2)
return 0.5 + (part1 - 0.5) / part2
class Schwefel(ContinuousProblem):
"""Schwefel function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-500] * n_dim, np.float)
max_vals = np.array([500] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 20.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A_constant = 418.9829
n = len(solution)
sq_sol = np.sqrt(np.abs(solution))
return A_constant * n - 1.0 * np.sum(solution * np.sin(sq_sol))
class Shubert(ContinuousProblem):
"""Shubert function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -186.7309
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
A = np.array(range(1, 6), np.float)
x1 = solution[0]
x2 = solution[1]
        # Shubert: sum_i i * cos((i+1) * x + i) over i = 1..5 for each coordinate
        part1 = A * np.cos((A + 1.) * x1 + A)
        part2 = A * np.cos((A + 1.) * x2 + A)
return np.sum(part1) * np.sum(part2)
# --------------------------------------------------------------------------- #
# Functions Bowl-Shaped #
# --------------------------------------------------------------------------- #
class Bohachevsky(ContinuousProblem):
"""Bohachevsky functions.
Args:
num_func (int): 1, 2 or 3. Define which Bohachevsky function is used.
Default is 1.
"""
def __init__(self, num_func=1):
if num_func not in [1, 2, 3]:
raise ValueError("The Bohachevsky can only be of "
"numbers 1, 2 or 3.")
self.num_func = num_func
n_dim = 2
min_vals = np.array([-100] * n_dim, np.float)
max_vals = np.array([100] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
if self.num_func == 1:
return self._eval_func_1(solution)
elif self.num_func == 2:
return self._eval_func_2(solution)
elif self.num_func == 3:
return self._eval_func_3(solution)
def _eval_func_1(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 0.3 * np.cos(3 * np.pi * x1)
part3 = 0.4 * np.cos(4 * np.pi * x2)
return part1 - part2 - part3 + 0.7
def _eval_func_2(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 0.3 * np.cos(3 * np.pi * x1)
part3 = np.cos(4 * np.pi * x2)
return part1 - (part2 * part3) + 0.3
def _eval_func_3(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = (x1 * x1) + 2 * (x2 * x2)
part2 = 3 * np.pi * x1
part3 = 4 * np.pi * x2
return part1 - 0.3 * np.cos(part2 + part3) + 0.3
class Perm0(ContinuousProblem):
"""Perm 0,d,B function.
Args:
n_dim (int): Number of dimensions.
beta (float): Argument of the function.
"""
def __init__(self, n_dim, beta):
self.beta = beta
min_vals = np.array([-1 * n_dim] * n_dim, np.float)
max_vals = np.array([n_dim] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, n_dim/10.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
j = np.array(range(1, n+1), np.float)
s_mat = np.zeros((n, n), np.float)
j_mat = np.zeros((n, n), np.float)
for i in range(n):
s_mat[i] += np.power(solution, i+1)
j_mat[i] += 1 / np.power(j, i+1)
        # Perm 0,d,beta: sum_i ( sum_j (j + beta) * (x_j^i - j^(-i)) )^2
        part1 = np.sum((j + self.beta) * (s_mat - j_mat), axis=1)
        return np.sum(np.power(part1, 2))
class RotatedHyperEllipsoid(ContinuousProblem):
"""Rotated Hyper-Ellipsoid function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-65.536] * n_dim, np.float)
max_vals = np.array([65.536] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 10.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
        s_mat = np.zeros((n, n), np.float)
        solsq = solution * solution
        for i in range(n):
            # row i holds the first i+1 squared components, zero-padded
            s_mat[i, :i + 1] = solsq[:i + 1]
return np.sum(s_mat)
class Sphere(ContinuousProblem):
"""Sphere function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-5.12] * n_dim, np.float)
max_vals = np.array([5.12] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
return np.sum(solution * solution)
class SumDiffPower(ContinuousProblem):
"""Sum of Different Powers function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-1] * n_dim, np.float)
max_vals = np.array([1] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.001, 0.1)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
powers = np.array(range(2, n+2), np.float)
return np.sum(np.power(np.abs(solution), powers))
class SumSquare(ContinuousProblem):
"""Sum Squares function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
i = np.array(range(1, n+1), np.float)
return np.sum(i * solution * solution)
class Trid(ContinuousProblem):
"""Trid function.
Global minimum are knowm for dimensions 6 and 10.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
dimsq = n_dim * n_dim
        # Trid domain is [-d^2, d^2] in every dimension
        min_vals = np.array([-dimsq] * n_dim, np.float)
max_vals = np.array([dimsq] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, dimsq/10.)
if n_dim == 6:
known_min = -50.
elif n_dim == 10:
            known_min = -210.  # Trid minimum is -d(d+4)(d-1)/6
else:
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sum(np.power(solution - 1, 2))
part2 = np.sum(solution[1:] * solution[:-1])
return part1 - part2
# --------------------------------------------------------------------------- #
# Functions Plate-Shaped #
# --------------------------------------------------------------------------- #
class Booth(ContinuousProblem):
"""Booth function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10] * n_dim, np.float)
max_vals = np.array([10] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
x1 = solution[0]
x2 = solution[1]
part1 = np.power(x1 + 2 * x2 - 7.0, 2)
        part2 = np.power(2 * x1 + x2 - 5.0, 2)
        return part1 + part2
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Vector3
from sensor_msgs.msg import Imu
import matplotlib.pyplot as plt
import numpy as np
angle = Vector3
Initialized = False
init_angle = Vector3
# "p" is eular angle array
def rotM(angle):
px = angle.x - init_angle.x
py = angle.y - init_angle.y
pz = angle.z - init_angle.z
Rx = np.array([[1, 0, 0],
[0, np.cos(px), np.sin(px)],
[0, -np.sin(px), np.cos(px)]])
Ry = np.array([[np.cos(py), 0, -np.sin(py)],
[0, 1, 0],
                   [np.sin(py), 0, np.cos(py)]])
from abc import ABC, abstractmethod
from typing import List
import numpy as np
from scipy.stats import t, spearmanr
from scipy.special import erfinv
from chemprop.uncertainty.uncertainty_calibrator import UncertaintyCalibrator
from chemprop.train import evaluate_predictions
class UncertaintyEvaluator(ABC):
"""
A class for evaluating the effectiveness of uncertainty estimates with metrics.
"""
def __init__(
self,
evaluation_method: str,
calibration_method: str,
uncertainty_method: str,
dataset_type: str,
loss_function: str,
calibrator: UncertaintyCalibrator,
):
self.evaluation_method = evaluation_method
self.calibration_method = calibration_method
self.uncertainty_method = uncertainty_method
self.dataset_type = dataset_type
self.loss_function = loss_function
self.calibrator = calibrator
self.raise_argument_errors()
def raise_argument_errors(self):
"""
Raise errors for incompatibilities between dataset type and uncertainty method, or similar.
"""
if self.dataset_type == "spectra":
raise NotImplementedError(
"No uncertainty evaluators implemented for spectra dataset type."
)
if self.uncertainty_method in ['ensemble', 'dropout'] and self.dataset_type in ['classification', 'multiclass']:
raise NotImplementedError(
                'Though ensemble and dropout uncertainty methods are available for '
                'classification and multiclass dataset types, their outputs are not '
                'confidences and are not compatible with any implemented evaluation '
                'methods for classification.'
)
@abstractmethod
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
) -> List[float]:
"""
Evaluate the performance of uncertainty predictions against the model target values.
:param targets: The target values for prediction.
:param preds: The prediction values of a model on the test set.
:param uncertainties: The estimated uncertainty values, either calibrated or uncalibrated, of a model on the test set.
:return: A list of metric values for each model task.
"""
class MetricEvaluator(UncertaintyEvaluator):
"""
A class for evaluating confidence estimates of classification and multiclass datasets using builtin evaluation metrics.
"""
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
return evaluate_predictions(
preds=uncertainties,
targets=targets,
num_tasks=np.array(targets).shape[1],
metrics=[self.evaluation_method],
dataset_type=self.dataset_type,
)[self.evaluation_method]
class NLLRegressionEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values using the mean negative-log-likelihood
of the actual targets given the probability distributions estimated by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
"NLL Regression Evaluator is only for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
if self.calibrator is None: # uncalibrated regression uncertainties are variances
uncertainties = np.array(uncertainties)
preds = np.array(preds)
targets = np.array(targets)
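            # Gaussian NLL per point: -log N(t | mu, sigma^2)
            #     = 0.5 * log(2*pi*sigma^2) + (mu - t)^2 / (2*sigma^2)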
nll = np.log(2 * np.pi * uncertainties) / 2 \
+ (preds - targets) ** 2 / (2 * uncertainties)
return np.mean(nll, axis=0).tolist()
else:
nll = self.calibrator.nll(
preds=preds, unc=uncertainties, targets=targets
) # shape(data, task)
return np.mean(nll, axis=0).tolist()
class NLLClassEvaluator(UncertaintyEvaluator):
"""
A class for evaluating classification uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "classification":
raise ValueError(
"NLL Classification Evaluator is only for classification dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets)
uncertainties = np.array(uncertainties)
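        # Bernoulli NLL per point: -log(p^t * (1-p)^(1-t)), with p the predicted probability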
likelihood = uncertainties * targets + (1 - uncertainties) * (1 - targets)
nll = -1 * np.log(likelihood)
return np.mean(nll, axis=0).tolist()
class NLLMultiEvaluator(UncertaintyEvaluator):
"""
A class for evaluating multiclass uncertainty values using the mean negative-log-likelihood
of the actual targets given the probabilities assigned to them by the model.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "multiclass":
raise ValueError(
"NLL Multiclass Evaluator is only for multiclass dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets, dtype=int) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
        nll = np.zeros_like(targets, dtype=float)  # float, so task NLLs are not truncated to int
for i in range(targets.shape[1]):
task_preds = uncertainties[:, i]
task_targets = targets[:, i] # shape(data)
bin_targets = np.zeros_like(preds[:, 0, :]) # shape(data, classes)
bin_targets[np.arange(targets.shape[0]), task_targets] = 1
task_likelihood = np.sum(bin_targets * task_preds, axis=1)
task_nll = -1 * np.log(task_likelihood)
nll[:, i] = task_nll
return np.mean(nll, axis=0).tolist()
class CalibrationAreaEvaluator(UncertaintyEvaluator):
"""
A class for evaluating regression uncertainty values based on how they deviate from perfect
calibration on an observed-probability versus expected-probability plot.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise NotImplementedError(
f"Miscalibration area is only implemented for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
fractions = np.zeros([preds.shape[1], 101]) # shape(tasks, 101)
fractions[:, 100] = 1
if self.calibrator is not None:
# using 101 bin edges, hardcoded
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
original_interval = self.calibrator.interval_percentile
for i in range(1, 100):
self.calibrator.regression_calibrator_metric = "interval"
self.calibrator.interval_percentile = i
self.calibrator.calibrate()
bin_scaling = self.calibrator.scaling
bin_unc = (
uncertainties
/ np.expand_dims(original_scaling, axis=0)
* np.expand_dims(bin_scaling, axis=0)
) # shape(data, tasks)
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
self.calibrator.regression_calibrator_metric = original_metric
self.calibrator.scaling = original_scaling
self.calibrator.interval_percentile = original_interval
else: # uncertainties are uncalibrated variances
std = np.sqrt(uncertainties)
for i in range(1, 100):
bin_scaling = erfinv(i / 100) * np.sqrt(2)
bin_unc = std * bin_scaling
bin_fraction = np.mean(bin_unc >= abs_error, axis=0)
fractions[:, i] = bin_fraction
# trapezoid rule
auce = np.sum(
0.01 * np.abs(fractions - np.expand_dims(np.arange(101) / 100, axis=0)),
axis=1,
)
return auce.tolist()
class ExpectedNormalizedErrorEvaluator(UncertaintyEvaluator):
"""
A class that evaluates uncertainty performance by binning together clusters of predictions
and comparing the average predicted variance of the clusters against the RMSE of the cluster.
Method discussed in https://doi.org/10.1021/acs.jcim.9b00975.
"""
def raise_argument_errors(self):
super().raise_argument_errors()
if self.dataset_type != "regression":
raise ValueError(
f"Expected normalized error is only appropriate for regression dataset types."
)
def evaluate(
self,
targets: List[List[float]],
preds: List[List[float]],
uncertainties: List[List[float]],
):
targets = np.array(targets) # shape(data, tasks)
uncertainties = np.array(uncertainties)
preds = np.array(preds)
abs_error = np.abs(preds - targets) # shape(data, tasks)
sort_record = np.rec.fromarrays([uncertainties, abs_error], names="i, j")
sort_record.sort(axis=0)
uncertainties = sort_record["i"]
abs_error = sort_record["j"]
# get stdev scaling
if self.calibrator is not None:
original_metric = self.calibrator.regression_calibrator_metric
original_scaling = self.calibrator.scaling
# 100 bins
split_unc = np.array_split(
uncertainties, 100, axis=0
) # shape(list100, data, tasks)
split_error = np.array_split(abs_error, 100, axis=0)
mean_vars = np.zeros([preds.shape[1], 100]) # shape(tasks, 100)
rmses = np.zeros_like(mean_vars)
for i in range(100):
if self.calibrator is None: # starts as a variance
mean_vars[:, i] = np.mean(split_unc[i], axis=0)
rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
elif self.calibration_method == "tscaling": # convert back to sample stdev
bin_unc = split_unc[i] / np.expand_dims(original_scaling, axis=0)
bin_var = t.var(df=self.calibrator.num_models - 1, scale=bin_unc)
mean_vars[:, i] = np.mean(bin_var, axis=0)
                rmses[:, i] = np.sqrt(np.mean(np.square(split_error[i]), axis=0))
import numpy as np
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams["figure.dpi"] = 125
mpl.rcParams["text.usetex"] = True
mpl.rc("font", **{"family": "sans-serif"})
params = {"text.latex.preamble": r"\usepackage{amsmath}"}
plt.rcParams.update(params)
sns.set_theme()
# Q5
# Inverse Transform Sampling
pdf = np.vectorize(lambda x: (2 * x + 3) / 40)
inv_cdf = np.vectorize(lambda u: (40 * u + 9 / 4) ** 0.5 - 3 / 2)
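# Derivation sketch: on the support [0, 5] the CDF is F(x) = (x^2 + 3x) / 40,
# and solving u = F(x) for x gives x = sqrt(40u + 9/4) - 3/2. Samples can then
# be drawn by inverse transform (sample size is an arbitrary choice):
#
#   samples = inv_cdf(np.random.rand(10_000))
#   sns.histplot(samples, stat="density")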
import os
import cv2
import glob
import pickle
import numpy as np
# Size of the chessboard used for calibration
CHESSBOARD_SIZE = (6, 9)
# Calculate the curvature of each horizontal and vertical curve on the chessboard
# Return: curvature integral
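# For a parametric curve (x(t), y(t)) the pointwise curvature is
#   kappa = |x' * y'' - y' * x''| / (x'^2 + y'^2)^(3/2)
# which is what the gradient-based formula below computes; the "integral" is
# approximated by summing kappa over the sampled corners.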
def chessboard_measure(corners):
cornersMat = np.reshape(corners, (-1, CHESSBOARD_SIZE[0], 2))
curvature_integral = 0
# Vertical curves
for k in range(CHESSBOARD_SIZE[1]):
dx_dt = np.gradient(cornersMat[k, :, 0])
dy_dt = np.gradient(cornersMat[k, :, 1])
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
curvature = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5
curvature_integral += np.sum(curvature)
# Horizontal curves
for k in range(CHESSBOARD_SIZE[0]):
dx_dt = np.gradient(cornersMat[:, k, 0])
dy_dt = np.gradient(cornersMat[:, k, 1])
d2x_dt2 = np.gradient(dx_dt)
d2y_dt2 = np.gradient(dy_dt)
curvature = np.abs(d2x_dt2 * dy_dt - dx_dt * d2y_dt2) / (dx_dt * dx_dt + dy_dt * dy_dt) ** 1.5
curvature_integral += np.sum(curvature)
return curvature_integral
# Chessboard detection on the image loaded from the given file
# If saveDetection==True an image with the result of the detection is saved to the disk
# Return: success status (True or False), corners detected in image coordinates, size of the image
def detect_chessboard(img_path, save_detection):
img = cv2.imread(img_path)
ret, corners, img_shape = detect_chessboard_img(img, save_detection)
if ret == True:
if save_detection:
            chess_path = os.path.splitext(img_path)[0] + '_chess.jpg'
cv2.imwrite(chess_path, img)
return ret, corners, img_shape
# Chessboard detection on input opencv image
# If showChessboard==True(default) we draw corners and lines on input image
# Return: success status (True or False), corners detected in image coordinates, size of the image
def detect_chessboard_img(img, showChessboard=True):
chessboard_flags = cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE
subpix_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.01)
img_shape = img.shape[:2]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, CHESSBOARD_SIZE, chessboard_flags)
if ret == True:
# Refining corners position with sub-pixels based algorithm
cv2.cornerSubPix(gray, corners, (3, 3), (-1, -1), subpix_criteria)
if showChessboard == True:
cv2.drawChessboardCorners(img, CHESSBOARD_SIZE, corners, ret)
else:
print('Chessboard not detected in image ')
return ret, corners, img_shape
# Show deformation measure on source image and undistorted image
# The less the better (no deformation means perfect straight lines which means 0 curvature)
def deformation_measure(src_image, undistorted_img):
ret, corners1, _ = detect_chessboard_img(src_image)
if ret == False:
return
ret, corners2, _ = detect_chessboard_img(undistorted_img)
if ret == False:
return
m1 = chessboard_measure(corners1)
m2 = chessboard_measure(corners2)
r = (1-m2/m1)*100
print('Deformation measure on source image: ' + str(m1))
print('Deformation measure on undistorted image: ' + str(m2))
print('Correction rate in percent: ' + str(r))
def evaluate(img_path, calibration_path):
src_image = cv2.imread(img_path)
k, d, dims = load_calibration(calibration_path)
undistorted_img = undistort(src_image, k, d, dims)
deformation_measure(src_image, undistorted_img)
# Launch calibration process from all jpg images in the given folder
# Return: calibration parameters K, D and image dimensions
def calibrate(img_folder, save_detection=False):
# Calibration paramenters
# NB: when CALIB_CHECK_COND is set, the algorithm checks if the detected corners of each images are valid.
# If not, an exception is thrown which indicates the zero-based index of the invalid image.
# Such image should be replaced or removed from the calibration dataset to ensure a good calibration.
# calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_CHECK_COND + cv2.fisheye.CALIB_FIX_SKEW
calibration_flags = cv2.fisheye.CALIB_RECOMPUTE_EXTRINSIC + cv2.fisheye.CALIB_FIX_SKEW
term_criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-6)
# Logical coordinates of chessboard corners
obj_p = np.zeros((1, CHESSBOARD_SIZE[0]*CHESSBOARD_SIZE[1], 3), np.float32)
obj_p[0, :, :2] = np.mgrid[0:CHESSBOARD_SIZE[0], 0:CHESSBOARD_SIZE[1]].T.reshape(-1, 2)
img_ref_shape = None
obj_points = [] # 3d point in real world space
img_points = [] # 2d points in image plane.
# Iterate through all images in the folder
    images = glob.glob(img_folder + '/*.png')
all_cnt, valid_cnt, not_valid_cnt = 0,0,0
not_valid_img_names = []
for filename in images:
# Chessboard detection
ret, corners, img_shape = detect_chessboard(filename, save_detection)
all_cnt += 1
        if img_ref_shape is None:
img_ref_shape = img_shape
else:
assert img_ref_shape == img_shape, "All images must share the same size."
# If found, add object points, image points (after refining them)
        if ret:
valid_cnt += 1
obj_points.append(obj_p)
img_points.append(corners)
print('Image ' + filename + ' is valid for calibration')
else:
not_valid_cnt += 1
not_valid_img_names.append(filename)
# print('Image ' + filename + ' is not valid for calibration')
    print(f'Detected {all_cnt} images in total: {valid_cnt} valid, {not_valid_cnt} invalid')
    print('The following calibration images are invalid:')
for tmp in not_valid_img_names:
print(tmp)
k = np.zeros((3, 3))
d = np.zeros((4, 1))
dims = img_shape[::-1]
valid_img_count = len(obj_points)
if valid_img_count > 0:
rvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(valid_img_count)]
tvecs = [np.zeros((1, 1, 3), dtype=np.float64) for i in range(valid_img_count)]
print('Beginning calibration process...')
rms, _, _, _, _ = cv2.fisheye.calibrate(
obj_points,
img_points,
img_shape,
k,
d,
rvecs,
tvecs,
calibration_flags,
term_criteria
)
print("Calibration done!")
print("Found " + str(valid_img_count) + " valid images for calibration")
return k, d, dims
# Save calibration to file -> use of pickle serialization
def save_calibration(path, dims, k, d ):
with open(path, 'wb') as f:
pickle.dump(dims, f)
pickle.dump(k, f)
pickle.dump(d, f)
# Load calibration from file -> use of pickle serialization
def load_calibration(path):
    # NB: values must be read back in the same order save_calibration wrote them
    with open(path, 'rb') as f:
        dims = pickle.load(f)
        k = pickle.load(f)
        d = pickle.load(f)
    return k, d, dims
# Undistort FishEye image with given calibration parameters
def undistort(src_image, k, d, dims):
dim1 = src_image.shape[:2][::-1] # dim1 is the dimension of input image to un-distort
assert dim1[0] / dim1[1] == dims[0] / dims[1], "Image to undistort needs to have same aspect ratio as the ones used in calibration"
    map1, map2 = cv2.fisheye.initUndistortRectifyMap(k, d, np.eye(3), k, dims, cv2.CV_16SC2)
    undistorted_img = cv2.remap(src_image, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return undistorted_img
import boto3
import json
import io
import tflite_runtime.interpreter as tflite
import numpy as np
from PIL import Image
interpreter = None
def handlerMapper(event,context):
    if np.random.rand() > 0.5:
event['model_type'] = 'NewModel'
else:
event['model_type'] = 'OldModel'
event['image'] = 'images/flower.jpg'
return event
def loadImage(s3_bucket, s3_key):
s3 = boto3.client('s3')
result = s3.get_object(Bucket=s3_bucket, Key=s3_key)
result_image = Image.open(io.BytesIO(result["Body"].read()))
input_data = np.expand_dims(np.array(result_image.resize((299,299)),dtype=np.float32)/255, axis=0)
return input_data
def runInference(interpreter, input_data):
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], input_data)
interpreter.invoke()
output_data = interpreter.get_tensor(output_details[0]['index'])
return output_data
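# Minimal local smoke test for runInference, reusing the model path from the
# handlers below (illustrative only; assumes the .tflite file is present):
#   interp = tflite.Interpreter(model_path="models/converted_model.tflite")
#   interp.allocate_tensors()
#   shape = interp.get_input_details()[0]['shape']
#   dummy = np.array(np.random.random_sample(shape), dtype=np.float32)
#   print(runInference(interp, dummy).shape)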
def handlerInferenceNew(event, context):
label_list = ['Cyclamen','Lotus','Passionflower']
global interpreter
if interpreter is None:
interpreter = tflite.Interpreter(model_path="models/converted_model_quantized.tflite")
interpreter.allocate_tensors()
if ('image' in event):
input_data = loadImage('course-pdl-inference', event['image'])
else:
input_details = interpreter.get_input_details()
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
output_data = runInference(interpreter, input_data)
return {'feature_vector':output_data.tolist(), 'prediction':label_list[np.argmax(output_data)], 'model_type':'NewModel'}
def handlerInferenceOld(event, context):
label_list = ['Cyclamen','Lotus','Passionflower']
global interpreter
if interpreter is None:
interpreter = tflite.Interpreter(model_path="models/converted_model.tflite")
interpreter.allocate_tensors()
if ('image' in event):
input_data = loadImage('course-pdl-inference', event['image'])
else:
input_details = interpreter.get_input_details()
input_shape = input_details[0]['shape']
input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
output_data = runInference(interpreter, input_data)
    return {'feature_vector':output_data.tolist(), 'prediction':label_list[np.argmax(output_data)], 'model_type':'OldModel'}
from pathlib import Path
from numpy import ( sin, cos, exp, pi, tan, log, sinh, cosh, tanh, sinc,
sqrt, cbrt, angle, real, imag, abs,
arcsin, arccos, arctan, arcsinh, arccosh, arctanh)
from numpy import e
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
import scipy.linalg
import scipy as sp
import scipy.sparse
import scipy.sparse.linalg
from numba import njit
from schrodinger import util
import sys
from time import time
""" Original french comments from
https://github.com/Azercoco/Python-2D-Simulation-of-Schrodinger-Equation
Le programme simule le comportement d'un paquet d'onde gaussien suivant
l'équation de Schrödinger. L'algorithme utilisé est la méthode
Alternating direction implicit method.
La simulation permet de configurer un potentiel constant avec le temps
ainsi que la présence d'obstacles (qui sont gérés comme des barrières
de potentiel très élévées).
La fonction d'onde complexe est affichée en convertissant les nombres
complexes en format de couleur HSV.
x , y : Les positions de départ du paquet d'onde
Kx, Ky : Ses nombres d'onde
Ax, Ay : Ses facteurs d'étalements selon x et y
V : L'expression du potentiel
O : L'expression de la présence d'obstacles
Le potentiel et la présence d'obstacle doivent être exprimés comme des
expressions Python valides dépendant de x et y (valant respectivement
un float et un boolean) car le progamme utilise la fonction Python
eval() pour les évaluer.
"""
""" Translated by Google Translate
https://github.com/Azercoco/Python-2D-Simulation-of-Schrodinger-Equation
The program simulates the behavior of a Gaussian wave packet following the
Schrödinger's equation. The algorithm used is the method
Alternating direction implicit method.
The simulation makes it possible to configure a constant potential over time
as well as the presence of obstacles (which are managed as barriers
very high potential).
Complex wave function is displayed by converting numbers
complex in HSV color format.
x, y: The starting positions of the wave packet
Kx, Ky: The numbers of the wave
Ax, Ay: Its spreading factors along x and y
V: The expression of potential
O: The expression of the presence of obstacles
The potential and the presence of obstacles must be expressed as
valid Python expressions depending on x and y (respectively
a float and a boolean) because the program uses the Python function
eval () to evaluate them.
"""
class Field:
def __init__(self):
self.potential_expr = None
self.obstacle_expr = None
def setPotential(self, expr):
self.potential_expr = expr
self.test_pot_expr()
def setObstacle(self, expr):
self.obstacle_expr = expr
self.test_obs_expr()
    def test_pot_expr(self):
        # x and y must be defined in the local scope for eval()
        x = 0
        y = 0
        try:
            eval(self.potential_expr)
        except Exception:
            print(self.potential_expr)
            print('Potential calculation error: set to 0 by default')
            self.potential_expr = '0'
    def test_obs_expr(self):
        # x and y must be defined in the local scope for eval()
        x = 0
        y = 0
        try:
            eval(self.obstacle_expr)
        except Exception:
            print('Error setting obstacle: set to False by default')
            self.obstacle_expr = 'False'
def isObstacle(self, x, y):
a = False
try:
a = eval(self.obstacle_expr)
        except Exception:
print(f'Invalid obstacle: {self.obstacle_expr}')
return a
def getPotential(self, x, y):
a = 0 + 0j
try:
a = eval(self.potential_expr)
        except Exception:
print(f'Invalid potential: {self.potential_expr}')
return a
def solve(wf, V_x, V_y, HX, HY, N, step, delta_t):
vector_wrt_x = util.x_concatenate(wf, N)
vector_derive_y_wrt_x = util.x_concatenate(util.dy_square(wf, N, step), N)
U_wrt_x = vector_wrt_x + (1j*delta_t/2 )*(vector_derive_y_wrt_x - V_x*vector_wrt_x)
U_wrt_x_plus = scipy.sparse.linalg.spsolve(HX, U_wrt_x)
wf = util.x_deconcatenate(U_wrt_x_plus, N)
vector_wrt_y = util.y_concatenate(wf, N)
vector_derive_x_wrt_y = util.y_concatenate(util.dx_square(wf, N, step), N)
U_wrt_y = vector_wrt_y + (1j*delta_t/2 )*(vector_derive_x_wrt_y - V_y *vector_wrt_y)
U_wrt_y_plus = scipy.sparse.linalg.spsolve(HY, U_wrt_y)
wf = util.y_deconcatenate(U_wrt_y_plus, N)
return wf
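# The two half-steps in solve() implement the Alternating Direction Implicit
# (ADI) scheme: in the first half-step the y-derivative and potential act
# explicitly while the x-direction is solved implicitly (the sparse solve
# against HX), and the second half-step swaps the roles (solve against HY).
# Schematically, with D_xx and D_yy the 1D second-difference operators:
#   HX @ psi^{n+1/2} = (I + i*dt/2 * (D_yy - V)) @ psi^n
#   HY @ psi^{n+1}   = (I + i*dt/2 * (D_xx - V)) @ psi^{n+1/2}
# (a sketch inferred from the code above; HX and HY are built elsewhere).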
class Simulate:
    SIZE = 10  # simulation size
    # Presets used for different shots. Only the last assignment of each
    # constant takes effect, so the earlier presets are kept as comments:
    #   wavefunction collision:      FPS = 60, DURATION = 5, DELTA_T = 0.005
    #   wavefunction collapse:       FPS = 60, DURATION = 5, DELTA_T = 0.01
    #   wavefunction collapse 2 & 3: FPS = 60, DURATION = 5, DELTA_T = 0.03
    #   wavefunction collapse 4:     FPS = 60, DURATION = 5, DELTA_T = 0.005
    #   entanglement1:               FPS = 60, DURATION = 5, DELTA_T = 0.02
    # wavefunction movement
    FPS = 60
    DURATION = 5  # duration in seconds
    DELTA_T = 0.005  # 0.125 # time elapsed per second of video
def __init__(self, N, collapse=False):
self.N = N # dimension in number of points of the simulation
self.FRAMES = self.DURATION * self.FPS
self.field = Field()
#Potential as a function of x and y
self.field.setPotential("0") # Ex: x**2+y**2"
#Obstacle: boolean expression in fct of x and y
# (set to False if you do not want an obstacle)
obstacles = ("(x > 0.5 and x < 1 and not "
"((y > 0.25 and y < 0.75) or "
"(y < -0.25 and y > -0.75)))")
obstacles = "False"
self.collapse = collapse
self.field.setObstacle(obstacles)
self.size = self.SIZE
#self.dataset = np.zeros((self.FRAMES,self.N,self.N), dtype='c16')
        print(16*self.N*self.N*1e-9, 'GB of memory per frame')
#if self.dataset.nbytes > 100e9:
# raise(Exception("TOO MUCH DATA FOR MEMORY"))
self.simulation_initialize()
""" ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLISION
x0 = [0, 0],
y0 = [0,1],
#number of waves
k_x = [0, 0],#5000
k_y = [0, 90000],#2500,
#spreading
a_x = [.2, .2], #.2, #.1,#.33,#0.05#.33
a_y = [.2, .2], #.2, #.1,#.33,#0.05#.33
"""
""" ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLISION 1
x0 = [0,0],
y0 = [0,1.5],
#number of waves
k_x = [10, 0],#5000
k_y = [0, 90000],#2500,
#spreading
a_x = [.15, .15], #.2, #.1,#.33,#0.05#.33
a_y = [.15, .15], #.2, #.1,#.33,#0.05#.33
"""
""" ------ INITIAL CONDITIONS FOR MOVEMENT SHOTS
x0 = [0],
y0 = [0],
#number of waves
k_x = [5000],
k_y = [2500],#2500,
#spreading
a_x = [.2], #.2, #.1,#.33,#0.05#.33
a_y = [.2], #.2, #.1,#.33,#0.05#.33
"""
""" ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLAPSE
x0 = [0],#0],
y0 = [0],
#number of waves
k_x = [50],
k_y = [25],#2500,
#spreading
a_x = [.25], #.2, #.1,#.33,#0.05#.33
a_y = [.25], #.2, #.1,#.33,#0.05#.33
"""
""" ------ INITIAL CONDITIONS FOR WAVEFUNCTION COLLAPSE 3
x0 = [0],#0],
y0 = [0],
#number of waves
k_x = [50],
k_y = [25],#2500,
#spreading
a_x = [.28], #.2, #.1,#.33,#0.05#.33
a_y = [.28], #.2, #.1,#.33,#0.05#.33
"""
""" ------ INITIAL CONDITIONS FOR ENTANGLEMENT
x0 = [0, 0],
y0 = [1,-1],
#number of waves
k_x = [0, 0],#5000
k_y = [-3000, 3000],#2500,
#spreading
a_x = [.15, .15], #.2, #.1,#.33,#0.05#.33
a_y = [.15, .15], #.2, #.1,#.33,#0.05#.33
"""
def simulation_initialize(self,
#characteristics of the wave packet gaussian 2D
#centre
x0 = [0],
y0 = [0],
#number of waves
k_x = [5000],
k_y = [2500],#2500,
#spreading
a_x = [.2], #.2, #.1,#.33,#0.05#.33
a_y = [.2], #.2, #.1,#.33,#0.05#.33
# keep below the same
wall_potential = 1e10,
):
""" initialize the wave packet """
N = self.N
step = self.SIZE/self.N
delta_t = self.DELTA_T/self.FPS
self.counter = 0
# create points at all xy coordinates in meshgrid
self.x_axis = np.linspace(-self.size/2, self.size/2, N)
self.y_axis = np.linspace(-self.size/2, self.size/2, N)
X, Y = np.meshgrid(self.x_axis, self.y_axis)
n = 0
phase = np.exp( 1j*(X*k_x[n] + Y*k_y[n]))
px = np.exp( - ((x0[n] - X)**2)/(4*a_x[n]**2))
py = np.exp( - ((y0[n] - Y)**2)/(4*a_y[n]**2))
wave_function = phase*px*py
        norm = np.sqrt(util.integrate(np.abs(wave_function) ** 2, N, step))
        # Normalise so that the total probability integrates to 1
        # (the call signature of util.integrate is assumed from context).
        self.wave_function = wave_function / norm
import netCDF4 as nc
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import numpy as np
obj=nc.Dataset('I:\\Python_related\\850hPa_uv_global_1mon_4daily.nc')
u=obj.variables['u'][0,35:76,70:141]
v=obj.variables['v'][0,35:76,70:141]
lat=obj.variables['latitude'][:]
lon=obj.variables['longitude'][:]
m=Basemap(projection='cyl',llcrnrlat=15,urcrnrlat=55,llcrnrlon=70,urcrnrlon=140,resolution='l')
lons,lats=m.makegrid(71,41)
lats=lats[::-1]
x,y=m(lons,lats)
m.drawparallels(np.arange(15.,56.,10.),labels=[1,0,0,0],fontsize=15)
m.drawmeridians(np.arange(75.,141.,15.),labels=[0,0,0,1],fontsize=15)
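# The source file is truncated here; a plausible continuation (an assumption,
# not from the original) would draw the 850 hPa wind field loaded above:
#   q = m.quiver(x[::2, ::2], y[::2, ::2], u[::2, ::2], v[::2, ::2])
#   plt.quiverkey(q, 0.9, 1.05, 10, '10 m/s', labelpos='E')
#   plt.show()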
"""This module/class contains functionality for computing (and plotting) radial
velocities and creating reference spectra for extracted fluxes. This should
ideally remain independent of the extraction method, such that it does not
matter which spectrograph took the data, nor what "Spectrograph" object was
used for extraction.
Most of the code below has been moved from the script "test_rhea2_extract.py".
Work still needs to be done post-refactor to ensure function input and outputs
are sensible, their docstrings are informative and they follow the principles of
Object Oriented Programming - such as the Single Responsibility Principle (Along
with a general clean up of the code and comments, such as having the code meet
the python line length guidelines --> the main benefit of which is having
multiple editors open side by side on smaller screens)
TODO
1) Move extract method to either extract module or rhea
2) Try to separate calculation/processing of data from saving/loading/displaying
3) Tidy up inputs to functions (e.g. cull unnecessary input parameters)
4) Make create_ref_spect() output variances (Median Absolute Deviations)
5) Possibly have dark calibration (for both flats and science frames) in its own
method. This would clean up the existing extract method, removing the need
to check whether darks and flats had been passed in (or varying permutations
of each - e.g. in the case where some of the data has already been dark
corrected, such as the solar data)
"""
from __future__ import division, print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as op
import scipy.interpolate as interp
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import constants as const
import PyAstronomy.pyasl as pyasl
import opticstools as ot
import pdb
try:
    import pyfits
except ImportError:
    import astropy.io.fits as pyfits
class RadialVelocity():
"""A RadialVelocity object for calculating and plotting RVS and generating
reference spectra.
Unclear if the object needs to be initialised with any parameters at this
stage. Perhaps a file path?
"""
def __init__(self):
"""(Presently empty) constructor.
"""
pass
def rv_shift_resid(self, params, wave, spect, spect_sdev, spline_ref,
return_spect=False):
"""Find the residuals to a fit of a (subsampled)reference spectrum to an
observed spectrum.
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params: array-like
wave: float array
Wavelengths for the observed spectrum.
spect: float array
            The observed spectrum.
spect_sdev: float array
standard deviation of the input spectra.
spline_ref: InterpolatedUnivariateSpline instance
For interpolating the reference spectrum
return_spect: boolean
Whether to return the fitted spectrum or the residuals.
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
resid: float array
The fit residuals
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
# Lets get this sign correct. A redshift (positive velocity) means that
# a given wavelength for the reference corresponds to a longer
# wavelength for the target, which in turn means that the target
# wavelength has to be interpolated onto shorter wavelengths for the
# reference.
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
if return_spect:
return fitted_spect
else:
return (fitted_spect - spect)/spect_sdev
def rv_shift_chi2(self, params, wave, spect, spect_sdev, spline_ref):
"""Find the chi-squared for an RV fit. Just a wrapper for rv_shift_resid,
so the docstring is cut and paste!
The function for parameters p[0] through p[3] is:
.. math::
y(x) = Ref[ wave(x) * (1 - p[0]/c) ] * exp(p[1] * x^2 + p[2] * x + p[3])
Here "Ref" is a function f(wave)
Parameters
----------
params:
...
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
        spect_sdev: float array
            Standard deviation of the input spectra.
        spline_ref: InterpolatedUnivariateSpline instance
            For interpolating the reference spectrum.
        return_spect: boolean
            Whether to return the fitted spectrum or the residuals.
wave_ref: float array
The wavelengths of the reference spectrum
ref: float array
The reference spectrum
Returns
-------
chi2:
The fit chi-squared
"""
return np.sum(self.rv_shift_resid(params, wave, spect, spect_sdev, spline_ref)**2)
def rv_shift_jac(self, params, wave, spect, spect_sdev, spline_ref):
r"""Explicit Jacobian function for rv_shift_resid.
This is not a completely analytic solution, but without it there seems to be
numerical instability.
The key equations are:
        .. math:: f(x) = R( \lambda(x) (1 - p_0/c) ) \times \exp(p_1 x^2 + p_2 x + p_3)
        g(x) = (f(x) - d(x))/\sigma(x)
        \frac{dg}{dp_0}(x) \approx [f(x + 1\,\mathrm{m/s}) - f(x)]/\sigma(x)
        \frac{dg}{dp_1}(x) = x^2 f(x) / \sigma(x)
        \frac{dg}{dp_2}(x) = x f(x) / \sigma(x)
        \frac{dg}{dp_3}(x) = f(x) / \sigma(x)
Parameters
----------
params: float array
wave: float array
Wavelengths for the observed spectrum.
spect: float array
The observed spectrum
spect_sdev:
...
spline_ref:
...
Returns
-------
jac:
The Jacobian.
"""
ny = len(spect)
xx = (np.arange(ny)-ny//2)/ny
norm = np.exp(params[1]*xx**2 + params[2]*xx + params[3])
fitted_spect = spline_ref(wave*(1.0 - params[0]/const.c.si.value))*norm
jac = np.empty( (ny,4) )
#The Jacobian is the derivative of fitted_spect/sdev with respect to
#p[0] through p[3]
jac[:,3] = fitted_spect/spect_sdev
jac[:,2] = fitted_spect*xx/spect_sdev
jac[:,1] = fitted_spect*xx**2/spect_sdev
jac[:,0] = (spline_ref(wave*(1.0 - (params[0] + 1.0)/const.c.si.value))*
norm - fitted_spect)/spect_sdev
return jac
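    # A quick finite-difference validation of rv_shift_jac (illustrative
    # sketch only; `rv`, `p0`, and the data arrays below are stand-ins):
    #   eps = 1e-7
    #   dp = np.zeros(4); dp[2] = eps
    #   num = (rv.rv_shift_resid(p0 + dp, wave, spect, sdev, spl)
    #          - rv.rv_shift_resid(p0, wave, spect, sdev, spl)) / eps
    #   ana = rv.rv_shift_jac(p0, wave, spect, sdev, spl)[:, 2]
    #   assert np.allclose(num, ana, rtol=1e-4)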
def create_ref_spect(self, wave, fluxes, vars, bcors, rebin_fact=2,
gauss_sdev=1.0, med_cut=0.6,gauss_hw=7,threshold=100):
"""Create a reference spectrum from a series of target spectra.
The process is:
1) Re-grid the spectra into a rebin_fact times smaller wavelength grid.
2) The spectra are barycentrically corrected by linear interpolation. Note
that when used on a small data set, typically the spectra will be shifted by
many km/s. For an RV-stable star, the fitting process then needs to find the
opposite of this barycentric velocity.
3) Remove bad (i.e. low flux) files.
4) Median combine the spectra.
5) Convolve the result by a Gaussian to remove high spatial frequency noise. This
can be important when the reference spectrum is created from only a small
number of input spectra, and high-frequency noise can be effectively fitted to
itself.
Parameters
----------
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
rebin_fact: int
Factor by which to rebin.
gauss_sdev:
...
med_cut:
...
gauss_hw:
...
Returns
-------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
C = const.c.si.value
#Create arrays for our outputs.
wave_ref = np.empty( (nm,rebin_fact*ny + 2) )
ref_spect = np.empty( (nm,rebin_fact*ny + 2) )
#First, rebin everything, using opticstools.utils.regrid_fft
new_shape = (fluxes.shape[1],rebin_fact*fluxes.shape[2])
fluxes_rebin = np.empty( (fluxes.shape[0],fluxes.shape[1],
rebin_fact*fluxes.shape[2]) )
for i in range(nf):
fluxes_rebin[i] = ot.utils.regrid_fft(fluxes[i],new_shape)
#Create the final wavelength grid.
for j in range(nm):
wave_ref[j,1:-1] = np.interp(np.arange(rebin_fact*ny)/rebin_fact,
np.arange(ny),wave[j,:])
#Fill in the end wavelengths, including +/-100 km/s from the ends.
wave_ref[j,-2] = wave_ref[j,-3] + (wave_ref[j,-3]-wave_ref[j,-4])
wave_ref[j,0] = wave_ref[j,1] * (C + 1e5)/C
wave_ref[j,-1] = wave_ref[j,-2] * (C - 1e5)/C
#Barycentric correct. For a positive barycentric velocity, the observer is
#moving towards the star, which means that star is blue-shifted and the
#correct rest-frame spectrum is at longer wavelengths. The interpolation
#below shifts the spectrum to the red, as required.
for i in range(nf):
for j in range(nm):
# Awkwardly, we've extended the wavelength scale by 2 elements,
# but haven't yet extended the fluxes...
ww = wave_ref[j,1:-1]
fluxes_rebin[i,j] = np.interp(ww*(1-bcors[i]/C), ww[::-1],
fluxes_rebin[i,j,::-1])
#!!! New Code. This was already checked and makes no sense.
#Combine the spectra.
flux_meds = np.median(fluxes_rebin,axis=2)
flux_files = np.median(flux_meds,axis=1)
if med_cut > 0:
good_files = np.where(flux_files > med_cut*np.median(flux_files))[0]
else:
            good_files = np.arange(len(flux_files), dtype=int)
flux_orders = np.median(flux_meds[good_files],axis=0)
flux_norm = fluxes_rebin.copy()
for g in good_files:
for j in range(nm):
flux_norm[g,j,:] /= flux_meds[g,j]
#pdb.set_trace()
#Create a median over files
flux_ref = np.median(flux_norm[good_files],axis=0)
#Multiply this by the median for each order
for j in range(nm):
flux_ref[j] *= flux_orders[j]
#Threshold the data whenever the flux is less than "threshold"
if (threshold > 0):
bad = flux_ref<2*threshold
flux_ref[bad] *= np.maximum(flux_ref[bad]-threshold,0)/threshold
# Create a Gaussian smoothing function for the reference spectrum. This
# is needed to prevent a bias to zero radial velocity, especially in the
# case of few data points.
gg = np.exp(-(np.arange(2*gauss_hw+1)-gauss_hw)**2/2.0/gauss_sdev**2)
gg /= np.sum(gg)
one_order = np.empty(flux_ref.shape[1] + 2*gauss_hw)
for j in range(nm):
one_order[gauss_hw:-gauss_hw] = flux_ref[j,:]
one_order[:gauss_hw] = one_order[gauss_hw]
one_order[-gauss_hw:] = one_order[-gauss_hw-1]
ref_spect[j,:] = np.convolve(one_order, gg,
mode='same')[gauss_hw-1:1-gauss_hw]
return wave_ref, ref_spect
def extract_spectra(self, files, extractor, star_dark=None, flat_files=None,
flat_dark=None, location=('151.2094','-33.865',100.0),
coord=None, do_bcor=True, ra_dec_hr=False):
"""Extract the spectrum from a file, given a dark file, a flat file and
a dark for the flat. The process is:
1) Dark correcting the data and the flat fields.
2) Computing (but not applying) Barycentric corrections.
3) Extracting the data and the flat fields using the extract module, to form
:math:`f_m(x)`, the flux for orders m and dispersion direction pixels x.
4) Normalising the flat fields, so that the median of each order is 1.0.
5) Dividing by the extracted flat field. Uncertainties from the flat field are
added in quadrature.
TODO: Not the neatest implementation, but should account for the fact that
there are no flats or darks for the ThAr frames. Might be worth tidying
up and making the implementation a little more elegant.
Parameters
----------
files: list of strings
            One string for each file. Can be on separate nights - a full
            pathname should be given.
        star_dark:
        flat_files: list of strings.
            One string for each flat file. Can be on separate nights - a full
            pathname should be given.
flat_dark:
        location: (longitude:string, latitude:string, elevation:float)
            The location on Earth where the data were taken.
        coord: astropy.coordinates.sky_coordinate.SkyCoord
            The sky coordinates of the target star.
do_bcor: boolean
Flag for whether to do barycentric correction
Returns
-------
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
mjds: 1D np.array(float)
Modified Julian Date (MJD) of each observation.
"""
# Initialise list of return values
# Each index represents a single observation
fluxes = []
vars = []
dates = []
bcors = []
#!!! This is dodgy, as files and flat_files should go together in a dict
for ix,file in enumerate(files):
# Dark correct the science and flat frames
# Only if flat/darks have been supplied --> ThAr might not have them
# If not supplied, just use science/reference data
try:
# Dark correct science frames
if len(star_dark) > 0:
data = pyfits.getdata(file) - star_dark
else:
data = pyfits.getdata(file)
# Dark correct flats
if len(flat_files) > 0 and len(flat_dark) > 0:
flat = pyfits.getdata(flat_files[ix]) - flat_dark
elif len(flat_files) > 0:
flat = pyfits.getdata(flat_files[ix])
except:
print('Unable to calibrate file ' + file +
'. Check that format of data arrays are consistent.')
print(pyfits.getdata(file).shape)
print(star_dark.shape)
continue
header = pyfits.getheader(file)
date = Time(header['JD'], format='jd', location=location)
dates.append(date)
# Determine the barycentric correction
if do_bcor:
if not coord:
# Depending on whether the RA and DEC is saved in hours or
# degrees, load and create a SkyCoord object
if ra_dec_hr:
ra_deg = float(header['RA'])*15
else:
ra_deg = float(header['RA'])
dec_deg = float(header['DEC'])
coord = SkyCoord(ra=ra_deg, dec=dec_deg, unit='deg')
if not location:
location=(float(header['LONG']), float(header['LAT']),
float(header['HEIGHT']))
#(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False)
#pdb.set_trace()
bcors.append(1e3*pyasl.helcorr(float(location[0]),
float(location[1]),location[2],coord.ra.deg,
coord.dec.deg,date.jd)[0] )
else:
bcors.append(0.0)
# Extract the fluxes and variance for the science and flat frames
print("Extracting spectra from file #", str(ix))
flux, var = extractor.one_d_extract(data=data, rnoise=20.0)
# Continue only when flats have been supplied
# Perform flat field correction and adjust variances
if len(flat_files) > 0:
flat_flux, fvar = extractor.one_d_extract(data=flat,
rnoise=20.0)
for j in range(flat_flux.shape[0]):
medf = np.median(flat_flux[j])
flat_flux[j] /= medf
fvar[j] /= medf**2
#Calculate the variance after dividing by the flat
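                # First-order error propagation for f = flux/flat:
                #   Var(f) ~= Var(flux)/flat^2 + flux^2 * Var(flat)/flat^4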
var = var/flat_flux**2 + fvar * flux**2/flat_flux**4
#Now normalise the flux.
flux /= flat_flux
# Regardless of whether the data has been flat field corrected,
# append to the arrays and continue
fluxes.append(flux[:,:,0])
vars.append(var[:,:,0])
fluxes = np.array(fluxes)
vars = np.array(vars)
bcors = np.array(bcors)
mjds = np.array([d.mjd for d in dates])
return fluxes, vars, bcors, mjds
def calculate_rv_shift(self, wave_ref, ref_spect, fluxes, vars, bcors,
wave,return_fitted_spects=False,bad_threshold=10):
"""Calculates the Radial Velocity of each spectrum
The radial velocity shift of the reference spectrum required
to match the flux in each order in each input spectrum is calculated
The input fluxes to this method are flat-fielded data, which are then fitted with
a barycentrically corrected reference spectrum :math:`R(\lambda)`, according to
the following equation:
.. math::
            f(x) = R( \lambda(x) (1 - p_0/c) ) \times \exp(p_1 x^2 + p_2 x + p_3)
        The first term in this equation is simply the velocity corrected spectrum, based on
        the arc-lamp derived reference wavelength scale :math:`\lambda(x)` for pixel coordinates x.
        The second term in the equation is a continuum normalisation - a shifted Gaussian was
        chosen as a function that is non-zero everywhere. The scipy.optimize.leastsq function is used
        to find the best fitting set of parameters :math:`p_0` through :math:`p_3`.
The reference spectrum function :math:`R(\lambda)` is created using a wavelength grid
which is over-sampled with respect to the data by a factor of 2. Individual fitted
wavelengths are then found by cubic spline interpolation on this :math:`R_j(\lambda_j)`
discrete grid.
Parameters
----------
wave_ref: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel*2+2),
where the wavelength scale has been interpolated.
ref_spect: 2D np.array(float)
Reference spectrum of form (Order, Flux/pixel*2+2),
where the flux scale has been interpolated.
fluxes: 3D np.array(float)
Fluxes of form (Observation, Order, Flux/pixel)
vars: 3D np.array(float)
Variance of form (Observation, Order, Variance/pixel)
bcors: 1D np.array(float)
Barycentric correction for each observation.
wave: 2D np.array(float)
Wavelength coordinate map of form (Order, Wavelength/pixel)
Returns
-------
rvs: 2D np.array(float)
Radial velocities of format (Observation, Order)
rv_sigs: 2D np.array(float)
Radial velocity sigmas of format (Observation, Order)
"""
nm = fluxes.shape[1]
ny = fluxes.shape[2]
nf = fluxes.shape[0]
rvs = np.zeros( (nf,nm) )
rv_sigs = np.zeros( (nf,nm) )
initp = np.zeros(4)
initp[3]=0.5
initp[0]=0.0
spect_sdev = np.sqrt(vars)
fitted_spects = np.empty(fluxes.shape)
for i in range(nf):
# Start with initial guess of no intrinsic RV for the target.
initp[0] = -bcors[i] #!!! New Change
nbad=0
for j in range(nm):
# This is the *only* non-linear interpolation function that
# doesn't take forever
spl_ref = interp.InterpolatedUnivariateSpline(wave_ref[j,::-1],
ref_spect[j,::-1])
args = (wave[j,:], fluxes[i,j,:], spect_sdev[i,j,:], spl_ref)
# Remove edge effects in a slightly dodgy way.
# 20 pixels is about 30km/s.
args[2][:20] = np.inf
args[2][-20:] = np.inf
the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,diag=[1e3,1,1,1],Dfun=self.rv_shift_jac, full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp, args=args,diag=[1e3,1e-6,1e-3,1], full_output=True,epsfcn=1e-9)
#The following line also doesn't work "out of the box".
#the_fit = op.minimize(self.rv_shift_chi2,initp,args=args)
#pdb.set_trace()
#Remove bad points...
resid = self.rv_shift_resid( the_fit[0], *args)
wbad = np.where( np.abs(resid) > bad_threshold)[0]
nbad += len(wbad)
                # More than 20 bad pixels in a single order is *crazy*
if len(wbad)>20:
fitted_spect = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
plt.clf()
plt.plot(args[0], args[1])
plt.plot(args[0][wbad], args[1][wbad],'o')
plt.plot(args[0], fitted_spect)
plt.xlabel("Wavelength")
plt.ylabel("Flux")
#print("Lots of 'bad' pixels. Type c to continue if not a problem")
#pdb.set_trace()
args[2][wbad] = np.inf
the_fit = op.leastsq(self.rv_shift_resid, initp,args=args, diag=[1e3,1,1,1], Dfun=self.rv_shift_jac, full_output=True)
#the_fit = op.leastsq(self.rv_shift_resid, initp,args=args, diag=[1e3,1e-6,1e-3,1], full_output=True, epsfcn=1e-9)
#Some outputs for testing
fitted_spects[i,j] = self.rv_shift_resid(the_fit[0], *args, return_spect=True)
if ( np.abs(the_fit[0][0] - bcors[i]) < 1e-4 ):
#pdb.set_trace() #This shouldn't happen, and indicates a problem with the fit.
pass
#Save the fit and the uncertainty.
rvs[i,j] = the_fit[0][0]
try:
                    rv_sigs[i,j] = np.sqrt(the_fit[1][0,0])
                except:
                    rv_sigs[i,j] = np.nan
        if return_fitted_spects:
            return rvs, rv_sigs, fitted_spects
        else:
            return rvs, rv_sigs
# -----------------------------------------------------
# -*- coding: utf-8 -*-
# @Time : 8/9/2018 4:34 PM
# @Author : sunyonghai
# @Software: ZJ_AI
# -----------------------------------------------------
import xml.etree.ElementTree as ET
import os
import prettytable as pt
import numpy as np
def add_label_dict(xmlPath,label_dict):
'''
    Get the object (label) information from an XML annotation file and update the running label counts.
:param xmlPath:
:return:
'''
if os.path.exists(xmlPath)!=1:
print(xmlPath)
et = ET.parse(xmlPath)
element = et.getroot()
element_objs = element.findall('object')
for element_obj in element_objs:
node = element_obj.find('name')
label=node.text
if label in label_dict.keys():
label_dict[label]+=1
else:
label_dict[label] = 1
return label_dict
def get_xml_label_num(xmlPath):
'''
    Get the object count and label list from an XML annotation file.
:param xmlPath:
:return:
'''
if os.path.exists(xmlPath)!=1:
print(xmlPath)
et = ET.parse(xmlPath)
element = et.getroot()
element_objs = element.findall('object')
count=len(element_objs)
labelList=[]
for element_obj in element_objs:
node = element_obj.find('name')
label=node.text
labelList.append(label)
return count,labelList
def get_tabs(test_infos):
tb = pt.PrettyTable()
tb.field_names = ["model_name","test_data",'label','presion','recall',"detect_num", "actual_num", "tp_num", "fp_num",'fn_num','ap']
for test_info in test_infos:
info=test_info.split(",")
tb.add_row(info)
return tb
def save_tb_in_images(path,tb):
from PIL import Image, ImageDraw, ImageFont
tab_info = str(tb)
space = 5
im = Image.new('RGB', (30, 30), (0, 0, 0, 0))
draw = ImageDraw.Draw(im, "RGB")
img_size = draw.multiline_textsize(tab_info)
im_new = im.resize((img_size[0] + space * 2, img_size[1] + space * 2))
del draw
del im
draw = ImageDraw.Draw(im_new, 'RGB')
draw.multiline_text((space, space), tab_info, fill=(255, 255, 255))
im_new.save(path+".png", "PNG")
del draw
def save_tb_in_txt(path,tb):
# tb.field_names = ["模型名称", "测试数据", '精确率', '召回率', "模型识别总数", "实际总数", "正确识别数量", "误识别总数",
# '漏识别总数']
f = open(path+'.txt', "a+")
f.write(str(tb))
f.write('\n')
f.close()
def save_tb_in_xml(path,tb):
f = open(path+'.xml', "a+")
s = tb.get_html_string()
f.write(str(s))
f.close()
def get_xml_field_name(path):
et = ET.parse(path)
element = et.getroot()
element_objs = element.findall('tr')
field_name=[]
for ele in element_objs:
td=ele.findall('th')
for t in td:
field_name.append(t.text)
return field_name
def get_xml_row_info(path):
et = ET.parse(path)
element = et.getroot()
element_objs = element.findall('tr')
row_info=[]
for ele in element_objs:
td=ele.findall('td')
for t in td:
row_info.append(t.text)
return row_info
def merge_tb_from_xml(path_list):
count =0
tb = pt.PrettyTable()
for path in path_list:
if count==0:
field_name=get_xml_field_name(path)
tb.field_names=field_name
row_info=get_xml_row_info(path)
tb.add_row(row_info)
else:
row_info = get_xml_row_info(path)
tb.add_row(row_info)
count+=1
print(tb)
def summary_tb(tb,test_infos):
    precision,recall,d_num,t_num,tp_num,fp_num,fn_num,count = 0,0,0,0,0,0,0,0
model_name,test_data='','total'
for test_info in test_infos:
if test_info.find('total')==-1:
continue
infos = test_info.split(",")
if count == 0:
model_name = infos[0]
count+=1
d_num+=int(infos[5])
t_num+=int(infos[6])
tp_num+=int(infos[7])
fp_num+=int(infos[8])
fn_num+=int(infos[9])
    precision=tp_num/(tp_num+fp_num)
    recall=tp_num/(tp_num+fn_num)
    tb.add_row([model_name,test_data,'total',precision,recall,d_num,t_num,tp_num,fp_num,fn_num,'//'])
return tb
def cal_model_acc(xmlPath1,xmlPath2,cal_label=False):
xmlFileList1 = []
xmlFileList2 = []
for xmlFile in os.listdir(xmlPath1):
xmlFileList1.append(os.path.join(xmlPath1, xmlFile))
xmlFileList2.append(os.path.join(xmlPath2, xmlFile))
print(len(xmlFileList1), len(xmlFileList2))
tp_sum,fp_sum,fn_sum,d_sum,t_sum = 0,0,0,0,0
for i in range(len(xmlFileList1)):
tp,fp,fn = 0,0,0
xmlFile1 = xmlFileList1[i]
xmlFile2 = xmlFileList2[i]
d_labelNum, d_labelList = get_xml_label_num(xmlFile1)
t_labelNum, t_labelList = get_xml_label_num(xmlFile2)
for d_label in d_labelList:
if d_label in t_labelList:
labenIndex = t_labelList.index(d_label)
t_labelList.remove(t_labelList[labenIndex])
tp += 1
else:
fp += 1
fn = t_labelNum - tp
tp_sum += tp
fp_sum += fp
fn_sum += fn
d_sum += d_labelNum
t_sum += t_labelNum
prec = tp_sum / (fp_sum + tp_sum)
recall = tp_sum / (tp_sum + fn_sum)
print(prec, recall)
print(tp_sum, fp_sum, fn_sum, d_sum, t_sum)
return "{},{},{},{},{},{},{}".format(prec, recall,d_sum, t_sum, tp_sum, fp_sum, fn_sum)
def init_ind(class_name):
tp_sum = np.zeros(class_name, dtype=int)
fp_sum = np.zeros(class_name, dtype=int)
fn_sum = np.zeros(class_name, dtype=int)
d_sum = np.zeros(class_name, dtype=int)
t_sum = np.zeros(class_name, dtype=int)
prec = np.zeros(class_name, dtype=float)
rec = np.zeros(class_name, dtype=float)
return tp_sum, fp_sum, fn_sum, d_sum, t_sum,prec,rec
def get_xml_label_bnd(xmlPath):
if os.path.exists(xmlPath)!=1:
print(xmlPath)
et = ET.parse(xmlPath)
element = et.getroot()
element_objs = element.findall('object')
labelList=[]
boxes=np.zeros((1,4),dtype=int)
for i,element_obj in enumerate(element_objs):
node = element_obj.find('name')
label=node.text
labelList.append(label)
bbox = element_obj.find('bndbox')
if i==0:
boxes[0,:]=np.array([int(bbox.find('xmin').text),int(bbox.find('ymin').text),int(bbox.find('xmax').text),int(bbox.find('ymax').text)])
else:
box = np.array([int(bbox.find('xmin').text), int(bbox.find('ymin').text), int(bbox.find('xmax').text),
int(bbox.find('ymax').text)])
boxes=np.row_stack((boxes,box))
return labelList,boxes
def cal_iou(gt_boxes,box):
ixmin = np.maximum(gt_boxes[:, 0], box[0])
iymin = np.maximum(gt_boxes[:, 1], box[1])
ixmax = np.minimum(gt_boxes[:, 2], box[2])
iymax = np.minimum(gt_boxes[:, 3], box[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((box[2] - box[0] + 1.) * (box[3] - box[1] + 1.) +
(gt_boxes[:, 2] - gt_boxes[:, 0] + 1.) *
(gt_boxes[:, 3] - gt_boxes[:, 1] + 1.) - inters)
overlaps = inters / uni
    ovmax = np.max(overlaps)  # maximum IoU overlap
jmax = np.argmax(overlaps)
return ovmax,jmax
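# Worked example for cal_iou with inclusive pixel coordinates:
#   gt_boxes = np.array([[0, 0, 10, 10]]), box = np.array([5, 5, 15, 15])
#   -> intersection = 6 * 6 = 36, union = 121 + 121 - 36 = 206,
#   so cal_iou(gt_boxes, box) returns (36/206 ~ 0.175, 0).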
def voc_ap(rec, prec, use_07_metric=False):
if use_07_metric:
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
                p = np.max(prec[rec >= t])
            ap = ap + p / 11.
    else:
        # correct AP calculation: append sentinel values at both ends
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        # compute the precision envelope
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        # sum (delta recall) * precision at points where recall changes
        i = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
    return ap
"""
Class definition of XOR, the algorithm to perform inference in networks assuming a mixed effect of the community
and hierarchical latent structures.
"""
from __future__ import print_function
import sys
import time
import warnings
import numpy as np
import pandas as pd
import scipy.sparse
import sktensor as skt
import SpringRank as SR
import MultiTensor as MT
from termcolor import colored
from tools import delta_scores
from scipy.stats import poisson, entropy
from compute_metrics import save_metrics
EPS = 1e-8
# noinspection PyAttributeOutsideInit
class EitherOr(MT.MultiTensor):
def __init__(self, N=100, L=1, K=2, initialization=0, rseed=42, inf=1e10, err_max=1e-8, err=0.01, N_real=1,
tolerance=0.001, decision=5, max_iter=500, out_inference=False, out_folder='../data/output/',
in_folder=None, label='', assortative=False, verbose=0, fix_mu=False, fix_scores=False,
fix_communities=False, fix_means=False, fix_delta=False, beta0=None, c0=1., mu0=0.5, delta0=0.001,
solver='bicgstab', gamma=0., constrained=False, l0=0., l1=1., classification=True, randomize_mu=True,
lambda_u=5., lambda_v=5., lambda_w=10., cv=False, gt=False, input_s='../data/input/s.dat',
input_u='../data/input/u.dat', input_v='../data/input/v.dat',
input_w='../data/input/w.dat', input_Q='../data/input/sigma.dat'):
# ---- Attributes shared with MultiTensor ----
super().__init__(N = N, L = L, K = K, initialization = initialization, rseed = rseed, inf = inf, err = err,
err_max = err_max, N_real = N_real, tolerance = tolerance, decision = decision,
max_iter = max_iter, out_inference = out_inference, label = label, out_folder = out_folder,
in_folder = in_folder, assortative = assortative, verbose = verbose, input_u = input_u,
input_v = input_v, input_w = input_w, constrained = constrained, lambda_u = lambda_u,
lambda_v = lambda_v, lambda_w = lambda_w, cv = cv, gt = gt)
# ---- XOR-specific attributes ----
self.input_s = input_s # path of the input file s (when initialization=1)
        self.input_Q = input_Q  # path of the input file Q (when initialization=2)
self.fix_scores = fix_scores # flag for fixing ranking latent variable s to ground truth values
self.fix_communities = fix_communities # flag for fixing community latent variables to ground truth values
self.fix_means = fix_means # flag for fixing the prior and posterior mean of sigma to ground truth value
self.fix_mu = fix_mu # flag for fixing the prior mean of sigma to ground truth value
self.fix_delta = fix_delta # flag for fixing the outgroup interaction mean delta_0 to ground truth value
self.beta = beta0 # initial value for the inverse temperature
self.gamma = gamma # regularization penalty - spring constant for the fictitious i <-> origin connections
self.l0 = l0 # resting length for the fictitious i <-> origin connections
self.l1 = l1 # resting length for the i <-> j connections
self.classification = classification # flag for computing classification metrics
self.randomize_mu = randomize_mu # flag for randomly generating mu
if solver not in {'spsolve', 'bicgstab'}: # solver used for the SR linear system
warnings.warn(f'Unknown parameter {solver} for argument solver. Setting solver = "bicgstab"')
solver = 'bicgstab'
self.solver = solver
if self.beta is not None:
if self.beta < 0:
raise ValueError('The inverse temperature beta has to be positive!')
else:
self.beta = 5
if (mu0 < 0) or (mu0 > 1):
raise ValueError('The sigma parameter has to be in [0,1]!')
# values of the parameters used during the update
self.delta_0 = delta0 # outgroup parameter
self.mu = mu0 # sigma parameter
self.Q = np.ones((self.L, self.N)) * mu0 # sigma parameter - posterior
self.c = c0 # sparsity coefficient
self.s = np.zeros(self.N, dtype = float) # ranking scores
# values of the parameters in the previous iteration
self.delta_0_old = delta0 # outgroup parameter
self.mu_old = mu0 # sigma parameter
self.Q_old = np.ones((self.L, self.N)) * mu0 # sigma parameter - posterior
self.c_old = c0 # sparsity coefficient
self.s_old = np.zeros(self.N, dtype = float) # ranking scores
# final values after convergence --> the ones that maximize the log-likelihood
self.delta_0_f = delta0 # outgroup parameter
self.mu_f = mu0 # sigma parameter
self.Q_f = np.ones((self.L, self.N)) * mu0 # sigma parameter - posterior
self.c_f = 1. # sparsity coefficient
self.s_f = np.zeros(self.N, dtype = float) # ranking scores
self.ratio_f = None # final ratio
def fit(self, data, nodes, mask=None):
"""
Model directed networks by using a probabilistic generative model that assume community and
ranking parameters. The inference is performed via EM algorithm.
Parameters
----------
data : ndarray/sptensor
Graph adjacency tensor.
nodes : list
List of nodes IDs.
mask : ndarray
Mask for cv.
Returns
-------
Iterable of dictionaries containing:
s_f : ndarray
Ranking scores vector.
u_f : ndarray
Out-going membership matrix.
v_f : ndarray
In-coming membership matrix.
w_f : ndarray
Affinity tensor.
c_f : float
Sparsity coefficient.
beta_f : float
Inverse temperature parameter.
gamma_f : float
Ranking regularization parameter.
mu_f : float
Prior sigma parameter.
Q_f : ndarray
Posterior sigma parameters.
delta0_f : float
Out-group interaction parameter.
maxL : float
Maximum log-likelihood.
K : int
Number of communities.
nodes_s : ndarray
Permuted node list according to inferred scores.
nodes_c : ndarray
Node list.
seed : int
Realization seed.
convergence : bool
Realization convergence flag.
maxit : int
Realization number of iteration.
constrained : bool
Realization flag for u,v,w regularization.
"""
self.model = '_XOR'
# initialization of the SR model
self.SR = SR.SpringRank(N = self.N, L = self.L, solver = self.solver, gamma = self.gamma, l0 = self.l0,
l1 = self.l1, inf = self.inf, verbose = self.verbose, get_beta = False,
out_inference = False, out_folder = self.out_folder, label = self.label)
# pre-processing of the data to handle the sparsity
data = MT.preprocess(data, self.verbose)
# save positions of the nonzero entries - tuple of np.ndarrays
if isinstance(data, skt.dtensor):
subs_nz = data.nonzero()
elif isinstance(data, skt.sptensor):
subs_nz = data.subs
for r in range(self.N_real):
# initialization of the random state
prng = np.random.RandomState(self.rseed)
# initialization of the maximum log-likelihood
maxL = -self.inf
# Initialize all variables
self._initialize(prng = prng)
self._update_old_variables()
self._update_cache(data, subs_nz, mask = mask)
# Convergence local variables
coincide, it = 0, 0
convergence = False
loglik = self.inf
if self.verbose == 2:
print(f'\n\nUpdating realization {r} ...', end = '\n\n')
time_start = time.time()
loglik_values = []
# --- single step iteration update ---
while not convergence and it < self.max_iter:
# main EM update: updates latent variables and calculates max difference new vs old
_ = self._update_em(data, subs_nz, mask = mask)
it, loglik, coincide, convergence = self._check_for_convergence(data, it, loglik, coincide, convergence,
subs_nz, mask = mask)
loglik_values.append(loglik)
if self.verbose == 2:
print(f'Nreal = {r} - Loglikelihood = {loglik} - iterations = {it} - '
f'time = {np.round(time.time() - time_start, 2)} seconds')
if self.verbose:
print(colored('End of the realization.', 'green'),
f'Nreal = {r} - Loglikelihood = {loglik} - iterations = {it} - '
f'time = {np.round(time.time() - time_start, 2)} seconds')
if maxL < loglik:
maxL = loglik
conv = convergence
self.final_it = it
self._update_optimal_parameters()
self.rseed += prng.randint(100000000)
self.maxL = maxL
if self.final_it == self.max_iter and not conv:
# convergence not reached
print(colored(
'Solution failed to converge in {0} EM steps for realization n.{1}!'.format(self.max_iter, r),
'blue'))
# end cycle over realizations
yield {
's': self.s_f, 'c': self.c_f, 'beta': self.beta, 'gamma': self.gamma,
'u': self.u_f, 'v': self.v_f, 'w': self.w_f,
'Q': self.Q_f, 'ratio': self.mu_f,
'delta0': self.delta_0_f, 'K': self.K,
'nodes_s': np.argsort(self.s_f)[::-1], 'nodes_c': nodes,
'seed': self.rseed, 'logL': self.maxL, 'convergence': conv,
'maxit': self.final_it, 'constrained': self.constrained
}
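    # Hedged usage sketch (shapes follow the docstring above; building the
    # adjacency tensor from an edge list is an assumption, not part of this class):
    #   model = EitherOr(N=100, K=2, N_real=1)
    #   for result in model.fit(data, nodes):  # data: L x N x N adjacency tensor
    #       print(result['logL'], result['ratio'], result['nodes_s'][:10])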
def _initialize(self, prng=None):
"""
Random initialization of the latent parameters.
Parameters
----------
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
if prng is None:
prng = np.random.RandomState(self.rseed)
self._randomize_c(prng = prng)
self._randomize_delta_0(prng = prng)
if self.initialization == 0:
if self.verbose > 0:
print('Variables s, u, v, w, Q are initialized randomly.')
self._randomize_s(prng = prng)
self._randomize_w(prng = prng)
self._randomize_u_v(prng = prng)
self._randomize_means(prng = prng)
elif self.initialization > 0:
if self.verbose > 0:
print('Selected initialization of s, u, v, w: from file.')
try:
if not self.fix_scores:
raise ValueError('Flag fix_scores set to False!')
self._initialize_s(self.input_s)
if self.verbose == 2:
print('s initialized from ', self.input_s)
except:
self._randomize_s(prng = prng)
if self.verbose == 2:
print('Error: s initialized randomly.')
try:
if not self.fix_communities:
raise ValueError('Flag fix_communities set to False!')
self._initialize_w(self.input_w)
if self.verbose == 2:
print('w initialized from ', self.input_w)
except:
self._randomize_w(prng = prng)
if self.verbose == 2:
print('Error: w initialized randomly.')
try:
if not self.fix_communities:
raise ValueError('Flag fix_communities set to False!')
self._initialize_u_v(self.input_u, self.input_v)
if self.verbose == 2:
print('u and v initialized from ', self.input_u, self.input_v)
except:
self._randomize_u_v(prng = prng)
if self.verbose == 2:
print('Error: u, v initialized randomly.')
if self.initialization == 2:
if self.verbose == 2:
print('Selected initialization of Q: from file.')
self._initialize_means(self.input_Q)
if self.verbose == 2:
print('Q initialized from ', self.input_Q)
else:
if self.verbose == 2:
print('Error: Q initialized randomly.')
self._randomize_means(prng = prng)
def _randomize_c(self, prng=None, a=0.01, b=1e4):
"""
Generate a random number in (a, b).
Parameters
----------
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
if prng is None:
prng = np.random.RandomState(self.rseed)
self.c = (b - a) * prng.random_sample(1)[0] + a
def _randomize_means(self, prng=None, a=0.1, b=0.9):
"""
Generate a random number in (a, b).
Parameters
----------
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
if not self.fix_means:
if prng is None:
prng = np.random.RandomState(self.rseed)
if self.randomize_mu:
self.mu = (b - a) * prng.random_sample(1)[0] + a
self.Q += self.mu - self.Q.mean()
self.Q[self.Q > 1] = 0.99
self.Q[self.Q < 0] = 2 * EPS
else:
self.Q = (b - a) * prng.random_sample(self.Q.shape) + a
if not self.fix_mu:
self.mu = np.mean(self.Q)
def _randomize_delta_0(self, prng=None, a=1e-3, b=0.5):
"""
Generate a random number in (a, b).
Parameters
----------
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
if not self.fix_delta:
if prng is None:
prng = np.random.RandomState(self.rseed)
self.delta_0 = (b - a) * prng.random_sample(1)[0] + a
def _randomize_s(self, prng=None):
"""
Assign a random number in [-inf, +inf] to each entry of the affinity tensor s.
Parameters
----------
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
if prng is None:
prng = np.random.RandomState(self.rseed)
self.s = (1 - 2 * prng.binomial(1, .5, self.s.shape)) * prng.random_sample(self.s.shape)
def _initialize_means(self, infile_name, prng=None):
"""
Initialize a posteriori sigma parameters Q from file.
Parameters
----------
infile_name : str
Path of the input file.
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
with open(infile_name, 'rb') as f:
            dfQ = pd.read_csv(f, sep = r'\s+', header = None, squeeze = True)
self.Q = dfQ.values.T[np.newaxis, :]
if prng is None:
prng = np.random.RandomState(self.rseed)
# Add noise to the initialization
self.Q[self.Q == 1] -= self.err * 0.001 * prng.random_sample(self.Q[self.Q == 1].shape)
self.Q[self.Q == 0] += self.err * 0.001 * prng.random_sample(self.Q[self.Q == 0].shape)
self.mu = np.mean(self.Q)
def _initialize_s(self, infile_name, prng=None):
"""
Initialize ranking vector s from file.
Parameters
----------
infile_name : str
Path of the input file.
prng : RandomState
Container for the Mersenne Twister pseudo-random number generator.
"""
with open(infile_name, 'rb') as f:
            dfS = pd.read_csv(f, sep = r'\s+', header = None)
self.s = dfS.values
self.s = self.s.flatten()
# Add noise to the initialization
max_entry = np.max(self.s)
if prng is None:
prng = np.random.RandomState(self.rseed)
self.s += max_entry * self.err * 0.001 * prng.random_sample(self.s.shape)
def _update_old_variables(self):
"""
Update values of the parameters in the previous iteration.
"""
self.s_old = np.copy(self.s)
self.c_old = np.copy(self.c)
self.Q_old = np.copy(self.Q)
self.mu_old = np.copy(self.mu)
self.delta_0_old = np.copy(self.delta_0)
self.u_old[self.u > 0] = np.copy(self.u[self.u > 0])
self.v_old[self.v > 0] = np.copy(self.v[self.v > 0])
self.w_old[self.w > 0] = np.copy(self.w[self.w > 0])
def _update_cache(self, data, subs_nz, com=True, rank=True, probs=True, mask=None):
"""
Update the cache used in the em_update.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
subs_nz : tuple
Indices of elements of data that are non-zero.
com : bool
Flag for updating community related cache.
rank : bool
Flag for updating ranking related cache.
probs : bool
Flag for updating edge probabilities related cache.
mask : ndarray
Mask for cv.
"""
if probs:
# matrix containing Qi * Qj = Yij
self.QQt = np.einsum('ai,aj->aij', self.Q, self.Q)
low_values_indices = self.QQt < EPS # values are too low
self.QQt[low_values_indices] = EPS
# matrix containing Q_i for every j + Q_j for every i
self.Qs = np.vstack([self.Q] * self.N) + np.hstack([self.Q.T] * self.N)
low_values_indices = self.Qs < EPS # values are too low
self.Qs[low_values_indices] = EPS
self.Qs = self.Qs[np.newaxis, :, :]
# matrix containing QQt - (Q_i for every j + Q_j for every i) + 1 = X - Y
self.XmY = self.QQt - self.Qs + 1
if np.logical_or(self.QQt < 0, self.QQt > 1).any():
print(self.QQt[np.logical_or(self.QQt < 0, self.QQt > 1)])
if mask is not None:
# compute masked values of X - Y for community updates
self.XmY_masked = np.zeros_like(self.QQt)
self.XmY_masked[mask] = self.XmY[mask]
if rank:
# compute s_i - s_j
self.Ds = self._Ds()
# compute full SR exponential term
self.eH = self._eH()
if com:
# compute MT means for nonzero values
self.M_nz = self._M_nz(subs_nz)
# compute auxiliary variables
self.data_hat_Mnz = self._data_hat_Mnz(data, subs_nz)
def _Ds(self):
"""
Compute the ranking differences. Uses an external function in order
to speed up computations with Numba.
Returns
-------
delta_s : ndarray
Ranking differences matrix NxN, zero for null data entries.
"""
delta_s = delta_scores(self.N, self.s)
return delta_s
def _eH(self):
"""
Compute the SR mean exponential term for all entries.
Returns
-------
eH : ndarray
SR mean exponential term matrix NxN.
"""
return np.exp(-0.5 * self.beta * np.power(self.Ds - self.l1, 2))
def _data_hat_Mnz(self, data, subs_nz):
"""
Compute auxiliary variable data_hat_Mnz = data * (1 - Q) / M.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
subs_nz : tuple
Indices of elements of data that are non-zero.
Returns
-------
data_hat_Mnz : sptensor/dtensor
Auxiliary tensor of the same shape and type of data.
"""
Z = np.copy(self.M_nz)
Z[Z == 0] = 1
if isinstance(data, skt.sptensor):
data_hat_Mnz = data.vals * self.XmY[subs_nz] / Z
if isinstance(data, skt.dtensor):
data_hat_Mnz = data[subs_nz].astype('float') * self.XmY[subs_nz] / Z
data_hat_Mnz[data_hat_Mnz == np.inf] = self.inf
return data_hat_Mnz
def _data_tilde(self, data, subs_nz):
"""
Compute auxiliary variable data_tilde = data * Q.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
subs_nz : tuple
Indices of elements of data that are non-zero.
Returns
-------
data_tilde : scipy/ndarray
Auxiliary matrix, 2-dimensional.
"""
if self.L > 1:
raise NotImplementedError('SpringRank for tensors not implemented! Use 2-dimensional input.')
data_tilde = np.zeros((self.N, self.N), dtype = float)[np.newaxis, :, :]
if isinstance(data, skt.sptensor):
data_tilde[subs_nz] = data.vals * self.QQt[subs_nz]
elif isinstance(data, skt.dtensor):
data_tilde[subs_nz] = data[subs_nz] * self.QQt[subs_nz]
try:
# convert auxiliary tensor to scipy matrix if possible
data_tilde = scipy.sparse.csr_matrix(data_tilde[0, :, :])
except:
warnings.warn('The input parameter A could not be converted to scipy.sparse.csr_matrix. '
'Using a dense representation (numpy).')
data_tilde = data_tilde[0, :, :]
return data_tilde
def _update_em(self, data, subs_nz, mask=None):
"""
Update parameters via EM procedure.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
subs_nz : tuple
Indices of elements of data that are non-zero.
mask : ndarray
Mask for cv.
Returns
-------
d_s : float
Maximum distance between the old and the new scores vector s.
d_u : float
Maximum distance between the old and the new membership matrix u.
d_v : float
Maximum distance between the old and the new membership matrix v.
d_w : float
Maximum distance between the old and the new affinity tensor w.
d_c : float
Distance between the old and the new SR sparsity coefficient c.
d_mu : float
Distance between the old and the new prior mean of sigma.
d_Q : float
Distance between the old and the new posterior mean of sigma.
"""
if not self.fix_scores:
d_s = self._update_s(self._data_tilde(data, subs_nz))
self._update_cache(data, subs_nz, com = False, probs = False)
else:
d_s = 0
d_c = self._update_c(self._data_tilde(data, subs_nz), mask = mask)
self._update_cache(data, subs_nz, com = False, probs = False)
if not self.fix_communities:
d_u = self._update_U(subs_nz, self.data_hat_Mnz, mask = mask)
self._update_cache(data, subs_nz, rank = False, probs = False)
d_v = self._update_V(subs_nz, self.data_hat_Mnz, mask = mask)
self._update_cache(data, subs_nz, rank = False, probs = False)
if self.initialization != 1:
if not self.assortative:
d_w = self._update_W(subs_nz, self.data_hat_Mnz, mask = mask)
else:
d_w = self._update_W_assortative(subs_nz, self.data_hat_Mnz, mask = mask)
else:
d_w = 0
self._update_cache(data, subs_nz, rank = False, probs = False)
else:
d_u, d_v, d_w = 0, 0, 0
if not self.fix_delta:
d_lam = self._update_delta_0(data, subs_nz, mask = mask)
else:
d_lam = 0
d_Q = self._update_Q(data)
if not self.fix_means:
d_mu = self._update_mu()
else:
d_Q = 0
d_mu = 0
self._update_cache(data, subs_nz, probs = 1 - self.fix_means, rank = False, mask = mask)
return d_s, d_u, d_v, d_w, d_c, d_lam, d_mu, d_Q
def _update_U(self, subs_nz, data, mask=None):
"""
Update out-going membership matrix.
Parameters
----------
subs_nz : tuple
Indices of elements of data that are non-zero.
data : sptensor/dtensor
Graph adjacency tensor.
mask : ndarray
Mask for cv.
Returns
-------
dist_u : float
Maximum distance between the old and the new membership matrix u.
"""
self.u *= self._update_membership(data, subs_nz, self.u, self.v, self.w, 1)
if mask is not None:
Du = np.einsum('aij,jq->iq', self.XmY_masked, self.v)
else:
Du = np.einsum('aij,jq->iq', self.XmY, self.v)
if not self.assortative:
w_k = np.einsum('akq->kq', self.w)
Z_uk = np.einsum('iq,kq->ik', Du, w_k)
else:
w_k = np.einsum('ak->k', self.w)
Z_uk = np.einsum('ik,k->ik', Du, w_k)
if not self.constrained:
non_zeros = Z_uk > EPS
self.u[Z_uk < EPS] = 0.
self.u[non_zeros] /= Z_uk[non_zeros]
else:
self.u /= Z_uk + self.delta_u
low_values_indices = self.u < self.err_max # values are too low
self.u[low_values_indices] = 0. # and set to 0.
assert (self.u <= self.inf).all()
dist_u = np.amax(abs(self.u - self.u_old))
self.u_old = np.copy(self.u)
return dist_u
def _update_V(self, subs_nz, data, mask=None):
"""
Update in-coming membership matrix.
Same as _update_U but with:
data <-> data_T
w <-> w_T
u <-> v
Parameters
----------
subs_nz : tuple
Indices of elements of data that are non-zero.
data : sptensor/dtensor
Graph adjacency tensor.
mask : ndarray
Mask for cv.
Returns
-------
dist_v : float
Maximum distance between the old and the new membership matrix v.
"""
self.v *= self._update_membership(data, subs_nz, self.u, self.v, self.w, 2)
if mask is not None:
Dv = np.einsum('aij,ik->jk', self.XmY_masked, self.u)
else:
Dv = np.einsum('aij,ik->jk', self.XmY, self.u)
if not self.assortative:
w_k = np.einsum('akq->kq', self.w)
Z_vk = np.einsum('jk,kq->jq', Dv, w_k)
else:
w_k = np.einsum('ak->k', self.w)
Z_vk = np.einsum('jk,k->jk', Dv, w_k)
if not self.constrained:
non_zeros = Z_vk > EPS
self.v[Z_vk < EPS] = 0.
self.v[non_zeros] /= Z_vk[non_zeros]
else:
self.v /= Z_vk + self.delta_v
low_values_indices = self.v < self.err_max # values are too low
self.v[low_values_indices] = 0. # and set to 0.
assert (self.v <= self.inf).all()
dist_v = np.amax(abs(self.v - self.v_old))
self.v_old = np.copy(self.v)
return dist_v
def _update_W(self, subs_nz, data, mask=None):
"""
Update affinity tensor.
Parameters
----------
subs_nz : tuple
Indices of elements of data that are non-zero.
data : sptensor/dtensor
Graph adjacency tensor.
mask : ndarray
Mask for cv.
Returns
-------
dist_w : float
Maximum distance between the old and the new affinity tensor w.
"""
sub_w_nz = self.w.nonzero()
uttkrp_DKQ = np.zeros_like(self.w)
UV = np.einsum('Ik,Iq->Ikq', self.u[subs_nz[1], :], self.v[subs_nz[2], :])
uttkrp_I = data[:, np.newaxis, np.newaxis] * UV
for _, k, q in zip(*sub_w_nz):
uttkrp_DKQ[:, k, q] += np.bincount(subs_nz[0], weights = uttkrp_I[:, k, q], minlength = self.L)
self.w *= uttkrp_DKQ
if mask is not None:
Z = np.einsum('aij,ik,jq->akq', self.XmY_masked, self.u, self.v)
else:
Z = np.einsum('aij,ik,jq->akq', self.XmY, self.u, self.v)
if not self.constrained:
non_zeros = Z > 0
self.w[non_zeros] /= Z[non_zeros]
else:
self.w /= Z + self.delta_w
low_values_indices = self.w < self.err_max # values are too low
self.w[low_values_indices] = 0. # and set to 0.
assert (self.w <= self.inf).all()
dist_w = np.amax(abs(self.w - self.w_old))
self.w_old = np.copy(self.w)
return dist_w
def _update_W_assortative(self, subs_nz, data, mask=None):
"""
Update affinity tensor (assuming assortativity).
Parameters
----------
subs_nz : tuple
Indices of elements of data that are non-zero.
data : sptensor/dtensor
Graph adjacency tensor.
mask : ndarray
Mask for cv.
Returns
-------
dist_w : float
Maximum distance between the old and the new affinity tensor w.
"""
uttkrp_DKQ = np.zeros_like(self.w)
UV = np.einsum('Ik,Ik->Ik', self.u[subs_nz[1], :], self.v[subs_nz[2], :])
uttkrp_I = data[:, np.newaxis] * UV
for k in range(self.K):
uttkrp_DKQ[:, k] += np.bincount(subs_nz[0], weights = uttkrp_I[:, k], minlength = self.L)
self.w *= uttkrp_DKQ
if mask is not None:
Z = np.einsum('aij,ik,jk->ak', self.XmY_masked, self.u, self.v)
else:
Z = np.einsum('aij,ik,jk->ak', self.XmY, self.u, self.v)
if not self.constrained:
non_zeros = Z > 0
self.w[non_zeros] /= Z[non_zeros]
else:
self.w /= Z + self.delta_w
low_values_indices = self.w < self.err_max # values are too low
self.w[low_values_indices] = 0. # and set to 0.
assert (self.w <= self.inf).all()
dist_w = np.amax(abs(self.w - self.w_old))
self.w_old = np.copy(self.w)
return dist_w
def _update_s(self, data):
"""
        Main routine to calculate SpringRank by solving a linear system.
If gamma != 0, performs L2 regularization.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
Returns
-------
dist_s : float
Maximum distance between the old and the new ranking vector s.
"""
# compute ranks update
self.s, _, _ = self.SR.fit(data)
# compute update improvement
dist_s = np.amax(abs(self.s - self.s_old))
# update variables
if isinstance(data, scipy.sparse.csr_matrix):
self.s_old = self.s.copy()
elif isinstance(data, np.ndarray):
self.s_old = np.copy(self.s)
return dist_s
def _update_c(self, data, mask=None):
"""
Compute the sparsity coefficient.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
mask : ndarray
Mask for cv.
Returns
-------
        dist_c : float
            Distance between the old and the new sparsity coefficient c.
"""
if mask is None:
denominator = (self.eH * self.QQt[0]).sum()
else:
denominator = (self.eH * self.QQt[0])[mask[0]].sum()
if denominator == 0:
self.c = self.inf
else:
self.c = data.sum() / denominator
# compute update improvement
dist_c = abs(self.c - self.c_old)
# update variable
self.c_old = np.copy(self.c)
return dist_c
def _update_mu(self):
"""
Compute the prior mean for sigma.
Returns
-------
        dist_mu : float
            Distance between the old and the new prior mean mu.
"""
self.mu = np.mean(self.Q)
# compute update improvement
dist_mu = abs(self.mu - self.mu_old)
if self.mu < self.err_max:
self.mu = self.err_max
if 1 - self.mu < self.err_max:
self.mu = 1 - self.err_max
# update variable
self.mu_old = np.copy(self.mu)
return dist_mu
def _update_delta_0(self, data, subs_nz, mask=None):
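        """
        Update the parameter delta_0.
        Parameters
        ----------
        data : sptensor/dtensor
            Graph adjacency tensor.
        subs_nz : tuple
            Indices of elements of data that are non-zero.
        mask : ndarray
            Mask for cv.
        Returns
        -------
        dist_lam : float
            Distance between the old and the new delta_0.
        """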
den = 2 * self.QQt - self.Qs # X - 1 expectation
den[-den < self.err_max] = -self.err_max
if isinstance(data, skt.sptensor):
self.delta_0 = (data.vals * den[subs_nz]).sum()
elif isinstance(data, skt.dtensor):
self.delta_0 = (data[subs_nz] * den[subs_nz]).sum()
if mask is None:
self.delta_0 /= den.sum()
else:
self.delta_0 /= den[mask].sum()
assert (self.delta_0 <= self.inf) and (self.delta_0 > 0)
# compute update improvement
dist_lam = np.abs(self.delta_0 - self.delta_0_old)
# update variable
self.delta_0_old = np.copy(self.delta_0)
return dist_lam
def _update_Q(self, data):
"""
Compute the posterior mean for sigma.
Parameters
----------
data : sptensor/dtensor
Graph adjacency tensor.
Returns
-------
        dist_Q : float
            Distance between the old and the new posterior mean Q.
"""
self.S = (self.c * self.eH)[np.newaxis, :, :]
if self.w.ndim == 2:
M = np.einsum('ik,jk->ijk', self.u, self.v)
M = np.einsum('ijk,ak->aij', M, self.w)
else:
M = np.einsum('ik,jq->ijkq', self.u, self.v)
M = np.einsum('ijkq,akq->aij', M, self.w)
self.M = M
if not self.fix_means:
            veclam = np.ones((self.L, self.N, self.N))
from __future__ import print_function
import numpy as np
import os
from navrep.tools.rings import generate_rings
from navrep.models.tcn import reset_graph, sample_hps_params, MDNTCN, get_pi_idx
from navrep.models.vae2d import ConvVAE
# parameters
TEMPERATURE = 0.5
_Z = 32
sequence_z_path = os.path.expanduser(
"~/navrep/datasets/M/ian/000_mus_logvars_robotstates_actions_rewards_dones.npz"
)
rnn_model_path = os.path.expanduser("~/navrep/models/M/tcn.json")
vae_model_path = os.path.expanduser("~/navrep/models/V/vae.json")
reset_graph()
tcn = MDNTCN(sample_hps_params, gpu_mode=False)
vae = ConvVAE(batch_size=1, is_training=False)
vae.load_json(vae_model_path)
tcn.load_json(rnn_model_path)
rings_def = generate_rings(64, 64)
# load sequence image encoding
arrays = np.load(sequence_z_path)
sequence_action = arrays["actions"]
sequence_mu = arrays["mus"]
sequence_logvar = arrays["logvars"]
sequence_restart = arrays["dones"]
sequence_z = sequence_mu + np.exp(sequence_logvar / 2.0) * np.random.randn(
*(sequence_mu.shape)
)
feed = {
tcn.input_z: np.reshape(sequence_z[:999], (1, 999, _Z)),
tcn.input_action: np.reshape(sequence_action[:999], (1, 999, 3)),
tcn.input_restart: np.reshape(sequence_restart[:999], (1, 999)),
}
[logmix, mean, logstd, logrestart] = tcn.sess.run(
[tcn.out_logmix, tcn.out_mean, tcn.out_logstd, tcn.out_restart_logits], feed
)
logmix = logmix.reshape((999, _Z, sample_hps_params.num_mixture))
mean = mean.reshape((999, _Z, sample_hps_params.num_mixture))
logstd = logstd.reshape((999, _Z, sample_hps_params.num_mixture))
logrestart = logrestart.reshape((999, 1))
OUTWIDTH = _Z
# adjust temperatures
logmix2 = np.copy(logmix) / TEMPERATURE
logmix2 -= logmix2.max()
logmix2 = np.exp(logmix2)
"""GNSS utility functions, mostly based on satellite ephemerides.
Author: <NAME>
"""
try:
import autograd.numpy as np
except ImportError:
print("""Package 'autograd' not found. 'autograd.numpy' is necessary for
coarse-time navigation via maximum-likelihood estimation. Falling
back to 'numpy'.""")
import numpy as np
import pymap3d as pm
try:
import mkl_fft as fft_lib
except ImportError:
print("""Package 'mkl_fft' not found. Consider installing 'mkl_fft' with
'conda install -c intel mkl_fft' for faster FFT and IFFT. Falling
back to 'numpy.fft'.""")
import numpy.fft as fft_lib
def get_sat_pos_vel_acc(t, eph):
"""Calculate positions, velocities, and accelerations of satellites.
Accepts arrays for t / eph, i.e., can calculate multiple points in time
/ multiple satellites at once.
Does not interpolate GLONASS.
Implemented according to
<NAME>., et al. “Computing GPS Satellite Velocity and
Acceleration from the Broadcast Navigation Message.” Annual of
Navigation, vol. 66, no. 4, 2019, pp. 769–779.
https://www.gps.gov/technical/icwg/meetings/2019/09/GPS-SV-velocity-and-acceleration.pdf
Inputs:
t - GPS time(s) [s] (ignored for SBAS)
eph - Ephemeris as array(s)
Outputs:
positions - Satellite position(s) in ECEF XYZ as array(s) [m]
velocities - Satellite velocity/ies in ECEF XYZ as array(s) [m/s]
accelerations - Sat. acceleration(s) in ECEF XYZ as array(s) [m/s^2]
Author: <NAME>
"""
if not np.isnan(eph[2]).any(): # No SBAS / GLONASS
t = np.mod(t, 7 * 24 * 60 * 60)
        cic = eph[13]  # "cic"
        crs = eph[10]  # "crs"
        Omega0 = eph[15]  # "Omega0"
        Deltan = eph[4]  # "Deltan"
        cis = eph[14]  # "cis"
        M0 = eph[2]  # "M0"
        i0 = eph[11]  # "i0"
        cuc = eph[7]  # "cuc"
        crc = eph[9]  # "crc"
        e = eph[5]  # "e"
        Omega = eph[6]  # "Omega"
        cus = eph[8]  # "cus"
        OmegaDot = eph[16]  # "OmegaDot"
        sqrtA = eph[3]  # "sqrtA"
        IDOT = eph[12]  # "IDOT"
        toe = eph[20]  # "toe"
# Broadcast Navigation User Equations
# WGS 84 value of the earth’s gravitational constant for GPS user [m^3/s^2]
mu = 3.986005e14
# WGS 84 value of the earth’s rotation rate [rad/s]
OmegaeDot = 7.2921151467e-5
# Semi-major axis
A = sqrtA ** 2
# Computed mean motion [rad/s]
n0 = np.sqrt(mu / A ** 3)
# Time from ephemeris reference epoch
tk = np.array(t - toe)
# t is GPS system time at time of transmission, i.e., GPS time corrected
# for transit time (range/speed of light). Furthermore, tk shall be the
# actual total time difference between the time t and the epoch time toe,
# and must account for beginning or end of week crossovers. That is, if tk
# is greater than 302,400 seconds, subtract 604,800 seconds from tk. If tk
# is less than -302,400 seconds, add 604,800 seconds to tk.
with np.nditer(tk, op_flags=["readwrite"]) as it:
for tk_i in it:
if tk_i > 302400:
tk_i[...] = tk_i - 604800
elif tk_i < -302400:
tk_i[...] = tk_i + 604800
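        # A vectorized equivalent of the loop above (a sketch, assuming tk is
        # a float ndarray):
        #     tk = np.where(tk > 302400, tk - 604800, tk)
        #     tk = np.where(tk < -302400, tk + 604800, tk)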
# Corrected mean motion
n = n0 + Deltan
# Mean anomaly
Mk = M0 + n * tk
# Kepler’s equation (Mk = Ek - e*np.sin(Ek)) solved for eccentric anomaly
# (Ek) by iteration:
# Initial value [rad]
Ek = Mk
# Refined value, three iterations, (j = 0,1,2)
for j in range(3):
Ek = Ek + (Mk - Ek + e * np.sin(Ek)) / (1 - e * np.cos(Ek))
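        # Each refinement above is one Newton step for f(E) = E - e*sin(E) - M,
        # since f'(E) = 1 - e*cos(E).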
# True anomaly (unambiguous quadrant)
nuk = 2 * np.arctan(np.sqrt((1 + e) / (1 - e)) * np.tan(Ek / 2))
# Argument of Latitude
Phik = nuk + Omega
# Argument of Latitude Correction
deltauk = cus * np.sin(2 * Phik) + cuc * np.cos(2 * Phik)
# Radius Correction
deltark = crs * np.sin(2 * Phik) + crc * np.cos(2 * Phik)
# Inclination Correction
deltaik = cis * np.sin(2 * Phik) + cic * np.cos(2 * Phik)
# Corrected Argument of Latitude
uk = Phik + deltauk
# Corrected Radius
rk = A * (1 - e * np.cos(Ek)) + deltark
# Corrected Inclination
ik = i0 + deltaik + IDOT * tk
# Positions in Orbital Plane
xkDash = rk * np.cos(uk)
ykDash = rk * np.sin(uk)
# Corrected longitude of ascending node
Omegak = Omega0 + (OmegaDot - OmegaeDot) * tk - OmegaeDot * toe
# Earth-fixed coordinates
        xk = xkDash * np.cos(Omegak) - ykDash * np.cos(ik) * np.sin(Omegak)
from os import environ, remove
from tempfile import NamedTemporaryFile, mktemp
from unittest import TestCase, main
from numpy import (
arange,
array,
e,
greater_equal,
less_equal,
log,
nan,
sqrt,
zeros,
)
from cogent3 import (
DNA,
PROTEIN,
RNA,
load_aligned_seqs,
make_aligned_seqs,
make_tree,
)
from cogent3.core.alignment import ArrayAlignment
from cogent3.core.alphabet import CharAlphabet
from cogent3.evolve.coevolution import (
DEFAULT_NULL_VALUE,
AAGapless,
aln_position_pairs_cmp_threshold,
aln_position_pairs_ge_threshold,
aln_position_pairs_le_threshold,
ancestral_state_alignment,
ancestral_state_pair,
ancestral_state_position,
ancestral_states_input_validation,
build_coevolution_matrix_filepath,
calc_pair_scale,
coevolution_matrix_to_csv,
coevolve_alignment,
coevolve_alignments,
coevolve_alignments_validation,
coevolve_pair,
coevolve_position,
count_ge_threshold,
count_le_threshold,
csv_to_coevolution_matrix,
filter_exclude_positions,
filter_non_parsimony_informative,
filter_threshold_based_multiple_interdependency,
freqs_from_aln,
freqs_to_array,
get_allowed_perturbations,
get_ancestral_seqs,
get_dg,
get_dgg,
get_positional_frequencies,
get_positional_probabilities,
get_subalignments,
identify_aln_positions_above_threshold,
ignore_excludes,
is_parsimony_informative,
join_positions,
ltm_to_symmetric,
make_weights,
merge_alignments,
mi,
mi_alignment,
mi_pair,
mi_position,
n_random_seqs,
nmi,
nmi_alignment,
nmi_pair,
nmi_position,
normalized_mi,
parse_coevolution_matrix_filepath,
pickle_coevolution_result,
probs_from_dict,
protein_dict,
resampled_mi_alignment,
sca_alignment,
sca_input_validation,
sca_pair,
sca_position,
unpickle_coevolution_result,
validate_alignment,
validate_alphabet,
validate_ancestral_seqs,
validate_position,
validate_tree,
)
from cogent3.maths.stats.number import CategoryCounter
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["<NAME>"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Beta"
from numpy.testing import assert_allclose, assert_equal
class CoevolutionTests(TestCase):
"""Tests of coevolution.py"""
def setUp(self):
"""Set up variables for us in tests"""
self.run_slow_tests = int(environ.get("TEST_SLOW_APPC", 0))
# Data used in SCA tests
self.dna_aln = ArrayAlignment(
data=list(zip(list(range(4)), ["ACGT", "AGCT", "ACCC", "TAGG"])),
moltype=DNA,
)
self.rna_aln = ArrayAlignment(
data=list(zip(list(range(4)), ["ACGU", "AGCU", "ACCC", "UAGG"])),
moltype=RNA,
)
self.protein_aln = ArrayAlignment(
data=list(zip(list(range(4)), ["ACGP", "AGCT", "ACCC", "TAGG"])),
moltype=PROTEIN,
)
self.dna_aln_gapped = ArrayAlignment(
data=list(zip(list(range(4)), ["A-CGT", "AGC-T", "-ACCC", "TAGG-"])),
moltype=DNA,
)
self.freq = ArrayAlignment(
data=list(
zip(
list(range(20)),
[
"TCT",
"CCT",
"CCC",
"CCC",
"CCG",
"CC-",
"AC-",
"AC-",
"AA-",
"AA-",
"GA-",
"GA-",
"GA-",
"GA-",
"GA-",
"G--",
"G--",
"G--",
"G--",
"G--",
],
)
),
moltype=PROTEIN,
)
self.two_pos = ArrayAlignment(
data=list(
zip(
list(map(str, list(range(20)))),
[
"TC",
"CC",
"CC",
"CC",
"CC",
"CC",
"AC",
"AC",
"AA",
"AA",
"GA",
"GA",
"GA",
"GA",
"GA",
"GT",
"GT",
"GT",
"GT",
"GT",
],
)
),
moltype=PROTEIN,
)
self.tree20 = make_tree(treestring=tree20_string)
self.gpcr_aln = gpcr_aln
self.myos_aln = myos_aln
# a made-up dict of base frequencies to use as the natural freqs
# for SCA calcs on DNA seqs
self.dna_base_freqs = dict(list(zip("ACGT", [0.25] * 4)))
self.rna_base_freqs = dict(list(zip("ACGU", [0.25] * 4)))
self.protein_aln4 = ArrayAlignment(
[("A1", "AACF"), ("A12", "AADF"), ("A123", "ADCF"), ("A111", "AAD-")],
moltype=PROTEIN,
)
self.rna_aln4 = ArrayAlignment(
[("A1", "AAUU"), ("A12", "ACGU"), ("A123", "UUAA"), ("A111", "AAA-")],
moltype=RNA,
)
self.dna_aln4 = ArrayAlignment(
[("A1", "AATT"), ("A12", "ACGT"), ("A123", "TTAA"), ("A111", "AAA?")],
moltype=DNA,
)
self.tree4 = make_tree(
treestring="((A1:0.5,A111:0.5):0.5,(A12:0.5,A123:0.5):0.5);"
)
def test_alignment_analyses_moltype_protein(self):
"""alignment methods work with moltype = PROTEIN"""
r = mi_alignment(self.protein_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.protein_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(self.protein_aln4, cutoff=0.75)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.protein_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_alignment_analyses_moltype_rna(self):
"""alignment methods work with moltype = RNA"""
r = mi_alignment(self.rna_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.rna_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(
self.rna_aln4,
cutoff=0.75,
alphabet="ACGU",
background_freqs=self.rna_base_freqs,
)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.rna_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_alignment_analyses_moltype_dna(self):
"""alignment methods work with moltype = DNA"""
r = mi_alignment(self.dna_aln4)
self.assertEqual(r.shape, (4, 4))
r = nmi_alignment(self.dna_aln4)
self.assertEqual(r.shape, (4, 4))
r = sca_alignment(
self.dna_aln4,
cutoff=0.75,
alphabet="ACGT",
background_freqs=self.dna_base_freqs,
)
self.assertEqual(r.shape, (4, 4))
r = ancestral_state_alignment(self.dna_aln4, self.tree4)
self.assertEqual(r.shape, (4, 4))
def test_join_positions(self):
"""join_positions functions as expected"""
self.assertEqual(
join_positions(list("ABCD"), list("WXYZ")), ["AW", "BX", "CY", "DZ"]
)
self.assertEqual(join_positions(list("AAA"), list("BBB")), ["AB", "AB", "AB"])
self.assertEqual(join_positions([], []), [])
def test_mi(self):
"""mi calculations function as expected with valid data"""
assert_allclose(mi(1.0, 1.0, 1.0), 1.0)
assert_allclose(mi(1.0, 1.0, 2.0), 0.0)
assert_allclose(mi(1.0, 1.0, 1.5), 0.5)
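        # The three cases above are consistent with MI = H(X) + H(Y) - H(X,Y),
        # i.e. mi(h1, h2, joint_h) = h1 + h2 - joint_h.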
def test_normalized_mi(self):
"""normalized mi calculations function as expected with valid data"""
assert_allclose(normalized_mi(1.0, 1.0, 1.0), 1.0)
assert_allclose(normalized_mi(1.0, 1.0, 2.0), 0.0)
        assert_allclose(normalized_mi(1.0, 1.0, 1.5), 0.3333, atol=1e-4)
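        # Consistent with normalized MI = MI / joint entropy:
        # 0.5 / 1.5 ~= 0.3333.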
def test_mi_pair(self):
"""mi_pair calculates mi from a pair of columns"""
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 0.0)
aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 1.0)
# order of positions doesn't matter (when it shouldn't)
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), mi_pair(aln, pos1=1, pos2=0))
aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), mi_pair(aln, pos1=1, pos2=0))
def test_wrapper_functions_handle_invalid_parameters(self):
"""coevolve_*: functions error on missing parameters"""
# missing cutoff
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
self.assertRaises(ValueError, coevolve_pair, sca_pair, aln, 0, 1)
self.assertRaises(ValueError, coevolve_position, sca_position, aln, 0)
self.assertRaises(ValueError, coevolve_alignment, sca_alignment, aln)
self.assertRaises(ValueError, coevolve_alignments, sca_alignment, aln, aln)
def test_coevolve_pair(self):
"""coevolve_pair: returns same as pair methods called directly"""
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# mi_pair == coevolve_pair(mi_pair,...)
assert_allclose(
coevolve_pair(mi_pair, aln, pos1=0, pos2=1), mi_pair(aln, pos1=0, pos2=1)
)
assert_allclose(
coevolve_pair(nmi_pair, aln, pos1=0, pos2=1), nmi_pair(aln, pos1=0, pos2=1)
)
assert_allclose(
coevolve_pair(ancestral_state_pair, aln, pos1=0, pos2=1, tree=t),
ancestral_state_pair(aln, pos1=0, pos2=1, tree=t),
)
assert_allclose(
coevolve_pair(sca_pair, aln, pos1=0, pos2=1, cutoff=cutoff),
sca_pair(aln, pos1=0, pos2=1, cutoff=cutoff),
)
def test_coevolve_position(self):
"""coevolve_position: returns same as position methods called directly"""
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# mi_position == coevolve_position(mi_position,...)
assert_allclose(
coevolve_position(mi_position, aln, position=0),
mi_position(aln, position=0),
)
assert_allclose(
coevolve_position(nmi_position, aln, position=0),
nmi_position(aln, position=0),
)
assert_allclose(
coevolve_position(ancestral_state_position, aln, position=0, tree=t),
ancestral_state_position(aln, position=0, tree=t),
)
assert_allclose(
coevolve_position(sca_position, aln, position=0, cutoff=cutoff),
sca_position(aln, position=0, cutoff=cutoff),
)
def test_coevolve_alignment(self):
"""coevolve_alignment: returns same as alignment methods"""
aln = ArrayAlignment(data={"1": "AC", "2": "AC"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# mi_alignment == coevolve_alignment(mi_alignment,...)
assert_allclose(coevolve_alignment(mi_alignment, aln), mi_alignment(aln))
assert_allclose(coevolve_alignment(mip_alignment, aln), mip_alignment(aln))
assert_allclose(coevolve_alignment(mia_alignment, aln), mia_alignment(aln))
assert_allclose(coevolve_alignment(nmi_alignment, aln), nmi_alignment(aln))
assert_allclose(
coevolve_alignment(ancestral_state_alignment, aln, tree=t),
ancestral_state_alignment(aln, tree=t),
)
assert_allclose(
coevolve_alignment(sca_alignment, aln, cutoff=cutoff),
sca_alignment(aln, cutoff=cutoff),
)
    def test_coevolve_alignments_validation_identifiers(self):
"""coevolve_alignments_validation: seq/tree validation functions"""
method = sca_alignment
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
# OK w/ no tree
coevolve_alignments_validation(method, aln1, aln2, 2, None)
# OK w/ tree
coevolve_alignments_validation(method, aln1, aln2, 2, None, tree=t)
        # If there is a plus present in identifiers, we only care about the
        # text before the plus
aln1 = ArrayAlignment(data={"1+a": "AC", "2+b": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1 + c": "EFW", "2 + d": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(1+e:0.5,2 + f:0.5);")
# OK w/ no tree
coevolve_alignments_validation(method, aln1, aln2, 2, None)
# OK w/ tree
coevolve_alignments_validation(method, aln1, aln2, 2, None, tree=t)
# mismatch b/w alignments seq names
aln1 = ArrayAlignment(data={"3": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
method,
aln1,
aln2,
2,
None,
tree=t,
)
# mismatch b/w alignments and tree seq names
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(3:0.5,2:0.5);")
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
method,
aln1,
aln2,
2,
None,
tree=t,
)
# mismatch b/w alignments in number of seqs
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD", "3": "AA"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,2:0.5);")
self.assertRaises(
AssertionError, coevolve_alignments_validation, method, aln1, aln2, 2, None
)
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
method,
aln1,
aln2,
2,
None,
tree=t,
)
# mismatch b/w alignments & tree in number of seqs
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
t = make_tree(treestring="(1:0.5,(2:0.5,3:0.25));")
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
method,
aln1,
aln2,
2,
None,
tree=t,
)
def test_coevolve_alignments_validation_min_num_seqs(self):
"""coevolve_alignments_validation: ValueError on fewer than min_num_seqs"""
method = mi_alignment
# too few sequences -> ValueError
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments_validation(method, aln1, aln2, 1, None)
coevolve_alignments_validation(method, aln1, aln2, 2, None)
self.assertRaises(
ValueError, coevolve_alignments_validation, method, aln1, aln2, 3, None
)
def test_coevolve_alignments_validation_max_num_seqs(self):
"""coevolve_alignments_validation: min_num_seqs <= max_num_seqs"""
method = mi_alignment
# min_num_seqs > max_num_seqs-> ValueError
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments_validation(method, aln1, aln2, 1, None)
coevolve_alignments_validation(method, aln1, aln2, 1, 3)
coevolve_alignments_validation(method, aln1, aln2, 2, 3)
self.assertRaises(
ValueError, coevolve_alignments_validation, method, aln1, aln2, 3, 2
)
def test_coevolve_alignments_validation_moltypes(self):
"""coevolve_alignments_validation: valid for acceptable moltypes"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
# different moltype
coevolve_alignments_validation(mi_alignment, aln1, aln2, 2, None)
coevolve_alignments_validation(nmi_alignment, aln1, aln2, 2, None)
coevolve_alignments_validation(resampled_mi_alignment, aln1, aln2, 2, None)
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
sca_alignment,
aln1,
aln2,
2,
None,
)
self.assertRaises(
AssertionError,
coevolve_alignments_validation,
ancestral_state_alignment,
aln1,
aln2,
2,
None,
)
def test_coevolve_alignments(self):
"""coevolve_alignments: returns correct len(aln1) x len(aln2) matrix"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
combined_aln = ArrayAlignment(
data={"1": "ACEFW", "2": "ADEGY"}, moltype=PROTEIN
)
t = make_tree(treestring="(1:0.5,2:0.5);")
cutoff = 0.50
# MI
m = mi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(mi_alignment, aln1, aln2), expected)
# MI (return_full=True)
assert_allclose(
coevolve_alignments(mi_alignment, aln1, aln2, return_full=True), m
)
# NMI
m = nmi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(nmi_alignment, aln1, aln2), expected)
# AS
m = ancestral_state_alignment(combined_aln, tree=t)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(
coevolve_alignments(ancestral_state_alignment, aln1, aln2, tree=t), expected
)
# SCA
m = sca_alignment(combined_aln, cutoff=cutoff)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(
coevolve_alignments(sca_alignment, aln1, aln2, cutoff=cutoff), expected
)
def test_coevolve_alignments_watches_min_num_seqs(self):
"""coevolve_alignments: error on too few sequences"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD"}, moltype=PROTEIN)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
coevolve_alignments(mi_alignment, aln1, aln2)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=0)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=1)
coevolve_alignments(mi_alignment, aln1, aln2, min_num_seqs=2)
self.assertRaises(
ValueError, coevolve_alignments, mi_alignment, aln1, aln2, min_num_seqs=3
)
self.assertRaises(
ValueError, coevolve_alignments, mi_alignment, aln1, aln2, min_num_seqs=50
)
def test_coevolve_alignments_watches_max_num_seqs(self):
"""coevolve_alignments: filtering or error on too many sequences"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AD", "3": "YP"}, moltype=PROTEIN)
aln2 = ArrayAlignment(
data={"1": "ACP", "2": "EAD", "3": "PYP"}, moltype=PROTEIN
)
# keep all seqs
tmp_filepath = NamedTemporaryFile(
prefix="tmp_test_coevolution", suffix=".fasta"
).name
coevolve_alignments(
mi_alignment, aln1, aln2, max_num_seqs=3, merged_aln_filepath=tmp_filepath
)
self.assertEqual(load_aligned_seqs(tmp_filepath).num_seqs, 3)
# keep 2 seqs
coevolve_alignments(
mi_alignment, aln1, aln2, max_num_seqs=2, merged_aln_filepath=tmp_filepath
)
seqs = load_aligned_seqs(tmp_filepath)
self.assertEqual(seqs.num_seqs, 2)
# error if no sequence filter
self.assertRaises(
ValueError,
coevolve_alignments,
mi_alignment,
aln1,
aln2,
max_num_seqs=2,
merged_aln_filepath=tmp_filepath,
sequence_filter=None,
)
# clean up the temporary file
remove(tmp_filepath)
def test_coevolve_alignments_different_MolType(self):
"""coevolve_alignments: different MolTypes supported"""
aln1 = ArrayAlignment(data={"1": "AC", "2": "AU"}, moltype=RNA)
aln2 = ArrayAlignment(data={"1": "EFW", "2": "EGY"}, moltype=PROTEIN)
combined_aln = ArrayAlignment(data={"1": "ACEFW", "2": "AUEGY"})
t = make_tree(treestring="(1:0.5,2:0.5);")
# MI
m = mi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(mi_alignment, aln1, aln2), expected)
# MI (return_full=True)
assert_allclose(
coevolve_alignments(mi_alignment, aln1, aln2, return_full=True), m
)
# NMI
m = nmi_alignment(combined_aln)
expected = array([[m[2, 0], m[2, 1]], [m[3, 0], m[3, 1]], [m[4, 0], m[4, 1]]])
assert_allclose(coevolve_alignments(nmi_alignment, aln1, aln2), expected)
def test_mi_pair_cols_default_exclude_handling(self):
"""mi_pair returns null_value on excluded by default"""
aln = ArrayAlignment(data={"1": "AB", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "-B", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "AA", "2": "-B"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
aln = ArrayAlignment(data={"1": "AA", "2": "PB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1, excludes="P"), DEFAULT_NULL_VALUE)
def test_mi_pair_cols_non_default_exclude_handling(self):
"""mi_pair uses non-default exclude_handler when provided"""
aln = ArrayAlignment(data={"1": "A-", "2": "A-"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), DEFAULT_NULL_VALUE)
assert_allclose(
mi_pair(aln, pos1=0, pos2=1, exclude_handler=ignore_excludes), 0.0
)
def test_mi_pair_cols_and_entropies(self):
"""mi_pair calculates mi from a pair of columns and precalc entropies"""
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=0.0, h2=0.0), 0.0)
aln = ArrayAlignment(data={"1": "AB", "2": "BA"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=1.0, h2=1.0), 1.0)
        # incorrect positional entropies provided to ensure that the
        # precalculated values are used, and that entropies are not
        # calculated on-the-fly.
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1, h1=1.0, h2=1.0), 2.0)
def test_mi_pair_alt_calculator(self):
"""mi_pair uses alternate mi_calculator when provided"""
aln = ArrayAlignment(data={"1": "AB", "2": "AB"}, moltype=PROTEIN)
assert_allclose(mi_pair(aln, pos1=0, pos2=1), 0.0)
assert_allclose(
mi_pair(aln, pos1=0, pos2=1, mi_calculator=normalized_mi),
DEFAULT_NULL_VALUE,
)
def test_mi_position_valid_input(self):
"""mi_position functions with varied valid input"""
aln = ArrayAlignment(data={"1": "ACG", "2": "GAC"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([1.0, 1.0, 1.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([0.0, 0.0, 0.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 2), array([0.0, 0.0, 0.0]))
def test_mi_position_from_alignment_nmi(self):
"""mi_position functions w/ alternate mi_calculator"""
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([0.0, 0.0, 0.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(
mi_position(aln, 0, mi_calculator=normalized_mi),
array([DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE]),
)
def test_mi_position_from_alignment_default_exclude_handling(self):
"""mi_position handles excludes by setting to null_value"""
aln = ArrayAlignment(data={"1": "ACG", "2": "G-C"}, moltype=PROTEIN)
assert_allclose(mi_position(aln, 0), array([1.0, DEFAULT_NULL_VALUE, 1.0]))
aln = ArrayAlignment(data={"1": "ACG", "2": "GPC"}, moltype=PROTEIN)
assert_allclose(
mi_position(aln, 0, excludes="P"), array([1.0, DEFAULT_NULL_VALUE, 1.0])
)
def test_mi_position_from_alignment_non_default_exclude_handling(self):
"""mi_position handles excludes w/ non-default method"""
aln = ArrayAlignment(data={"1": "ACG", "2": "G-C"}, moltype=PROTEIN)
assert_allclose(
mi_position(aln, 0, exclude_handler=ignore_excludes), array([1.0, 1.0, 1.0])
)
def test_mi_alignment_excludes(self):
"""mi_alignment handles excludes properly"""
expected = array(
[
[0.0, DEFAULT_NULL_VALUE, 0.0],
[DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE],
[0.0, DEFAULT_NULL_VALUE, 0.0],
]
)
# gap in second column
aln = ArrayAlignment(data={"1": "ACG", "2": "A-G"}, moltype=PROTEIN)
assert_allclose(mi_alignment(aln), expected)
# excludes = 'P'
aln = ArrayAlignment(data={"1": "ACG", "2": "APG"}, moltype=PROTEIN)
assert_allclose(mi_alignment(aln, excludes="P"), expected)
# gap in first column
expected = array(
[
[DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE],
[DEFAULT_NULL_VALUE, 0.0, 0.0],
[DEFAULT_NULL_VALUE, 0.0, 0.0],
]
)
aln = ArrayAlignment(data={"1": "-CG", "2": "ACG"}, moltype=PROTEIN)
assert_allclose(mi_alignment(aln), expected)
def test_mi_alignment_high(self):
"""mi_alignment detected perfectly correlated columns"""
expected = [[1.0, 1.0], [1.0, 1.0]]
aln = ArrayAlignment(data={"1": "AG", "2": "GA"}, moltype=PROTEIN)
assert_allclose(mi_alignment(aln), expected)
def test_mi_alignment_low(self):
"""mi_alignment detected in perfectly uncorrelated columns"""
expected = [[0.0, 0.0], [0.0, 1.0]]
aln = ArrayAlignment(data={"1": "AG", "2": "AC"}, moltype=PROTEIN)
assert_allclose(mi_alignment(aln), expected)
def test_resampled_mi_alignment(self):
"""resampled_mi_alignment returns without error"""
aln = ArrayAlignment(
data={"1": "ACDEF", "2": "ACFEF", "3": "ACGEF"}, moltype=PROTEIN
)
resampled_mi_alignment(aln)
aln = ArrayAlignment(
data={"1": "ACDEF", "2": "ACF-F", "3": "ACGEF"}, moltype=PROTEIN
)
resampled_mi_alignment(aln)
    def test_coevolve_alignment_varied_input(self):
"""coevolve_alignment functions as expected with varied input"""
aln1 = ArrayAlignment(
data={"1": "ACDEF", "2": "ACFEF", "3": "ACGEF"}, moltype=PROTEIN
)
# no kwargs passed
assert_allclose(coevolve_alignment(mi_alignment, aln1), mi_alignment(aln1))
# different method passed
assert_allclose(coevolve_alignment(nmi_alignment, aln1), nmi_alignment(aln1))
# kwargs passed
assert_allclose(
coevolve_alignment(mi_alignment, aln1, mi_calculator=nmi),
nmi_alignment(aln1),
)
def test_build_coevolution_matrix_filepath(self):
"""build_coevolution_matrix_filepath functions w/ varied input"""
self.assertEqual(build_coevolution_matrix_filepath("./blah.fasta"), "./blah")
self.assertEqual(build_coevolution_matrix_filepath("blah.fasta"), "./blah")
self.assertEqual(build_coevolution_matrix_filepath("blah"), "./blah")
self.assertEqual(build_coevolution_matrix_filepath("./blah"), "./blah")
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta", output_dir="./duh/", method="xx", alphabet="yyy"
),
"./duh/blah.yyy.xx",
)
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta",
output_dir="./duh/",
method="xx",
alphabet="yyy",
parameter=0.25,
),
"./duh/blah.yyy.xx",
)
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta", output_dir="./duh/", method="xx"
),
"./duh/blah.xx",
)
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta", output_dir="./duh/", method="sca", parameter=0.25
),
"./duh/blah.sca_25",
)
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta",
output_dir="./duh/",
method="sca",
parameter=0.25,
alphabet="xx",
),
"./duh/blah.xx.sca_25",
)
# no trailing / to output_dir
self.assertEqual(
build_coevolution_matrix_filepath(
"./blah.fasta",
output_dir="./duh",
method="sca",
parameter=0.25,
alphabet="xx",
),
"./duh/blah.xx.sca_25",
)
self.assertRaises(
ValueError,
build_coevolution_matrix_filepath,
"./blah.fasta",
"./duh/",
"sca",
)
self.assertRaises(
ValueError,
build_coevolution_matrix_filepath,
"./blah.fasta",
"./duh/",
"sca",
"xx",
)
def test_pickle_coevolution_result_error(self):
"""pickle matrix: IOError handled correctly"""
m = array([[1, 2], [3, 4]])
self.assertRaises(IOError, pickle_coevolution_result, m, "")
def test_unpickle_coevolution_result_error(self):
"""unpickle matrix: IOError handled correctly"""
self.assertRaises(IOError, unpickle_coevolution_result, "invalid/file/path.pkl")
def test_pickle_and_unpickle(self):
"""unpickle(pickle(matrix)) == matrix"""
for expected in [4.5, array([1.2, 4.3, 5.5]), array([[1.4, 2.2], [3.0, 0.4]])]:
filepath = mktemp()
pickle_coevolution_result(expected, filepath)
actual = unpickle_coevolution_result(filepath)
assert_allclose(actual, expected)
remove(filepath)
def test_csv_coevolution_result_error(self):
"""matrix -> csv: IOError handled correctly"""
m = array([[1, 2], [3, 4]])
self.assertRaises(IOError, coevolution_matrix_to_csv, m, "")
def test_uncsv_coevolution_result_error(self):
"""csv -> matrix: IOError handled correctly"""
self.assertRaises(IOError, csv_to_coevolution_matrix, "invalid/file/path.pkl")
def test_csv_and_uncsv(self):
"""converting to/from csv matrix results in correct coevolution matrix"""
expected = array([[1.4, 2.2], [DEFAULT_NULL_VALUE, 0.4]])
filepath = mktemp()
coevolution_matrix_to_csv(expected, filepath)
actual = csv_to_coevolution_matrix(filepath)
assert_allclose(actual, expected)
remove(filepath)
def test_parse_coevolution_matrix_filepath(self):
"""Parsing matrix filepaths works as expected."""
expected = ("myosin_995", "a1_4", "nmi")
self.assertEqual(
parse_coevolution_matrix_filepath("pkls/myosin_995.a1_4.nmi.pkl"), expected
)
self.assertEqual(
parse_coevolution_matrix_filepath("pkls/myosin_995.a1_4.nmi.csv"), expected
)
expected = ("p53", "orig", "mi")
self.assertEqual(parse_coevolution_matrix_filepath("p53.orig.mi.pkl"), expected)
self.assertEqual(parse_coevolution_matrix_filepath("p53.orig.mi.csv"), expected)
def test_parse_coevolution_matrix_filepath_error(self):
"""Parsing matrix file paths handles invalid filepaths"""
self.assertRaises(
ValueError, parse_coevolution_matrix_filepath, "pkls/myosin_995.nmi.pkl"
)
self.assertRaises(
ValueError, parse_coevolution_matrix_filepath, "pkls/myosin_995.pkl"
)
self.assertRaises(
ValueError, parse_coevolution_matrix_filepath, "pkls/myosin_995"
)
self.assertRaises(ValueError, parse_coevolution_matrix_filepath, "")
def test_identify_aln_positions_above_threshold(self):
"""Extracting scores above threshold works as expected"""
m = array(
[
[
DEFAULT_NULL_VALUE,
DEFAULT_NULL_VALUE,
DEFAULT_NULL_VALUE,
DEFAULT_NULL_VALUE,
],
[0.3, 1.0, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE],
[0.25, 0.75, 1.0, DEFAULT_NULL_VALUE],
[0.9, 0.751, 0.8, 1.0],
]
)
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 0), [])
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 1), [1])
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 2), [1, 2])
self.assertEqual(
identify_aln_positions_above_threshold(m, 0.75, 3), [0, 1, 2, 3]
)
m = ltm_to_symmetric(m)
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 0), [3])
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 1), [1, 2, 3])
self.assertEqual(identify_aln_positions_above_threshold(m, 0.75, 2), [1, 2, 3])
self.assertEqual(
identify_aln_positions_above_threshold(m, 0.75, 3), [0, 1, 2, 3]
)
self.assertEqual(identify_aln_positions_above_threshold(m, 1.1, 0), [])
self.assertEqual(identify_aln_positions_above_threshold(m, -5.0, 0), [1, 2, 3])
self.assertEqual(
identify_aln_positions_above_threshold(m, -5.0, 1), [0, 1, 2, 3]
)
def test_count_ge_threshold(self):
"""count_ge_threshold works as expected"""
m = array([[DEFAULT_NULL_VALUE] * 3] * 3)
self.assertEqual(count_ge_threshold(m, 1.0), (0, 0))
self.assertEqual(
count_ge_threshold(m, DEFAULT_NULL_VALUE, DEFAULT_NULL_VALUE), (0, 0)
)
self.assertEqual(count_ge_threshold(m, 1.0, 42), (0, 9))
        m = array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
#!/usr/bin/env python
# encoding: utf-8
import numpy as np
import pandas as pd
import seaborn as sns
from IPython import embed as shell
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
def get_DDM_traces(v=1, z=0.5, dc=0, dc_slope=0, sv=0.1, noise_sd=1, stim=0, nr_trials=1000, tmax=5.0, dt=0.01):
"""
DDM
v: mean drift rate
z: starting point
dc: drift criterion
"""
if stim == 0:
v = np.random.normal(-v,sv,nr_trials)
elif stim == 1:
v = np.random.normal(v,sv,nr_trials)
x = np.zeros((nr_trials, int(tmax/dt)))
x[:,:] = np.NaN
x[:,0] = z
for i in range((int(tmax/dt))-1):
x[:,i+1] = x[:,i] + ((v + dc + (dc_slope*dt*i) ) * dt) + (np.random.normal(0,noise_sd,nr_trials)*np.sqrt(dt))
return x
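# Example usage (a sketch; parameter values are illustrative, not fitted):
#   x = get_DDM_traces(v=1.0, z=0.5, dc=0.1, nr_trials=100)
#   x.shape  # (100, 500), i.e. nr_trials x int(tmax / dt) time steps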
def get_OU_traces(v, ll, dc, z, noise_sd=1, pre_generated=False, stim=0, nr_trials=1000, tmax=5.0, dt=0.01):
"""
OU-model
v: mean drift rate
ll: Ornstein-Uhlenbeck process parameter (effective leak / self-excitation)
z: starting point
dc: drift criterion
"""
if stim == 0:
v = v[::-1]
x1 = np.zeros((nr_trials, int(tmax/dt)))
x2 = np.zeros((nr_trials, int(tmax/dt)))
x1[:,:] = np.NaN
x2[:,:] = np.NaN
x1[:,0] = z[0]
x2[:,0] = z[1]
for i in range((int(tmax/dt))-1):
if pre_generated:
x1[:,i+1] = x1[:,i] + v[0][:,i] + dc[0] - (ll[0]*x1[:,i])
x2[:,i+1] = x2[:,i] + v[1][:,i] + dc[1] - (ll[1]*x2[:,i])
else:
x1[:,i+1] = x1[:,i] + ((v[0] + dc[0] - (ll[0]*x1[:,i])) * dt) + (np.random.normal(0,noise_sd/np.sqrt(2),nr_trials)*np.sqrt(dt))
x2[:,i+1] = x2[:,i] + ((v[1] + dc[1] - (ll[1]*x2[:,i])) * dt) + (np.random.normal(0,noise_sd/np.sqrt(2),nr_trials)*np.sqrt(dt))
return x1-x2
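# Example usage (a sketch; values are illustrative): two accumulators with
# equal leak, returning the difference trace x1 - x2:
#   dx = get_OU_traces(v=[1.0, 0.8], ll=[0.2, 0.2], dc=[0.0, 0.0],
#                      z=[0.1, 0.1], nr_trials=100)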
def get_LCA_traces(v, k, w, dc, z, noise_sd=1, pre_generated=False, stim=0, nr_trials=1000, tmax=5.0, dt=0.01):
"""
LCA
"""
if stim == 0:
v = v[::-1]
x1 = np.zeros((nr_trials, int(tmax/dt)))
x2 = np.zeros((nr_trials, int(tmax/dt)))
x1[:,:] = np.NaN
x2[:,:] = np.NaN
x1[:,0] = z[0]
x2[:,0] = z[1]
for i in range((int(tmax/dt))-1):
if pre_generated:
x1[:,i+1] = np.clip(x1[:,i] + v[0][:,i] + dc[0] - (k[0]*x1[:,i]) - (w[1]*x2[:,i]), a_min=0, a_max=1e6)
x2[:,i+1] = np.clip(x2[:,i] + v[1][:,i] + dc[1] - (k[1]*x2[:,i]) - (w[0]*x1[:,i]), a_min=0, a_max=1e6)
else:
x1[:,i+1] = np.clip(x1[:,i] + ((v[0] + dc[0] - (k[0]*x1[:,i]) - (w[1]*x2[:,i])) * dt) + (np.random.normal(0,noise_sd,nr_trials)*np.sqrt(dt)), a_min=0, a_max=1e6)
x2[:,i+1] = np.clip(x2[:,i] + ((v[1] + dc[1] - (k[1]*x2[:,i]) - (w[0]*x1[:,i])) * dt) + (np.random.normal(0,noise_sd,nr_trials)*np.sqrt(dt)), a_min=0, a_max=1e6)
return x1, x2
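# Example usage (a sketch; values are illustrative): LCA returns both
# accumulator traces separately, each clipped at zero:
#   x1, x2 = get_LCA_traces(v=[1.0, 0.8], k=[0.2, 0.2], w=[0.1, 0.1],
#                           dc=[0.0, 0.0], z=[0.0, 0.0], nr_trials=100)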
def _bounds(a, lower_is_0=True, tmax=5, dt=0.01):
t = np.arange(0, tmax, dt)
b1 = np.ones(len(t)) * a
if lower_is_0:
b0 = np.zeros(len(t))
else:
b0 = -b1
return b1, b0
def _bounds_collapse_linear(a, c1, c0, lower_is_0=True, tmax=5, dt=0.01):
t = np.arange(0, tmax, dt)
b1 = (a)-(c1*t)
if lower_is_0:
b0 = 0+(c0*t)
else:
b0 = -b1
return b1, b0
def _bounds_collapse_hyperbolic(a, c, lower_is_0=True, tmax=5, dt=0.01):
    t = np.arange(0, tmax, dt)
import sounddevice as sd
import soundfile as sf
import numpy as np
import matplotlib.pyplot as plt
def quantize_a_law(data, bits):
start = -1
end = 1
bit_n = 2**bits-1
data = (data-start)/(end-start)
data = np.round(data*bit_n)/bit_n
data = (data*2)-1
return data
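# Example (a sketch): 8-bit uniform quantization of a signal in [-1, 1];
# the output takes at most 2**8 distinct levels:
#   y = quantize_a_law(np.linspace(-1, 1, 1000), bits=8)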
def encode_a_law(data):
    # NOTE: the original loop body was truncated mid-line; the branch below is
    # the standard A-law companding formula (ITU-T G.711) with A = 87.6, and
    # is a reconstruction, not the verbatim original.
    A = 87.6
    x_data = []
    for i in data:
        if np.abs(i) < 1 / A:
            x_data.append(np.sign(i) * A * np.abs(i) / (1 + np.log(A)))
        else:
            x_data.append(np.sign(i) * (1 + np.log(A * np.abs(i))) / (1 + np.log(A)))
    return np.array(x_data)
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the available built-in noisy
quantum channels supported by PennyLane, as well as their conventions.
"""
import warnings
import numpy as np
from pennylane.operation import AnyWires, Channel
class AmplitudeDamping(Channel):
r"""AmplitudeDamping(gamma, wires)
Single-qubit amplitude damping error channel.
Interaction with the environment can lead to changes in the state populations of a qubit.
This is the phenomenon behind scattering, dissipation, attenuation, and spontaneous emission.
It can be modelled by the amplitude damping channel, with the following Kraus matrices:
.. math::
K_0 = \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \begin{bmatrix}
0 & \sqrt{\gamma} \\
0 & 0
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the amplitude damping probability.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
gamma (float): amplitude damping probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
gamma = params[0]
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
K0 = np.diag([1, np.sqrt(1 - gamma)])
K1 = np.sqrt(gamma) * np.array([[0, 1], [0, 0]])
return [K0, K1]
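# Sanity check (a sketch, not part of the public API): Kraus operators must
# satisfy the completeness relation sum_i K_i^dagger K_i = I, e.g.
#   Ks = AmplitudeDamping._kraus_matrices(0.25)
#   np.allclose(sum(K.conj().T @ K for K in Ks), np.eye(2))  # -> True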
class GeneralizedAmplitudeDamping(Channel):
r"""GeneralizedAmplitudeDamping(gamma, p, wires)
Single-qubit generalized amplitude damping error channel.
This channel models the exchange of energy between a qubit and its environment
at finite temperatures, with the following Kraus matrices:
.. math::
K_0 = \sqrt{p} \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \sqrt{p}\begin{bmatrix}
0 & \sqrt{\gamma} \\
0 & 0
\end{bmatrix}
.. math::
K_2 = \sqrt{1-p}\begin{bmatrix}
\sqrt{1-\gamma} & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_3 = \sqrt{1-p}\begin{bmatrix}
0 & 0 \\
\sqrt{\gamma} & 0
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the probability of damping and :math:`p \in [0, 1]`
is the probability of the system being excited by the environment.
**Details:**
* Number of wires: 1
* Number of parameters: 2
Args:
gamma (float): amplitude damping probability
p (float): excitation probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 2
@classmethod
def _kraus_matrices(cls, *params):
gamma, p = params
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1].")
K0 = np.sqrt(p) * np.diag([1, np.sqrt(1 - gamma)])
K1 = np.sqrt(p) * np.sqrt(gamma) * np.array([[0, 1], [0, 0]])
K2 = np.sqrt(1 - p) * np.diag([np.sqrt(1 - gamma), 1])
K3 = np.sqrt(1 - p) * np.sqrt(gamma) * np.array([[0, 0], [1, 0]])
return [K0, K1, K2, K3]
class PhaseDamping(Channel):
r"""PhaseDamping(gamma, wires)
Single-qubit phase damping error channel.
Interaction with the environment can lead to loss of quantum information changes without any
changes in qubit excitations. This can be modelled by the phase damping channel, with
the following Kraus matrices:
.. math::
K_0 = \begin{bmatrix}
1 & 0 \\
0 & \sqrt{1-\gamma}
\end{bmatrix}
.. math::
K_1 = \begin{bmatrix}
0 & 0 \\
0 & \sqrt{\gamma}
\end{bmatrix}
where :math:`\gamma \in [0, 1]` is the phase damping probability.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
gamma (float): phase damping probability
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
gamma = params[0]
if not 0.0 <= gamma <= 1.0:
raise ValueError("gamma must be between [0,1].")
K0 = np.diag([1, np.sqrt(1 - gamma)])
K1 = np.diag([0, np.sqrt(gamma)])
return [K0, K1]
class DepolarizingChannel(Channel):
r"""DepolarizingChannel(p, wires)
Single-qubit symmetrically depolarizing error channel.
This channel is modelled by the following Kraus matrices:
.. math::
K_0 = \sqrt{1-p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_1 = \sqrt{p/3}\begin{bmatrix}
0 & 1 \\
1 & 0
\end{bmatrix}
.. math::
K_2 = \sqrt{p/3}\begin{bmatrix}
0 & -i \\
i & 0
\end{bmatrix}
.. math::
K_3 = \sqrt{p/3}\begin{bmatrix}
1 & 0 \\
0 & -1
\end{bmatrix}
where :math:`p \in [0, 1]` is the depolarization probability and is equally
divided in the application of all Pauli operations.
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
p (float): Each Pauli gate is applied with probability :math:`\frac{p}{3}`
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "A"
grad_recipe = ([[1, 0, 1], [-1, 0, 0]],)
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
p = params[0]
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1]")
K0 = np.sqrt(1 - p) * np.eye(2)
K1 = np.sqrt(p / 3) * np.array([[0, 1], [1, 0]])
K2 = np.sqrt(p / 3) * np.array([[0, -1j], [1j, 0]])
K3 = np.sqrt(p / 3) * np.array([[1, 0], [0, -1]])
return [K0, K1, K2, K3]
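# Applying the channel to a density matrix directly (a sketch outside any
# PennyLane device):
#   Ks = DepolarizingChannel._kraus_matrices(0.1)
#   rho = np.array([[1, 0], [0, 0]], dtype=complex)
#   rho_out = sum(K @ rho @ K.conj().T for K in Ks)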
class BitFlip(Channel):
r"""BitFlip(p, wires)
Single-qubit bit flip (Pauli :math:`X`) error channel.
This channel is modelled by the following Kraus matrices:
.. math::
K_0 = \sqrt{1-p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_1 = \sqrt{p}\begin{bmatrix}
0 & 1 \\
1 & 0
\end{bmatrix}
where :math:`p \in [0, 1]` is the probability of a bit flip (Pauli :math:`X` error).
**Details:**
* Number of wires: 1
* Number of parameters: 1
Args:
p (float): The probability that a bit flip error occurs.
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "A"
grad_recipe = ([[1, 0, 1], [-1, 0, 0]],)
@property
def num_params(self):
return 1
@classmethod
def _kraus_matrices(cls, *params):
p = params[0]
if not 0.0 <= p <= 1.0:
raise ValueError("p must be between [0,1]")
K0 = np.sqrt(1 - p) * np.eye(2)
K1 = np.sqrt(p) * np.array([[0, 1], [1, 0]])
return [K0, K1]
class ResetError(Channel):
r"""ResetError(p_0, p_1, wires)
Single-qubit Reset error channel.
This channel is modelled by the following Kraus matrices:
.. math::
K_0 = \sqrt{1-p_0-p_1} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
.. math::
K_1 = \sqrt{p_0}\begin{bmatrix}
1 & 0 \\
0 & 0
\end{bmatrix}
.. math::
K_2 = \sqrt{p_0}\begin{bmatrix}
0 & 1 \\
0 & 0
\end{bmatrix}
.. math::
K_3 = \sqrt{p_1}\begin{bmatrix}
0 & 0 \\
1 & 0
\end{bmatrix}
.. math::
K_4 = \sqrt{p_1}\begin{bmatrix}
0 & 0 \\
0 & 1
\end{bmatrix}
where :math:`p_0 \in [0, 1]` is the probability of a reset to 0,
and :math:`p_1 \in [0, 1]` is the probability of a reset to 1 error.
**Details:**
* Number of wires: 1
* Number of parameters: 2
Args:
p_0 (float): The probability that a reset to 0 error occurs.
p_1 (float): The probability that a reset to 1 error occurs.
wires (Sequence[int] or int): the wire the channel acts on
"""
num_wires = 1
grad_method = "F"
@property
def num_params(self):
return 2
@classmethod
def _kraus_matrices(cls, *params):
p_0, p_1 = params[0], params[1]
if not 0.0 <= p_0 <= 1.0:
raise ValueError("p_0 must be between [0,1]")
if not 0.0 <= p_1 <= 1.0:
raise ValueError("p_1 must be between [0,1]")
if not 0.0 <= p_0 + p_1 <= 1.0:
raise ValueError("p_0 + p_1 must be between [0,1]")
K0 = np.sqrt(1 - p_0 - p_1) * np.eye(2)
K1 = np.sqrt(p_0) * np.array([[1, 0], [0, 0]])
K2 = np.sqrt(p_0) * np.array([[0, 1], [0, 0]])
K3 = np.sqrt(p_1) * np.array([[0, 0], [1, 0]])
        K4 = np.sqrt(p_1) * np.array([[0, 0], [0, 1]])
        return [K0, K1, K2, K3, K4]
###################################################################################
## Main sampler
## Depending on the number of MCMC states defined in the first run.
if __name__ == "__main__":
import nonstat_model_noXs.model_sim as utils
import nonstat_model_noXs.generic_samplers as sampler
import nonstat_model_noXs.priors as priors
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pickle import load
from pickle import dump
from scipy.linalg import lapack
# Check whether the 'mpi4py' is installed
test_mpi = os.system("python -c 'from mpi4py import *' &> /dev/null")
if test_mpi != 0:
import sys
sys.exit("mpi4py import is failing, aborting...")
# get rank and size
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
thinning = 10; echo_interval = 20; n_updates = 50001
# Filename for storing the intermediate results
input_file='./nonstat_progress_'+str(rank)+'.pkl'
# Load data input
if rank==0:
with open(input_file, 'rb') as f:
Y = load(f)
cen = load(f)
cen_above = load(f)
initial_values = load(f)
sigma_m = load(f)
prop_sigma = load(f)
iter_current = load(f)
phi_trace = load(f)
tau_sqd_trace = load(f)
theta_c_trace = load(f)
beta_loc0_trace = load(f)
beta_loc1_trace = load(f)
beta_scale_trace = load(f)
beta_shape_trace = load(f)
Z_1t_trace = load(f)
R_1t_trace = load(f)
Y_onetime = load(f)
X_onetime = load(f)
X_s_onetime = load(f)
R_onetime = load(f)
Z_onetime = load(f)
f.close()
else:
with open(input_file, 'rb') as f:
Y = load(f)
cen = load(f)
cen_above = load(f)
initial_values = load(f)
sigma_m = load(f)
iter_current = load(f)
Z_1t_trace = load(f)
R_1t_trace = load(f)
Y_onetime = load(f)
X_onetime = load(f)
X_s_onetime = load(f)
R_onetime = load(f)
Z_onetime = load(f)
f.close()
# Bookkeeping
n_s = Y.shape[0]
n_t = Y.shape[1]
if n_t != size:
import sys
sys.exit("Make sure the number of cpus (N) = number of time replicates (n_t), i.e.\n srun -N python nonstat_sampler.py")
wh_to_plot_Xs = n_s*np.array([0.25,0.5,0.75])
wh_to_plot_Xs = wh_to_plot_Xs.astype(int)
# Filename for storing the intermediate results
filename='./nonstat_progress_'+str(rank)+'.pkl'
# Generate multiple independent random streams
random_generator = np.random.RandomState()
# Constants to control adaptation of the Metropolis sampler
c_0 = 10
c_1 = 0.8
offset = 3 # the iteration offset
r_opt_1d = .41
r_opt_2d = .35
eps = 1e-6 # a small number
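    # A common way such constants drive proposal-scale adaptation (an
    # assumption about the helper modules, shown only for orientation):
    #   gamma1 = c_0 / (iter + offset) ** c_1
    #   sigma_m *= np.exp(gamma1 * (acceptance_rate - r_opt_1d))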
    # Hyper parameters for the priors of the mixing distribution parameters
    # and the GEV parameters
hyper_params_phi = np.array([0.5,0.7])
hyper_params_tau_sqd = np.array([0.1,0.1])
hyper_params_theta_c = np.array([0, 20])
hyper_params_theta_gev = 25
# hyper_params_range = np.array([0.5,1.5]) # in case where roughness is not updated
# Load latest values
initial_values = comm.bcast(initial_values,root=0) # Latest values are mostly in initial_values
phi = initial_values['phi']
gamma = initial_values['gamma']
tau_sqd = initial_values['tau_sqd']
prob_below = initial_values['prob_below']
prob_above = initial_values['prob_above']
Dist = initial_values['Dist']
theta_c = initial_values['theta_c']
Design_mat = initial_values['Design_mat']
beta_loc0 = initial_values['beta_loc0']
beta_loc1 = initial_values['beta_loc1']
Time = initial_values['Time']
beta_scale = initial_values['beta_scale']
beta_shape = initial_values['beta_shape']
n_covariates = len(beta_loc0)
if rank == 0:
X = np.empty((n_s,n_t))
X_s = np.empty((n_s,n_t))
Z = np.empty((n_s,n_t))
R = np.empty((n_t,))
    # Correlation matrix; its inverse is obtained via a Cholesky solve
    # (dposv), with the eigendecomposition kept commented out for reference
tmp_vec = np.ones(n_s)
Cor = utils.corr_fn(Dist, theta_c)
# eig_Cor = np.linalg.eigh(Cor) #For symmetric matrices
# V = eig_Cor[1]
# d = eig_Cor[0]
cholesky_inv = lapack.dposv(Cor,tmp_vec)
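    # scipy's lapack.dposv solves Cor @ x = tmp_vec via a Cholesky factorization
    # and returns the tuple (cholesky_factor, solution, info), which is passed
    # to the Z updates below as a single object.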
# For current values of phi and gamma, obtain grids of survival probs and densities
grid = utils.density_interp_grid(phi, gamma, grid_size=800)
xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
thresh_X = utils.qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
thresh_X_above = utils.qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
# Marginal GEV parameters: per location x time
loc0 = Design_mat @beta_loc0
loc1 = Design_mat @beta_loc1
Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
Loc = Loc.reshape((n_s,n_t),order='F')
scale = Design_mat @beta_scale
Scale = np.tile(scale, n_t)
Scale = Scale.reshape((n_s,n_t),order='F')
Design_mat1 = np.c_[np.repeat(1,n_s), np.log(Design_mat[:,1])]
shape = Design_mat1 @beta_shape
Shape = np.tile(shape, n_t)
Shape = Shape.reshape((n_s,n_t),order='F')
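    # Net effect of the tiling above: Loc[s, t] = loc0[s] + loc1[s] * Time[t],
    # while Scale[s, t] = scale[s] and Shape[s, t] = shape[s] are constant in
    # time at each location.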
# Initial trace objects
Z_1t_accept = np.zeros(n_s)
R_accept = 0
if rank == 0:
print("Number of time replicates = %d"%size)
theta_c_trace_within_thinning = np.empty((2,thinning)); theta_c_trace_within_thinning[:] = np.nan
beta_loc0_trace_within_thinning = np.empty((n_covariates,thinning)); beta_loc0_trace_within_thinning[:] = np.nan
beta_loc1_trace_within_thinning = np.empty((n_covariates,thinning)); beta_loc1_trace_within_thinning[:] = np.nan
beta_scale_trace_within_thinning = np.empty((n_covariates,thinning)); beta_scale_trace_within_thinning[:] = np.nan
beta_shape_trace_within_thinning = np.empty((n_covariates,thinning)); beta_shape_trace_within_thinning[:] = np.nan
phi_accept = 0
tau_sqd_accept = 0
theta_c_accept = 0
beta_loc0_accept = 0
beta_loc1_accept = 0
beta_scale_accept = 0
beta_shape_accept = 0
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# --------------------------- Start Metropolis Updates ------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
for iter in np.arange(iter_current+1,n_updates):
# Update X
# print(str(rank)+" "+str(iter)+" Gathered? "+str(np.where(~cen)))
X_onetime = utils.X_update(Y_onetime, cen[:,rank], cen_above[:,rank], xp, surv_p, tau_sqd, phi, gamma, Loc[:,rank], Scale[:,rank], Shape[:,rank])
# Update Z
tmp = utils.Z_update_onetime(Y_onetime, X_onetime, R_onetime, Z_onetime, cen[:,rank], cen_above[:,rank], prob_below, prob_above,
tau_sqd, phi, gamma, Loc[:,rank], Scale[:,rank], Shape[:,rank], xp, surv_p, den_p,
thresh_X, thresh_X_above, Cor, cholesky_inv, sigma_m['Z_onetime'], random_generator)
Z_1t_accept = Z_1t_accept + tmp
# Update R
Metr_R = sampler.static_metr(Y_onetime, R_onetime, utils.Rt_update_mixture_me_likelihood,
priors.R_prior, gamma, 2,
random_generator,
np.nan, sigma_m['R_1t'], False,
X_onetime, Z_onetime, cen[:,rank], cen_above[:,rank],
prob_below, prob_above, Loc[:,rank], Scale[:,rank], Shape[:,rank], tau_sqd, phi, gamma,
xp, surv_p, den_p, thresh_X, thresh_X_above)
R_accept = R_accept + Metr_R['acc_prob']
R_onetime = Metr_R['trace'][0,1]
X_s_onetime = (R_onetime**phi)*utils.norm_to_Pareto(Z_onetime)
# *** Gather items ***
X_s_recv = comm.gather(X_s_onetime,root=0)
X_recv = comm.gather(X_onetime, root=0)
Z_recv = comm.gather(Z_onetime, root=0)
R_recv = comm.gather(R_onetime, root=0)
if rank==0:
X_s[:] = np.vstack(X_s_recv).T
X[:] = np.vstack(X_recv).T
# Check whether X is negative
if np.any(X[~cen & ~cen_above]<0):
sys.exit("X value abnormalty "+str(phi)+" "+str(tau_sqd))
Z[:] = np.vstack(Z_recv).T
R[:] = R_recv
index_within = (iter-1)%thinning
# print('beta_shape_accept=',beta_shape_accept, ', iter=', iter)
# Update phi
Metr_phi = sampler.static_metr(Y, phi, utils.phi_update_mixture_me_likelihood, priors.interval_unif,
hyper_params_phi, 2,
random_generator,
np.nan, sigma_m['phi'], False,
R, Z, cen, cen_above,
prob_below, prob_above, Loc, Scale, Shape, tau_sqd, gamma)
phi_accept = phi_accept + Metr_phi['acc_prob']
phi = Metr_phi['trace'][0,1]
# Update gamma (TBD)
#
grid = utils.density_interp_grid(phi, gamma, grid_size=800)
xp = grid[0]; den_p = grid[1]; surv_p = grid[2]
X_s = (R**phi)*utils.norm_to_Pareto(Z)
# Update tau_sqd
Metr_tau_sqd = sampler.static_metr(Y, tau_sqd, utils.tau_update_mixture_me_likelihood, priors.invGamma_prior,
hyper_params_tau_sqd, 2,
random_generator,
np.nan, sigma_m['tau_sqd'], False,
X_s, cen, cen_above,
prob_below, prob_above, Loc, Scale, Shape,
phi, gamma, xp, surv_p, den_p)
tau_sqd_accept = tau_sqd_accept + Metr_tau_sqd['acc_prob']
tau_sqd = Metr_tau_sqd['trace'][0,1]
thresh_X = utils.qRW_me_interp(prob_below, xp, surv_p, tau_sqd, phi, gamma)
thresh_X_above = utils.qRW_me_interp(prob_above, xp, surv_p, tau_sqd, phi, gamma)
# Update theta_c
Metr_theta_c = sampler.static_metr(Z, theta_c, utils.theta_c_update_mixture_me_likelihood,
priors.interval_unif_multi, hyper_params_theta_c, 2,
random_generator,
prop_sigma['theta_c'], sigma_m['theta_c'], False,
Dist)
theta_c_accept = theta_c_accept + Metr_theta_c['acc_prob']
theta_c = Metr_theta_c['trace'][:,1]
theta_c_trace_within_thinning[:,index_within] = theta_c
if Metr_theta_c['acc_prob']>0:
Cor = utils.corr_fn(Dist, theta_c)
# eig_Cor = np.linalg.eigh(Cor) #For symmetric matrices
# V = eig_Cor[1]
# d = eig_Cor[0]
cholesky_inv = lapack.dposv(Cor,tmp_vec)
# Update beta_loc0
Metr_beta_loc0 = sampler.static_metr(Design_mat, beta_loc0, utils.loc0_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_loc0'], sigma_m['beta_loc0'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, loc1, Scale, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_loc0_accept = beta_loc0_accept + Metr_beta_loc0['acc_prob']
beta_loc0 = Metr_beta_loc0['trace'][:,1]
beta_loc0_trace_within_thinning[:,index_within] = beta_loc0
loc0 = Design_mat @beta_loc0
# Update beta_loc1
Metr_beta_loc1 = sampler.static_metr(Design_mat, beta_loc1, utils.loc1_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_loc1'], sigma_m['beta_loc1'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, loc0, Scale, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_loc1_accept = beta_loc1_accept + Metr_beta_loc1['acc_prob']
beta_loc1 = Metr_beta_loc1['trace'][:,1]
beta_loc1_trace_within_thinning[:,index_within] = beta_loc1
loc1 = Design_mat @beta_loc1
Loc = np.tile(loc0, n_t) + np.tile(loc1, n_t)*np.repeat(Time,n_s)
Loc = Loc.reshape((n_s,n_t),order='F')
# Update beta_scale
Metr_beta_scale = sampler.static_metr(Design_mat, beta_scale, utils.scale_gev_update_mixture_me_likelihood,
priors.unif_prior, hyper_params_theta_gev, 2,
random_generator,
prop_sigma['beta_scale'], sigma_m['beta_scale'], False,
Y, X_s, cen, cen_above, prob_below, prob_above,
tau_sqd, phi, gamma, Loc, Shape, Time, xp, surv_p, den_p,
thresh_X, thresh_X_above)
beta_scale_accept = beta_scale_accept + Metr_beta_scale['acc_prob']
beta_scale = Metr_beta_scale['trace'][:,1]
beta_scale_trace_within_thinning[:,index_within] = beta_scale
scale = Design_mat @beta_scale
Scale = np.tile(scale, n_t)
Scale = Scale.reshape((n_s,n_t),order='F')
# # Update beta_shape
# Metr_beta_shape = sampler.static_metr(Design_mat, beta_shape, utils.shape_gev_update_mixture_me_likelihood,
# priors.unif_prior, hyper_params_theta_gev, 2,
# random_generator,
# prop_sigma['beta_shape'], sigma_m['beta_shape'], False,
# Y, X_s, cen, cen_above, prob_below, prob_above,
# tau_sqd, phi, gamma, Loc, Scale, Time, xp, surv_p, den_p,
# thresh_X, thresh_X_above)
# beta_shape_accept = beta_shape_accept + Metr_beta_shape['acc_prob']
# beta_shape = Metr_beta_shape['trace'][:,1]
# beta_shape_trace_within_thinning[:,index_within] = beta_shape
# shape = Design_mat1 @beta_shape
# Shape = np.tile(shape, n_t)
# Shape = Shape.reshape((n_s,n_t),order='F')
# cen[:] = utils.which_censored(Y, Loc, Scale, Shape, prob_below)
# cen_above[:] = ~utils.which_censored(Y, Loc, Scale, Shape, prob_above)
# *** Broadcast items ***
phi = comm.bcast(phi,root=0)
xp = comm.bcast(xp,root=0)
den_p = comm.bcast(den_p,root=0)
surv_p = comm.bcast(surv_p,root=0)
tau_sqd = comm.bcast(tau_sqd,root=0)
thresh_X = comm.bcast(thresh_X,root=0)
thresh_X_above = comm.bcast(thresh_X_above,root=0)
theta_c = comm.bcast(theta_c,root=0)
# V = comm.bcast(V,root=0)
# d = comm.bcast(d,root=0)
Cor = comm.bcast(Cor,root=0)
cholesky_inv = comm.bcast(cholesky_inv,root=0)
Loc = comm.bcast(Loc,root=0)
Scale = comm.bcast(Scale,root=0)
Shape = comm.bcast(Shape,root=0)
# cen = comm.bcast(cen,root=0)
# cen_above = comm.bcast(cen_above,root=0)
# ----------------------------------------------------------------------------------------
# --------------------------- Summarize every 'thinning' steps ---------------------------
# ----------------------------------------------------------------------------------------
if (iter % thinning) == 0:
            index = int(iter/thinning)
# Fill in trace objects
Z_1t_trace[:,index] = Z_onetime
R_1t_trace[index] = R_onetime
if rank == 0:
phi_trace[index] = phi
tau_sqd_trace[index] = tau_sqd
theta_c_trace[:,index] = theta_c
beta_loc0_trace[:,index] = beta_loc0
beta_loc1_trace[:,index] = beta_loc1
beta_scale_trace[:,index] = beta_scale
beta_shape_trace[:,index] = beta_shape
# Adapt via Shaby and Wells (2010)
gamma2 = 1 / (index + offset)**(c_1)
gamma1 = c_0*gamma2
sigma_m['Z_onetime'] = np.exp(np.log(sigma_m['Z_onetime']) + gamma1*(Z_1t_accept/thinning - r_opt_1d))
Z_1t_accept[:] = 0
            # same adaptive update as for Z_onetime above
            sigma_m['R_1t'] = np.exp(np.log(sigma_m['R_1t']) + gamma1*(R_accept/thinning - r_opt_1d))
            R_accept = 0
#!/usr/bin/env python
import numpy as np
import numpy.matlib
import pickle
import matplotlib.pyplot as plt
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.test_data import generate_symmetric_sinusoids
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.nn_training_param import NN_training_param
from gym_collision_avoidance.envs.policies.CADRL.scripts.neural_networks.multiagent_network_param import Multiagent_network_param
import os
import time
import copy
# fully connected neural network with weight sharing for
# capturing symmetry in multiagent systems
class Neural_network_regr_multi:
def __init__(self, nn_training_param, plotting_func=None, X_vis=None):
self.set_training_param(nn_training_param)
self.plotting_func = plotting_func
self.X_vis = X_vis
# layer_info = [[num_types, nodes_per_type], [num_types, nodes_per_type]]
def initialize_network_param(self, layers_info, layers_type, multiagent_net_param=None):
self.id_num = -1
#print self.layers_dim
if multiagent_net_param is not None:
self.multiagent_net_param = multiagent_net_param
else:
self.multiagent_net_param = Multiagent_network_param(layers_info, layers_type)
# populate other fields from layers_info
self.num_layers = len(layers_info)
self.num_hidden_layers = self.num_layers - 2
self.layers_dim = []
for i in range(len(layers_info)):
self.layers_dim.append(int(np.sum(layers_info[i][:,0]*layers_info[i][:,1])))
print(self.layers_dim)
self.input_dim = self.layers_dim[0]
self.output_dim = self.layers_dim[-1]
self.initialize_nn_weights()
self.avg_vec = np.zeros((self.input_dim,))
self.std_vec = np.ones((self.input_dim,))
self.output_dim_weights = np.ones((self.output_dim,))
self.output_avg_vec = np.zeros((self.output_dim,))
self.output_std_vec = np.zeros((self.output_dim,))
# self.print_nn()
def save_neural_network(self, filename):
# save weights
nn_list = []
nn_list.append(self.W)
nn_list.append(self.b)
# save avg_vec and std_vec
nn_list.append(self.avg_vec)
nn_list.append(self.std_vec)
nn_list.append(self.output_avg_vec)
nn_list.append(self.output_std_vec)
nn_list.append(self.multiagent_net_param.layers_info)
nn_list.append(self.multiagent_net_param.layers_type)
nn_list.append(self.multiagent_net_param.symmetric_indices)
nn_list.append(self.multiagent_net_param.symmetric_indices_b)
nn_list.append(self.id_num)
pickle.dump(nn_list, open(filename, "wb"))
return
def load_neural_network(self, filename):
with open(filename, 'rb') as fo:
try:
nn_list = pickle.load(fo)
except UnicodeDecodeError: #python 3.x
fo.seek(0)
nn_list = pickle.load(fo, encoding='latin1')
self.W = nn_list[0]
self.b = nn_list[1]
self.avg_vec = nn_list[2]
self.std_vec = nn_list[3]
self.output_avg_vec = nn_list[4]
self.output_std_vec = nn_list[5]
# multiagent_net_param
layers_info = nn_list[6]
layers_type = nn_list[7]
symmetric_indices = nn_list[8]
symmetric_indices_b = nn_list[9]
self.multiagent_net_param = Multiagent_network_param(layers_info, layers_type, \
symmetric_indices=symmetric_indices, symmetric_indices_b=symmetric_indices_b)
self.id_num = nn_list[10]
# retrieve network params
self.num_hidden_layers = len(self.W) - 1
self.input_dim = self.W[0].shape[0]
self.output_dim = self.W[-1].shape[1]
#print 'input_dim, output_dim', input_dim, output_dim
#print 'hidden_layers_size' , hidden_layers_size
self.layers_dim = []
for i in range(len(layers_info)):
self.layers_dim.append(int(np.sum(layers_info[i][:,0]*layers_info[i][:,1])))
#print self.layers_dim
self.num_layers = self.num_hidden_layers + 2
# self.print_nn()
self.output_dim_weights = np.ones((self.output_dim,))
self.load_symBlocks()
return
def set_plotting_func(self, func, X_vis):
self.plotting_func = func
self.X_vis = X_vis
def set_training_stepsize(self, sgd_stepsize_mode='fixed_decay', sgd_step_c=0.1, sgd_step_epsilon=0.1):
self.nn_training_param.sgd_stepsize_mode = sgd_stepsize_mode
self.nn_training_param.sgd_step_c = sgd_step_c
        self.nn_training_param.sgd_step_epsilon = sgd_step_epsilon
if sgd_stepsize_mode == 'momentum' or sgd_stepsize_mode == 'sum_of_grad' \
or sgd_stepsize_mode == 'rmsprop':
self.initialize_sum_of_grad()
def print_nn(self):
print('---------------------------------------------------------')
print('~~ neural_network_regr structure ~~')
print('id', self.id_num)
print('num_hidden_layers: %d' % self.num_hidden_layers)
print('layers_dim', self.layers_dim)
print('~~ neural_network_regr training param ~~')
print('sgd_step_size: %f' % self.nn_training_param.sgd_step_size)
print('reg_lambda: %f' % self.nn_training_param.reg_lambda)
print('nb_iter: %d' % self.nn_training_param.nb_iter)
print('sgd_batch_size: %d' % self.nn_training_param.sgd_batch_size)
print('w_scale: %f' % self.nn_training_param.w_scale)
print('avg_vec', self.avg_vec)
print('std_vec', self.std_vec)
print('out_avg_vec', self.output_avg_vec)
print('output_std_vec', self.output_std_vec)
print('---------------------------------------------------------')
def load_symBlocks(self):
self.sym_W = list()
self.sym_dW = list()
self.sym_b = list()
self.sym_db = list()
# ith layer, jth symmetry block
for i in range(self.num_hidden_layers+1):
sym_W_layer = list()
sym_dW_layer = list()
sym_b_layer = list()
sym_db_layer = list()
# W, dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
a = self.multiagent_net_param.symmetric_indices[i][j][0, 0]
b = self.multiagent_net_param.symmetric_indices[i][j][0, 1]
c = self.multiagent_net_param.symmetric_indices[i][j][0, 2]
d = self.multiagent_net_param.symmetric_indices[i][j][0, 3]
sym_W_layer.append(self.W[i][a:b,c:d].copy())
sym_dW_layer.append(np.zeros((b-a, d-c)))
# b, db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
a = self.multiagent_net_param.symmetric_indices_b[i][k][0, 0]
b = self.multiagent_net_param.symmetric_indices_b[i][k][0, 1]
sym_b_layer.append(self.b[i][0,a:b].copy())
sym_db_layer.append(np.zeros((b-a, )))
self.sym_W.append(sym_W_layer)
self.sym_dW.append(sym_dW_layer)
self.sym_b.append(sym_b_layer)
self.sym_db.append(sym_db_layer)
def initialize_nn_weights(self):
# compute symmetric indices blocks
self.sym_W = list()
self.sym_dW = list()
self.sym_b = list()
self.sym_db = list()
# ith layer, jth symmetry block
for i in range(self.num_hidden_layers+1):
sym_W_layer = list()
sym_dW_layer = list()
sym_b_layer = list()
sym_db_layer = list()
# W, dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
num_rows = self.multiagent_net_param.symmetric_indices[i][j][0,1] - \
self.multiagent_net_param.symmetric_indices[i][j][0,0]
num_cols = self.multiagent_net_param.symmetric_indices[i][j][0,3] - \
self.multiagent_net_param.symmetric_indices[i][j][0,2]
sym_W_layer.append(self.nn_training_param.w_scale * \
(np.random.rand(num_rows, num_cols)-0.5))
sym_dW_layer.append(np.zeros((num_rows, num_cols)))
# b, db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
num_cols = self.multiagent_net_param.symmetric_indices_b[i][k][0,1] - \
self.multiagent_net_param.symmetric_indices_b[i][k][0,0]
sym_b_layer.append(np.zeros((1, num_cols)))
sym_db_layer.append(np.zeros((1, num_cols)))
self.sym_W.append(sym_W_layer)
self.sym_dW.append(sym_dW_layer)
self.sym_b.append(sym_b_layer)
self.sym_db.append(sym_db_layer)
# neural network parameters
self.W = list()
self.dW = list()
self.b = list()
self.db = list()
for i in range(self.num_hidden_layers+1):
if self.multiagent_net_param.layers_type[i] == 'conn':
layer_input_dim = self.layers_dim[i]
layer_output_dim = self.layers_dim[i+1]
fan_in_weight = np.sqrt(2.0/layer_input_dim)
# print fan_in_weight
self.W.append(np.zeros((layer_input_dim, layer_output_dim)))
self.dW.append(np.zeros((layer_input_dim, layer_output_dim)))
self.b.append(np.zeros((1, layer_output_dim)))
self.db.append(np.zeros((1, layer_output_dim)))
elif self.multiagent_net_param.layers_type[i] == 'max':
self.W.append([])
self.dW.append([])
self.b.append([])
self.db.append([])
self.symIndices_2_mat()
def symIndices_2_mat(self):
for i in range(self.num_hidden_layers+1):
# W
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
for jj in range(self.multiagent_net_param.symmetric_indices[i][j].shape[0]):
a = self.multiagent_net_param.symmetric_indices[i][j][jj,0]
b = self.multiagent_net_param.symmetric_indices[i][j][jj,1]
c = self.multiagent_net_param.symmetric_indices[i][j][jj,2]
d = self.multiagent_net_param.symmetric_indices[i][j][jj,3]
# print '~~~', i, j
# print a,b,c,d
# # print self.sym_W[i][j].shape
# print self.W[i].shape
# print i, self.W[i].shape, a,b,c,d
# print self.W[i][a:b,c:d].shape, self.sym_W[i][j].shape
self.W[i][a:b,c:d] = self.sym_W[i][j]
# b
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
for kk in range(self.multiagent_net_param.symmetric_indices_b[i][k].shape[0]):
a = self.multiagent_net_param.symmetric_indices_b[i][k][kk,0]
b = self.multiagent_net_param.symmetric_indices_b[i][k][kk,1]
# print 'i,k,a,b', i, k, a, b
# print 'self.b[i].shape', self.b[i].shape
# print 'self.sym_b[i][k].shape', self.sym_b[i][k].shape
# print self.b[i][a:b].shape
self.b[i][0,a:b] = self.sym_b[i][k]
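    # Weight sharing in a nutshell: every (a:b, c:d) block listed under
    # symmetric_indices[i][j] is a copy of the single parameter matrix
    # sym_W[i][j]. symIndices_2_mat scatters the shared parameters into the
    # dense W/b, and dW_2_symIndices below gathers gradients back by summing
    # over all tied blocks.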
def dW_2_symIndices(self):
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sym_dW[i][j][:] = 0
for jj in range(self.multiagent_net_param.symmetric_indices[i][j].shape[0]):
a = self.multiagent_net_param.symmetric_indices[i][j][jj,0]
b = self.multiagent_net_param.symmetric_indices[i][j][jj,1]
c = self.multiagent_net_param.symmetric_indices[i][j][jj,2]
d = self.multiagent_net_param.symmetric_indices[i][j][jj,3]
self.sym_dW[i][j] += self.dW[i][a:b,c:d]
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sym_db[i][k][:] = 0
for kk in range(self.multiagent_net_param.symmetric_indices_b[i][k].shape[0]):
a = self.multiagent_net_param.symmetric_indices_b[i][k][kk,0]
b = self.multiagent_net_param.symmetric_indices_b[i][k][kk,1]
self.sym_db[i][k] += self.db[i][a:b]
def update_symIndices(self, param, step_size, iteration):
# method 1: fixed_decay
if param.sgd_stepsize_mode == 'fixed_decay':
# update step size (e.g. decrease every ... iterations)
if (iteration % 200) == 0:
step_size = step_size / 1.5
# print 'fixed decay, step size', step_size
            # gradient update
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sym_W[i][j] -= step_size * self.sym_dW[i][j]
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sym_b[i][k] -= step_size * self.sym_db[i][k]
# method 2: sqrt_decay
elif param.sgd_stepsize_mode == 'sqrt_decay':
c = param.sgd_step_c
epsilon = param.sgd_step_epsilon
step_size = c / (np.sqrt(iteration) + epsilon)
            # gradient update
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sym_W[i][j] -= step_size * self.sym_dW[i][j]
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sym_b[i][k] -= step_size * self.sym_db[i][k]
# method 3: sum of gradients
elif param.sgd_stepsize_mode == 'sum_of_grad':
c = param.sgd_step_c
epsilon = param.sgd_step_epsilon
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sum_sym_dW[i][j] += np.square(self.sym_dW[i][j])
self.sym_W[i][j] -= c / (np.sqrt(self.sum_sym_dW[i][j]) + epsilon) \
* self.sym_dW[i][j]
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sum_sym_db[i][k] += np.square(self.sym_db[i][k])
self.sym_b[i][k] -= c / (np.sqrt(self.sum_sym_db[i][k]) + epsilon) \
* self.sym_db[i][k]
# just for debugging
step_size = np.amax(c / (np.sqrt(self.sum_sym_dW[0][0]) + epsilon))
# method 4: momentum
elif param.sgd_stepsize_mode == 'momentum':
if step_size > 0.01:
alpha = 0.5
else:
alpha = 0.99
if (iteration % 200) == 0:
step_size = step_size / 1.5
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sum_sym_dW[i][j] = alpha * self.sum_sym_dW[i][j] \
- step_size * self.sym_dW[i][j]
self.sym_W[i][j] += self.sum_sym_dW[i][j]
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sum_sym_db[i][k] = alpha * self.sum_sym_db[i][k] \
- step_size * self.sym_db[i][k]
self.sym_b[i][k] += self.sum_sym_db[i][k]
# method 5: rmsprop
elif param.sgd_stepsize_mode == 'rmsprop':
alpha = 0.9
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sum_sym_dW[i][j] = alpha * self.sum_sym_dW[i][j] + \
(1-alpha) * np.square(self.sym_dW[i][j])
self.sym_W[i][j] -= 0.01 * step_size * self.sym_dW[i][j] / \
(np.sqrt(self.sum_sym_dW[i][j])+0.001)
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sum_sym_db[i][k] = alpha * self.sum_sym_db[i][k] + \
(1-alpha) * np.square(self.sym_db[i][k])
self.sym_b[i][k] -= 0.01 * step_size * self.sym_db[i][k] / \
(np.sqrt(self.sum_sym_db[i][k])+0.001)
else:
            raise ValueError('unknown neural network training type')
return step_size
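    # Schedules implemented above: 'fixed_decay' divides the step by 1.5 every
    # 200 iterations, 'sqrt_decay' uses c/(sqrt(t)+eps), 'sum_of_grad' is an
    # AdaGrad-style accumulator, while 'momentum' and 'rmsprop' keep running
    # first/second-moment accumulators in sum_sym_dW / sum_sym_db.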
def initialize_sum_of_grad(self):
# for sum of grad
self.sum_sym_dW = copy.deepcopy(self.sym_dW)
self.sum_sym_db = copy.deepcopy(self.sym_db)
for i in range(self.num_hidden_layers+1):
# update sym_dW
for j in range(len(self.multiagent_net_param.symmetric_indices[i])):
self.sum_sym_dW[i][j][:] = 0
# update sym_db
for k in range(len(self.multiagent_net_param.symmetric_indices_b[i])):
self.sum_sym_db[i][k][:] = 0
def initialize_derivatives(self):
self.dW = list()
self.db = list()
for i in range(self.num_hidden_layers+1):
if self.multiagent_net_param.layers_type[i] == 'conn':
layer_input_dim = self.layers_dim[i]
layer_output_dim = self.layers_dim[i+1]
self.dW.append(np.zeros((layer_input_dim, layer_output_dim)))
self.db.append(np.zeros((1, layer_output_dim)))
elif self.multiagent_net_param.layers_type[i] == 'max':
self.dW.append([])
self.db.append([])
def set_training_param(self, nn_training_param):
self.nn_training_param = nn_training_param
# compute shifts to the x-y variables
def compute_offset(self, X, Y, input_output_ranges):
if input_output_ranges is None:
self.avg_vec = np.mean(X, axis = 0)
self.std_vec = np.std(X, axis = 0)
self.output_avg_vec = np.mean(Y, axis = 0)
self.output_std_vec = np.std(Y, axis = 0)
else:
self.avg_vec = input_output_ranges[0]
self.std_vec = input_output_ranges[1]
self.output_avg_vec = input_output_ranges[2]
self.output_std_vec = input_output_ranges[3]
# debug
# print 'computing offset'
# print 'avg_vec', self.avg_vec
# print 'std_vec', self.std_vec
# print 'out_avg_vec', self.output_avg_vec
# print 'output_std_vec', self.output_std_vec
# if input_output_ranges is not None:
# avg_vec = input_output_ranges[0]
# std_vec = input_output_ranges[1]
# output_avg_vec = input_output_ranges[2]
# output_std_vec = input_output_ranges[3]
# print 'avg_vec', self.avg_vec - avg_vec
# print 'std_vec', self.std_vec - std_vec
# print 'out_avg_vec', self.output_avg_vec - output_avg_vec
# print 'output_std_vec', self.output_std_vec - output_std_vec
# raw_input()
# scale X (xRaw_2_x)
def xRaw_2_x(self, X_raw):
if len(X_raw.shape) > 1:
nb_examples = X_raw.shape[0]
else:
nb_examples = 1
X = (X_raw - np.matlib.repmat(self.avg_vec, nb_examples, 1)) \
/ np.matlib.repmat(self.std_vec, nb_examples, 1)
return X
# scale Y (yRaw_2_y)
def yRaw_2_y(self, Y_raw):
if len(Y_raw.shape) > 1:
nb_examples = Y_raw.shape[0]
else:
nb_examples = 1
Y = (Y_raw - np.matlib.repmat(self.output_avg_vec, nb_examples, 1)) \
/ np.matlib.repmat(self.output_std_vec, nb_examples, 1)
return Y
# scale Y (y_2_yraw)
def y_2_yRaw(self, Y):
if len(Y.shape) > 1:
nb_examples = Y.shape[0]
else:
nb_examples = 1
Y_raw = Y * np.matlib.repmat(self.output_std_vec, nb_examples, 1) \
+ np.matlib.repmat(self.output_avg_vec, nb_examples, 1)
return Y_raw
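    # The three scaling helpers implement z-scoring and its inverse, so
    # y_2_yRaw(yRaw_2_y(Y_raw)) recovers Y_raw up to floating-point error.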
# back propagation
def backprop(self, X, Y, step_size, iteration):
if_nn_nav = False
# if X.shape[1] >= 7 + 8 and (X.shape[1] - 7 ) % 8 == 0:
# if_nn_nav = True
# num_other_agents = (X.shape[1] - 7 ) / 8
# agent_off_indices = []
# for i in range(1, num_other_agents+1):
# inds = np.where(X[:,7+(8*i)-1] < 1e-5)[0]
# agent_off_indices.append(inds)
# assert(np.all(X[inds, 7+8*(i-1):7+(8*i)]==0))
# print inds
# raw_input()
# training param
param = self.nn_training_param
        # for forward/backward propagation
nb_layers = self.num_hidden_layers + 1
forward_prop_o = []
backward_prop_xi = []
for i in range(nb_layers):
forward_prop_o.append(np.empty([1,1]))
backward_prop_xi.append(np.empty([1,1]))
batch_size = X.shape[0]
out = X.copy()
y_out = Y.copy()
# one step back prop
for layer in range(nb_layers-1):
# RelU
# print 'layer', layer
# print 'self.W[layer].shape', self.W[layer].shape
# print 'out', out.shape
if self.multiagent_net_param.layers_type[layer] == 'conn':
tmp = np.dot(out, self.W[layer]) \
+ np.matlib.repmat(self.b[layer], batch_size, 1)
forward_prop_o[layer] = tmp * (tmp>0)
elif self.multiagent_net_param.layers_type[layer] == 'max':
num_pts = out.shape[0]
next_layer_size = np.sum(self.multiagent_net_param.layers_info[layer][:,1])
forward_prop_o[layer] = np.zeros((num_pts, next_layer_size))
cur_s_ind = 0
next_s_ind = 0
for ii in range(self.multiagent_net_param.layers_info[layer].shape[0]):
num_agents = self.multiagent_net_param.layers_info[layer][ii,0]
stride = self.multiagent_net_param.layers_info[layer][ii,1]
cur_e_ind = cur_s_ind + num_agents * stride
next_e_ind = next_s_ind + stride
# print '---'
# print out[:,cur_s_ind:cur_e_ind].shape
# # print block_form.shape
# print 'num_pts,', num_pts, 'stride', stride
# print forward_prop_o[layer][:,next_s_ind:next_e_ind].shape
block_form = np.reshape(out[:,cur_s_ind:cur_e_ind], (num_pts,-1,stride))
forward_prop_o[layer][:,next_s_ind:next_e_ind] = \
np.max(block_form, axis=1)
cur_s_ind = cur_e_ind
next_s_ind = next_e_ind
# print 'layer', layer
# print 'out', out
# print 'forward_prop_o[layer]', forward_prop_o[layer]
# raw_input()
# for more than one agent
# if if_nn_nav == True and self.multiagent_net_param.layers_info[layer+1].shape[0] == 2:
# stride = self.multiagent_net_param.layers_info[layer+1][1,1]
# start_ind = self.multiagent_net_param.layers_info[layer+1][0,1]
# for tt in range(num_other_agents):
# forward_prop_o[layer][agent_off_indices[tt],start_ind:start_ind+stride] = 0
# start_ind += stride
# raw_input()
# dropout
# p = 0.80
# dropout_mask = (np.random.rand(*forward_prop_o[layer].shape) < p) / p
# forward_prop_o[layer] *= dropout_mask
out = forward_prop_o[layer].copy()
# last layer, softmax
# print 'y_out.shape', y_out.shape
# print 'self.output_dim_weights', self.output_dim_weights
scores = y_out - \
(np.dot(forward_prop_o[-2], self.W[nb_layers-1]) + \
np.matlib.repmat(self.b[nb_layers-1], batch_size, 1))
scores = - np.matlib.repmat(self.output_dim_weights, batch_size, 1) * scores
# print scores.shape
# print expscores.shape
# print expscores.sum(axis=1).shape
# print np.matlib.repmat(expscores.sum(axis=1), k,1).transpose().shape
#### backward pass starting from the output, i.e. the class probabilities
ds = np.clip(scores, -1, 1) # partial derivative of loss wrt scores
ds = ds / batch_size
backward_prop_xi[nb_layers-1] = ds.copy()
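        # Clipping the residual gradient to [-1, 1] makes the effective loss
        # Huber-like: quadratic for small errors, linear for large ones.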
for j in range(nb_layers-1, 0, -1):
if self.multiagent_net_param.layers_type[j] == 'conn':
# print 'j', j
# print 'backward_prop_xi[j].shape', backward_prop_xi[j].shape
# print 'forward_prop_o[j-1].transpose().shape', forward_prop_o[j-1].transpose().shape
# print '(param.reg_lambda * self.W[j]).shape', (param.reg_lambda * self.W[j]).shape
self.dW[j] = np.dot(forward_prop_o[j-1].transpose(), backward_prop_xi[j]) \
+ param.reg_lambda * self.W[j] #/ (self.W[j].shape[0] * self.W[j].shape[1])
# self.dW[j] = np.dot(forward_prop_o[j-1].transpose(), backward_prop_xi[j]) \
# + param.reg_lambda * 0.1 * (self.W[j]>0) / (self.W[j].shape[0] * self.W[j].shape[1])
# self.dW[j] = np.dot(forward_prop_o[j-1].transpose(), backward_prop_xi[j]) \
# + param.reg_lambda * 0.1 * np.sign(self.W[j])
self.db[j] = backward_prop_xi[j].sum(axis=0)
# compute xi for previous layer and threshold at 0 if O is <0 (ReLU gradient update)
                backward_prop_xi[j-1] = np.dot(backward_prop_xi[j], self.W[j].transpose())
backward_prop_xi[j-1] = backward_prop_xi[j-1] * (forward_prop_o[j-1]>0)
elif self.multiagent_net_param.layers_type[j] == 'max':
# compute xi for previous layer for max operator
num_pts = backward_prop_xi[j].shape[0]
prev_layer_size = np.sum(self.multiagent_net_param.layers_info[j][:,0] \
* self.multiagent_net_param.layers_info[j][:,1])
backward_prop_xi[j-1] = np.zeros((num_pts, np.sum(prev_layer_size)))
cur_s_ind = 0
prev_s_ind = 0
for jj in range(self.multiagent_net_param.layers_info[j].shape[0]):
num_agents = self.multiagent_net_param.layers_info[j][jj,0]
stride = self.multiagent_net_param.layers_info[j][jj,1]
cur_e_ind = cur_s_ind + stride
for jjj in range(num_agents):
prev_e_ind = prev_s_ind + stride
# print backward_prop_xi[j-1][:,prev_s_ind:prev_e_ind].shape
# print 'what', cur_s_ind, cur_e_ind, backward_prop_xi[j][:,cur_s_ind:cur_e_ind].shape
# print forward_prop_o[j-1][:,prev_s_ind:prev_e_ind].shape
# print 'how', cur_s_ind, cur_e_ind, forward_prop_o[j][:,cur_s_ind:cur_e_ind].shape
backward_prop_xi[j-1][:,prev_s_ind:prev_e_ind] = \
backward_prop_xi[j][:,cur_s_ind:cur_e_ind] * \
(forward_prop_o[j-1][:,prev_s_ind:prev_e_ind] >= \
(forward_prop_o[j][:,cur_s_ind:cur_e_ind]))
prev_s_ind = prev_e_ind
cur_s_ind = cur_e_ind
# print 'forward_prop_o[j-1]', forward_prop_o[j-1]
# print 'forward_prop_o[j]', forward_prop_o[j]
# print 'backward_prop_xi[j-1]', backward_prop_xi[j-1]
# print 'backward_prop_xi[j]', backward_prop_xi[j]
# raw_input()
self.dW[0] = np.dot(X.transpose(), backward_prop_xi[0]) \
+ param.reg_lambda * self.W[0] #/ (self.W[0].shape[0] * self.W[0].shape[1])
# self.dW[0] = np.dot(X.transpose(), backward_prop_xi[0]) \
# + param.reg_lambda * 0.1 * (self.W[0]>0) / (self.W[0].shape[0] * self.W[0].shape[1])
self.db[0] = backward_prop_xi[0].sum(axis=0)
        # update symmetric_db
self.dW_2_symIndices()
#### subgradient updates
step_size = self.update_symIndices(param, step_size, iteration)
self.symIndices_2_mat()
return step_size
# training from scratch
def train_nn(self, dataset, ERM=0, dataset_test=None, ifPrint=True, input_output_ranges=None):
# unique training id
self.id_num = np.random.randint(1000)
''' process data '''
X = dataset[0]
Y = dataset[1]
nb_examples = X.shape[0]
# normalize dataset
self.compute_offset(X, Y, input_output_ranges)
X = self.xRaw_2_x(X)
Y = self.yRaw_2_y(Y)
# error checking
        try:
            assert not np.any(np.isnan(X))
            assert not np.any(np.isnan(Y))
        except AssertionError:
            print('X', X)
            print('Y', Y)
            raise
param = self.nn_training_param
''' if using sum_of_gradient step_size '''
if param.sgd_stepsize_mode == 'sum_of_grad':
self.initialize_sum_of_grad()
if param.sgd_stepsize_mode == 'momentum':
self.initialize_sum_of_grad()
if param.sgd_stepsize_mode == 'rmsprop':
self.initialize_sum_of_grad()
''' training '''
# start training
step_size = param.sgd_step_size
t_start = time.time()
if ERM == 1:
num_samples = nb_examples
else:
num_samples = param.sgd_batch_size
# main loop
for i in range(param.nb_iter):
if ERM == 1 or param.sgd_batch_size > nb_examples: #if full gradient
batch_examples = np.arange(nb_examples)
batch_size = nb_examples
else: # else SGD with minibatch size
batch_size = param.sgd_batch_size
batch_examples = np.random.permutation(np.arange(nb_examples))[:batch_size]
#### forward pass starting from input
step_size = self.backprop(X[batch_examples,:], Y[batch_examples], step_size, i)
#### print to screen for debugging
if (i % np.ceil(param.nb_iter/100.0)) == 0 and ifPrint:
z_train, z_sq_loss = self.evaluate_network_loss(X, Y)
print('Iter %d, time elapsed: %f, Training disrete error: %f, square loss: %f, step_size_mode: %s, step size=%f' % \
(i, time.time()-t_start, z_train, z_sq_loss, param.sgd_stepsize_mode, step_size))
if self.plotting_func is not None and self.X_vis is not None:
title_string = 'iter %d' % i
figure_name = 'training'
Y_vis = self.make_prediction_raw(self.X_vis)
self.plotting_func(self.X_vis, Y_vis, title_string, figure_name=figure_name)
if dataset_test is not None:
X_test = dataset_test[0]
Y_test = dataset_test[1]
nb_test_ex = X_test.shape[0]
X_test = self.xRaw_2_x(X_test)
Y_test = self.yRaw_2_y(Y_test)
z_test, z_sq_test = self.evaluate_network_loss(X_test, Y_test)
print('Test discrete error: %f, test square loss: %f ' % (z_test, z_sq_test))
print(' ')
print('checking symmetry condition')
            self.debug_symmetric(X)
# evaluating network loss
def evaluate_network_loss(self, X, Y):
Y_hat = self.make_prediction(X)
sqloss = self.compute_sqloss(Y_hat, Y)
scores = Y - Y_hat
batch_size = Y.shape[0]
        scores = np.matlib.repmat(self.output_dim_weights, batch_size, 1) * np.square(scores)
        # per-sample weighted squared errors; count the fraction above the
        # threshold as the discrete error
        per_sample_err = np.sum(scores, axis=1)
        threshold = 0.25
        discrete_loss = (per_sample_err > threshold).sum() / float(Y.shape[0])
return discrete_loss, sqloss
def make_prediction(self, X):
if len(X.shape) > 1:
nb_examples = X.shape[0]
else:
nb_examples = 1
X = X[np.newaxis,:]
if_nn_nav = False
# if X.shape[1] >= 7 + 8 and (X.shape[1] - 7 ) % 8 == 0:
# if_nn_nav = True
# num_other_agents = (X.shape[1] - 7 ) / 8
# agent_off_indices = []
# for i in range(1, num_other_agents+1):
# inds = np.where(X[:,7+(8*i)-1] < 1e-5)[0]
# agent_off_indices.append(inds)
# try:
# assert(np.all(X[inds, 7+8*(i-1):7+(8*i)]==0))
# except AssertionError:
# print inds
# print X[inds, 7+8*(i-1):7+(8*i)]
# assert(0)
nb_layers = self.num_hidden_layers + 1
out = X
for layer in range(nb_layers-1):
if self.multiagent_net_param.layers_type[layer] == 'conn':
tmp = np.dot(out, self.W[layer]) \
+ np.matlib.repmat(self.b[layer], nb_examples, 1)
out = tmp * (tmp>0)
elif self.multiagent_net_param.layers_type[layer] == 'max':
num_pts = out.shape[0]
next_layer_size = np.sum(self.multiagent_net_param.layers_info[layer][:,1])
out_next = np.zeros((num_pts, np.sum(next_layer_size)))
if num_pts == 0:
out = out_next
continue
cur_s_ind = 0
next_s_ind = 0
for ii in range(self.multiagent_net_param.layers_info[layer].shape[0]):
num_agents = self.multiagent_net_param.layers_info[layer][ii,0]
stride = self.multiagent_net_param.layers_info[layer][ii,1]
cur_e_ind = cur_s_ind + num_agents * stride
next_e_ind = next_s_ind + stride
# print '---'
# print out[:,cur_s_ind:cur_e_ind].shape
# # print block_form.shape
# print 'num_pts,', num_pts
# print out_next[:,next_s_ind:next_e_ind].shape
block_form = np.reshape(out[:,cur_s_ind:cur_e_ind], (num_pts,-1,stride))
# print out[:,cur_s_ind:cur_e_ind].shape
# print block_form.shape
# print 'num_pts,', num_pts
# print forward_prop_o[layer][:,next_s_ind:next_e_ind].shape
out_next[:,next_s_ind:next_e_ind] = \
np.max(block_form, axis=1)
cur_s_ind = cur_e_ind
next_s_ind = next_e_ind
# print 'layer', layer
# print 'out', out
# print 'out_next', out_next
out = out_next
# raw_input()
# if if_nn_nav == True and self.multiagent_net_param.layers_info[layer+1].shape[0] == 2:
# stride = self.multiagent_net_param.layers_info[layer+1][1,1]
# start_ind = self.multiagent_net_param.layers_info[layer+1][0,1]
# for tt in range(num_other_agents):
# out[agent_off_indices[tt],start_ind:start_ind+stride] = 0
# start_ind += stride
y_hat = np.dot(out, self.W[nb_layers-1]) + \
np.matlib.repmat(self.b[nb_layers-1], nb_examples, 1)
return y_hat
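    # Illustration of the 'max' layers above (toy shapes, assumed values):
    # with two agent blocks of stride 3, columns [a1 | a2] reshape to
    # (num_pts, 2, 3) and np.max(..., axis=1) keeps an elementwise max per
    # stride, so the output is invariant to permuting the agents, e.g.
    #   out = np.arange(12.).reshape(2, 6)
    #   np.max(out.reshape(2, -1, 3), axis=1)   # shape (2, 3)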
def compute_sqloss(self, Y_hat, Y):
# print Y_hat
# print 'Y', Y
# assert(0)
batch_size = Y.shape[0]
scores = Y - Y_hat
scores = np.matlib.repmat(self.output_dim_weights, batch_size, 1) * scores
sq_loss = 0.5 * np.sum(np.square(scores)) / batch_size
return sq_loss
# requires scaling the input dimension
def make_prediction_raw(self, X_raw):
X = self.xRaw_2_x(X_raw)
Y_scale = self.make_prediction(X)
Y_hat = self.y_2_yRaw(Y_scale)
return Y_hat
    # debug: test whether the network output is symmetric
    def debug_symmetric(self, X_raw):
Y_nominal = self.make_prediction_raw(X_raw)
# preturb input
layer_info = self.multiagent_net_param.layers_info[0]
num_perturbations = 10
for perturb in range(num_perturbations):
# generate perturbation
start_ind = 0
X_raw_cp = X_raw.copy()
for i in range(layer_info.shape[0]):
num_type = layer_info[i,0]
stride = layer_info[i,1]
if num_type > 1:
other_ind = min(1, np.random.randint(num_type-1)+1)
# print 'layer_info', layer_info
# print 'i, num_type, stride', i, num_type, stride
# print 'start_ind', start_ind
# print 'other_ind', other_ind
X_raw_cp[:,start_ind:start_ind+stride] = \
X_raw[:,start_ind+other_ind*stride:start_ind+(other_ind+1)*stride]
X_raw_cp[:,start_ind+other_ind*stride:start_ind+(other_ind+1)*stride] = \
X_raw[:,start_ind:start_ind+stride]
# debug
# X_diff = X_raw_cp - X_raw
# print 'X_diff[1,:]', X_diff[1,:]
Y_hat = self.make_prediction_raw(X_raw_cp)
try:
assert(np.linalg.norm(Y_hat - Y_nominal)<1e-6)
except AssertionError:
print('symmetric condition not met')
print('X_raw, Y_nominal', X_raw, Y_nominal)
print('X_raw_cp, Y_raw', X_raw_cp, Y_hat)
assert(0)
# update start_ind
start_ind += num_type * stride
print('passed %d random cases' % num_perturbations)
if __name__ == '__main__':
print('hello world from neural_network.py')
file_dir = os.path.dirname(os.path.realpath(__file__))
plt.rcParams.update({'font.size': 18})
''' test on the spiral dataset '''
dataset_name = "/sinusoid_sum_1out";
sinusoid_dataset = pickle.load(open(file_dir+"/test_data" + dataset_name + "_dataset_train.p","rb"))
sinusoid_sum_X = sinusoid_dataset[0]
sinusoid_sum_Y = sinusoid_dataset[1]
sinusoid_sum_X_vis = pickle.load(open(file_dir+"/test_data" + dataset_name + "_dataset_vis.p", "rb"))
print('sinusoid_sum_X.shape', sinusoid_sum_X.shape)
print('sinusoid_sum_Y.shape', sinusoid_sum_Y.shape)
''' initializing neural network '''
sgd_step_size = 10.0/20.0
reg_lambda = 1.0/1000.0
nb_iter = 1000
sgd_batch_size = 50 #0
w_scale = 0.1
sgd_stepsize_mode = 'training data'
sgd_step_c = 0.1
sgd_step_epsilon = 0.1
nn_training_param = NN_training_param(sgd_step_size, reg_lambda, nb_iter, sgd_batch_size, w_scale)
# note: layers_info must be compatible with hidden_layers_size
layers_info = []
layers_type = []
layers_info.append(np.array([[2, 1]])); layers_type.append('conn')
layers_info.append(np.array([[2, 50]])); layers_type.append('conn')
# layers_info.append(np.array([[2, 50]])); layers_type.append('conn')
layers_info.append(np.array([[2, 50]])); layers_type.append('max')
    layers_info.append(np.array([[1, 50]])); layers_type.append('conn')  # 'conn' assumed for the final stage
import logging
import numpy as np
logger = logging.getLogger(__name__)
def fill_struct(obj=None, att_vals=None):
"""
Fill object with attributes in a dictionary.
If a struct is not given a new object will be created and filled.
If the given struct has a field in att_vals, the original field will stay,
unless specified otherwise in overwrite.
att_vals is a dictionary with string keys, and for each key:
if hasattr(s, key) and key in overwrite:
pass
else:
setattr(s, key, att_vals[key])
:param obj:
:param att_vals:
:param overwrite
:return:
"""
# TODO should consider making copy option - i.e that the input won't change
if obj is None:
obj = {}
if att_vals is None:
return obj
for key in att_vals.keys():
if key not in obj.keys():
obj[key] = att_vals[key]
return obj
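# Example (illustrative values): defaults only fill keys that are missing.
#   opts = fill_struct({"max_iter": 10}, {"max_iter": 50, "verbose": 0})
#   # -> {"max_iter": 10, "verbose": 0}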
def conj_grad(a_fun, b, cg_opt=None, init=None):
"""
Conjugate Gradient method to solve the linear system.
This is corresponding to the implemented version in the ASPIRE Matlab package.
:param a_fun: A function handle specifying the linear operation x -> Ax.
When multiple right-hand sides are supplied, this function takes as
input an array of shape (n, p), where n is the number of right-hand
sides and p is the dimension of the space.
:param b: The vector consisting of the right hand side of Ax = b. Again,
n different right-hand sides are given by supplying an array of shape
(n, p).
:param cg_opt: The parameters for the conjugate gradient method, including:
max_iter: Maximum number of iterations (default 50).
verbose: The extent to which information on progress should be
output to the terminal (default 1).
iter_callback: If non-empty, specifies a function to be called at
the end of every iteration. In this case, iter_callback must be a
function handle taking as single argument the info structure at
the current iteration. For information on the info structure,
see below (default []).
preconditioner: If non-empty, specifies a preconditioner to be
used in every iteration as a function handle defining the linear
operator x -> Px (default []).
rel_tolerance: The relative error at which to stop the algorithm,
even if it has not yet reached the maximum number of iterations
(default 1e-15).
store_iterates: Defines whether to store each intermediate results
in the info structure under the x, p and r fields. Since this
may require a large amount of memory, this is not recommended
(default false).
:param init: A structure specifying the starting point of the algorithm.
This can contain values of x or p that will be used for initialization
(default empty).
:return: The output result includes:
x: The result of the conjugate gradient method after max_iter iterations
or once the residual norm has decreased below rel_tolerance, relative.
obj: The value of the objective function at the last iteration.
info: A structure array containing intermediate information obtained
during each iteration. These fields include:
- iter: The iteration number.
- x (for store_iterates true): The value of x.
- r (for store_iterates true): The residual vector.
- p (for store_iterates true): The p vector.
- res: The square norm of the residual.
- obj: The objective function.
"""
def identity(input_x):
return input_x
default_opt = {
"verbose": 0,
"max_iter": 50,
"iter_callback": [],
"store_iterates": False,
"rel_tolerance": 1e-15,
"precision": b.dtype,
"preconditioner": identity,
}
cg_opt = fill_struct(cg_opt, default_opt)
default_init = {"x": None, "p": None}
init = fill_struct(default_init, init)
if init["x"] is None:
x = np.zeros(b.shape, dtype=b.dtype)
else:
x = init["x"]
b_norm = np.linalg.norm(b)
r = b.copy()
# Need the copy call to ensure that s and r are not identical in the case
# of an identity preconditioner.
s = cg_opt["preconditioner"](r.copy())
if np.any(x != 0):
if cg_opt["verbose"]:
logger.info("[CG] Calculating initial residual")
a_x = a_fun(x)
r = r - a_x
s = cg_opt["preconditioner"](r)
else:
a_x = np.zeros(x.shape, dtype=b.dtype)
obj = np.real(np.sum(x.conj() * a_x, -1) - 2 * np.real(np.sum(np.conj(b * x), -1)))
if init["p"] is None:
p = s.copy()
else:
p = init["p"]
info = fill_struct(att_vals={"iter": [0], "res": [np.linalg.norm(r)], "obj": [obj]})
if cg_opt["store_iterates"]:
info = fill_struct(info, att_vals={"x": [x], "r": [r], "p": [p]})
if cg_opt["verbose"]:
logger.info(
"[CG] Initialized. Residual: {}. Objective: {}".format(
np.linalg.norm(info["res"][0]), np.sum(info["obj"][0])
)
)
if b_norm == 0:
        # The Matlab code returns when b_norm == 0; returning early here avoids
        # breaking the Python code when b = 0.
return x, obj, info
for i in range(1, cg_opt["max_iter"] + 1):
if cg_opt["verbose"]:
logger.info("[CG] Applying matrix & preconditioner")
a_p = a_fun(p)
old_gamma = np.real(np.sum(s.conj() * r, -1))
alpha = old_gamma / np.real(np.sum(p.conj() * a_p, -1))
x += alpha[..., np.newaxis] * p
a_x += alpha[..., np.newaxis] * a_p
r -= alpha[..., np.newaxis] * a_p
s = cg_opt["preconditioner"](r.copy())
new_gamma = np.real(np.sum(r.conj() * s, -1))
beta = new_gamma / old_gamma
p *= beta[..., np.newaxis]
p += s
obj = np.real(
np.sum(x.conj() * a_x, -1) - 2 * np.real(np.sum(np.conj(b * x), -1))
)
res = np.sqrt(np.sum(r**2, -1))
info["iter"].append(i)
info["res"].append(res)
info["obj"].append(obj)
if cg_opt["store_iterates"]:
info["x"].append(x)
info["r"].append(r)
info["p"].append(p)
if cg_opt["verbose"]:
logger.info(
"[CG] Iteration {}. Residual: {}. Objective: {}".format(
i, np.linalg.norm(info["res"][i]), | np.sum(info["obj"][i]) | numpy.sum |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
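# Example: the two P -1 operations send a reflection to +/- hkl with unit
# phase factors, e.g.
#   hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
#       N.array([1, 2, 3]))
#   # hkls -> [[1, 2, 3], [-1, -2, -3]], phases -> [1.+0j, 1.+0j]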
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
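# Space group 14 below is P 1 21/c 1 (P2_1/c), commonly cited as the most
# frequent space group of molecular crystals. Its four operators are the
# identity, the 2_1 screw (x,y,z) -> (-x, y+1/2, -z+1/2), the inversion,
# and the c glide (x,y,z) -> (x, -y-1/2, z-1/2).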
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
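# End of the monoclinic system (space groups 3-15). Space groups 16-74,
# starting with P 2 2 2 below, are orthorhombic; Nos. 16-24 belong to
# point group 222.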
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
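# F-centred groups repeat the primitive operator set once for each of the
# centring vectors (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), which is why
# F 2 2 2 above lists 16 operators for a 4-operator point group.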
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
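# In the body-centred groups the centring vector (1/2,1/2,1/2) appears to
# be added verbatim, so sums such as (1/2,1/2,1) or (1,1/2,1/2) above are
# stored unreduced; modulo lattice translations they equal (1/2,1/2,0)
# and (0,1/2,1/2).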
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
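# Space groups 25-46, beginning with P m m 2 above, form the polar
# orthorhombic series (point group mm2).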
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
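# The table is keyed redundantly, by International Tables number and by
# Hermann-Mauguin symbol, so both of the following name one object:
#
#     space_groups[32] is space_groups['P b a 2']   # -> True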
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
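# The diamond (d) glides of F d d 2 above carry quarter-cell translations
# (denominator 4), e.g. (1/4,1/4,1/4) and (1/4,3/4,3/4).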
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
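# Space groups 47-74, beginning with P m m m below, are the
# centrosymmetric orthorhombic groups (point group mmm); each primitive
# block now pairs every proper rotation with its inversion image, giving
# 8 operators before centring.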
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
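# The ':2' suffix denotes origin choice 2 of the International Tables
# (origin on an inversion centre), which is why the pure inversion above
# has zero translation while the 2-fold rotations carry the half
# translations.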
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
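# Centred lattices are written out explicitly instead of being stored as
# centring vectors: the eight point-group operations are repeated once
# per centring translation -- (1/2,1/2,0) for the C-centred groups above,
# the three face centrings (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) for
# the F-centred groups, and (1/2,1/2,1/2) for the I-centred groups below.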
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
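# Minimal usage sketch (an illustrative assumption, not part of this
# module's API; the helper name below is hypothetical): each stored
# triple acts on a fractional coordinate as rot . xyz + trans_num/trans_den.
def _apply_symmetry_op(op, xyz):
    rot, trans_num, trans_den = op
    # Multiplying by 1.0 forces float division for the translation part.
    return N.dot(rot, xyz) + 1.0 * trans_num / trans_den
# For example, _apply_symmetry_op(transformations[1], N.array([.1, .2, .3]))
# applies the 4-fold rotation of the 'P 4' group just built, mapping
# (x, y, z) to (-y, x, z).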
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
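# P 41, P 42 and P 43 above share identical rotation matrices and differ
# only in the screw translation along z (c/4, c/2 and 3c/4 for the 4-fold
# axes), which is why only trans_num/trans_den change between the blocks.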
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
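# The generator [0,1,0,-1,0,0,0,0,-1] of 'P -4' maps (x, y, z) to
# (y, -x, -z): a 4-fold rotation combined with inversion (determinant -1),
# i.e. the -4 rotoinversion, with zero translation.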
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
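
# Space group 85 (P 4/n, origin choice 2)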
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
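
# Space group 86 (P 42/n, origin choice 2)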
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
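
# Space group 87 (I 4/m)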
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
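
# Space group 88 (I 41/a, origin choice 2)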
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
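
# Space group 89 (P 4 2 2)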
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
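
# Space group 90 (P 4 21 2)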
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
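
# Space group 91 (P 41 2 2)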
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
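
# Space group 92 (P 41 21 2)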
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
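
# Space group 93 (P 42 2 2)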
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
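
# Space group 94 (P 42 21 2)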
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
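
# Space group 95 (P 43 2 2)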
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
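
# Space group 96 (P 43 21 2)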
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
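
# Space group 97 (I 4 2 2)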
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
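
# Space group 98 (I 41 2 2)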
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
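
# Space group 99 (P 4 m m)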
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
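
# Space group 100 (P 4 b m)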
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
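
# Space group 101 (P 42 c m)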
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
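
# Space group 102 (P 42 n m)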
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
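
# Space group 103 (P 4 c c)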
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
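
# Space group 104 (P 4 n c)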
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
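
# Space group 105 (P 42 m c)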
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
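
# Space group 106 (P 42 b c)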
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
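
# Space group 107 (I 4 m m)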
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
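
# Space group 108 (I 4 c m)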
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
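
# Space group 109 (I 41 m d)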
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
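
# Space group 110 (I 41 c d)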
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
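
# Space group 111 (P -4 2 m)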
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
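
# Space group 112 (P -4 2 c)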
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
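
# Space group 113 (P -4 21 m)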
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
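
# Space group 114 (P -4 21 c)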
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
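
# Space group 115 (P -4 m 2)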
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
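
# Space group 116 (P -4 c 2)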
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
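
# Space group 117: P -4 b 2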
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
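
# Space group 118: P -4 n 2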
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
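
# Space group 119: I -4 m 2 (body-centred: the second half of the list
# repeats the first half with the (1/2,1/2,1/2) centring translation added)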
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
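
# Space group 120: I -4 c 2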
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
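
# Space group 121: I -4 2 m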
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
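
# Space group 122: I -4 2 d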
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
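
# Space group 123: P 4/m m m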
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
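
# Space group 124: P 4/m c c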
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
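
# Space group 125: P 4/n b m :2 (origin choice 2)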
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
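
# Space group 126: P 4/n n c :2 (origin choice 2)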
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
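
# Space group 127: P 4/m b m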
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
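
# Space group 128: P 4/m n c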
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
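
# Space group 129: P 4/n m m :2 (origin choice 2)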
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
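
# Space group 130: P 4/n c c :2 (origin choice 2)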
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
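
# Space group 131: P 42/m m c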
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
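
# Space group 132: P 42/m c m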
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
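
# Space group 133: P 42/n b c :2 (origin choice 2)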
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
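
# Space group 134: P 42/n n m :2 (origin choice 2)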
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
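
# Space group 135: P 42/m b c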
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
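
# Space group 136: P 42/m n m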
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
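
# Space group 137: P 42/n m c :2 (origin choice 2)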
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
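
# Space group 138: P 42/n c m :2 (origin choice 2)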
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
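# Space group 139: I 4/m m m (tetragonal, body-centred). The 16 point
# operations are listed twice, the second time shifted by the I-centring
# translation (1/2,1/2,1/2).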
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
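# Space group 140: I 4/m c m (tetragonal, body-centred; translation sums
# from the centring are left unreduced by the generator).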
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
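# Space group 141: I 41/a m d, origin choice 2 (tetragonal, body-centred).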
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
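# Space group 142: I 41/a c d, origin choice 2 (tetragonal, body-centred).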
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
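# Trigonal space groups (Nos. 143-167) begin here.
# Space group 143: P 3.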
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
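# Space group 144: P 31 (3_1 screw axis: z-translations of 1/3 and 2/3).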
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
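# Space group 145: P 32, the enantiomorphous partner of P 31.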
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
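# Space group 146: R 3, hexagonal-axes setting. The three primitive
# operations recur with the rhombohedral centring translations
# (1/3,2/3,2/3) and (2/3,1/3,1/3).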
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
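# Space group 147: P -3.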
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
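# Space group 148: R -3, hexagonal-axes setting.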
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
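# Space group 149: P 3 1 2.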
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
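# Space group 150: P 3 2 1.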
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
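# Space group 151: P 31 1 2.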
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
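# Space group 152: P 31 2 1.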
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
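# Space group 153: P 32 1 2, the enantiomorphous partner of P 31 1 2.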
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
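# Space group 154: P 32 2 1, the enantiomorphous partner of P 31 2 1.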
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
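# Space group 155: R 3 2, hexagonal-axes setting.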
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
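# Space group 156: P 3 m 1.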
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
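# Space group 157: P 3 1 m.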
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
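# Space group 158: P 3 c 1 (c-glide: the mirror-type operations carry a
# z-translation of 1/2).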
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
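# Space group 159: P 3 1 c.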
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
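# Space group 160: R 3 m, hexagonal-axes setting.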
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
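# Space group 161: R 3 c, hexagonal-axes setting.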
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
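# Space group 162: P -3 1 m.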
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
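# Space group 163: P -3 1 c.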
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
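# Space group 164: P -3 m 1.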
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
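# Editor's sketch (hedged): each (rot, trans_num, trans_den) triple appended to
# a transformations list above encodes the affine symmetry operation
# p -> rot.dot(p) + trans_num/trans_den on fractional coordinates; storing the
# translation as a rational numerator/denominator pair keeps it exact. A minimal
# helper applying one such triple (assumes N is this module's NumPy-compatible
# alias; the SpaceGroup internals are not shown in this excerpt):
def apply_symmetry_op(rot, trans_num, trans_den, point):
    # p' = R p + t, with the translation t given exactly as trans_num/trans_den
    return N.dot(rot, point) + trans_num.astype(float) / trans_den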
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
# -*- coding: utf-8 -*-
"""
@author: <NAME>
@time: 2022-2-18 16:55
"""
import numpy as np
class HMM(object):
def __init__(self, state_num, observe_num, train_params = 'ste'):
self.s_n = state_num
self.o_n = observe_num
self.A = np.random.rand(self.s_n, self.s_n)
self.A = self.A/self.A.sum(axis = 1).reshape([-1,1])
self.B = np.random.rand(self.s_n, self.o_n)
self.B = self.B/self.B.sum(axis = 1).reshape([-1,1])
self.pi = np.random.rand(self.s_n)
self.pi = self.pi/sum(self.pi)
self.train_data = []
self.train_params = train_params
    #input_data format: [[o1,o2,o3,...,ox], [o1,o2,o3,...,oy]]
    #Multiple observation sequences are supported. Observations are index-encoded values, so map them through the actual observation dictionary in advance; decoded hidden states are likewise indices and must be mapped back the same way.
def add_data(self, input_data):
self.train_data.extend(input_data)
    #Compute all the forward probabilities
# [[o1,o2,o3,...,ot1], [o1,o2,o3,...,ot2]]
# [t1 * s_n, t2 * s_n]
def forward(self, o_seqs):
self.alpha = []
for seq in o_seqs:
alpha = np.zeros((len(seq),self.s_n))
for i in range(self.s_n):
alpha[0,i] = self.pi[i] * self.B[i,seq[0]]
for r in range(1,len(seq)):
for i in range(self.s_n):
alpha[r,i] = sum([alpha[r-1,j]*self.A[j,i] for j in range(self.s_n)])*self.B[i,seq[r]]
self.alpha.append(alpha)
    #Compute all the backward probabilities
# [[o1,o2,o3,...,ot1], [o1,o2,o3,...,ot2]]
# [t1 * s_n, t2 * s_n]
def backward(self, o_seqs):
self.beta = []
for seq in o_seqs:
beta = np.zeros((len(seq),self.s_n))
for i in range(self.s_n):
beta[len(seq)-1,i] = 1
for r in range(len(seq)-2,-1,-1):
for i in range(self.s_n):
beta[r,i] = sum([self.A[i,j]*self.B[j,seq[r+1]]*beta[r+1,j] for j in range(self.s_n)])
self.beta.append(beta)
    #Given the model parameters and an observation sequence, the probability that the state at time t takes a given value
    # t * s_n
    # for multiple observation sequences: [t1 * s_n, t2 * s_n, ... , tk * s_n]
def gamma_matrix(self):
self.gamma = []
for i in range(len(self.alpha)):
alpha = self.alpha[i]
beta = self.beta[i]
self.gamma.append(alpha*beta/sum(alpha[len(alpha)-1]))
    #Given the model parameters and an observation sequence, the probability that the state at time t is one value and the state at t+1 is another
    # t * s_n * s_n
    # for multiple observation sequences: [t1-1 * s_n * s_n, t2-1 * s_n * s_n, ... , tk-1 * s_n * s_n]
def ksi_matrix(self):
self.ksi = []
        for k in range(len(self.train_data)):
            seq = self.train_data[k]
            alpha = self.alpha[k]
            beta = self.beta[k]
ksi = np.zeros((len(seq)-1, self.s_n, self.s_n))
for t in range(len(seq)-1):
for i in range(self.s_n):
for j in range(self.s_n):
ksi[t,i,j] = alpha[t,i]*self.A[i,j]*self.B[j,seq[t+1]]*beta[t+1,j]/sum(alpha[len(alpha)-1])
self.ksi.append(ksi)
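    # Editor's note (hedged): the loop above implements
    #   ksi_t(i, j) = alpha_t(i) * A[i, j] * B[j, o_{t+1}] * beta_{t+1}(j) / P(O),
    # where P(O) = sum_i alpha_T(i), matching sum(alpha[len(alpha)-1]) in the code.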
    #EM-style training via the Baum-Welch algorithm
def train(self, maxStep = 10, delta = 0.01):
step = 0
while step < maxStep:
print("=============== step {} ===============".format(step))
            #E-step: with the model parameters fixed, compute the hidden-state statistics
'''
self.forward(self.train_data)
'''
            #estimate_prob already runs the forward pass, so a separate forward() call is not needed here
log_prob = [np.log(p) for p in self.estimate_prob(self.train_data)]
self.backward(self.train_data)
self.gamma_matrix()
self.ksi_matrix()
            #M-step: with the hidden-state statistics fixed, re-estimate the model parameters
new_pi = sum([gamma[0] for gamma in self.gamma])/len(self.gamma)
new_A = sum([ksi.sum(axis = 0) for ksi in self.ksi])/np.reshape(sum([gamma[:-1].sum(axis = 0) for gamma in self.gamma]), [-1,1])
sn_on_list = []
for i in range(len(self.train_data)):
seq = np.array(self.train_data[i])
gamma = self.gamma[i]
sn_on = []
for o in range(self.o_n):
sn_o = (np.reshape(seq == o, [-1,1]) * gamma).sum(axis = 0).reshape([-1,1])
sn_on.append(sn_o)
sn_on_list.append(np.concatenate(sn_on,axis = 1))
new_B = sum(sn_on_list)/np.reshape(sum([gamma.sum(axis = 0) for gamma in self.gamma]), [-1,1])
            #also stop early once the parameter updates become small
            pi_error = np.sum(np.square(new_pi - self.pi))
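# Editor's sketch (hedged): estimate_prob() and the rest of train() are truncated
# in this excerpt, but forward() above is complete. A tiny check of the forward
# recursion alpha[t, i] = P(o_1..o_t, s_t = i) with fixed rather than random
# parameters:
_demo = HMM(state_num=2, observe_num=2)
_demo.A = np.array([[0.7, 0.3], [0.4, 0.6]])
_demo.B = np.array([[0.9, 0.1], [0.2, 0.8]])
_demo.pi = np.array([0.6, 0.4])
_demo.forward([[0, 1, 0]])
# sum(_demo.alpha[0][-1]) is then P(observing [0, 1, 0]) under these parameters.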
"""
Implements Citation-KNN
"""
import numpy as np
import scipy.spatial.distance as dist
class CKNN(object):
"""
Citation-KNN
"""
def __init__(self):
self._bags = None
self._bag_predictions = None
self._labels = None
self._full_bags = None
self._DM = None
def fit(self, train_bags, train_labels, **kwargs):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
        @param y : an array-like object of length n containing 0/1 labels
"""
self._bags = train_bags
self._labels = train_labels
self._R = kwargs['references']
self._C = kwargs['citers']
def predict(self, Testbags):
"""
@param bags : a sequence of n bags; each bag is an m-by-k array-like
object containing m instances with k features
@return : an array of length n containing real-valued label predictions
@R : References
@C : Citers
"""
        #Merge the training and testing bags
train_bags = self._bags
full_bags = self._bags+Testbags
pred_labels = np.array([])
self._DM = self.DistanceMatrix(full_bags)
for num in range(len(self._bags),len(full_bags) ):
number = num
REFERENCES = self._DM[number,0:self._R]
CiteMatrix =self._DM[:,0:self._C]
CITERS,j = np.where(CiteMatrix == number)
LabelsTrainCiters = self._labels[CITERS[CITERS<len(train_bags)]]
LabelsTrainRef = self._labels[REFERENCES[REFERENCES<len(train_bags)]]
Rp = np.count_nonzero(LabelsTrainRef == 1)
Rn = np.count_nonzero(LabelsTrainRef == 0)
Cp = np.count_nonzero(LabelsTrainCiters == 1)
Cn = np.count_nonzero(LabelsTrainCiters == 0)
if Rp+Cp> Rn+Cn:
label_out = 1
else:
label_out = 0
            pred_labels = np.append(pred_labels,label_out)
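# Editor's sketch (hedged): self.DistanceMatrix() is not shown in this excerpt.
# Citation-KNN is conventionally built on a bag-level metric such as the minimal
# Hausdorff distance, i.e. the smallest instance-pair distance between two bags;
# a matching helper would look like:
def min_hausdorff(bag_a, bag_b):
    # bag_a: (m, k) instances, bag_b: (n, k) instances
    return dist.cdist(bag_a, bag_b).min()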
import sys
import unittest
import numpy as np
import luigi
import z5py
import nifty.ground_truth as ngt
import nifty.distributed as ndist
import nifty.tools as nt
try:
from ..base import BaseTest
except ValueError:
sys.path.append('..')
from base import BaseTest
class TestNodeLabels(BaseTest):
input_key = 'volumes/segmentation/groundtruth'
output_key = 'labels'
@staticmethod
def compute_overlaps(seg_a, seg_b, max_overlap=True):
seg_ids = np.unique(seg_a)
comp = ngt.overlap(seg_a, seg_b)
overlaps = [comp.overlapArrays(ida, True) for ida in seg_ids]
if max_overlap:
            # the max overlap can be ambiguous; we need to filter for this
mask = np.array([ovlp[1][0] != ovlp[1][1] if ovlp[1].size > 1 else True
for ovlp in overlaps])
overlaps = np.array([ovlp[0][0] for ovlp in overlaps])
assert mask.shape == overlaps.shape
return overlaps, mask
else:
overlaps = {seg_id: ovlp for seg_id, ovlp in zip(seg_ids, overlaps)}
return overlaps
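    # Editor's note (hedged): the ambiguity mask in the max_overlap branch drops
    # segments whose two largest overlap counts tie, e.g. counts [5, 5] give
    # ovlp[1][0] == ovlp[1][1], so the max-overlap label is not well defined and
    # that segment is excluded from the comparison.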
def load_data(self):
# compute the expected max overlaps
with z5py.File(self.input_path) as f:
ds_ws = f[self.ws_key]
ds_ws.n_threads = self.max_jobs
ws = ds_ws[:]
ds_inp = f[self.input_key]
ds_inp.n_threads = self.max_jobs
inp = ds_inp[:]
return ws, inp
def check_overlaps(self, ids, overlaps, overlaps_exp):
self.assertEqual(len(ids), len(overlaps))
self.assertEqual(len(overlaps), len(overlaps_exp))
for seg_id in ids:
this_ovlps = overlaps[seg_id]
ovlp_ids = np.array(list(this_ovlps.keys()))
ovlp_counts = np.array(list(this_ovlps.values()))
sorted_ids = np.argsort(ovlp_ids)
ovlp_ids = ovlp_ids[sorted_ids]
ovlp_counts = ovlp_counts[sorted_ids]
ovlp_ids_exp, ovlp_counts_exp = overlaps_exp[seg_id]
sorted_ids = np.argsort(ovlp_ids_exp)
ovlp_ids_exp = ovlp_ids_exp[sorted_ids]
ovlp_counts_exp = ovlp_counts_exp[sorted_ids]
            self.assertTrue(np.allclose(ovlp_ids, ovlp_ids_exp))
#!/usr/bin/env python
# coding: utf-8
"""
compute mean similarity overall and within clusters
"""
import os
import argparse
import numpy
import pandas
import json
import matplotlib.pyplot as plt
from narps import Narps, hypnums
hypnames = ['%d' % i for i in hypnums]
hypnames[:2] = ['1/3', '2/4']
def get_similarity_summary(narps, corrtype='spearman'):
corr_summary = []
for i, hyp in enumerate(hypnums):
print('hyp', hyp)
# load correlations and cluster info
corrdata = pandas.read_csv(
os.path.join(
narps.dirs.dirs['output'],
'correlation_unthresh',
'%s_unthresh_hyp%d.csv' % (corrtype, hyp)),
index_col=0
)
jsonfile = os.path.join(
narps.dirs.dirs['output'],
'correlation_unthresh',
'unthresh_cluster_membership_spearman.json')
with open(jsonfile) as f:
clusterinfo = json.load(f)
# overall correlation
corrvals = corrdata.values
corrvals_triu = corrvals[numpy.triu_indices_from(corrvals, 1)]
corr_summary.append([hypnames[i],
'mean',
corrvals.shape[0],
numpy.mean(corrvals_triu)])
# plot histogram without zeros
plt.figure(figsize=(8, 8))
plt.hist(corrvals_triu, 50, (-1, 1))
histfile = os.path.join(
narps.dirs.dirs['figures'],
'correlation_unthresh',
'%s_unthresh_hyp%d_mean.png' % (corrtype, hyp))
if not os.path.exists(os.path.dirname(histfile)):
os.mkdir(os.path.dirname(histfile))
plt.savefig(histfile)
plt.close()
# per-cluster correlation
ci = clusterinfo['%d' % hyp]
for cluster in ci:
clusterdata = corrdata.loc[ci[cluster]][ci[cluster]]
assert (clusterdata.index == clusterdata.columns).all()
cluster_corrvals = clusterdata.values
cluster_corrvals_triu = cluster_corrvals[
numpy.triu_indices_from(cluster_corrvals, 1)]
corr_summary.append([hypnames[i],
'cluster%s' % cluster,
len(ci[cluster]),
                                 numpy.mean(cluster_corrvals_triu)])
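# Editor's sketch (hedged): the summaries above average only the strict upper
# triangle of each correlation matrix, i.e. every unordered pair of teams once,
# with the self-correlation diagonal excluded. Toy illustration:
_toy = numpy.array([[1.0, 0.5, 0.2], [0.5, 1.0, 0.8], [0.2, 0.8, 1.0]])
_toy_triu = _toy[numpy.triu_indices_from(_toy, 1)]  # array([0.5, 0.2, 0.8])
# numpy.mean(_toy_triu) == 0.5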
import wml_utils as wmlu
import os
import json
import numpy as np
import cv2 as cv
import object_detection2.visualization as odv
import copy
import img_utils as wmli
import random
import matplotlib.pyplot as plt
import sys
import cv2
from object_detection2.standard_names import *
import glob
def trans_odresult_to_annotations_list(data):
labels = data[RD_LABELS]
res = []
for i in range(len(labels)):
annotation = {}
annotation["category_id"] = labels[i]
annotation["segmentation"] = data[RD_FULL_SIZE_MASKS]
res.append(annotation)
return res
def trans_absolute_coord_to_relative_coord(image_info,annotations_list):
H = image_info['height']
W = image_info['width']
res_bbox = []
res_segmentation = []
res_labels = []
for ann in annotations_list:
box = ann['bbox']
xmin = box[0]/W
ymin = box[1]/H
xmax = (box[0]+box[2])/W
ymax = (box[1]+box[3])/H
res_bbox.append([ymin,xmin,ymax,xmax])
res_segmentation.append(ann['segmentation'])
res_labels.append(ann['category_id'])
if len(annotations_list)>0:
return np.array(res_bbox),np.array(res_labels),np.array(res_segmentation)
else:
return np.zeros([0,4],dtype=np.float32),np.zeros([0],dtype=np.int32),np.zeros([0,H,W],dtype=np.uint8)
def get_files(data_dir, img_suffix="jpg"):
files = wmlu.recurse_get_filepath_in_dir(data_dir, suffix=".json")
res = []
for file in files:
img_file = wmlu.change_suffix(file, img_suffix)
if os.path.exists(img_file):
res.append((img_file, file))
return res
'''
output:
image_info: {'height','width'}
annotations_list: [{'bbox','segmentation','category_id','points_x','points_y'}] #bbox: [xmin,ymin,width,height] in absolute coordinates,
                  'segmentation': [H,W] binary mask
'''
def read_labelme_data(file_path,label_text_to_id=lambda x:int(x),use_semantic=True):
annotations_list = []
image = {}
with open(file_path,"r",encoding="gb18030") as f:
print(file_path)
data_str = f.read()
try:
json_data = json.loads(data_str)
img_width = int(json_data["imageWidth"])
img_height = int(json_data["imageHeight"])
image["height"] = int(img_height)
image["width"] = int(img_width)
image["file_name"] = wmlu.base_name(file_path)
for shape in json_data["shapes"]:
mask = np.zeros(shape=[img_height,img_width],dtype=np.uint8)
all_points = np.array([shape["points"]]).astype(np.int32)
                if len(all_points[0])<1:
continue
points = np.transpose(all_points[0])
x,y = np.vsplit(points,2)
x = np.reshape(x,[-1])
y = np.reshape(y,[-1])
x = np.minimum(np.maximum(0,x),img_width-1)
y = np.minimum(np.maximum(0,y),img_height-1)
xmin = np.min(x)
xmax = np.max(x)
ymin = np.min(y)
ymax = np.max(y)
segmentation = cv.drawContours(mask,all_points,-1,color=(1),thickness=cv.FILLED)
if label_text_to_id is not None:
label = label_text_to_id(shape["label"])
else:
label = shape["label"]
annotations_list.append({"bbox":(xmin,ymin,xmax-xmin+1,ymax-ymin+1),
"segmentation":segmentation,
"category_id":label,
"points_x":x,
"points_y":y})
        except Exception:
            print(f"Read file {os.path.basename(file_path)} failed.")
pass
if use_semantic:
'''
        Each pixel belongs to only one class; later annotations overwrite earlier ones.
'''
        if len(annotations_list) > 1:
mask = 1 - annotations_list[-1]['segmentation']
for i in reversed(range(len(annotations_list) - 1)):
annotations_list[i]['segmentation'] = np.logical_and(annotations_list[i]['segmentation'], mask)
mask = np.logical_and(mask, 1 - annotations_list[i]['segmentation'])
return image,annotations_list
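# Editor's sketch (hedged): the polygon-to-mask step used inside
# read_labelme_data, isolated on synthetic points; the filled region is what
# ends up in each annotation's 'segmentation' array.
_mask = np.zeros((8, 8), dtype=np.uint8)
_poly = np.array([[[1, 1], [6, 1], [6, 6], [1, 6]]], dtype=np.int32)
_mask = cv.drawContours(_mask, _poly, -1, color=(1), thickness=cv.FILLED)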
def save_labelme_data(file_path,image_path,image,annotations_list,label_to_text=lambda x:str(x)):
data={}
shapes = []
data["version"] = "3.10.1"
data["flags"] = {}
for ann in annotations_list:
shape = {}
shape["label"] = label_to_text(ann["category_id"])
#shape["line_color"]=None
#shape["fill_color"]=None
shape["shape_type"]="polygon"
contours, hierarchy = cv.findContours(ann["segmentation"], cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
for cont in contours:
points = cont
if len(cont.shape)==3 and cont.shape[1]==1:
points = np.squeeze(points,axis=1)
points = points.tolist()
shape["points"] = points
shapes.append(shape)
data["shapes"] = shapes
data["imagePath"] = os.path.basename(image_path)
data["imageWidth"] = image["width"]
data["imageHeight"] = image["height"]
with open(file_path,"w") as f:
json.dump(data,f)
def get_labels_and_bboxes(image,annotations_list):
labels = []
bboxes = []
width = image["width"]
height = image["height"]
for ann in annotations_list:
t_box = ann["bbox"]
xmin = t_box[0]/width
ymin = t_box[1]/height
xmax = xmin+t_box[2]/width
ymax = ymin+t_box[3]/height
bboxes.append([ymin,xmin,ymax,xmax])
labels.append(ann["category_id"])
return np.array(labels),np.array(bboxes)
def get_labels_bboxes_and_masks(image,annotations_list):
labels = []
bboxes = []
masks = []
width = image["width"]
height = image["height"]
for ann in annotations_list:
t_box = ann["bbox"]
xmin = t_box[0]/width
ymin = t_box[1]/height
xmax = xmin+t_box[2]/width
ymax = ymin+t_box[3]/height
bboxes.append([ymin,xmin,ymax,xmax])
labels.append(ann["category_id"])
masks.append(ann["segmentation"])
    return np.array(labels),np.array(bboxes),np.array(masks)
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 11:19:50 2018
@author: mayank
"""
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics.pairwise import linear_kernel,rbf_kernel,manhattan_distances,polynomial_kernel,sigmoid_kernel,cosine_similarity,laplacian_kernel,paired_euclidean_distances,pairwise_distances
from sklearn.kernel_approximation import RBFSampler, Nystroem
from sklearn.utils import resample
from numpy.matlib import repmat
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import IncrementalPCA
from numpy.linalg import eigh
from sklearn.preprocessing import OneHotEncoder
from sparse import COO
from scipy.sparse import csr_matrix, lil_matrix
from scipy.sparse import issparse
from scipy.sparse import hstack
#%%
class utils:
# def __init__(self):
# return None
def add_bias(self,xTrain):
"""
Adds bias to the data
Parameters:
-----------
xTrain: 2D numpy ndarray/csr_matrix of shape (n_samples, n_features)
Returns:
--------
xTrain: 2D numpy ndarray/csr_matrix of shape (n_samples, n_features + 1)
"""
N = xTrain.shape[0]
if(xTrain.size!=0):
if(issparse(xTrain)==True):
xTrain = csr_matrix(hstack([xTrain,np.ones((N,1))]))
else:
xTrain=np.hstack((xTrain,np.ones((N,1))))
return xTrain
def logsig(self,x):
return 1 / (1 + np.exp(-x))
def saturate_fcn1(self,x,a = 2):
y = np.zeros(x.shape)
idx1 = (x <= a)*(x >=-a)
idx2 = x > a
idx3 = x < -a
y[idx1] = x[idx1]/(2*a) + 1.0/2.0
y[idx2] = 1
y[idx3] = 0
return y
def standardize(self,xTrain,centering):
"""
Transform the data so that each column has zero mean and unit standard deviation
Parameters:
-----------
xTrain: 2D numpy ndarray of shape (n_samples, n_features)
centering: bool,
whether to perform standardization,
if False, it returns me = np.zeros((xTrain.shape[1],))
and std_dev = np.ones((xTrain.shape[1],))
Returns:
--------
xTrain: 2D numpy ndarray of shape (n_samples, n_features)
me: mean of the columns
std_dev: standard deviation of the columns
"""
if(centering == True):
me=np.mean(xTrain,axis=0)
std_dev=np.std(xTrain,axis=0)
else:
me = np.zeros((xTrain.shape[1],))
std_dev = np.ones((xTrain.shape[1],))
#remove columns with zero std
idx=(std_dev!=0.0)
# print(idx.shape)
xTrain[:,idx]=(xTrain[:,idx]-me[idx])/std_dev[idx]
return xTrain,me,std_dev
def divide_into_batches_stratified(self,yTrain,batch_sz):
"""
Divides the data into batches such that each batch contains similar proportion of labels in it
Parameters:
----------
yTrain: np.ndarray labels for the datset of shape (n_samples, )
Returns:
--------
idx_batches: list
index of yTrain in each batch
sample_weights: np.ndarray of size (n_samples,)
weights for each sample in batch = 1/#class_j
num_batches: int
number of batches formed
"""
#data should be of the form samples X features
N=yTrain.shape[0]
num_batches=int(np.ceil(N/batch_sz))
sample_weights=list()
numClasses=np.unique(yTrain).size
idx_batches=list()
skf=StratifiedKFold(n_splits=num_batches, random_state=1, shuffle=True)
j=0
for train_index, test_index in skf.split(np.zeros(N), yTrain):
idx_batches.append(test_index)
class_weights=np.zeros((numClasses,))
sample_weights1=np.zeros((test_index.shape[0],))
temp=yTrain[test_index,]
for i in range(numClasses):
idx1=(temp==i)
class_weights[i]=1.0/(np.sum(idx1)+1e-09)#/idx.shape[0]
sample_weights1[idx1]=class_weights[i]
sample_weights.append(sample_weights1)
j+=1
return idx_batches,sample_weights,num_batches
def margin_kernel(self, X1, kernel_type = 'linear', gamma =1.0):
"""
Forms the kernel matrix using the samples X1
Parameters:
----------
X1: np.ndarray
data (n_samples,n_features) to form a kernel of shape (n_samples,n_samples)
kernel_type : str
type of kernel to be used
gamma: float
kernel parameter
Returns:
-------
X: np.ndarray
the kernel of shape (n_samples,n_samples)
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X1)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X1,gamma)
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X1,-gamma)
elif(kernel_type == 'sin'):
# X = np.sin(gamma*manhattan_distances(X1,X1))
X = np.sin(gamma*pairwise_distances(X1,X1)**2)
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X1))
else:
print('no kernel_type, returning None')
return None
return X
def kernel_transform(self, X1, X2 = None, kernel_type = 'linear_primal', n_components = 100, gamma = 1.0):
"""
Forms the kernel matrix using the samples X1
Parameters:
----------
X1: np.ndarray
data (n_samples1,n_features) to form a kernel of shape (n_samples1,n_samples1)
X2: np.ndarray
data (n_samples2,n_features) to form a kernel of shape (n_samples1,n_samples2)
kernel_type : str
type of kernel to be used
gamma: float
kernel parameter
Returns:
-------
X: np.ndarray
the kernel of shape (n_samples,n_samples)
"""
if(kernel_type == 'linear'):
X = linear_kernel(X1,X2)
elif(kernel_type == 'rbf'):
X = rbf_kernel(X1,X2,gamma)
elif(kernel_type == 'tanh'):
X = sigmoid_kernel(X1,X2,-gamma)
elif(kernel_type == 'sin'):
# X = np.sin(gamma*manhattan_distances(X1,X2))
X = np.sin(gamma*pairwise_distances(X1,X2)**2)
elif(kernel_type =='TL1'):
X = np.maximum(0,gamma - manhattan_distances(X1,X2))
elif(kernel_type == 'rff_primal'):
rbf_feature = RBFSampler(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'nystrom_primal'):
#cannot have n_components more than n_samples1
if(n_components > X1.shape[0]):
raise ValueError('n_samples should be greater than n_components')
rbf_feature = Nystroem(gamma=gamma, random_state=1, n_components = n_components)
X = rbf_feature.fit_transform(X1)
elif(kernel_type == 'linear_primal'):
X = X1
else:
print('No kernel_type passed: using linear primal solver')
X = X1
return X
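    # Editor's sketch (hedged): 'rff_primal' returns explicit random Fourier
    # features Z whose inner products approximate the RBF kernel, i.e.
    # Z.dot(Z.T) ~= rbf_kernel(X, X, gamma), with the approximation tightening
    # as n_components grows. Illustrative usage:
    #   u = utils()
    #   X = np.random.RandomState(0).randn(20, 5)
    #   Z = u.kernel_transform(X, kernel_type='rff_primal', n_components=500, gamma=0.5)
    #   K = u.kernel_transform(X, X, kernel_type='rbf', gamma=0.5)
    #   np.abs(Z.dot(Z.T) - K).mean()   # small for large n_components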
def generate_samples(self,X_orig,old_imbalance_ratio,new_imbalance_ratio):
"""
Generates samples based on new imbalance ratio, such that new imbalanced ratio is achieved
Parameters:
----------
X_orig: np.array (n_samples , n_features)
data matrix
old_imbalance_ratio: float
old imbalance ratio in the samples
new_imbalance_ratio: float
new imbalance ratio in the samples
Returns:
-------
X_orig: np.array (n_samples , n_features)
data matrix
X1: 2D np.array
newly generated samples of shape (int((new_imbalance_ratio/old_imbalance_ratio)*n_samples - n_samples), n_features )
"""
N=X_orig.shape[0]
M=X_orig.shape[1]
neighbors_thresh=10
if (new_imbalance_ratio < old_imbalance_ratio):
raise ValueError('new ratio should be greater than old ratio')
new_samples=int((new_imbalance_ratio/old_imbalance_ratio)*N - N)
#each point must generate these many samples
new_samples_per_point_orig=new_imbalance_ratio/old_imbalance_ratio - 1
new_samples_per_point=int(new_imbalance_ratio/old_imbalance_ratio - 1)
#check if the number of samples each point has to generate is > 1
X1=np.zeros((0,M))
if(new_samples_per_point_orig>0 and new_samples_per_point_orig<=1):
idx_samples=resample(np.arange(0,N), n_samples=int(N*new_samples_per_point_orig), random_state=1,replace=False)
X=X_orig[idx_samples,]
new_samples_per_point=1
N=X.shape[0]
else:
X=X_orig
if(N==1):
X1=repmat(X,new_samples,1)
elif(N>1):
if(N<=neighbors_thresh):
n_neighbors=int(N/2)
else:
n_neighbors=neighbors_thresh
nbrs = NearestNeighbors(n_neighbors=n_neighbors, algorithm='ball_tree').fit(X)
for i in range(N):
#for each point find its n_neighbors nearest neighbors
inds=nbrs.kneighbors(X[i,:].reshape(1,-1), n_neighbors, return_distance=False)
temp_data=X[inds[0],:]
std=np.std(temp_data,axis=0)
me=np.mean(temp_data,axis=0)
np.random.seed(i)
x_temp=me + std*np.random.randn(new_samples_per_point,M)
X1=np.append(X1,x_temp,axis=0)
return X_orig, X1
def upsample(self,X,Y,new_imbalance_ratio):
"""
Upsamples the data based on label array, for classification only
Parameters:
----------
X: np.array (n_samples, n_features)
2D data matrix
Y: np.array (n_samples, )
label array, takes values between [0, numClasses-1]
new_imbalance_ratio: float
new imbalance ratio in the data, takes values between [0.5,1]
Returns:
-------
X3: np.array (n_samples1, n_features)
new balanced 2D data matrix
Y3: np.array (n_samples1, )
new balanced label array
"""
#xTrain: samples X features
#yTrain : samples,
#for classification only
numClasses=np.unique(Y).size
class_samples=np.zeros((numClasses,))
X3=np.zeros((0,X.shape[1]))
Y3=np.zeros((0,))
#first find the samples per class per class
for i in range(numClasses):
idx1=(Y==i)
class_samples[i]=np.sum(idx1)
max_samples=np.max(class_samples)
# new_imbalance_ratio=0.5
# if(upsample_type==1):
old_imbalance_ratio_thresh=0.5
# else:
# old_imbalance_ratio_thresh=1
for i in range(numClasses):
idx1=(Y==i)
old_imbalance_ratio=class_samples[i]/max_samples
X1=X[idx1,:]
Y1=Y[idx1,]
if(idx1.size==1):
X1=np.reshape(X1,(1,X.shape[1]))
if(old_imbalance_ratio<=old_imbalance_ratio_thresh and class_samples[i]!=0):
X1,X2=self.generate_samples(X1,old_imbalance_ratio,new_imbalance_ratio)
new_samples=X2.shape[0]
Y2=np.ones((new_samples,))
Y2=Y2*Y1[0,]
#append original and generated samples
X3=np.append(X3,X1,axis=0)
X3=np.append(X3,X2,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.append(Y3,Y2,axis=0)
else:
#append original samples only
X3=np.append(X3,X1,axis=0)
Y3=np.append(Y3,Y1,axis=0)
Y3=np.array(Y3,dtype=np.int32)
return X3,Y3
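    # Editor's sketch (hedged): balancing a 90/10 binary problem to a 1:1 ratio
    # with the method above:
    #   u = utils()
    #   X = np.random.RandomState(0).randn(100, 4)
    #   Y = np.array([0]*90 + [1]*10)
    #   X3, Y3 = u.upsample(X, Y, new_imbalance_ratio=1.0)
    #   # np.bincount(Y3) is now approximately [90, 90]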
def kmeans_select(self,X,represent_points,do_pca=False):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on the farthest distance from the kmeans centers
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
# do_pca = self.do_pca_in_selection
N = X.shape[0]
if(do_pca == True):
if(X.shape[1]>50):
n_components = 50
ipca = IncrementalPCA(n_components=n_components, batch_size=np.min([128,X.shape[0]]))
X = ipca.fit_transform(X)
kmeans = MiniBatchKMeans(n_clusters=represent_points, batch_size=np.min([128,X.shape[0]]),random_state=0).fit(X)
centers = kmeans.cluster_centers_
labels = kmeans.labels_
sv= []
unique_labels = np.unique(labels).size
all_ind = np.arange(N)
for j in range(unique_labels):
X1 = X[labels == j,:]
all_ind_temp = all_ind[labels==j]
tempK = pairwise_distances(X1,np.reshape(centers[j,:],(1,X1.shape[1])))**2
inds = np.argmax(tempK,axis=0)
sv.append(all_ind_temp[inds[0]])
return sv
def renyi_select(self,X,represent_points,do_pca=False):
"""
Takes in data and number of prototype vectors and returns the indices of the prototype vectors.
The prototype vectors are selected based on maximization of quadratic renyi entropy, which can be
written in terms of log sum exp which is a tightly bounded by max operator. Now for rbf kernel,
the max_{ij}(-\|x_i-x_j\|^2) is equivalent to min_{ij}(\|x_i-x_j\|^2).
Parameters
----------
X: np.ndarray
shape = n_samples, n_features
represent_points: int
number of prototype vectors to return
do_pca: boolean
whether to perform incremental pca for dimensionality reduction before selecting prototype vectors
Returns
-------
sv: list
list of the prototype vector indices from the data array given by X
"""
# do_pca = self.do_pca_in_selection
N= X.shape[0]
capacity=represent_points
selectionset=set([])
set_full=set(list(range(N)))
        np.random.seed(1)
from scipy import interpolate
import collections
import numpy as np
import os
import re
import torch
import pylab as plt
import matplotlib.ticker as mtick
import math
import itertools
from tensorboard.backend.event_processing import event_accumulator
def get_run_names(logdir, patterns):
run_names = []
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names += [root]
# print(run_names)
run_names.sort()
return run_names
def get_run_names_events(logdir, patterns):
run_names = {}
for pattern in patterns:
for root, subdirs, files in os.walk(logdir, followlinks=True):
if re.match(pattern, root):
run_names[root] = []
for file in files:
if re.match(r'.*events\.out.*', file):
run_names[root].append(file)
run_names[root] = sorted(run_names[root])
# print(run_names)
return run_names
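# Hedged example (hypothetical directory layout): collect the sorted event
# files of every run directory whose path matches a regex, e.g.
# >>> get_run_names_events('runs', [r'runs/resnet.*'])
# {'runs/resnet_0': ['events.out.tfevents.123.host'], ...}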
def get_data_pth(logdir, run_names, tag_names, batch_size=None):
data = []
for run_name in run_names:
d = {}
logdata = torch.load(run_name + '/log.pth.tar')
for tag_name in tag_names:
if tag_name not in logdata:
continue
js = logdata[tag_name]
d[tag_name] = np.array([[x[j] for x in js]
for j in range(1, 3)])
data += [d]
return data
def get_data_pth_events(logdir, run_names, tag_names, batch_size=None):
data = []
all_points = []
for run_name, events in run_names.items():
d = {}
points = {}
for event in events:
ea = event_accumulator.EventAccumulator(run_name+'/'+event,
size_guidance={ # see below regarding this argument
event_accumulator.COMPRESSED_HISTOGRAMS: 500,
event_accumulator.IMAGES: 4,
event_accumulator.AUDIO: 4,
event_accumulator.SCALARS: 0,
event_accumulator.HISTOGRAMS: 1,
})
ea.Reload()
for tag_name in tag_names:
if tag_name not in ea.Tags()['scalars']:
continue
scalar = ea.Scalars(tag_name)
if tag_name not in d:
d[tag_name] = np.array(
[[dp.step for dp in scalar], [dp.value for dp in scalar]])
points[tag_name] = [len(d[tag_name][0]) - 1]
else:
new_array = np.array([dp.step for dp in scalar])
indexes = new_array > d[tag_name][0][-1]
res1 = np.concatenate((d[tag_name][0], new_array[indexes]))
res2 = np.concatenate((d[tag_name][1], np.array([dp.value for dp in scalar])[indexes]))
points[tag_name].append(len(res2) - 1)
d[tag_name] = (res1, res2)
data += [d]
all_points += [points]
return data, all_points
def plot_smooth(x, y, npts=100, order=3, points=None, vlines=None, *args, **kwargs):
if points is not None:
points = np.array(points, dtype=int)
#plt.plot(x[points], y[points], 'o', )
x_smooth = np.linspace(x.min(), x.max(), npts)
tck = interpolate.splrep(x, y, k=order)
y_smooth = interpolate.splev(x_smooth, tck, der=0)
plt.plot(x_smooth, y_smooth, *args, **kwargs)
plt.ticklabel_format(axis="x", style="sci", scilimits=None)
def plot_smooth_o1(x, y, points=None, vlines=None, *args, **kwargs):
plot_smooth(x, y, 100, 1, points, vlines, *args, **kwargs)
def get_legend(lg_tags, run_name, lg_replace=[]):
lg = ""
for lgt in lg_tags:
res = ".*?($|,)" if ',' not in lgt and '$' not in lgt else ''
mg = re.search(lgt + res, run_name)
if mg:
lg += mg.group(0)
lg = lg.replace('_,', ',')
lg = lg.strip(',')
for a, b in lg_replace:
lg = lg.replace(a, b)
return lg
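# Hedged example (hypothetical run name): pull the "lr0.1" and "bs_128"
# fragments out of a run path and prettify them via lg_replace:
# >>> get_legend(['lr0.1', 'bs'], 'runs/cifar10_lr0.1,bs_128,seed1', [('bs_', 'batch=')])
# 'lr0.1,batch=128'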
class OOMFormatter(mtick.ScalarFormatter):
def __init__(self, useOffset=None, useMathText=None, useLocale=None, acc_bits=None):
super().__init__(useOffset=useOffset, useMathText=useMathText, useLocale=useLocale)
if acc_bits is not None:
self.acc_bits = acc_bits
else:
self.acc_bits = 3
def __call__(self, x, pos=None):
"""
Return the format for tick value *x* at position *pos*.
"""
if len(self.locs) == 0:
return ''
else:
xp = (x - self.offset) / (10. ** self.orderOfMagnitude)
if abs(xp) < 1e-8:
xp = 0
if self._useLocale:
s = locale.format_string(self.format, (xp,))
else:
s = self.format % xp
return self.fix_minus(s)
def _set_format(self):
bits = self.acc_bits
# set the format string to format all the ticklabels
if len(self.locs) < 2:
# Temporarily augment the locations with the axis end points.
_locs = [*self.locs, *self.axis.get_view_interval()]
else:
_locs = self.locs
locs = (np.asarray(_locs) - self.offset) / 10. ** self.orderOfMagnitude
loc_range = np.ptp(locs)
# Curvilinear coordinates can yield two identical points.
if loc_range == 0:
loc_range = np.max(np.abs(locs))
# Both points might be zero.
if loc_range == 0:
loc_range = 1
if len(self.locs) < 2:
# We needed the end points only for the loc_range calculation.
locs = locs[:-2]
loc_range_oom = int(math.floor(math.log10(loc_range)))
# first estimate:
sigfigs = max(0, bits - loc_range_oom)
# refined estimate:
thresh = 10 ** (-bits) * 10 ** (loc_range_oom)
while sigfigs >= 0:
if np.abs(locs - np.round(locs, decimals=sigfigs)).max() < thresh:
sigfigs -= 1
else:
break
sigfigs = bits  # override the estimate above: always format ticks with the configured number of bits
self.format = '%1.' + str(sigfigs) + 'f'
if self._usetex or self._useMathText:
self.format = r'$\mathdefault{%s}$' % self.format
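# Hedged usage sketch (standard matplotlib hookup; `ax` is an assumed Axes):
# ax.yaxis.set_major_formatter(OOMFormatter(useMathText=True, acc_bits=2))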
def plot_tag(data, plot_f, run_names, tag_name, lg_tags, ylim=None, color0=0,
ncolor=None, lg_replace=[], no_title=False, points=None, xlim=None, vlines=None, orders=None, acc_bits=None, markeroff=True):
xlabel = {}
ylabel = {'Tacc': 'Training Accuracy (%)', 'Terror': 'Training Error (%)',
'train/accuracy': 'Training Accuracy (%)',
'Vacc': 'Test Accuracy (%)', 'Verror': 'Test Error (%)',
'valid/accuracy': 'Test Accuracy (%)',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss', 'Vloss': 'Loss', 'lr': 'Learning rate',
'grad_bias': 'Gradient Diff norm',
'est_var': 'Average Variance',
'est_snr': 'Mean SNR',
'nb_error': 'NB Error',
'est_nvar': 'Mean Normalized Variance'}
titles = {'Tacc': 'Training Accuracy', 'Terror': 'Training Error',
'train/accuracy': 'Training Accuracy',
'Vacc': 'Test Accuracy', 'Verror': 'Test Error',
'loss': 'Loss',
'epoch': 'Epoch',
'Tloss': 'Loss on full training set', 'lr': 'Learning rate',
'Vloss': 'Loss on validation set',
'grad_bias': 'Optimization Step Bias',
'nb_error': 'Norm-based Variance Error',
'est_var': 'Optimization Step Variance',
'est_snr': 'Optimization Step SNR',
'est_nvar': 'Optimization Step Normalized Variance (w/o lr)',
}
yscale_log = ['Tloss', 'est_var'] # , 'est_var'
yscale_log_offset= ['Vloss'] # , 'est_var'
yscale_scalar= ['Vloss'] # , 'est_var'
yscale_base = []
# yscale_sci = ['est_bias', 'est_var']
plot_fs = {'Tacc': plot_f, 'Vacc': plot_f,
'Terror': plot_f, 'Verror': plot_f,
'Tloss': plot_f, 'Vloss': plot_f,
}
for k in list(ylabel.keys()):
if k not in xlabel:
xlabel[k] = 'Training Iteration'
if k not in plot_fs:
plot_fs[k] = plot_f
if not isinstance(data, list):
data = [data]
run_names = [run_names]
# color = ['blue', 'orangered', 'darkred', 'darkkhaki', 'darkblue', 'grey']
color = [[0.00784314, 0.24313725, 1.],
[1., 0.48627451, 0.],
[0.10196078, 0.78823529, 0.21960784],
[0.90980392, 0., 0.04313725],
[0.54509804, 0.16862745, 0.88627451]]
color = color[:ncolor]
#style = ['-', '--', ':', '-.']
style = ['-']
styles = ['-']
colors = color
markers = ['o', 'X', 'p', '*', 'd', 'v']
plt.rcParams.update({'font.size': 16})
plt.grid(linewidth=1)
legends = []
# extract run index
indexes = [int(run_names[i].split('/')[-1].split('_')[1])
for i in range(len(run_names))]
s_indexes = np.argsort(indexes)
from os import system
import numpy as np
import scipy.optimize as op
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection, Line3DCollection
from anytree import AnyNode, RenderTree
####################################################################
def concatenateVectors(X,Y):
return np.concatenate((X,Y),axis=1)
####################################################################
def getPlot():
return plt
####################################################################
def clearScreen():
system('cls')
return
####################################################################
def loadData(fileName):
data= np.loadtxt(fileName, delimiter=',',unpack=True,dtype=float)
data=data.T
if (len(data.shape)==1):
data.shape=(data.shape[0],1)
return data
####################################################################
def accurracy(Xy,NewXy):
Xy=np.sort(Xy,axis=0)
NewXy=np.sort(NewXy,axis=0)
Y1=Xy[:,-1]
Y2=NewXy[:,-1]
m=np.mean(np.where(Y1==Y2,1,0))
return m*100
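# Worked example (hypothetical labels): this compares *sorted* label columns,
# i.e. multisets rather than per-row matches. For true labels [0, 1, 1, 0] and
# predictions [1, 0, 1, 1], sorting gives [0, 0, 1, 1] vs [0, 1, 1, 1]; they
# agree in 3 of 4 positions, so the function returns 75.0.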
####################################################################
def SplitTree(X, y,Level=1,Node=AnyNode(id="root",vPredictedClass=-1),ThresholdCount=1):
ri,ci=GetBestSplit(X,y,ThresholdCount)
if( ri!=-1 and ci!=-1):
SplitFeature=ci
SplitValue=X[ri,ci]
#PlotTreeSplit(X,SplitFeature,SplitValue,Level) #Plot While Training
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
s0 = AnyNode(id="Level_"+str(Level)+"_Left("+"X"+str(SplitFeature)+"<"+str(round(SplitValue,1))+")", parent=Node,vLevel=Level,vSplitFeature=SplitFeature,vOp="<",vSplitValue=SplitValue,vSplitSign=-1,vPredictedClass=-1)
s1 = AnyNode(id="Level_"+str(Level)+"_Right("+"X"+str(SplitFeature)+">"+str(round(SplitValue,1))+")", parent=Node,vLevel=Level,vSplitFeature=SplitFeature,vOp=">",vSplitValue=SplitValue,vSplitSign=1,vPredictedClass=-1)
s0=SplitTree(X0,Y0,Level+1,s0,ThresholdCount=ThresholdCount)
s1=SplitTree(X1,Y1,Level+1,s1,ThresholdCount=ThresholdCount)
else:
PredictedClass=0
PredictedClassLen=0
for i in range(int(y.max()+1)):
if (len(y[np.where(y==i)])>PredictedClassLen):
PredictedClass=i
PredictedClassLen=len(y[np.where(y==i)])
Node.vPredictedClass=PredictedClass
return Node
####################################################################
def PredictTree(X,y,Node):
if(len(Node.children)!=0):
SplitFeature=Node.children[0].vSplitFeature
SplitValue=Node.children[0].vSplitValue
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
newX1,newY1=PredictTree(X0,Y0,Node.children[0])
newX2,newY2=PredictTree(X1,Y1,Node.children[1])
newX= np.concatenate((newX1,newX2),axis=0)
newY=np.concatenate((newY1,newY2),axis=0)
else:
newX=X
for i in range(len(y)):
y[i]=Node.vPredictedClass
newY=y
return newX,newY
####################################################################
def PruneTree(X,y,Node,ThresholdCount):
if(len(Node.children)!=0):
SplitFeature=Node.children[0].vSplitFeature
SplitValue=Node.children[0].vSplitValue
X0=X[np.where(X[:,SplitFeature]<=SplitValue)]
Y0=y[np.where(X[:,SplitFeature]<=SplitValue)]
X1=X[np.where(X[:,SplitFeature]>SplitValue)]
Y1=y[np.where(X[:,SplitFeature]>SplitValue)]
if (X0.shape[0]<ThresholdCount or X1.shape[0]<ThresholdCount):
Node.children=[]
PredictedClass=0
PredictedClassLen=0
for i in range(int(y.max()+1)):
if (len(y[np.where(y==i)])>PredictedClassLen):
PredictedClass=i
PredictedClassLen=len(y[np.where(y==i)])
Node.vPredictedClass=PredictedClass
else:
PruneTree(X0,Y0,Node.children[0],ThresholdCount)
PruneTree(X1,Y1,Node.children[1],ThresholdCount)
return Node
####################################################################
def GetBestSplit(X,y,ThresholdCount):
ri=0
ci=0
for i in range(int(y.max()+1)):
if(len(y[np.where(y==i)])==len(y)):
ri=-1
ci=-1
if(X.shape[0]<=ThresholdCount):
ri=-1
ci=-1
if(ri!=-1 and ci!=-1):
G=np.zeros((X.shape))
for ri in range(G.shape[0]):
for ci in range(G.shape[1]):
G[ri,ci]=GetGiniScore(X,y,ri,ci)
ri=np.unravel_index(np.argmax(G, axis=None), G.shape)[0]
ci=np.unravel_index(np.argmax(G, axis=None), G.shape)[1]
return ri,ci
####################################################################
def GetGiniScore(X,y,ri,ci):
G0=0
G1=0
Y0=y[np.where(X[:,ci]<=X[ri,ci])]
Y1=y[np.where(X[:,ci]>X[ri,ci])]
if (len(Y0)!=0):
for i in range(int(y.max()+1)):
P=len(Y0[np.where(Y0==i)])/len(Y0)
G0=G0+P*P
if (len(Y1)!=0):
for i in range(int(y.max()+1)):
P=len(Y1[np.where(Y1==i)])/len(Y1)
G1=G1+P*P
G_Score=(len(Y0)/len(y)) * G0 + (len(Y1)/len(y)) * G1
return G_Score
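# Worked example (hypothetical split): for y = [0, 0, 1, 1], a split that sends
# Y0 = [0, 0] left and Y1 = [1, 1] right gives G0 = 1^2 = 1 and G1 = 1^2 = 1,
# so G_Score = (2/4)*1 + (2/4)*1 = 1.0 (a pure split). A useless split with
# Y0 = Y1 = [0, 1] gives G0 = G1 = 0.5^2 + 0.5^2 = 0.5 and G_Score = 0.5.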
####################################################################
def PlotTreeSplit(ax,X,SplitFeature,SplitValue,Level):
x_min, x_max = X[:, 0].min() , X[:, 0].max()
y_min, y_max = X[:, 1].min() , X[:, 1].max()
z_min, z_max = X[:, 2].min() , X[:, 2].max()
u = np.linspace(x_min, x_max, 2)
v = np.linspace(y_min, y_max, 2)
w = np.linspace(z_min, z_max, 2)
if (SplitFeature==0):
u = np.zeros(( len(v), len(w) ))
V,W=np.meshgrid(v,w)
for i in range(len(v)):
for j in range(len(w)):
u[i,j] =SplitValue
U = np.transpose(u)
if (SplitFeature==1):
v = np.zeros(( len(u), len(w) ))
U,W=np.meshgrid(u,w)
for i in range(len(u)):
for j in range(len(w)):
v[i,j] =SplitValue
V = np.transpose(v)
if (SplitFeature==2):
w = np.zeros(( len(u), len(v) ))
U,V=np.meshgrid(u,v)
for i in range(len(u)):
for j in range(len(v)):
w[i,j] =SplitValue
W = np.transpose(w)
ax.plot_surface(U,V,W,alpha=0.6,zorder=5)
ax.text(U[0][0], V[0][0], W[0][0], Level, color='red')
return
####################################################################
def PlotTree(ax,X,y,Node):
if(Node.id=="root"):
ax.scatter(X[np.where(y==0),0],X[np.where(y==0),1],X[np.where(y==0),2])
import logging
import numpy
from bmipy import Bmi
from grpc_status import rpc_status
from google.protobuf import any_pb2
from google.rpc import code_pb2, status_pb2, error_details_pb2
import traceback
from grpc4bmi.reserve import reserve_values, reserve_grid_shape, reserve_grid_nodes, reserve_grid_padding, \
reserve_values_at_indices
from . import bmi_pb2, bmi_pb2_grpc
log = logging.getLogger(__name__)
class BmiServer(bmi_pb2_grpc.BmiServiceServicer):
"""
BMI Server class, wrapping an existing python implementation and exposing it via GRPC across the memory space (to
listening client processes). The class takes a package, module and class name and instantiates the BMI
implementation by assuming a default constructor with no arguments.
Args:
model: Bmi model object which must be wrapped by grpc
debug: If true then returns stacktrace in an error response.
The stacktrace is returned in the trailing metadata as a DebugInfo (https://github.com/googleapis/googleapis/blob/07244bb797ddd6e0c1c15b02b4467a9a5729299f/google/rpc/error_details.proto#L46-L52) message.
"""
def __init__(self, model, debug=False):
# type: (BmiServer, Bmi, bool) -> None
super(bmi_pb2_grpc.BmiServiceServicer, self).__init__()
self.bmi_model_ = model
self.debug = debug
def exception_handler(self, exc, context):
log.exception(exc)
detail = any_pb2.Any()
if self.debug:
detail.Pack(
error_details_pb2.DebugInfo(
stack_entries=traceback.format_stack(),
detail=repr(exc)
)
)
status = status_pb2.Status(
code=code_pb2.INTERNAL,
message=str(exc),
details=[detail]
)
context.abort_with_status(rpc_status.to_status(status))
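# Hedged serving sketch (assumes the standard gRPC codegen registration helper
# and a concrete Bmi implementation `MyBmiModel`; names are illustrative):
# import grpc
# from concurrent import futures
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
# bmi_pb2_grpc.add_BmiServiceServicer_to_server(BmiServer(MyBmiModel(), debug=True), server)
# server.add_insecure_port('[::]:50051')
# server.start()
# server.wait_for_termination()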
def initialize(self, request, context):
ifile = str(request.config_file)
if not ifile:
ifile = None
try:
self.bmi_model_.initialize(ifile)
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def update(self, request, context):
try:
self.bmi_model_.update()
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def updateUntil(self, request, context):
try:
self.bmi_model_.update_until(request.time)
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def finalize(self, request, context):
try:
self.bmi_model_.finalize()
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def getComponentName(self, request, context):
try:
return bmi_pb2.GetComponentNameResponse(name=self.bmi_model_.get_component_name())
except Exception as e:
self.exception_handler(e, context)
def getInputItemCount(self, request, context):
try:
return bmi_pb2.GetCountResponse(count=self.bmi_model_.get_input_item_count())
except Exception as e:
self.exception_handler(e, context)
def getOutputItemCount(self, request, context):
try:
return bmi_pb2.GetCountResponse(count=self.bmi_model_.get_output_item_count())
except Exception as e:
self.exception_handler(e, context)
def getInputVarNames(self, request, context):
try:
return bmi_pb2.GetVarNamesResponse(names=self.bmi_model_.get_input_var_names())
except Exception as e:
self.exception_handler(e, context)
def getOutputVarNames(self, request, context):
try:
return bmi_pb2.GetVarNamesResponse(names=self.bmi_model_.get_output_var_names())
except Exception as e:
self.exception_handler(e, context)
def getTimeUnits(self, request, context):
try:
return bmi_pb2.GetTimeUnitsResponse(units=self.bmi_model_.get_time_units())
except Exception as e:
self.exception_handler(e, context)
def getTimeStep(self, request, context):
try:
return bmi_pb2.GetTimeStepResponse(interval=self.bmi_model_.get_time_step())
except Exception as e:
self.exception_handler(e, context)
def getCurrentTime(self, request, context):
try:
return bmi_pb2.GetTimeResponse(time=self.bmi_model_.get_current_time())
except Exception as e:
self.exception_handler(e, context)
def getStartTime(self, request, context):
try:
return bmi_pb2.GetTimeResponse(time=self.bmi_model_.get_start_time())
except Exception as e:
self.exception_handler(e, context)
def getEndTime(self, request, context):
try:
return bmi_pb2.GetTimeResponse(time=self.bmi_model_.get_end_time())
except Exception as e:
self.exception_handler(e, context)
def getVarGrid(self, request, context):
try:
return bmi_pb2.GetVarGridResponse(grid_id=self.bmi_model_.get_var_grid(request.name))
except Exception as e:
self.exception_handler(e, context)
def getVarType(self, request, context):
try:
return bmi_pb2.GetVarTypeResponse(type=self.bmi_model_.get_var_type(request.name))
except Exception as e:
self.exception_handler(e, context)
def getVarItemSize(self, request, context):
try:
return bmi_pb2.GetVarItemSizeResponse(size=self.bmi_model_.get_var_itemsize(request.name))
except Exception as e:
self.exception_handler(e, context)
def getVarUnits(self, request, context):
try:
return bmi_pb2.GetVarUnitsResponse(units=self.bmi_model_.get_var_units(request.name))
except Exception as e:
self.exception_handler(e, context)
def getVarNBytes(self, request, context):
try:
return bmi_pb2.GetVarNBytesResponse(nbytes=self.bmi_model_.get_var_nbytes(request.name))
except Exception as e:
self.exception_handler(e, context)
def getVarLocation(self, request, context):
try:
location_name = self.bmi_model_.get_var_location(request.name)
location = bmi_pb2.GetVarLocationResponse.Location.Value(location_name.upper())
return bmi_pb2.GetVarLocationResponse(location=location)
except Exception as e:
self.exception_handler(e, context)
def getValue(self, request, context):
try:
values = reserve_values(self.bmi_model_, request.name)
values = self.bmi_model_.get_value(request.name, values)
if values.dtype in (numpy.int64, numpy.int32, numpy.int16):
return bmi_pb2.GetValueResponse(values_int=bmi_pb2.IntArrayMessage(values=values.flatten()))
if values.dtype in (numpy.float32, numpy.float16):
return bmi_pb2.GetValueResponse(values_float=bmi_pb2.FloatArrayMessage(values=values.flatten()))
if values.dtype == numpy.float64:
return bmi_pb2.GetValueResponse(values_double=bmi_pb2.DoubleArrayMessage(values=values.flatten()))
raise NotImplementedError("Arrays with type %s cannot be transmitted through this GRPC channel" % values.dtype)
except Exception as e:
self.exception_handler(e, context)
def getValuePtr(self, request, context):
raise NotImplementedError("Array references cannot be transmitted through this GRPC channel")
def getValueAtIndices(self, request, context):
try:
indices = numpy.array(request.indices)
values = reserve_values_at_indices(self.bmi_model_, request.name, indices)
values = self.bmi_model_.get_value_at_indices(request.name, values, indices)
if values.dtype in (numpy.int64, numpy.int32, numpy.int16):
return bmi_pb2.GetValueAtIndicesResponse(values_int=bmi_pb2.IntArrayMessage(values=values.flatten()))
if values.dtype in (numpy.float32, numpy.float16):
return bmi_pb2.GetValueAtIndicesResponse(values_float=bmi_pb2.FloatArrayMessage(values=values.flatten()))
if values.dtype == numpy.float64:
return bmi_pb2.GetValueAtIndicesResponse(values_double=bmi_pb2.DoubleArrayMessage(values=values.flatten()))
raise NotImplementedError("Arrays with type %s cannot be transmitted through this GRPC channel" % values.dtype)
except Exception as e:
self.exception_handler(e, context)
def setValue(self, request, context):
try:
if request.HasField("values_int"):
array = numpy.array(request.values_int.values, dtype=numpy.int64)
self.bmi_model_.set_value(request.name, array)
if request.HasField("values_float"):
array = numpy.array(request.values_float.values, dtype=numpy.float32)
self.bmi_model_.set_value(request.name, array)
if request.HasField("values_double"):
array = numpy.array(request.values_double.values, dtype=numpy.float64)
self.bmi_model_.set_value(request.name, array)
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def setValueAtIndices(self, request, context):
try:
index_array = numpy.array(request.indices)
if request.HasField("values_int"):
array = numpy.array(request.values_int.values, dtype=numpy.int64)
self.bmi_model_.set_value_at_indices(request.name, index_array, array)
if request.HasField("values_float"):
array = numpy.array(request.values_float.values, dtype=numpy.float32)
self.bmi_model_.set_value_at_indices(request.name, index_array, array)
if request.HasField("values_double"):
array = numpy.array(request.values_double.values, dtype=numpy.float64)
self.bmi_model_.set_value_at_indices(request.name, index_array, array)
return bmi_pb2.Empty()
except Exception as e:
self.exception_handler(e, context)
def getGridSize(self, request, context):
try:
return bmi_pb2.GetGridSizeResponse(size=self.bmi_model_.get_grid_size(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridRank(self, request, context):
try:
return bmi_pb2.GetGridRankResponse(rank=self.bmi_model_.get_grid_rank(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridType(self, request, context):
try:
return bmi_pb2.GetGridTypeResponse(type=self.bmi_model_.get_grid_type(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridShape(self, request, context):
try:
values = reserve_grid_shape(self.bmi_model_, request.grid_id)
return bmi_pb2.GetGridShapeResponse(shape=self.bmi_model_.get_grid_shape(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridSpacing(self, request, context):
try:
values = reserve_grid_padding(self.bmi_model_, request.grid_id)
return bmi_pb2.GetGridSpacingResponse(spacing=self.bmi_model_.get_grid_spacing(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridOrigin(self, request, context):
try:
values = reserve_grid_padding(self.bmi_model_, request.grid_id)
return bmi_pb2.GetGridOriginResponse(origin=self.bmi_model_.get_grid_origin(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridX(self, request, context):
try:
values = reserve_grid_nodes(self.bmi_model_, request.grid_id, 0)
return bmi_pb2.GetGridPointsResponse(coordinates=self.bmi_model_.get_grid_x(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridY(self, request, context):
try:
values = reserve_grid_nodes(self.bmi_model_, request.grid_id, 1)
return bmi_pb2.GetGridPointsResponse(coordinates=self.bmi_model_.get_grid_y(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridZ(self, request, context):
try:
values = reserve_grid_nodes(self.bmi_model_, request.grid_id, 2)
return bmi_pb2.GetGridPointsResponse(coordinates=self.bmi_model_.get_grid_z(request.grid_id, values))
except Exception as e:
self.exception_handler(e, context)
def getGridNodeCount(self, request, context):
try:
return bmi_pb2.GetCountResponse(count=self.bmi_model_.get_grid_node_count(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridEdgeCount(self, request, context):
try:
return bmi_pb2.GetCountResponse(count=self.bmi_model_.get_grid_edge_count(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridFaceCount(self, request, context):
try:
return bmi_pb2.GetCountResponse(count=self.bmi_model_.get_grid_face_count(request.grid_id))
except Exception as e:
self.exception_handler(e, context)
def getGridEdgeNodes(self, request, context):
try:
size = 2 * self.bmi_model_.get_grid_edge_count(request.grid_id)
links = numpy.empty(size, dtype=numpy.int64)
import sys
import numpy as np
import os
import time
from CNA import Utilities
sys.path.append("../../")
class patterns():
def __init__(self, frame = 0, System = None, Pattern_Input = None, MasterKey = None):
tick = time.time()
self.frame = frame
self.System = System
self.Pattern_Input = Pattern_Input
if self.System is not None:
self.filename = System['base_dir']+System['movie_file_name']
self.npz_dir = System['base_dir'] + 'CNA_npz'
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\n')
f.write(' # '*50)
f.write('\nComputing CNA patterns for frame %s.\n'%frame)
f.close()
else:
self.filename = 'movie.xyz'
if not os.path.isfile(self.filename):
print('Could not find a suitable file to examine.')
self.script_path = os.path.dirname(os.path.realpath(__file__))+'/'
self.cwd = os.getcwd()
if MasterKey is None:
self.MasterKey = Utilities.CNA_Masterkey().Key()
else:
self.MasterKey = MasterKey
if self.Pattern_Input['APPEND_DICTIONARY'] is True:
os.chdir(self.script_path)
os.chdir('../')
self.Temp_Dict = np.load(
'CNA_npz/pattern_dictionary.npz',
allow_pickle=True)
self.Pattern_Dict = {}
for file in self.Temp_Dict.files:
self.Pattern_Dict[file] = self.Temp_Dict[file]
elif (self.Pattern_Input['NEW_DICTIONARY'] is True):
self.Pattern_Dict = {}
self.Pattern_Dict = self.pattern_dictionary_maker()
self.dictionary_saver()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write("\nGenerating CNA Patterns took %.3f seconds.\n" %(time.time()-tick))
os.chdir(self.cwd)
def run(self):
Info = self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)]
Pats = np.zeros(len(Info))
for i, atom in enumerate(Info):
for j, val in enumerate(atom):
if val:
Pats[i] = j+1
return Pats
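# Worked example (hypothetical row): for a boolean pattern row
# [False, True, False] the atom gets pattern index 2 (1-based); if several
# columns are True the last one wins, and an all-False row stays 0
# ("no known pattern").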
def pattern_CNA_Reader(self):
"""
Armand
Formatting from the npz files, gives the cna patterns found and prints them.
This isnt meant to be run normally, add it within the filename loop when you
want to inspect a FEW files. Doesn't use the masterkey, so prepare to have a
LOT of data printed at you at once.
"""
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.System['movie_file_name'][:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nTypes of CNA bonds found with each atom:\n')
f.close()
for i in range(len(self.CNA_arrays['signature_cna_count'])):
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\n%d Atoms had CNA patterns (no: %d)\n'%(self.CNA_arrays['signature_cna_count'][i], i))
f.close()
non_zero_values=np.nonzero(self.CNA_arrays['signature_cna'][i])
for j in range(len(non_zero_values[0])):
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\n%s on %s of its bonds.\n' %(self.MasterKey[non_zero_values[0][j]],
self.CNA_arrays['signature_cna'][i][non_zero_values[0][j]]))
f.close()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nCoordination number: %s\n'%np.sum(self.CNA_arrays['signature_cna'][i]))
f.close()
def cna_pattern_master_key_maker(self):
"""
Armand
This function creates a new cna pattern masterkey by running through ALL
files within xyz_dir. This is meant for studying all cna patterns with the
variable SAVING_XYZ == True, not for Support Vector Clustering.
"""
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.System['movie_file_name'][:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
self.cna_patterns=[]
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write("\nCreating the CNA pattern master key...\n")
f.close()
#Looping over the atoms
for i in range(len(self.CNA_arrays['signature_cna_count'])):
#Creating the atomistic pattern list
self.atom_pattern=[]
#Finding the non zero CNA signatures, and looping over them
non_zero_values = np.nonzero(self.CNA_arrays['signature_cna'][i])
for j in range(len(non_zero_values[0])):
#Retrieving the CNA signature from the Master Key
cna_sign = self.MasterKey[non_zero_values[0][j]]
#Counting them
count = self.CNA_arrays['signature_cna'][i][non_zero_values[0][j]]
#Appending the tuples found within the list
self.atom_pattern.append((cna_sign,count))
#Checking if the atomic pattern is in the cna_pattern_masterkey
if self.atom_pattern not in self.cna_patterns:
self.cna_patterns.append(self.atom_pattern)
#Ordering the pattern masterkey by the Coordination Number
self.cna_pattern_array = np.asarray(self.cna_patterns)
self.cna_pattern_master_key=np.copy(self.cna_pattern_array)
a=[]
for i in range(len(self.cna_pattern_array)):
a.append(np.sum(self.cna_pattern_array[i],axis=0)[1])
l=np.asarray(a).argsort()[::-1]
for i in range(len(l)):
self.cna_pattern_master_key[i]=self.cna_pattern_array[l[i]]
#returning the pattern masterkey
return self.cna_pattern_master_key
def pattern_dictionary_maker(self):
"""
Armand
This is where the magic happens. The function first asks for a new MasterKey
or receives one from memory. The function goes over all files within xyz_dir,
and uses the npz files in npz_dir to find all of the atoms whose patterns
are in MasterKey.
"""
#READING THE MASTERKEY FROM PATTERN DICTIONARY NPZ
if (self.Pattern_Input['FROM_MEMORY'] is True):
self.Pattern_Dict['masterkey'] = np.load(
self.System['base_dir']
+ self.System['npz_dir']
+ 'pattern_dictionary.npz',
allow_pickle = True)['masterkey']
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nKey CNA Patterns found in memory:\n')
f.close()
#CREATING A NEW MASTERKEY
elif (self.Pattern_Input['FROM_MEMORY'] is False):
#USING THE MASTERKEY FOR SUPPORT VECTOR CLUSTERING
if (self.Pattern_Input['BULK_MASTERKEY'] is True):
self.Pattern_Dict = Utilities.Bulk_Masterkey(self.Pattern_Dict).Key()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nUsing bulk pattern dictionary from Utilities.\n')
#CREATING A NEW MASTERKEY FROM ALL PATTERNS FOUND WITHIN THE THE XYZ_DIR
if(self.Pattern_Input['BULK_MASTERKEY'] is False):
self.Pattern_Dict['masterkey'] = self.cna_pattern_master_key_maker()
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nFound key CNA Patterns:\n')
#printing it
for key in self.Pattern_Dict['masterkey']:
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a+') as f:
f.write('\n')
f.write('\t'.join(str(item) for item in key))
f.write('\n')
f.close()
#Looping over all files again
with open(self.System['base_dir'] + 'CNA_Pattern_Info.txt', 'a') as f:
f.write('\nCalculating CNA Patterns of: '+self.System['movie_file_name']+'\n')
f.write('\n Reading CNA arrays from:\n' + self.npz_dir)
f.close()
self.CNA_arrays=np.load(self.npz_dir+'/CNA_'+self.System['movie_file_name'][:-4]+'-'+str(self.frame)+'.npz', allow_pickle=True)
#pattern_CNA_Reader(arrays_filename, MasterKey)
#Loading the CNA arrays
self.Pattern_Dict[self.System['movie_file_name'][:-4]+'-'+str(self.frame)] = np.zeros(
(len(self.CNA_arrays['particle_cnas']),
len(self.Pattern_Dict['masterkey'])),dtype=bool)
#Looping over the atoms
for i in range(len(self.CNA_arrays['particle_cnas'])):
#Creating the atomistic pattern list
self.atom_pattern=[]
#Finding the non zero CNA signatures, and looping over them
self.non_zero_values = np.nonzero(self.CNA_arrays['particle_cnas'][i])
import cv2
import sys
import os
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from time import sleep
from keras.models import load_model
from scipy import stats
from collections import Counter
class EmotionFacePredictor():
'''
Class for handling model building and new data classification
'''
def __init__(self, home, cv2_path, model_path):
self.home = home # where script lives
self.cv2_path = cv2_path # where face processing files can be found (from cv2)
self.cascade_file = self.cv2_path+'haarcascade_frontalface_alt.xml'
self.model_path = model_path
self.emo_dict = {0:'Angry', 1: 'Fear', 2:'Happy', 3: 'Sad', 4:'Surprise', 5: 'Neutral'} # new dict of output labels
self.x_range = list(range(6))
self.emo_list = list(self.emo_dict.values()) # labels
def run_setup(self):
self.load_model()
self.load_face_cascade()
# plt.ion()
self.best_model._make_predict_function()
def load_model(self):
if os.path.exists(self.model_path):
self.best_model = load_model(self.model_path)
else:
print(f'Model not found check path:\n{self.model_path}')
def load_face_cascade(self):
if os.path.exists(self.cascade_file):
self.faceCascade = cv2.CascadeClassifier(self.cascade_file)
else:
print(f'Model not found check path:\n{self.cascade_file}')
def classify_faces_image(self, img):
self.img = cv2.imread(img)
self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) # convert img to grayscale
faces = self.faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30),
flags=cv2.CASCADE_SCALE_IMAGE
)
print(f'Found {len(faces)} faces')
if len(faces)>0:
# Create array to average responses
face_paths = []
df_probas = []
df_predict = []
cnt = 1
# Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(self.gray, (x, y), (x+w, y+h), (0, 255, 0), 2)
self.sub_face = self.gray[y:y+h, x:x+w]
sb2 = cv2.resize(self.sub_face, (48, 48))
sb3 = np.expand_dims(sb2, axis=3)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Utility functions models code
"""
import numpy as np
import numpy.lib.recfunctions as nprf
from six import integer_types
from six.moves import range
from sm2.compat.python import asstr2
from sm2.tools.linalg import pinv_extended, nan_dot, chain_dot # noqa:F841
from sm2.tools.data import _is_using_pandas, _is_recarray
from sm2.base.naming import make_dictnames as _make_dictnames
def not_ported(name, used=False, tested=False, msg=None, sandbox=False):
if msg is None:
msg = "{name} not ported from upstream".format(name=name)
if sandbox:
msg += ", as it is used only in neglected sandbox/example files."
elif not used and not tested:
msg += ", as it is neither used nor tested there."
elif not used:
msg += ", as it is not used there."
def func(*args, **kwargs): # pragma: no cover
# TODO: Maybe make a NotPortedError?
raise NotImplementedError(msg)
func.__name__ = name
return func
drop_missing = not_ported("drop_missing")
recipr0 = not_ported("recipr0")
unsqueeze = not_ported("unsqueeze", used=1, tested=False)
_ensure_2d = not_ported("_ensure_2d", tested=False, sandbox=1)
# TODO: needs to better preserve dtype and be more flexible
# ie., if you still have a string variable in your array you don't
# want to cast it to float
# TODO: add name validator (ie., bad names for datasets.grunfeld)
def categorical(data, col=None, dictnames=False, drop=False, ):
"""
Returns a dummy matrix given an array of categorical variables.
Parameters
----------
data : array
A structured array, recarray, or array. This can be either
a 1d vector of the categorical variable or a 2d array with
the column specifying the categorical variable specified by the col
argument.
col : 'string', int, or None
If data is a structured array or a recarray, `col` can be a string
that is the name of the column that contains the variable. For all
arrays `col` can be an int that is the (zero-based) column index
number. `col` can only be None for a 1d array. The default is None.
dictnames : bool, optional
If True, a dictionary mapping the column number to the categorical
name is returned. Used to have information about plain arrays.
drop : bool
Whether or not to keep the categorical variable in the returned matrix.
Returns
--------
dummy_matrix, [dictnames, optional]
A matrix of dummy (indicator/binary) float variables for the
categorical data. If dictnames is True, then the dictionary
is returned as well.
Notes
-----
This returns a dummy variable for EVERY distinct variable. If
a structured or recarray is provided, the names for the new variables are the
old variable name - underscore - category name. So if the variable
'vote' had answers as 'yes' or 'no' then the returned array would have two
new variables -- 'vote_yes' and 'vote_no'. There is currently
no name checking.
Examples
--------
>>> import numpy as np
>>> import sm2.api as sm
Univariate examples
>>> import string
>>> string_var = [string.ascii_lowercase[0:5], \
string.ascii_lowercase[5:10], \
string.ascii_lowercase[10:15], \
string.ascii_lowercase[15:20], \
string.ascii_lowercase[20:25]]
>>> string_var *= 5
>>> string_var = np.asarray(sorted(string_var))
>>> design = sm.tools.categorical(string_var, drop=True)
Or for a numerical categorical variable
>>> instr = np.floor(np.arange(10,60, step=2)/10)
>>> design = sm.tools.categorical(instr, drop=True)
With a structured array
>>> num = np.random.randn(25,2)
>>> struct_ar = np.zeros((25,1), dtype=[('var1', 'f4'),('var2', 'f4'), \
('instrument','f4'),('str_instr','a5')])
>>> struct_ar['var1'] = num[:,0][:,None]
>>> struct_ar['var2'] = num[:,1][:,None]
>>> struct_ar['instrument'] = instr[:,None]
>>> struct_ar['str_instr'] = string_var[:,None]
>>> design = sm.tools.categorical(struct_ar, col='instrument', drop=True)
Or
>>> design2 = sm.tools.categorical(struct_ar, col='str_instr', drop=True)
"""
if isinstance(col, (list, tuple)):
if len(col) != 1: # pragma: no cover
raise ValueError("Can only convert one column at a time")
col = col[0]
# TODO: add a NameValidator function
# catch recarrays and structured arrays
if data.dtype.names or data.__class__ is np.recarray:
if not col and np.squeeze(data).ndim > 1: # pragma: no cover
raise IndexError("col is None and the input array is not 1d")
if isinstance(col, integer_types):
col = data.dtype.names[col]
if col is None and data.dtype.names and len(data.dtype.names) == 1:
col = data.dtype.names[0]
tmp_arr = np.unique(data[col])
# if the cols are shape (#,) vs (#,1) need to add an axis and flip
_swap = True
if data[col].ndim == 1:
tmp_arr = tmp_arr[:, None]
_swap = False
tmp_dummy = (tmp_arr == data[col]).astype(float)
if _swap:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
if not tmp_arr.dtype.names: # TODO: how do we get to this code path?
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr)]
elif tmp_arr.dtype.names:
tmp_arr = [asstr2(item) for item in np.squeeze(tmp_arr.tolist())]
# prepend the varname and underscore, if col is numeric attribute
# lookup is lost for recarrays...
if col is None:
try:
col = data.dtype.names[0]
except (AttributeError, TypeError, IndexError):
col = 'var'
# TODO: the above needs to be made robust because there could be many
# var_yes, var_no variables for instance.
tmp_arr = [col + '_' + item for item in tmp_arr]
# TODO: test this for rec and structured arrays!!!
if drop is True:
if len(data.dtype) <= 1:
if tmp_dummy.shape[0] < tmp_dummy.shape[1]:
tmp_dummy = np.squeeze(tmp_dummy).swapaxes(1, 0)
dt = list(zip(tmp_arr, [tmp_dummy.dtype.str] * len(tmp_arr)))
# preserve array type
return np.array(list(map(tuple, tmp_dummy.tolist())),
dtype=dt).view(type(data))
data = nprf.drop_fields(data, col, usemask=False,
asrecarray=type(data) is np.recarray)
data = nprf.append_fields(data, tmp_arr, data=tmp_dummy,
usemask=False,
asrecarray=type(data) is np.recarray)
return data
# handle ndarrays and catch array-like for an error
elif data.__class__ is np.ndarray or not isinstance(data, np.ndarray):
# TODO: Do we not allow subclasses of ndarray? why not just isinstance?
if not isinstance(data, np.ndarray): # pragma: no cover
# TODO: WTF isnt the error message the exact opposite of correct?
raise NotImplementedError("Array-like objects are not supported")
if isinstance(col, integer_types):
offset = data.shape[1] # need error catching here?
tmp_arr = np.unique(data[:, col])
tmp_dummy = (tmp_arr[:, np.newaxis] == data[:, col]).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
offset -= 1
data = np.delete(data, col, axis=1).astype(float)
data = np.column_stack((data, tmp_dummy))
if dictnames is True:
col_map = _make_dictnames(tmp_arr, offset)
return data, col_map
return data
elif col is None and np.squeeze(data).ndim == 1:
tmp_arr = np.unique(data)
tmp_dummy = (tmp_arr[:, None] == data).astype(float)
tmp_dummy = tmp_dummy.swapaxes(1, 0)
if drop is True:
if dictnames is True:
col_map = _make_dictnames(tmp_arr)
return tmp_dummy, col_map
return tmp_dummy
else:
data = np.column_stack((data, tmp_dummy))
import salome
import SMESH
from salome.geom import geomBuilder
from salome.smesh import smeshBuilder
import sys
import math
import numpy as np
from numpy.linalg import norm
from numpy.random import uniform
from pathlib import Path
from auxiliaryFunctions import clusteringAlgorithm
from auxiliaryFunctions import getTranslationalRiskAngleRefAxis
from itertools import product
import os
salome.salome_init()
geompy = geomBuilder.New()
smesh = smeshBuilder.New()
def smallestLineOnFace(face):
bndVertices_Slm = geompy.ExtractShapes(face, geompy.ShapeType["VERTEX"], True)
indexList = [(0,1), (0,2), (0,3)]
distances = [geompy.MinDistance(bndVertices_Slm[i], bndVertices_Slm[j]) for i,j in indexList]
index = distances.index(min(distances))
p1 = bndVertices_Slm[indexList[index][0]]
p2 = bndVertices_Slm[indexList[index][1]]
line = geompy.MakeLineTwoPnt(p1,p2)
return line
class Line:
def __init__(self, Point1, Point2):
self.origin = Point1
self.dest = Point2
v1 = geompy.MakeVertex(*list(Point1))
v2 = geompy.MakeVertex(*list(Point2))
self.geom = geompy.MakeLineTwoPnt(v1, v2)
def addToStudy(self, name = 'Line'):
geompy.addToStudy(self.geom, name)
def extendLine(self, multiplier):
self.geom = geompy.ExtendEdge(self.geom, 0, multiplier)
# Obtain the Salome vertexes: New Entity-Explode-SubShapes Selection
[v1, v2] = geompy.ExtractShapes(self.geom, geompy.ShapeType["VERTEX"], True)
v1coords = geompy.PointCoordinates(v1)
v2coords = geompy.PointCoordinates(v2)
self.dest = v2coords if np.allclose(v1coords, self.origin) else v1coords
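# Hedged usage sketch (hypothetical coordinates): build a unit line along x
# and extend it; `dest` is updated to the new far endpoint.
# line = Line((0, 0, 0), (1, 0, 0))
# line.addToStudy('Line_x')
# line.extendLine(2.0)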
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used for validate random distributions layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
if isinstance(model, str):
model = coremltools.models.MLModel(model)
if not isinstance(model, coremltools.models.MLModel):
model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
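# Hand check (denominator convention hedged): with local_size = 1 the
# normalization window contributes a single x^2 = 1 term, so the LRN
# denominator is (k + alpha)^beta = (8 + 2)^3 = 1000 and each output is 1e-3,
# matching `expected`.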
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
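# Grouped case (illustrative): groups=4 splits the 16 input channels into
# 4 groups, and W's last dimension is output_channels / groups =
# 20 / 4 = 5; the spatial arithmetic matches the test above,
# (15 - 1) * 2 + 3 - 5 = 26.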
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (1, 10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
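# set_multiarray_ndshape_range marks 'data' as shape-flexible: each
# dimension may vary between lower_bounds and upper_bounds, where -1 is
# treated as unbounded, so every shape in the list above (including
# (60, 5)) is admissible.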
def test_shape_flexibility_enumeration(self, rank=4):
default_shape = tuple(np.random.randint(1, 15, size=rank))
input_features = [('data', datatypes.Array(*default_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features=input_features,
output_features=[('output', None)],
disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [tuple(np.random.randint(1, 15, size=rank)),
tuple(np.random.randint(1, 15, size=rank))]
flexible_shape_utils.add_multiarray_ndshape_enumeration(
spec, feature_name='data', enumerated_shapes=shapes)
shapes.append(default_shape)
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration_rank3(self):
self.test_shape_flexibility_enumeration(rank=3)
def test_shape_flexibility_enumeration_rank2(self):
self.test_shape_flexibility_enumeration(rank=2)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_dynamic_weight_conv(self):
input_dim = (1, 3, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (4, 3, 3, 3)
output_dim = (1, 4, 14, 14)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('input', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='two_input_conv_layer',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
input_name=['input', 'weight'],
output_name='output')
# Setting everything to ones exercises the execution path and surfaces
# engine failures, but it is not a complete numerical check.
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'input': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)
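# Numeric check (illustrative): with all-ones input and weights, each
# valid-convolution output element is kernel_channels * 3 * 3 = 27, and
# the spatial size is 16 - 3 + 1 = 14, i.e. output (1, 4, 14, 14).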
@pytest.mark.xfail
def test_dynamic_weight_deconv(self):
# Expect to fail in Core ML 3
input_dim = (1, 1, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (1, 1, 3, 3)
output_dim = (1, 1, 18, 18)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('data', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='deconv',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
is_deconv=True,
input_name=['data', 'weight'],
output_name='output')
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'data': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected)
def test_batched_mat_mul_cpu(self, cpu_only=True):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input_ = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input_, expected, useCPUOnly=cpu_only,
output_name_shape_dict=shape_dict)
self.assertEqual(len(outShape), builder._get_rank('output'))
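# The cases above follow numpy.matmul semantics: 1-D operands are
# promoted (the dot product of two (10,) vectors is reported as (1, 1)),
# and leading batch dimensions broadcast, e.g.
# (1, 3, 1, 2, 3) x (1, 4, 3, 2) -> (1, 3, 4, 2, 2).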
def test_batched_mat_mul_gpu(self):
self.test_batched_mat_mul_cpu(cpu_only=False)
def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_batched_mat_mul_with_transposes_gpu(self):
self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False)
def test_batched_mat_mul_single_input_cpu(self,
model_precision=_MLMODEL_FULL_PRECISION,
cpu_only=True):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=cpu_only)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION,
cpu_only=True)
def test_batched_mat_mul_single_input_gpu(self):
self.test_batched_mat_mul_single_input_cpu(model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
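# Reference gather used above: W is stored as
# (embedding_size, vocab_size), so it is transposed to
# (vocab_size, embedding_size) and rows are taken by the integer ids;
# the trailing size-1 axis of the input is the index axis that gets
# squeezed away.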
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nan_bug_cpu(self):
input_shape = [2,2]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
for axis in [0,1]:
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.array([[0.5, 0.5],[1e8, 1e8]])
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
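# Why the max is subtracted above (a minimal sketch): the naive form
#   np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)
# overflows for x = 1e8 (exp -> inf, and inf / inf -> NaN); shifting by
# the per-axis max keeps every exponent <= 0, so exp() stays in (0, 1].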
def test_softmax_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_softmax_nd_gpu(self):
self.test_softmax_nd_cpu(cpu_only=False)
def test_concat_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_concat_nd_gpu(self):
self.test_concat_nd_cpu(cpu_only=False)
def test_fill_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_fill_like_gpu(self):
self.test_fill_like_cpu(cpu_only=False)
def test_fill_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(shape), builder._get_rank('output'))
def test_fill_static_gpu(self):
self.test_fill_static_cpu(cpu_only=False)
def test_fill_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_fill_dynamic_gpu(self):
self.test_fill_dynamic_cpu(cpu_only=False)
def test_broadcast_to_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_broadcast_to_like_gpu(self):
self.test_broadcast_to_like_cpu(cpu_only=False)
def test_broadcast_to_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_broadcast_to_static_gpu(self):
self.test_broadcast_to_static_cpu(cpu_only=False)
def test_broadcast_to_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_broadcast_to_dynamic_gpu(self):
self.test_broadcast_to_dynamic_cpu(cpu_only=False)
# Test that the output rank is set to unknown when one of the input ranks
# is unknown (max-rank case)
def test_unknown_rank(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('x', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['x', 'shape'],
output_name='y')
condition = np.random.randint(0, 2, input_shape).astype(np.float32)
builder.add_load_constant_nd(name='load_constant_condition',
output_name='condition',
constant_value=condition,
shape=input_shape)
builder.add_where_broadcastable(name='where',
input_names=['condition', 'x', 'y'],
output_name='output')
self.assertEqual(builder._get_rank('output'), -1)
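# 'y' comes from broadcast_to_dynamic, whose output rank is unknown (-1);
# where_broadcastable is expected to propagate the maximum input rank, so
# 'output' inherits the unknown rank checked here.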
def test_trigonometry_cpu(self, cpu_only=True):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
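# arccosh is only defined for x >= 1, so sample from {1, e, 10} instead
# of the uniform [0, 1) draw used by the other ops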
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=cpu_only)
def test_trigonometry_gpu(self):
self.test_trigonometry_cpu(cpu_only=False)
def test_exp2_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_exp2_gpu(self):
self.test_exp2_cpu(cpu_only=False)
def test_elementwise_binary_cpu(self, cpu_only=True):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
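# Shape generation above (illustrative): for each trailing dimension a
# shared value is picked and each operand's dim is set to either 1 or
# that value, e.g. shape_a = (4, 1, 5) with shape_b = (3, 5); this is
# exactly numpy's broadcasting rule, so func(a, b) is a valid reference.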
def test_elementwise_binary_gpu(self):
self.test_elementwise_binary_cpu(cpu_only=False)
def test_elementwise_boolean_unary_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_boolean_unary_gpu(self):
self.test_elementwise_boolean_unary_cpu(cpu_only=False)
def test_logical_not_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_logical_not_gpu(self):
self.test_logical_not_cpu(cpu_only=False)
def test_stack_cpu(self, cpu_only=True):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(input_rank + 1, builder._get_rank('output'))
def test_stack_gpu(self):
self.test_stack_cpu(cpu_only=False)
def test_ceil_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_ceil_gpu(self):
self.test_ceil_cpu(cpu_only=False)
def test_floor_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_floor_gpu(self):
self.test_floor_cpu(cpu_only=False)
def test_round_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_round_gpu(self):
self.test_round_cpu(cpu_only=False)
def test_sign_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
x = np.random.choice([-np.random.rand(), 0.0, np.random.rand()],
                     tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_sign_gpu(self):
self.test_sign_cpu(cpu_only=False)
def test_clip_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_clip_gpu(self):
self.test_clip_cpu(cpu_only=False)
def test_split_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
value = np.random.choice(range(2, 5))
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][
axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
)  # compares against both numpy variants: array_split (uneven splits) and split (equal splits)
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
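# Note: np.array_split permits uneven splits (length 5 into 2 parts gives
# sizes [3, 2]), while np.split requires equal parts; the almost_equal
# flag above exercises both behaviors.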
def test_split_nd_gpu(self):
self.test_split_nd_cpu(cpu_only=False)
def test_split_nd_with_split_sizes_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_with_split_sizes_gpu(self):
self.test_split_nd_with_split_sizes_cpu(cpu_only=False)
def test_slice_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj)
strides.append(stride)
begin_masks.append(begin_mask)
end_masks.append(end_mask)
begin_ids.append(begin_id)
end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_static_gpu(self):
self.test_slice_static_cpu(cpu_only=False)
def test_slice_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj)
strides.append(stride)
begin_masks.append(begin_mask)
end_masks.append(end_mask)
begin_ids.append(begin_id)
end_ids.append(end_id)
# Test different numbers of inputs, from 2 up to 6:
# num_inputs == 2: only begin_ids is an input; the rest are read from parameters
# num_inputs == 6: everything is an input; nothing is read from parameters
for num_inputs in [2, 3, 4, 5, 6]:
x = np.random.rand(*input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
input_names = ['data']
inputs = dict()
inputs['data'] = x
if num_inputs == 2:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids)))]
input_names = ['data', 'begin_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
elif num_inputs == 3:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids)))]
input_names = ['data', 'begin_ids', 'end_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
elif num_inputs == 4:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
elif num_inputs == 5:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides', 'begin_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
elif num_inputs == 6:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks))),
('end_masks', datatypes.Array(len(end_masks)))]
input_names = ['data', 'begin_ids', 'end_ids',
'strides', 'begin_masks', 'end_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
inputs['end_masks'] = np.array(end_masks, dtype=np.int32)
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 2:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 3:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
strides=strides, begin_masks=begin_masks,
end_masks=end_masks)
elif num_inputs == 4:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 5:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_masks=end_masks)
elif num_inputs == 6:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output')
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_dynamic_gpu(self):
self.test_slice_dynamic_cpu(cpu_only=False)
def test_tile_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=5, size=rank)
for rep_rank in range(1,rank+1):
reps = list(np.random.randint(low=1, high=9, size=rep_rank))
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_tile('Tile', 'data', 'output', reps)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.tile(x, reps)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_tile_gpu(self):
self.test_tile_cpu(cpu_only=False)
def test_sliding_windows_cpu(self, cpu_only=True):
def numpy_sliding_windows(a, np_axis, np_size, np_step):
n = (a.shape[np_axis] - np_size) // np_step + 1
shape = list(a.shape)
shape[np_axis] = n
if np_axis < 0:
np_axis += len(shape)
shape.insert(np_axis + 1, np_size)
strides = list(a.strides)
effstride = strides[np_axis] * np_step
strides.insert(np_axis, effstride)
return np.lib.stride_tricks.as_strided(a, shape, strides)
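# Worked example (illustrative): a = np.arange(5), axis=0, size=2, step=2
# gives n = (5 - 2) // 2 + 1 = 2 windows, i.e. [[0, 1], [2, 3]]; the
# stride trick returns a view whose window axis advances by
# step * a.strides[axis] bytes instead of copying data.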
for rank in range(1, 5):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
output_shape = list(input_shape)
window_size = np.random.randint(low=1, high=input_shape[axis])
length = 0
while length <= 0:
step = np.random.randint(low=1, high=input_shape[axis])
length = (input_shape[axis] - window_size) // step + 1
output_shape[axis] = length
pos_axis = axis if axis >= 0 else axis + rank
output_shape.insert(pos_axis + 1, window_size)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sliding_windows('sliding_windows',
input_name='data',
output_name='output',
axis=axis,
window_size=window_size,
step=step)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': numpy_sliding_windows(x, axis, window_size, step)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(rank+1, builder._get_rank('output'))
def test_sliding_windows_gpu(self):
self.test_sliding_windows_cpu(cpu_only=False)
def test_range_static_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
for param in params:
start, end, step = param
input_features = [('multiplicative_input', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_range_static('range_static', 'output_range',
end=end, start=start, step=step)
builder.add_multiply_broadcastable(
name='multiply_broadcastable',
input_names=['multiplicative_input', 'output_range'],
output_name='output')
# save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
inputs = dict()
inputs['multiplicative_input'] = np.ones((1,), dtype=np.float64)
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
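# Note: add_range_static produces a constant tensor, so it is presumably
# multiplied by the ones-valued 'multiplicative_input' to give the model
# an output that depends on an actual network input.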
def test_range_static_gpu(self):
self.test_range_static_cpu(cpu_only=False)
def test_range_dynamic_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
# num_inputs == 1: end is an input; start and step are read from parameters
# num_inputs == 2: end and start are inputs; step is read from parameters
# num_inputs == 3: start, end, and step are all inputs; no parameters are used
for num_inputs in [1, 2, 3]:
for param in params:
inputs = dict()
start, end, step = param
if num_inputs == 1:
input_features = [('end', datatypes.Array(1))]
elif num_inputs == 2:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1))]
elif num_inputs == 3:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1)),
('step', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 1:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end'],
start=start, step=step)
elif num_inputs == 2:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start'],
step=step)
elif num_inputs == 3:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
inputs['step'] = step * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start', 'step'])
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
def test_range_dynamic_gpu(self):
self.test_range_dynamic_cpu(cpu_only=False)
def test_linear_activation_different_ranks_cpu(self, cpu_only=True):
for input_dim in [(10, 15), (10, 15, 2, 3),
(10, 2, 4, 15, 1, 4), (6,)]:
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_linear_activation_different_ranks_gpu(self):
self.test_linear_activation_different_ranks_cpu(cpu_only=False)
def test_topk_cpu(self, cpu_only=True):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
K = [3, 5]
axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]]
for ii, input_shape in enumerate(test_input_shapes):
for k in K:
for n_inputs in [1, 2]:
for bottom_k_flag in [False, True]:
for axis in axes[ii]:
for negative_axis in [False, True]:
if negative_axis:
axis = axis - len(input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('values', None), ('indices', None)]
input_names = ['data']
output_names = ['values', 'indices']
if n_inputs == 2:
input_names.append('k_in')
input_features.append(('k_in', datatypes.Array(1)))
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_topk('topk', input_names, output_names,
axis=axis, use_bottom_k=bottom_k_flag)
else:
builder.add_topk('topk', input_names, output_names,
k=k, axis=axis, use_bottom_k=bottom_k_flag)
data = np.random.randint(low=0, high=int(np.prod(input_shape)), size=input_shape)
data = data.astype(np.float32)
input = {'data': data}
if n_inputs == 2:
input['k_in'] = k * np.ones([1], dtype=np.float32)
# numpy reference values
if bottom_k_flag:
ref_indices = np.argsort(data, axis=axis)
else:
ref_indices = np.argsort(-data, axis=axis)
slc = [slice(None)] * len(input_shape)
slc[axis] = slice(0, k)
ref_indices = ref_indices[tuple(slc)]
ref_values = np.take_along_axis(data, ref_indices, axis=axis)
expected = {'values': ref_values, 'indices': ref_indices}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_topk_gpu(self):
self.test_topk_cpu(cpu_only=False)
def test_const_pad_cpu(self, cpu_only=True):
def get_reference(data, pads, value):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=pads.shape)
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
return sess.run(y, feed_dict={x: data, p: pads})
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
ctr = 0
for shape in shapes:
rank = len(shape)
for force_zeros_in_end in [0, 2, 6]:
for max_pad_value in range(1, 6):
for n_inputs in [1, 2]:
pads = np.random.randint(low=0, high=max_pad_value, size=(rank, 2))
if force_zeros_in_end > 2 * rank:
continue
if force_zeros_in_end != 0:
pads[-force_zeros_in_end:] = 0
data = np.random.rand(*shape)
reference = get_reference(data, pads, value)
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten())
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float64)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_gpu(self):
self.test_const_pad_cpu(cpu_only=False)
def test_const_pad_mode2_cpu(self, cpu_only=True):
def get_reference(data, output_shape, value, left_pad=False):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=(len(output_shape), 2))
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
pads = np.zeros((len(output_shape), 2))
if left_pad:
pads[:, 0] = np.array(output_shape) - np.array(data.shape)
else:
pads[:, 1] = np.array(output_shape) - np.array(data.shape)
return sess.run(y, feed_dict={x: data, p: pads})
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)]
ctr = 0
for ii, shape in enumerate(shapes):
rank = len(shape)
for left_pad in [True, False]:
for n_inputs in [1, 2]:
data = np.random.rand(*shape)
reference = get_reference(data, out_shapes[ii], value, left_pad)
pads = np.zeros((rank, 2))
tmp = np.zeros((rank))
for i in range(rank):
if out_shapes[ii][i] == shape[i]:
tmp[i] = 0
else:
tmp[i] = out_shapes[ii][i]
if left_pad:
pads[:, 0] = tmp
else:
pads[:, 1] = tmp
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_to_given_output_size_mode=True)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten(), pad_to_given_output_size_mode=True)
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float64)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_mode2_gpu(self):
self.test_const_pad_mode2_cpu(cpu_only=False)
def test_nms_cpu(self, cpu_only=True):
def _compute_iou_matrix(boxes):
# input is (N,4), in order [center_w, center_h, width, height]
assert len(boxes.shape) == 2
assert boxes.shape[1] == 4
            boxes = boxes.astype(float)
center_w, center_h, width, height = np.split(boxes, 4, axis=1) # outs are all (N,1)
top = center_h + 0.5 * height
bottom = center_h - 0.5 * height
left = center_w - 0.5 * width
right = center_w + 0.5 * width
area = width * height
hB = np.minimum(top, np.transpose(top))
wB = np.minimum(right, np.transpose(right))
hA = np.maximum(bottom, np.transpose(bottom))
wA = np.maximum(left, np.transpose(left))
intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA)
union_area = area + np.transpose(area) - intersection_area
iou = intersection_area / union_area
return iou
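        # The pairwise IoU falls out of broadcasting: comparing each edge vector
        # against its transpose yields all N x N overlaps at once, so no explicit
        # double loop over box pairs is needed.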
def _nms_TF(boxes, scores, iou_threshold, score_threshold, per_class_suppression, M):
# boxes is (B,N,4), in order [center_w, center_h, width, height]
# scores is (B,N,C)
# output shapes: (B,M,4), (B,M,C), (B,M), (B,)
'''
            This is an implementation of CoreML's NMS layer.
'''
B, N, C = scores.shape
iou_threshold = iou_threshold.astype(np.float32)
score_threshold = score_threshold.astype(np.float32)
# convert box ids to TF style
center_w, center_h, width, height = np.split(boxes, 4, axis=-1) # outs are all (B,N,1)
y1 = center_h - 0.5 * height
y2 = center_h + 0.5 * height
x1 = center_w - 0.5 * width
x2 = center_w + 0.5 * width
boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4)
out1 = np.zeros((B, M, 4))
out2 = np.zeros((B, M, C))
out3 = -1 * np.ones((B, M))
out4 = np.zeros((B,))
for b in range(B):
box_coord_matrix = boxes_tf[b, :, :] # (N,4)
score_vector = np.max(scores[b, :, :], axis=-1) # (N,)
if not per_class_suppression:
# this is the simple case as TF directly supports it
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=box_coord_matrix.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=score_vector.shape)
ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
ids = sess.run(ids_g, feed_dict={box_coord_matrix_pl: box_coord_matrix, score_vector_pl: score_vector})
else:
# this is slightly complicated as TF does not directly support it
class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,)
sorted_score_ids = np.argsort(-score_vector)
box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0)
score_vector2 = np.take(score_vector, sorted_score_ids)
class_ids = np.take(class_ids, sorted_score_ids)
classes_seen = dict()
                    ids_intermediate = np.array([], dtype=int)
for n in range(N):
if class_ids[n] in classes_seen:
continue
c = class_ids[n]
classes_seen[c] = True
current_class_ids = np.where(class_ids == c)[0]
if len(current_class_ids) > 0:
feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0)
feed_in2 = np.take(score_vector2, current_class_ids)
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=feed_in1.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=feed_in2.shape)
cur_ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
cur_ids = sess.run(cur_ids_g, feed_dict={box_coord_matrix_pl: feed_in1,
score_vector_pl: feed_in2})
from_sort_ids = np.take(current_class_ids, cur_ids)
ids_intermediate = np.append(ids_intermediate, from_sort_ids)
ids_intermediate.sort()
ids = np.take(sorted_score_ids, ids_intermediate)
xx = len(ids)
if xx == 0:
ids = np.array([np.argmax(score_vector)])
xx = 1
if xx > M:
ids = ids[:M]
xx = len(ids)
out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0)
out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0)
out3[b, :xx] = ids
out4[b] = xx
return out1, out2, out3, out4
iou_threshold_percentile = [0, 30, 80, 100]
score_threshold_percentile_arr = [0, 40, 100]
N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out
number_of_test = 0
for N_M in N_M_pairs_to_test:
for B in [1, 5]:
for C in [1, 7]:
N, M = N_M
boxes = np.random.rand(B, N, 4)
scores = np.random.rand(B, N, C)
iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N)
iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape(iou_matrix.shape[0], -1)
for per_class_suppression in [False, True]:
for iou_thresh in iou_threshold_percentile:
for score_thresh in score_threshold_percentile_arr:
for is_dynamic in [False, True]:
if score_thresh == 0:
score_threshold = np.min(scores) - 1
elif score_thresh == 100:
score_threshold = np.max(scores) + 1
else:
score_threshold = np.percentile(scores, score_thresh) + .01
if iou_thresh == 0:
iou_threshold = np.maximum(np.min(iou_matrix) - .01, 0.0)
else:
iou_threshold = np.percentile(iou_matrix, iou_thresh) + .01
number_of_test += 1
tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF(boxes, scores, iou_threshold,
score_threshold,
per_class_suppression,
M)
expected = dict()
expected['selected_boxes'] = tf_boxes
expected['selected_scores'] = tf_scores
expected['selected_box_ids'] = tf_ids
expected['number_of_boxes'] = tf_num_boxes
# define CoreML model
input_features = [('boxes', datatypes.Array(B,N,4)), ('scores', datatypes.Array(B,N,C))]
output_features = [('selected_boxes', None), ('selected_scores', None),
('selected_box_ids', None), ('number_of_boxes', None)]
input_names = ['boxes', 'scores']
if is_dynamic:
input_names.extend(['iou_threshold', 'score_threshold', 'max_boxes'])
input_features.append(('iou_threshold', datatypes.Array(1, )))
input_features.append(('score_threshold', datatypes.Array(1, )))
input_features.append(('max_boxes', datatypes.Array(1, )))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
input_dict = dict()
input_dict['boxes'] = boxes
input_dict['scores'] = scores
if is_dynamic:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
per_class_suppression=per_class_suppression)
                                        input_dict['iou_threshold'] = iou_threshold * np.ones([1], dtype=float)
                                        input_dict['score_threshold'] = score_threshold * np.ones([1], dtype=float)
                                        input_dict['max_boxes'] = M * np.ones([1], dtype=float)
else:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
iou_threshold=iou_threshold, score_threshold=score_threshold,
max_boxes=M, per_class_suppression=per_class_suppression)
self._test_model(builder.spec, input_dict, expected, useCPUOnly=cpu_only)
def test_nms_gpu(self):
self.test_nms_cpu(cpu_only=False)
def test_rank_preserving_reshape(self):
input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)]
target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)]
output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_rank_preserving_reshape(
name='rank_preserving_reshape', input_name='data',
output_name='output', output_shape=target_shapes[i])
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_expand_dims(self):
input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)]
axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)]
output_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (1, 10, 1, 5), (10, 5, 1, 1), (1, 1, 1, 10)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_expand_dims(
name='expand_dims', input_name='data', output_name='output',
axes=axes[i]
)
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze(self):
input_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1),
(10, 5, 1, 1), (1,), (10, 5, 1, 1), (3, 1, 7)]
axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)]
output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 5), (3, 7)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', axes=list(axes[i]))
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze_all(self):
input_shapes = [
(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1), (10, 5, 1, 1), (1,),
(10, 5, 1, 1), (3, 1, 7), (3,), (5, 6)
]
for input_shape in input_shapes:
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', squeeze_all=True)
x = np.random.rand(*input_shape)
input = {'data': x}
reference = np.squeeze(x)
if not reference.shape:
reference = np.reshape(reference, (1,))
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(-1, builder._get_rank('output'))
def test_argmax_argmin(self):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
# (1+2+3+4+5) * 2^3 = 120 test cases
for input_shape in test_input_shapes:
for negative_axis in [False, True]:
for mode in ['argmax', 'argmin']:
for keep_dims in [True, False]:
for axis in np.arange(len(input_shape)):
if negative_axis:
axis_val = axis - len(input_shape)
else:
axis_val = axis
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
x = np.random.rand(*input_shape)
if mode == 'argmax':
builder.add_argmax('argmax', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmax(x, axis=axis_val)
else:
builder.add_argmin('argmin', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmin(x, axis=axis_val)
                            if keep_dims or len(input_shape) == 1:
                                np_out = np.expand_dims(np_out, axis=axis_val)
input = {'data': x}
expected = {'output': np_out}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(np_out.shape), builder._get_rank('output'))
def test_get_shape(self):
dims = [1, 2, 3, 4, 5]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_get_shape(name='get_shape_layer', input_name='data',
output_name='output')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': np.array(input_shape)}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(1, builder._get_rank('output'))
def test_load_constant_nd(self):
dims = [2, 3, 4, 5, 6]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_load_constant_nd('load_const_nd_layer', 'tmp',
constant_value=np.ones(input_shape),
shape=input_shape)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output',
mode='ADD')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': feed['data'] + 1}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(rank, builder._get_rank('output'))
@unittest.skip('fix')
def test_simple_array_alloc_scatter(self):
alloc_shape = [2, 3, 4]
value_shape = [1, 3, 4]
input_features = [('alloc_shape', datatypes.Array(len(alloc_shape))),
('value', datatypes.Array(*value_shape)),
('index', datatypes.Array(1))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic_layer', input_name='alloc_shape',
                                 output_name='array', value=0.0)
# CoreML input order: container (array), indices, slices (value)
builder.add_scatter(name='scatter_layer',
input_names=['array', 'index', 'value'],
output_name='output')
value = np.random.rand(*value_shape).astype('float')
feed = {'alloc_shape': np.array(alloc_shape, dtype='float'),
'value': value,
'index': np.array([1], dtype='float')}
ref = np.zeros(alloc_shape)
ref[1, :, :] = value
expected = {'output': ref}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
def test_erf_activation_cpu(self, cpu_only=True):
input_features = [('data', datatypes.Array(10, 45))]
output_features = [('output', datatypes.Array(10, 45))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_erf(name='erf', input_name='data',
output_name='output')
x = np.random.rand(10, 45)
input = {'data': x}
expected = {
'output': np.asarray([math.erf(i) for i in
x.flatten().tolist()]).reshape(10, 45)
}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_erf_activation_gpu(self):
self.test_erf_activation_cpu(cpu_only=False)
def test_gelu_activation(self):
for mode in ['EXACT', 'TANH_APPROXIMATION', 'SIGMOID_APPROXIMATION']:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_gelu(name='gelu', input_name='data',
output_name='output', mode=mode)
x = np.random.rand(*shape)
input = {'data': x}
exact = np.asarray([0.5 * i * (1.0 + math.erf(i / math.sqrt(2)))
for i in x.flatten().tolist()]).reshape(*shape)
expected = {'output': exact}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_lower_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_lower_triangular('tril', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.tril(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_lower_triangular_gpu(self):
self.test_lower_triangular_cpu(cpu_only=False)
def test_upper_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_upper_triangular('triu', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.triu(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_upper_triangular_gpu(self):
self.test_upper_triangular_cpu(cpu_only=False)
def test_where_broadcastable_cpu(self, cpu_only=True):
for _ in range(150):
rank_cond = np.random.randint(low=1, high=6)
rank_true = np.random.randint(low=1, high=6)
rank_false = np.random.randint(low=1, high=6)
rank_out = max(rank_cond, rank_true, rank_false)
shape_cond = np.random.randint(low=2, high=8, size=rank_cond)
shape_true = np.random.randint(low=2, high=8, size=rank_true)
shape_false = np.random.randint(low=2, high=8, size=rank_false)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_cond: dims.append(shape_cond[i])
if -i <= rank_true: dims.append(shape_true[i])
if -i <= rank_false: dims.append(shape_false[i])
dim = np.random.choice(dims)
if -i <= rank_cond: shape_cond[i] = np.random.choice([1, dim])
if -i <= rank_true: shape_true[i] = np.random.choice([1, dim])
if -i <= rank_false: shape_false[i] = np.random.choice([1, dim])
input_features = [
('cond', datatypes.Array(*shape_cond)),
('true', datatypes.Array(*shape_true)),
('false', datatypes.Array(*shape_false))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_where_broadcastable('if_broadcastable', input_names=['cond', 'true', 'false'],
output_name='output')
cond = np.random.choice([1.0, 0.0], size=shape_cond)
true = np.random.rand(*shape_true)
false = np.random.rand(*shape_false)
input = {'cond': cond, 'true': true, 'false': false}
expected = {'output': np.where(cond, true, false)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))
def test_where_broadcastable_gpu(self):
self.test_where_broadcastable_cpu(cpu_only=False)
def test_random_normal_like_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(5, -1, -1):
if rank > 0:
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
else: # one extra test to test more moments
shape = np.array([10, 10, 10, 10, 10000])
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_like(name='random_normal_like',
input_name='tensor',
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'tensor': np.random.rand(*shape)}
expected = {'output': np.random.normal(mean, stddev, shape)}
if rank > 0:
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
else: # one extra test to test more moments
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=6)
def test_random_normal_like_gpu(self):
self.test_random_normal_like_cpu(cpu_only=False)
def test_random_normal_static_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_static(name='random_normal_static',
output_name='tmp',
output_shape=list(shape),
mean=mean, stddev=stddev, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_normal_static_gpu(self):
self.test_random_normal_static_cpu(cpu_only=False)
def test_random_normal_dynamic_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_dynamic(name='random_normal_dynamic',
input_names=['shape'],
output_name='output',
mean=mean, stddev=stddev, seed=seed)
            inputs = {'shape': np.array(shape, float)}
expected = {'output': np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_normal_dynamic_gpu(self):
self.test_random_normal_dynamic_cpu(cpu_only=False)
def test_random_uniform_like_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_like(name='random_uniform_like',
input_name='tensor',
output_name='output',
minval=minval, maxval=maxval, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_like_gpu(self):
self.test_random_uniform_like_cpu(cpu_only=False)
def test_random_uniform_static_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_static(name='random_uniform_static',
output_name='tmp',
output_shape=list(shape),
minval=minval, maxval=maxval, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_static_gpu(self):
self.test_random_uniform_static_cpu(cpu_only=False)
def test_random_uniform_dynamic_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_dynamic(name='random_uniform_dynamic',
input_names=['shape'],
output_name='output',
minval=minval, maxval=maxval, seed=seed)
            inputs = {'shape': np.array(shape, float)}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_uniform_dynamic_gpu(self):
self.test_random_uniform_dynamic_cpu(cpu_only=False)
def test_random_bernoulli_like_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_like(name='random_bernoulli_like',
input_name='tensor',
output_name='output',
prob=prob, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_like_gpu(self):
self.test_random_bernoulli_like_cpu(cpu_only=False)
def test_random_bernoulli_static_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_static(name='random_bernoulli_static', output_name='tmp',
output_shape=list(shape), prob=prob, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_static_gpu(self):
self.test_random_bernoulli_static_cpu(cpu_only=False)
def test_random_bernoulli_dynamic_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_dynamic(name='random_bernoulli_dynamic',
input_names=['shape'],
output_name='output',
prob=prob, seed=seed)
            inputs = {'shape': np.array(shape, float)}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_dynamic_gpu(self):
self.test_random_bernoulli_dynamic_cpu(cpu_only=False)
def test_categorical_distribution_cpu_shapes(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
num_samples = np.random.randint(low=10, high=1000)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name='data',
output_name='output',
num_samples=num_samples)
x = np.random.randint(low=0, high=20, size=shape).astype(np.float32)
inputs = {'data': x}
shape[-1] = num_samples
expected = {'output': np.random.rand(*shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True, validate_shapes_only=True)
def test_categorical_distribution_cpu_logits(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
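        # Subtracting np.max(data) before exponentiating leaves the softmax value
        # unchanged (softmax is shift-invariant) but avoids overflow for large logits.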
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=True,
seed=42)
x = np.random.rand(*shape)
inputs = {input_name: x}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
logits = x.reshape(2, num_class)
probs = [softmax(logits[0]), softmax(logits[1])]
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
            pre0 = np.bincount(np.array(pre0).astype(int), minlength=num_class)
            pre1 = np.bincount(np.array(pre1).astype(int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_categorical_distribution_cpu_probs(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=False,
seed=42)
x = np.random.rand(*shape)
probs = x.reshape(2, num_class)
probs[0], probs[1] = softmax(probs[0]), softmax(probs[1])
inputs = {input_name: np.reshape(probs, shape)}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
probs = probs.reshape(2, num_class)
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
            pre0 = np.bincount(np.array(pre0).astype(int), minlength=num_class)
            pre1 = np.bincount(np.array(pre1).astype(int), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_reverse_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
reverse_dim = [np.random.choice([True, False]) for _ in range(rank)]
                axes = [i for i in range(rank) if reverse_dim[i]]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_reverse('reverse', 'data', 'output', reverse_dim)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.flip(x, axis=axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reverse_gpu(self):
self.test_reverse_cpu(cpu_only=False)
def test_matrix_band_part_cpu(self, cpu_only=True):
for rank in range(2, 6):
for _ in range(20):
num_lower = np.random.randint(low=-7, high=8)
num_upper = np.random.randint(low=-7, high=8)
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_matrix_band_part('matrix_band_part', 'data', 'output',
num_lower=num_lower, num_upper=num_upper)
x = np.random.rand(*shape)
input = {'data': x}
rows, cols = shape[-2:]
band = np.ones((rows, cols))
for m in range(rows):
for n in range(cols):
band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and (num_upper < 0 or (n - m) <= num_upper)
expected = {'output': np.multiply(band, x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_matrix_band_part_gpu(self):
self.test_matrix_band_part_cpu(cpu_only=False)
def test_flatten_to_2d_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank + 1):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_flatten_to_2d('flatten_to_2d', 'data', 'output', axis=axis)
x = np.random.rand(*shape)
np_axis = axis + rank if axis < 0 else axis
pl, pr = 1, 1
for i in range(0, np_axis):
pl *= shape[i]
for i in range(np_axis, len(shape)):
pr *= shape[i]
new_shape = [pl, pr]
ref = x.reshape(new_shape)
input = {'data': x}
expected = {'output': ref}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(2, builder._get_rank('output'))
def test_flatten_to_2d_gpu(self):
self.test_flatten_to_2d_cpu(cpu_only=False)
def test_reshape_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = n // np.prod(target_shape)
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_like(name='reshape_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_reshape_like_gpu(self):
self.test_reshape_like_cpu(cpu_only=False)
def test_reshape_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_static(name='reshape_static',
input_name='data',
output_name='output',
output_shape=target_shape)
data = np.random.rand(*input_shape)
inputs = {'data': data}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(len(target_shape), builder._get_rank('output'))
def test_reshape_static_gpu(self):
self.test_reshape_static_cpu(cpu_only=False)
def test_reshape_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_dynamic(name='reshape_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_reshape_dynamic_gpu(self):
self.test_reshape_dynamic_cpu(cpu_only=False)
def test_reduce_sum_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sum('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.add.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
def test_reduce_sum_gpu(self):
self.test_reduce_sum_cpu(cpu_only=False)
def test_reduce_prod_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_prod('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.multiply.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
def test_reduce_prod_gpu(self):
self.test_reduce_prod_cpu(cpu_only=False)
def test_reduce_mean_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_mean('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.mean(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_mean_gpu(self):
self.test_reduce_mean_cpu(cpu_only=False)
def test_reduce_max_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
                    axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
import numpy as np
def bind_function(input_data, smallest, largest):
return (input_data - smallest) / (largest - smallest)
def sigmoid(t):
return 1/(1+np.exp(-t))
def sigmoid_derivative(p):
return p * (1 - p)
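# Note that sigmoid_derivative expects the already-activated output p = sigmoid(t),
# since d/dt sigmoid(t) = sigmoid(t) * (1 - sigmoid(t)). For example, sigmoid(0.0)
# is 0.5 and sigmoid_derivative(0.5) is 0.25, the maximum slope of the sigmoid.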
class NeuralNetwork:
def __init__(self, x, y, learning_rate):
self.input = x
self.bind_values(x)
self.weights1 = [[0.2, 0.3, 0.2], [0.1, 0.1, 0.1]]
self.weights2 = [[0.5, 0.1]]
self.target = y
self.bind_values(y)
self.learning_rate = learning_rate
@staticmethod
def bind_values(values):
min_value = values[np.unravel_index(
np.argmin(values, axis=None), values.shape)]
max_value = values[np.unravel_index(
np.argmax(values, axis=None), values.shape)]
new_values = np.array([bind_function(i, min_value, max_value)
for i in np.nditer(values)], dtype=np.float64).reshape(values.shape)
np.put(values, range(values.size), new_values)
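    # Note: bind_values normalises in place -- np.put writes the rescaled values
    # back into the caller's array instead of returning a new one, so self.input
    # and self.target are modified by the calls in __init__.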
def feed_forward(self):
new_inputs = []
for epoch in self.input:
epoch = np.array(epoch, ndmin=2).T
            layer1 = sigmoid(np.dot(self.weights1, epoch))
            # output layer and return: an assumed completion of the truncated source
            layer2 = sigmoid(np.dot(self.weights2, layer1))
            new_inputs.append(layer2)
        return new_inputs
import numpy as np
import pandas
from ggplot import *
"""
In this question, you need to:
1) implement the compute_cost() and gradient_descent() procedures
2) Select features (in the predictions procedure) and make predictions.
"""
def normalize_features(df):
"""
Normalize the features in the data set.
"""
mu = df.mean()
sigma = df.std()
if (sigma == 0).any():
raise Exception("One or more features had the same value for all samples, and thus could " + \
"not be normalized. Please do not include features with only a single value " + \
"in your model.")
    df_normalized = (df - mu) / sigma
return df_normalized, mu, sigma
def compute_cost(features, values, theta):
"""
Compute the cost function given a set of features / values,
and the values for our thetas.
This can be the same code as the compute_cost function in the lesson #3 exercises,
but feel free to implement your own.
"""
m = len(values)
    # standard least-squares cost; the closing of this expression and the return
    # are an assumed completion of the truncated source
    sum_of_square_errors = np.square(np.dot(features, theta) - values).sum()
    cost = sum_of_square_errors / (2 * m)
    return cost
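# The docstring above also asks for a gradient_descent() procedure; the original
# is truncated here, so the following is a minimal batch gradient-descent sketch
# built on compute_cost() -- the alpha/num_iterations parameters and the
# cost_history bookkeeping are assumptions, not the original implementation.
def gradient_descent(features, values, theta, alpha, num_iterations):
    """
    Perform gradient descent given a data set with an arbitrary number of features.
    """
    m = len(values)
    cost_history = []
    for _ in range(num_iterations):
        # standard update: theta := theta + (alpha / m) * X^T (y - X theta)
        predicted_values = np.dot(features, theta)
        theta = theta + (alpha / m) * np.dot(values - predicted_values, features)
        cost_history.append(compute_cost(features, values, theta))
    return theta, pandas.Series(cost_history)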
import numpy as np
import matplotlib.pyplot as plt
class PayOff:
def __init__(self, TheOptionsType_, Strike_):
'''
Inputs:
=========
TheOptionsType_: string (European call, European put, Binary call, Binary put)
Strike_: float strike price
'''
self.__TheOptionsType = TheOptionsType_
self.__Strike = Strike_
def __call__(self,spot):
# Overloading the ( ) operator
'''
inputs:
=========
spot: numpy array of spot prices
returns:
=========
payoff value for each option
'''
if self.__TheOptionsType == 'European call':
return np.maximum(spot - self.__Strike,0)
elif self.__TheOptionsType == 'European put':
            return np.maximum(self.__Strike - spot, 0)
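        # The docstring also lists binary options; the original file is truncated
        # here, so these branches are an assumed completion using the standard
        # unit cash-or-nothing payoff.
        elif self.__TheOptionsType == 'Binary call':
            return np.where(spot > self.__Strike, 1.0, 0.0)
        elif self.__TheOptionsType == 'Binary put':
            return np.where(spot < self.__Strike, 1.0, 0.0)
        else:
            raise ValueError('Unknown option type: %s' % self.__TheOptionsType)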
from __future__ import division
import matplotlib
matplotlib.use('TkAgg')
import multiprocessing as mp
import itertools
import numpy as np
from scipy import interpolate
from pylab import flipud
import pandas as pd
try:
from pandas import Categorical
except ImportError:
from pandas.core.categorical import Categorical
import re
from collections import defaultdict
from multiflexxlib import plotting
from multiflexxlib import ub
from multiflexxlib.ub import UBMatrix, etok, ktoe, angle_to_q
import pyclipper
import matplotlib.pyplot as plt
import matplotlib.patches as mpl_patches
import matplotlib.path as mpl_path
from matplotlib.collections import PatchCollection
from matplotlib.colors import LogNorm
from matplotlib.widgets import Button
from mpl_toolkits.axisartist import Subplot
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
import pickle
import sys
import os
import pkg_resources
from multiflexxlib._version import __version__
try:
import tkinter
from tkinter import filedialog
except ImportError:
import Tkinter as tkinter
import tkFileDialog as filedialog
import logging
logger = logging.getLogger()
logger.setLevel('INFO')
logger.addHandler(logging.StreamHandler(sys.stdout))
BIN_ADAPTIVE = 'adaptive'
BIN_REGULAR = 'regular'
NUM_CHANNELS = 31
EF_LIST = [2.5, 3.0, 3.5, 4.0, 4.5]
CHANNEL_SEPARATION = 2.5
NORM_FACTOR = [1.0, 1.16, 1.23, 1.30, 1.27]
# Aperture angle correction
try:
DETECTOR_WORKING = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/alive.csv'))
except IOError:
print('Dead detector map not found - assuming all working.')
DETECTOR_WORKING = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
WEIGHTS = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/weights.csv'), delimiter=',')
except IOError:
print('Boundary angle channel strategy not defined - assuming equal weights.')
WEIGHTS = np.ones([NUM_CHANNELS, len(EF_LIST)])
try:
INTENSITY_COEFFICIENT = np.loadtxt(pkg_resources.resource_filename(__name__, 'res/int_corr.csv'), delimiter=',')
except IOError:
print('Intensity correction matrix not found - assuming all ones.')
INTENSITY_COEFFICIENT = np.ones([NUM_CHANNELS, len(EF_LIST)])
# TODO: do something with this abomination
INTENSITY_COEFFICIENT = INTENSITY_COEFFICIENT / NORM_FACTOR
def _nan_float(string):
try:
return float(string)
except ValueError:
if '*' in string:
            return np.nan
else:
raise
def _nan_int(string):
try:
return int(string)
except ValueError:
if '*' in string:
            return np.nan
else:
raise
def _extract_ki_from_header(en, fx, kfix):
e_fix = ktoe(kfix)
if fx == 2:
ei = e_fix + en
return etok(ei)
elif fx == 1:
ei = e_fix - en
return etok(ei)
else:
raise ValueError('Invalid FX value: 2 for fix kf, 1 for fix ki, got %d' % fx)
def _number_to_scan(num):
if isinstance(num, int):
return '{:06d}'.format(num)
else:
return num
def _parse_flatcone_line(line):
data = np.array([_nan_int(x) for x in line.split()])
    array = np.reshape(data, (-1, len(EF_LIST)))[0: -1, :]  # throw out the last line, which is only an artifact
ang_channels = np.asarray([np.arange(1, NUM_CHANNELS + 1)]).T # starts at 1 to match stickers
array_with_ch_no = np.hstack([ang_channels, array])
dataframe_flatcone = pd.DataFrame(data=array_with_ch_no, columns=['aCh', 'e1', 'e2', 'e3', 'e4', 'e5'])
dataframe_flatcone.set_index('aCh', inplace=True)
return dataframe_flatcone
def _parse_param_line(line):
line_name = line[0:5]
line_body = line[6:].strip()
if line_name == 'COMND':
        no_points = int(re.findall(r'(?<=NP)[\s\t0-9]*', line_body)[0].strip())
return line_name, {'value': line_body, 'NP': no_points}
elif '=' not in line_body:
return line_name, line_body
else:
equations = line_body.split(',')
line_dict = {}
for eq in equations:
param_name, value_raw = [x.strip() for x in eq.split('=')]
try:
value = _nan_float(value_raw)
except ValueError:
value = value_raw
line_dict[param_name] = value
return line_name, line_dict
def parse_ill_data(file_object, start_flag='DATA_:\n'):
"""
Parses ILL TASMAD scan files.
:param file_object: Handle to opened file or stream. Or alternately path to scan file.
:param start_flag: Start flag of data section. Omit for default.
:return: (header_dict, dataframe)
"""
# first parse headers
try:
file_object.seek(0, 0)
except AttributeError:
file_object = open(file_object, 'r')
text_data = file_object.read()
headers = re.findall('^[A-Z_]{5}:.*', text_data, re.MULTILINE)
header_dict = defaultdict(dict)
for line in headers:
line_name, line_body = _parse_param_line(line)
if type(line_body) is dict:
header_dict[line_name].update(line_body)
else:
header_dict[line_name].update({'value': line_body})
# then parse scan parameters and counts
data_section = text_data[text_data.find(start_flag) + len(start_flag) + 1:]
column_names = data_section.splitlines()[0].split()
    # keep only lines consisting of digits, '*', '-', '.', spaces and tabs
    parameters_text_lines = re.findall(r'^[0-9*\-\s\t.]+?$', data_section, re.MULTILINE)
parameters_value_array = np.asarray([[_nan_float(num) for num in line.split()] for line in parameters_text_lines])
data_frame = pd.DataFrame(data=parameters_value_array, columns=column_names)
data_frame['PNT'] = data_frame['PNT'].astype('int16')
df_clean = data_frame.T.drop_duplicates().T
# parse flatcone data if present
    flat_all = re.findall(r'(?<=flat: )[0-9w\s\t\n*]+(?=endflat)', text_data, re.MULTILINE)
flat_number_lines = len(flat_all)
if len(df_clean) == 0:
        raise ValueError('file %s does not contain any data.' % file_object.name)
if len(df_clean) - flat_number_lines <= 1: # sanity check: only 1 missing flatcone line is acceptable
flat_frames = []
for nth, line in enumerate(flat_all):
try:
flat_frames.append(_parse_flatcone_line(line))
except ValueError:
raise ValueError('point %d in file %s is faulty.' % (nth + 1, file_object.name))
if len(df_clean) - flat_number_lines == 1:
df_clean.drop(df_clean.index[-1], inplace=True) # if only one line is missing then just drop last line
df_clean = df_clean.assign(flat=flat_frames)
else:
pass
return dict(header_dict), df_clean
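# Example usage sketch (the scan number is hypothetical): parse_ill_data returns
# the header as a dict of dicts plus a pandas DataFrame with one row per scan
# point; for MultiFLEXX files the 'flat' column holds a 31-channel x 5-energy
# detector frame per point.
#   header, df = parse_ill_data('068577')
#   df['flat'].iloc[0]  # DataFrame indexed by angular channel aCh, columns e1..e5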
def ub_from_header(scan_header):
# type: ((dict, Scan)) -> UBMatrix
"""
Make a UBMatrix object from TASMAD scan header.
:param scan_header:
:return: UBMatrix object
"""
if isinstance(scan_header, Scan):
scan_header = scan_header.header
param = scan_header['PARAM']
lattice_parameters = [param['AS'], param['BS'], param['CS'], param['AA'], param['BB'], param['CC']]
hkl1 = [float(param['AX']), float(param['AY']), float(param['AZ'])]
hkl2 = [float(param['BX']), float(param['BY']), float(param['BZ'])]
ub_matrix = UBMatrix(lattice_parameters, hkl1, hkl2)
return ub_matrix
class Scan(object):
"""
    Reads a TASMAD scan file, extracts metadata and does essential conversions. Assumes a const-Ei scan!
Usually not instantiated on its own. Use read_mf_scan() or read_mf_scans() instead.
"""
def __init__(self, file_name, ub_matrix=None, intensity_matrix=None, a3_offset=0.0, a4_offset=0.0):
"""
Scan object.
:param file_name: File name of TASMAD scan file.
:param ub_matrix: UBMatrix object to be used. Omit to generate from file header.
:param intensity_matrix: Intensity correction matrix to be used. Omit to use default.
:return: Scan object.
Examples:
>>> import multiflexxlib as mfl
>>> s1 = mfl.Scan('068577') # opens scan file 068577
>>> s2 = mfl.Scan(68577) # also possible to provide filename in number form. Will be padded to full length.
>>> u = mfl.UBMatrix([4.05, 4.05, 4.05, 90, 90, 90], [1, 0, 0], [0, 0, 1])
>>> s3 = mfl.Scan(68577, ub_matrix=u, a3_offset=1.2) # Applies a custom UBMatrix and add 1.2 degrees to all A3
angles.
>>> s3.a3_offset = 1.95 # a3_offset and a4_offset can be set after creation.
"""
file_name = _number_to_scan(file_name)
f = open(file_name)
self.header, self.data = parse_ill_data(f)
self.file_name = os.path.abspath(file_name)
self._a3_offset = a3_offset
self._a4_offset = a4_offset
self._apply_offsets(a3_offset, a4_offset)
if 'flat' not in self.data.columns:
raise AttributeError('%s does not contain MultiFLEXX data.' % file_name)
elif 'A3' not in self.header['STEPS'].keys():
            raise AttributeError('%s is not an A3 scan.' % file_name)
elif 'EI' in self.header['STEPS'].keys():
raise AttributeError('%s is not a const-E scan.' % file_name)
if intensity_matrix:
self.intensity_matrix = intensity_matrix
else:
self.intensity_matrix = INTENSITY_COEFFICIENT
if not ub_matrix:
self.ub_matrix = ub_from_header(self.header)
else:
self.ub_matrix = ub_matrix
self.converted_dataframes = []
self._update_data_array()
print('finished loading %s, a3_offset = %.2f, a4_offset = %.2f' %
(file_name, self.a3_offset, self.a4_offset))
@property
def ki(self):
try:
ki = self.data.iloc[0]['KI']
except KeyError:
try:
ki = etok(self.data.iloc[0]['EI'])
except KeyError:
ki = _extract_ki_from_header(self.header['POSQE']['EN'], self.header['PARAM']['FX'],
self.header['PARAM']['KFIX'])
return ki
@property
def tt(self):
try:
tt = self.data.iloc[-1]['TT'] # takes final value as signature value for the scan
except KeyError:
tt = None
return tt
@property
def mag(self):
try:
mag = self.data.iloc[-1]['MAG']
except KeyError:
mag = None
return mag
@property
def ei(self):
"""
Initial Energy (Ei) of scan.
:return: Ei in meV
"""
return ktoe(self.ki)
@property
def np_planned(self):
"""
Total planned points in scan based on command.
:return: Integer steps.
"""
return self.header['COMND']['NP']
@property
def np_actual(self):
"""
Actual finished points. Different from planned if scan is unfinished.
:return: Integer steps.
"""
return len(self.data)
@property
def scan_number(self):
"""
Scan number.
:return: String of scan file name, which should be numeric for TASMAD files.
"""
return os.path.split(self.file_name)[1]
@property
def a3_offset(self):
return self._a3_offset
@property
def a4_offset(self):
return self._a4_offset
@a3_offset.setter
def a3_offset(self, value):
a3_offset_old = self.a3_offset
a3_offset_new = value
a3_add = a3_offset_new - a3_offset_old
self._apply_offsets(a3_add, 0.0)
self._update_data_array()
self._a3_offset = a3_offset_new
@a4_offset.setter
def a4_offset(self, value):
        a4_offset_old = self.a4_offset
a4_offset_new = value
a4_add = a4_offset_new - a4_offset_old
self._apply_offsets(0.0, a4_add)
self._update_data_array()
self._a4_offset = a4_offset_new
@property
def planned_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_planned, a4_start, a4_end_planned,
self.ub_matrix, expand_a3=True) for kf in kf_list]
@property
def actual_locus_list(self):
kf_list = [etok(e) for e in EF_LIST]
a3_start, a3_end_actual, a3_end_planned = self.a3_ranges
a4_start, a4_end_actual, a4_end_planned = self.a4_ranges
return [calculate_locus(self.ki, kf, a3_start, a3_end_actual, a4_start, a4_end_actual,
self.ub_matrix) for kf in kf_list]
def _apply_offsets(self, a3_offset, a4_offset):
self.data.A3 = self.data.A3 + a3_offset
self.data.A4 = self.data.A4 + a4_offset
def _update_data_array(self):
num_ch = NUM_CHANNELS
channel_separation = CHANNEL_SEPARATION
num_flat_frames = len(self.data)
        # a numpy array caching a3, a4 angles and monitor counts, shared across all energy channels
a3_a4_mon_array = np.zeros([num_flat_frames * num_ch, 3])
a4_angle_mask = np.linspace(-channel_separation * (num_ch - 1) / 2,
channel_separation * (num_ch - 1) / 2, num_ch)
for i in range(num_flat_frames):
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 0] = self.data.loc[i, 'A3']
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 1] = self.data.loc[i, 'A4'] + a4_angle_mask
a3_a4_mon_array[i * num_ch: (i + 1) * num_ch, 2] = self.data.loc[i, 'M1']
data_template = pd.DataFrame(index=range(num_flat_frames * num_ch),
columns=['A3', 'A4', 'MON', 'px', 'py', 'pz', 'h', 'k', 'l',
'counts', 'valid', 'coeff', 'ach', 'point'], dtype='float64')
data_template.loc[:, ['A3', 'A4', 'MON']] = a3_a4_mon_array
self.converted_dataframes = [data_template.copy() for _ in range(len(EF_LIST))]
for ef_channel_num, ef in enumerate(EF_LIST):
qs = self.ub_matrix.angle_to_q(self.ki, etok(ef), a3_a4_mon_array[:, 0], a3_a4_mon_array[:, 1])
self.converted_dataframes[ef_channel_num].loc[:, ['px', 'py', 'pz']] = self.ub_matrix.convert(qs, 'sp').T
self.converted_dataframes[ef_channel_num].loc[:, ['h', 'k', 'l']] = self.ub_matrix.convert(qs, 'sr').T
coefficient = INTENSITY_COEFFICIENT
detector_working = DETECTOR_WORKING
for ef_channel_num in range(len(EF_LIST)):
dataframe = self.converted_dataframes[ef_channel_num]
counts = np.zeros(num_ch * num_flat_frames, dtype='float64')
valid = np.zeros(num_ch * num_flat_frames, dtype='float64')
            coeff = np.zeros(num_ch * num_flat_frames, dtype='float64')
import os
import re
import numpy as np
import scipy.io as sio
from scipy.fftpack import fft
import pandas as pd
from .movie import Movie, FullFieldFlashMovie
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', 100)
#################################################
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
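# A minimal usage sketch (hypothetical input) for the chunking helper above:
#   >>> list(chunks([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]
# The final chunk may be shorter than n when len(l) is not a multiple of n.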
##################################################
def compute_FFT_OneCycle(FR, TF, downsample):
    one_cyc = int((1000. / downsample) / TF)  # np.int was removed from NumPy
FR_cyc = list(chunks(FR, one_cyc))
if (TF == 15. or TF == 8.):
FR_cyc = FR_cyc[:-1]
FR_cyc_avg = np.mean(FR_cyc, axis=0)
y = FR_cyc_avg
AMP = 2 * np.abs(fft(y) / len(y))
F0 = 0.5 * AMP[0]
    assert (abs(F0 - np.mean(y)) < 1.e-4)
F1 = AMP[1]
return F0, F1
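# How to read F0/F1: the firing rate is folded over one stimulus cycle and
# Fourier transformed, so F0 is the mean (DC) rate of the cycle-averaged
# response and F1 is the amplitude of its first harmonic at the temporal
# frequency TF. A hedged sketch with synthetic input (assumes a 1 kHz-sampled
# rate trace; the numbers below are illustrative, not from real data):
#   >>> t = np.arange(1000) / 1000.
#   >>> FR = 10. + 4. * np.cos(2 * np.pi * 8. * t)  # 8 Hz modulation
#   >>> F0, F1 = compute_FFT_OneCycle(FR, TF=8., downsample=1.)
#   >>> np.allclose([F0, F1], [10., 4.])
#   True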
##################################################
def create_ff_mov(frame_rate, tst, tend, xrng, yrng):
ff_mov_on = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate,
max_intensity=1).full(t_max=tend) # +0.5)
ff_mov_off = FullFieldFlashMovie(np.arange(xrng), np.arange(yrng), tst, tend, frame_rate=frame_rate,
max_intensity=-1).full(t_max=tend) # +0.5)
return ff_mov_on, ff_mov_off
##################################################
def create_grating_movie_list(gr_dir_name):
gr_fnames = os.listdir(gr_dir_name)
    gr_fnames_ord = sorted(gr_fnames, key=lambda x: (int(re.sub(r'\D', '', x)), x))
gr_mov_list = []
for fname in gr_fnames_ord[:5]:
movie_file = os.path.join(gr_dir_name, fname)
m_file = sio.loadmat(movie_file)
m_data_raw = m_file['mov'].T
swid = np.shape(m_data_raw)[1]
res = int(np.sqrt(swid / (8 * 16)))
m_data = np.reshape(m_data_raw, (3000, 8 * res, 16 * res))
m1 = Movie(m_data[:500, :, :], row_range=np.linspace(0, 120, m_data.shape[1], endpoint=True), col_range=np.linspace(0, 120, m_data.shape[2], endpoint=True), frame_rate=1000.)
gr_mov_list.append(m1)
return gr_mov_list
"""
##################################################
metrics_dir = os.path.join(os.path.dirname(__file__), 'cell_metrics')
def get_data_metrics_for_each_subclass(ctype):
# Load csv file into dataframe
if ctype.find('_sus') >= 0:
prs_fn = os.path.join(metrics_dir, '{}_cells_v3.csv'.format(ctype))
else:
prs_fn = os.path.join(metrics_dir, '{}_cell_data.csv'.format(ctype))
prs_df = pd.read_csv(prs_fn)
N_class, nmet = np.shape(prs_df)
# Group data by subclasses based on max F0 vals
exp_df = prs_df.iloc[:, [13, 14, 17, 18, 28, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54]].copy() # Bl_lat,Wh_lat,Bl_si, wh_si, spont, 5 F0s, 5 F1s
sub_df = exp_df.iloc[:, [5, 6, 7, 8, 9]]
exp_df['max_tf'] = sub_df.idxmax(axis=1).values # sub_df.idxmax(axis=1)
exp_means = exp_df.groupby(['max_tf']).mean()
exp_std = exp_df.groupby(['max_tf']).std()
exp_nsub = exp_df.groupby(['max_tf']).size()
max_ind_arr = np.where(exp_nsub == np.max(exp_nsub))
max_nsub_ind = max_ind_arr[0][0]
# Get means and std dev for subclasses
exp_prs_dict = {}
for scn in np.arange(len(exp_nsub)):
f0_exp = exp_means.iloc[scn, 5:10].values
f1_exp = exp_means.iloc[scn, 10:].values
spont_exp = exp_means.iloc[scn, 4:5].values
if ctype.find('OFF') >= 0:
si_exp = exp_means.iloc[scn, 2:3].values
ttp_exp = exp_means.iloc[scn, 0:1].values
elif ctype.find('ON') >= 0:
si_exp = exp_means.iloc[scn, 3:4].values
ttp_exp = exp_means.iloc[scn, 1:2].values
else:
si_exp = np.NaN * np.ones((1, 5))
ttp_exp = np.NaN * np.ones((1, 2))
nsub = exp_nsub.iloc[scn]
if nsub == 1:
f0_std = np.mean(exp_std.iloc[max_nsub_ind, 5:10].values) * np.ones((1, 5))
f1_std = np.mean(exp_std.iloc[max_nsub_ind, 10:].values) * np.ones((1, 5))
spont_std = np.mean(exp_std.iloc[max_nsub_ind, 4:5].values) * np.ones((1, 5))
if ctype.find('OFF') >= 0:
si_std = np.mean(exp_std.iloc[max_nsub_ind, 2:3].values) * np.ones((1, 5))
elif ctype.find('ON') >= 0:
si_std = np.mean(exp_std.iloc[max_nsub_ind, 3:4].values) * np.ones((1, 5))
else:
si_std = np.NaN * np.ones((1, 5))
else:
f0_std = exp_std.iloc[scn, 5:10].values
f1_std = exp_std.iloc[scn, 10:].values
spont_std = exp_std.iloc[scn, 4:5].values
if ctype.find('OFF') >= 0:
si_std = exp_std.iloc[scn, 2:3].values
elif ctype.find('ON') >= 0:
si_std = exp_std.iloc[scn, 3:4].values
else:
si_std = np.NaN * np.ones((1, 5))
if ctype.find('t') >= 0:
tcross = 40.
si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. - tcross - 40.))
elif ctype.find('s') >= 0:
tcross = 60.
si_inf_exp = (si_exp - tcross / 200.) * (200. / (200. - tcross - 40.))
else:
si_inf_exp = np.nan
dict_key = exp_means.iloc[scn].name[3:]
exp_prs_dict[dict_key] = {}
exp_prs_dict[dict_key]['f0_exp'] = f0_exp
exp_prs_dict[dict_key]['f1_exp'] = f1_exp
exp_prs_dict[dict_key]['spont_exp'] = spont_exp
exp_prs_dict[dict_key]['si_exp'] = si_exp
exp_prs_dict[dict_key]['si_inf_exp'] = si_inf_exp
exp_prs_dict[dict_key]['ttp_exp'] = ttp_exp
exp_prs_dict[dict_key]['f0_std'] = f0_std
exp_prs_dict[dict_key]['f1_std'] = f1_std
exp_prs_dict[dict_key]['spont_std'] = spont_std
exp_prs_dict[dict_key]['si_std'] = si_std
exp_prs_dict[dict_key]['nsub'] = nsub
exp_prs_dict[dict_key]['N_class'] = N_class
return exp_prs_dict
"""
##################################################
def check_optim_results_against_bounds(bounds, opt_wts, opt_kpeaks):
bds_wts0 = bounds[0]
bds_wts1 = bounds[1]
bds_kp0 = bounds[2]
bds_kp1 = bounds[3]
opt_wts0 = opt_wts[0]
opt_wts1 = opt_wts[1]
opt_kp0 = opt_kpeaks[0]
opt_kp1 = opt_kpeaks[1]
if (opt_wts0 == bds_wts0[0] or opt_wts0 == bds_wts0[1]):
prm_on_bds = 'w0'
elif (opt_wts1 == bds_wts1[0] or opt_wts1 == bds_wts1[1]):
prm_on_bds = 'w1'
elif (opt_kp0 == bds_kp0[0] or opt_kp0 == bds_kp0[1]):
prm_on_bds = 'kp0'
elif (opt_kp1 == bds_kp1[0] or opt_kp1 == bds_kp1[1]):
prm_on_bds = 'kp1'
else:
prm_on_bds = 'None'
return prm_on_bds
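# A minimal usage sketch (hypothetical bounds and optimised values):
#   >>> bounds = [(0.1, 1.0), (-1.0, 0.0), (1.0, 100.0), (1.0, 200.0)]
#   >>> check_optim_results_against_bounds(bounds, (0.1, -0.5), (20.0, 60.0))
#   'w0'
# i.e. it names the first optimised parameter sitting exactly on a bound,
# or returns 'None' when every parameter lies strictly inside its bounds.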
def cross_from_above(x, threshold):
"""Return the indices into *x* where *x* crosses some threshold from above."""
    x = np.asarray(x)
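# The rest of this function is truncated here; a sketch of the standard
# implementation (an assumption, modelled on matplotlib.mlab's old
# cross_from_above helper) would continue roughly as:
#   ind = np.nonzero((x[:-1] >= threshold) & (x[1:] < threshold))[0]
#   return ind + 1 if len(ind) else ind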
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from math import sqrt
from typing import Union, Tuple, Sequence, List, Callable, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from pandas import Timedelta
from qf_lib.analysis.common.abstract_document import AbstractDocument
from qf_lib.common.utils.error_handling import ErrorHandling
from qf_lib.backtesting.fast_alpha_model_tester.scenarios_generator import ScenariosGenerator
from qf_lib.backtesting.portfolio.trade import Trade
from qf_lib.common.utils.miscellaneous.constants import DAYS_PER_YEAR_AVG
from qf_lib.common.utils.numberutils.is_finite_number import is_finite_number
from qf_lib.common.utils.returns.max_drawdown import max_drawdown
from qf_lib.common.utils.returns.sqn import sqn, sqn_for100trades, avg_nr_of_trades_per1y
from qf_lib.containers.dataframe.prices_dataframe import PricesDataFrame
from qf_lib.containers.dataframe.qf_dataframe import QFDataFrame
from qf_lib.containers.series.qf_series import QFSeries
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
from qf_lib.documents_utils.document_exporting.element.chart import ChartElement
from qf_lib.documents_utils.document_exporting.element.df_table import DFTable
from qf_lib.documents_utils.document_exporting.element.heading import HeadingElement
from qf_lib.documents_utils.document_exporting.element.new_page import NewPageElement
from qf_lib.documents_utils.document_exporting.pdf_exporter import PDFExporter
from qf_lib.plotting.charts.chart import Chart
from qf_lib.plotting.charts.histogram_chart import HistogramChart
from qf_lib.plotting.charts.line_chart import LineChart
from qf_lib.plotting.decorators.axes_formatter_decorator import AxesFormatterDecorator, PercentageFormatter
from qf_lib.plotting.decorators.axes_label_decorator import AxesLabelDecorator
from qf_lib.plotting.decorators.axes_locator_decorator import AxesLocatorDecorator
from qf_lib.plotting.decorators.axes_position_decorator import AxesPositionDecorator
from qf_lib.plotting.decorators.data_element_decorator import DataElementDecorator
from qf_lib.plotting.decorators.legend_decorator import LegendDecorator
from qf_lib.plotting.decorators.line_decorators import VerticalLineDecorator
from qf_lib.plotting.decorators.title_decorator import TitleDecorator
from qf_lib.settings import Settings
@ErrorHandling.class_error_logging()
class TradeAnalysisSheet(AbstractDocument):
"""
Creates a PDF containing main statistics of the trades.
Parameters
-------------
settings: Settings
settings of the project
pdf_exporter: PDFExporter
tool that creates the pdf with the result
nr_of_assets_traded: int
number of assets traded
trades: Sequence[Trade]
list of trades
start_date: datetime
end_date: datetime
title: str
title of the document, will be a part of the filename. Do not use special characters
"""
def __init__(self, settings: Settings, pdf_exporter: PDFExporter, nr_of_assets_traded: int, trades: Sequence[Trade],
start_date: datetime, end_date: datetime, initial_risk: Optional[float] = None, title: str = "Trades"):
super().__init__(settings, pdf_exporter, title)
self.start_date = start_date
self.end_date = end_date
self.initial_risk = initial_risk
self.trades = sorted(trades, key=lambda t: (t.end_time, t.start_time))
self.nr_of_assets_traded = nr_of_assets_traded
def build_document(self):
self._add_header()
self._add_returns_distribution()
self._add_stats_table()
self._add_simulation_results()
def _add_returns_distribution(self):
if self.initial_risk is not None:
returns = SimpleReturnsSeries(data=[t.percentage_pnl / self.initial_risk for t in self.trades])
title = "Distribution of R multiples, Initial risk = {:.2%}".format(self.initial_risk)
returns_histogram = self._get_distribution_plot(returns, title)
else:
returns = SimpleReturnsSeries(data=[t.percentage_pnl for t in self.trades])
title = "Distribution of returns [%]"
returns_histogram = self._get_distribution_plot(returns, title)
# Format the x-axis so that its labels are shown as a percentage in case of percentage returns
axes_formatter_decorator = AxesFormatterDecorator(x_major=PercentageFormatter(), key="axes_formatter")
returns_histogram.add_decorator(axes_formatter_decorator)
self.document.add_element(ChartElement(returns_histogram, figsize=self.full_image_size, dpi=self.dpi))
def _add_stats_table(self):
statistics = [] # type: List[Tuple]
def append_to_statistics(measure_description: str, function: Callable, trades_containers,
percentage_style: bool = False):
style_format = "{:.2%}" if percentage_style else "{:.2f}"
returned_values = (function(tc) for tc in trades_containers)
returned_values = (value if is_finite_number(value) else 0.0 for value in returned_values)
statistics.append((measure_description, *(style_format.format(val) for val in returned_values)))
# Prepare trades data frame, used to generate all statistics
trades_df = QFDataFrame.from_records(
data=[(t.start_time, t.end_time, t.percentage_pnl, t.direction) for t in self.trades],
columns=["start time", "end time", "percentage pnl", "direction"]
)
# In case if the initial risk is not set all the return statistic will be computed using the percentage pnl,
# otherwise the r_multiply = percentage pnl / initial risk is used
unit = "%" if self.initial_risk is None else "R"
trades_df["returns"] = trades_df["percentage pnl"] if self.initial_risk is None \
else trades_df["percentage pnl"] / self.initial_risk
        # Filter out only long and only short trades
long_trades_df = trades_df[trades_df["direction"] > 0]
short_trades_df = trades_df[trades_df["direction"] < 0]
all_dfs = [trades_df, long_trades_df, short_trades_df]
append_to_statistics("Number of trades", len, all_dfs)
append_to_statistics("% of trades number", lambda df: len(df) / len(trades_df) if len(trades_df) > 0 else 0,
all_dfs, percentage_style=True)
period_length_in_years = Timedelta(self.end_date - self.start_date) / Timedelta(days=1) / DAYS_PER_YEAR_AVG
append_to_statistics("Avg number of trades per year", lambda df: len(df) / period_length_in_years, all_dfs)
append_to_statistics("Avg number of trades per year per asset",
lambda df: len(df) / period_length_in_years / self.nr_of_assets_traded, all_dfs)
def percentage_of_positive_trades(df: QFDataFrame):
return len(df[df["returns"] > 0]) / len(df) if len(df) > 0 else 0.0
append_to_statistics("% of positive trades", percentage_of_positive_trades, all_dfs, percentage_style=True)
def percentage_of_negative_trades(df: QFDataFrame):
return len(df[df["returns"] < 0]) / len(df) if len(df) > 0 else 0.0
append_to_statistics("% of negative trades", percentage_of_negative_trades, all_dfs, percentage_style=True)
def avg_trade_duration(df: QFDataFrame):
trades_duration = (df["end time"] - df["start time"]) / Timedelta(days=1)
return trades_duration.mean()
append_to_statistics("Average trade duration [days]", avg_trade_duration, all_dfs)
append_to_statistics("Average trade return [{}]".format(unit), lambda df: df["returns"].mean(), all_dfs,
percentage_style=(self.initial_risk is None))
append_to_statistics("Std trade return [{}]".format(unit), lambda df: df["returns"].std(), all_dfs,
percentage_style=(self.initial_risk is None))
def avg_positive_trade_return(df: QFDataFrame):
positive_trades = df[df["returns"] > 0]
return positive_trades["returns"].mean()
append_to_statistics("Average positive return [{}]".format(unit), avg_positive_trade_return, all_dfs,
percentage_style=(self.initial_risk is None))
def avg_negative_trade_return(df: QFDataFrame):
negative_trades = df[df["returns"] < 0]
return negative_trades["returns"].mean()
append_to_statistics("Average negative return [{}]".format(unit), avg_negative_trade_return, all_dfs,
percentage_style=(self.initial_risk is None))
append_to_statistics("Best trade return [{}]".format(unit), lambda df: df["returns"].max(), all_dfs,
percentage_style=(self.initial_risk is None))
append_to_statistics("Worst trade return [{}]".format(unit), lambda df: df["returns"].min(), all_dfs,
percentage_style=(self.initial_risk is None))
append_to_statistics("SQN (per trade) [{}]".format(unit), lambda df: sqn(df["returns"]), all_dfs,
percentage_style=(self.initial_risk is None))
append_to_statistics("SQN (per 100 trades) [{}]".format(unit), lambda df: sqn_for100trades(df["returns"]),
all_dfs, percentage_style=(self.initial_risk is None))
def sqn_per_year(returns: QFSeries):
sqn_per_year_value = sqn(returns) * sqrt(avg_nr_of_trades_per1y(returns, self.start_date, self.end_date))
return sqn_per_year_value
append_to_statistics("SQN (per year) [{}]".format(unit), lambda df: sqn_per_year(df["returns"]), all_dfs,
percentage_style=(self.initial_risk is None))
statistics_df = QFDataFrame.from_records(statistics, columns=["Measure", "All trades", "Long trades",
"Short trades"])
table = DFTable(statistics_df, css_classes=['table', 'left-align'])
table.add_columns_classes(["Measure"], 'wide-column')
self.document.add_element(table)
def _add_simulation_results(self):
"""
Generate a data frame consisting of a certain number of "scenarios" (each scenario denotes one single equity
curve).
"""
self.document.add_element(NewPageElement())
self.document.add_element(HeadingElement(level=1, text="Monte Carlo simulations\n"))
self.document.add_element(HeadingElement(level=2, text="Average number of trades per year: {}\n".format(
int(self._average_number_of_trades_per_year()))))
if self.initial_risk is not None:
self.document.add_element(HeadingElement(level=2, text="Initial risk: {:.2%}".format(self.initial_risk)))
scenarios_df, total_returns = self._get_scenarios()
# Plot all the possible paths on a chart
all_paths_chart = self._get_simulation_plot(scenarios_df)
self.document.add_element(ChartElement(all_paths_chart, figsize=self.full_image_size, dpi=self.dpi))
# Plot the distribution plot
distribution_plot = self._get_distribution_plot(
total_returns, title="Monte Carlo Simulations Distribution (one year % return)", bins=200, crop=True)
# Format the x-axis so that its labels are shown as a percentage in case of percentage returns
axes_formatter_decorator = AxesFormatterDecorator(x_major=PercentageFormatter(), key="axes_formatter")
distribution_plot.add_decorator(axes_formatter_decorator)
self.document.add_element(ChartElement(distribution_plot, figsize=self.full_image_size, dpi=self.dpi))
simulations_summary_table = self._get_monte_carlos_simulator_outputs(scenarios_df, total_returns)
self.document.add_element(simulations_summary_table)
# Extract the results of each of the scenarios and summarize the data in the tables
dist_summary_tables = self._get_distribution_summary_table(total_returns)
self.document.add_element(dist_summary_tables)
# Add the "Chances of dropping below" and "Simulations summary" tables
ruin_chances_table = self._get_chances_of_dropping_below_table(scenarios_df)
self.document.add_element(ruin_chances_table)
def _get_scenarios(self, num_of_scenarios: int = 2500) -> Tuple[PricesDataFrame, SimpleReturnsSeries]:
# Generate scenarios, each of which consists of a certain number of trades, equal to the average number
# of trades per year
scenarios_generator = ScenariosGenerator()
trade_returns = [trade.percentage_pnl for trade in self.trades]
# Generate the scenarios
scenarios_df = scenarios_generator.make_scenarios(
trade_returns,
scenarios_length=int(self._average_number_of_trades_per_year()),
num_of_scenarios=num_of_scenarios
)
scenarios_df = scenarios_df.to_prices()
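        # Note: below, iloc[-1] / iloc[0] - 1.0 is the simple total return of
        # each scenario's price path, i.e. one value per simulated scenario.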
return scenarios_df, scenarios_df.iloc[-1] / scenarios_df.iloc[0] - 1.0
def _average_number_of_trades_per_year(self):
""" Computes the average number of trades per year. """
number_of_trades = len(self.trades)
period_length_in_years = Timedelta(self.end_date - self.start_date) / Timedelta(days=1) / DAYS_PER_YEAR_AVG
return number_of_trades / period_length_in_years
def _get_simulation_plot(self, scenarios_df: PricesDataFrame) -> Chart:
chart = LineChart(log_scale=True)
for _, scenario in scenarios_df.items():
data_element = DataElementDecorator(scenario, linewidth=0.5)
chart.add_decorator(data_element)
# Add a legend
legend = LegendDecorator(key="legend_decorator")
# Add Ensemble average
ensemble_avg = scenarios_df.mean(axis=1)
ensemble_avg_data_element = DataElementDecorator(ensemble_avg, color="#e1e5f4", linewidth=3)
chart.add_decorator(ensemble_avg_data_element)
legend.add_entry(ensemble_avg_data_element, "Ensemble average")
# Add Expectation (vol adjusted)
trade_returns = QFSeries(data=[trade.percentage_pnl for trade in self.trades])
std = trade_returns.std()
expectation_adj_series = np.ones(len(ensemble_avg)) * (trade_returns.mean() - 0.5 * std * std)
expectation_adj_series = SimpleReturnsSeries(data=expectation_adj_series, index=ensemble_avg.index)
expectation_adj_series = expectation_adj_series.to_prices()
data_element = DataElementDecorator(expectation_adj_series, color="#46474b", linewidth=2)
chart.add_decorator(data_element)
legend.add_entry(data_element, "Expectation (vol adjusted)")
# Add title
title_decorator = TitleDecorator("Monte Carlo Simulations (log scale)", key="title")
chart.add_decorator(title_decorator)
position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
chart.add_decorator(position_decorator)
chart.add_decorator(legend)
return chart
def _get_distribution_plot(self, data_series: SimpleReturnsSeries, title: str, bins: Union[int, str] = 50,
crop: bool = False):
colors = Chart.get_axes_colors()
if crop:
start_x = np.quantile(data_series, 0.01)
end_x = np.quantile(data_series, 0.99)
chart = HistogramChart(data_series, bins=bins, start_x=start_x, end_x=end_x)
else:
chart = HistogramChart(data_series, bins=bins)
# Only show whole numbers on the y-axis.
y_axis_locator = MaxNLocator(integer=True)
axes_locator_decorator = AxesLocatorDecorator(y_major=y_axis_locator, key="axes_locator")
chart.add_decorator(axes_locator_decorator)
# Add an average line.
avg_line = VerticalLineDecorator(data_series.mean(), color=colors[1],
key="average_line_decorator", linestyle="--", alpha=0.8)
chart.add_decorator(avg_line)
# Add a legend.
legend = LegendDecorator(key="legend_decorator")
legend.add_entry(avg_line, "Mean")
chart.add_decorator(legend)
# Add a title.
title_decorator = TitleDecorator(title, key="title")
chart.add_decorator(title_decorator)
chart.add_decorator(AxesLabelDecorator(title, "Occurrences"))
position_decorator = AxesPositionDecorator(*self.full_image_axis_position)
chart.add_decorator(position_decorator)
return chart
def _get_distribution_summary_table(self, scenarios_results: SimpleReturnsSeries) -> DFTable:
rows = []
percentage_list = [0.05, 0.1, 0.2, 0.3]
for percentage in percentage_list:
rows.append(("{:.0%} Tail".format(percentage),
"{:.2%}".format(np.quantile(scenarios_results, percentage))))
rows.append(("50%", "{:.2%}".format(np.quantile(scenarios_results, 0.5))))
for percentage in reversed(percentage_list):
rows.append(("{:.0%} Top".format(percentage),
"{:.2%}".format(np.quantile(scenarios_results, (1.0 - percentage)))))
table = DFTable(data=QFDataFrame.from_records(rows, columns=["Measure", "Value"]),
css_classes=['table', 'left-align'])
table.add_columns_classes(["Measure"], 'wide-column')
return table
def _get_chances_of_dropping_below_table(self, scenarios_df: PricesDataFrame) -> DFTable:
_, all_scenarios_number = scenarios_df.shape
rows = []
crop_table = False
        for percentage in np.linspace(0.1, 0.9, 9):
import collections
import numpy as np
from guesswhat.statistics.abstract_plotter import *
import seaborn as sns
import pandas as pd
class SuccessDialogueLength(AbstractPlotter):
def __init__(self, path, games, logger, suffix):
super(SuccessDialogueLength, self).__init__(path, self.__class__.__name__, suffix)
status_list = []
status_count = collections.defaultdict(int)
length_list = []
for game in games:
length_list.append(len(game.questions))
status_count[game.status] += 1
status_list.append(game.status)
success = np.array([s == "success" for s in status_list]) + 0
failure = np.array([s == "failure" for s in status_list]) + 0
incomp = np.array([s == "incomplete" for s in status_list]) + 0
sns.set_style("whitegrid", {"axes.grid": False})
if sum(incomp) > 0:
columns = ['Size of Dialogues', 'Success', 'Failure', 'Incomplete']
data = np.array([length_list, success, failure, incomp]).transpose()
else:
columns = ['Size of Dialogues', 'Success', 'Failure']
            data = np.array([length_list, success, failure]).transpose()
__copyright__ = """
Copyright (C) 2020 University of Illinois Board of Trustees
Copyright (C) 2021 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
import cantera as ct
import numpy as np # noqa: F401
import pyrometheus as pyro
import pytest
try:
import jax
except ImportError:
numpy_list = [np]
jnp = None
else:
import jax.numpy as jnp # noqa: F401
jax.config.update("jax_enable_x64", 1)
numpy_list = [np, jnp]
def make_jax_pyro_class(ptk_base_cls, usr_np):
if usr_np != jnp:
return ptk_base_cls(usr_np)
class PyroJaxNumpy(ptk_base_cls):
def _pyro_make_array(self, res_list):
"""This works around (e.g.) numpy.exp not working with object arrays of numpy
scalars. It defaults to making object arrays, however if an array
consists of all scalars, it makes a "plain old" :class:`numpy.ndarray`.
See ``this numpy bug <https://github.com/numpy/numpy/issues/18004>`__
for more context.
"""
from numbers import Number
# Needed to play nicely with Jax, which frequently creates
# arrays of shape () when handed numbers
all_numbers = all(
isinstance(e, Number)
or (isinstance(e, self.usr_np.ndarray) and e.shape == ())
for e in res_list)
if all_numbers:
return self.usr_np.array(res_list, dtype=self.usr_np.float64)
result = self.usr_np.empty_like(res_list, dtype=object,
shape=(len(res_list),))
# 'result[:] = res_list' may look tempting, however:
# https://github.com/numpy/numpy/issues/16564
for idx in range(len(res_list)):
result[idx] = res_list[idx]
return result
def _pyro_norm(self, argument, normord):
"""This works around numpy.linalg norm not working with scalars.
If the argument is a regular ole number, it uses :func:`numpy.abs`,
otherwise it uses ``usr_np.linalg.norm``.
"""
# Wrap norm for scalars
from numbers import Number
if isinstance(argument, Number):
return self.usr_np.abs(argument)
# Needed to play nicely with Jax, which frequently creates
# arrays of shape () when handed numbers
if isinstance(argument, self.usr_np.ndarray) and argument.shape == ():
return self.usr_np.abs(argument)
return self.usr_np.linalg.norm(argument, normord)
return PyroJaxNumpy(usr_np=usr_np)
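# A hedged usage sketch: build a JAX-backed thermochemistry object from a
# Cantera solution (assumes jax is installed, so jnp is not None, and uses
# the same mechs/ files exercised elsewhere in this module):
#   >>> sol = ct.Solution("mechs/uiuc.cti", "gas")
#   >>> ptk_cls = pyro.get_thermochem_class(sol)
#   >>> ptk = make_jax_pyro_class(ptk_cls, jnp)
# Passing any other numpy-like module simply instantiates the base class.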
# Write out all the mechanisms for inspection
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
def test_generate_mechfile(mechname):
"""This "test" produces the mechanism codes."""
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
with open(f"mechs/{mechname}.py", "w") as mech_file:
code = pyro.gen_thermochem_code(sol)
print(code, file=mech_file)
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_rate_coefficients(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes the rate coefficients matching Cantera
for given temperature and composition"""
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Test temperatures
temp = np.linspace(500.0, 3000.0, 10)
for t in temp:
# Set new temperature in Cantera
sol.TP = t, ct.one_atm
# Concentrations
y = sol.Y
rho = sol.density
c = ptk.get_concentrations(rho, y)
# Get rate coefficients and compare
k_ct = sol.forward_rate_constants
k_pm = ptk.get_fwd_rate_coefficients(t, c)
print(k_ct)
print(np.abs((k_ct-k_pm)/k_ct))
assert np.linalg.norm((k_ct-k_pm)/k_ct, np.inf) < 1e-14
return
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_pressure(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes the Cantera-predicted pressure for given density,
temperature, and mass fractions
"""
# Create Cantera and pyrometheus objects
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Temperature, equivalence ratio, oxidizer ratio, stoichiometry ratio
t = 300.0
phi = 2.0
alpha = 0.21
nu = 0.5
# Species mass fractions
i_fu = ptk.species_index("H2")
i_ox = ptk.species_index("O2")
i_di = ptk.species_index("N2")
x = np.zeros(ptk.num_species)
x[i_fu] = (alpha * phi) / (nu + alpha * phi)
x[i_ox] = nu * x[i_fu] / phi
x[i_di] = (1.0 - alpha) * x[i_ox] / alpha
# Get equilibrium composition
sol.TPX = t, ct.one_atm, x
sol.equilibrate("UV")
t, rho, y = sol.TDY
p_ct = sol.P
# Compute pressure with pyrometheus and compare to Cantera
p_pm = ptk.get_pressure(rho, t, y)
assert abs(p_ct - p_pm) / p_ct < 1.0e-12
@pytest.mark.parametrize("mechname", ["uiuc", "sanDiego"])
@pytest.mark.parametrize("usr_np", numpy_list)
def test_get_thermo_properties(mechname, usr_np):
"""This function tests that pyrometheus-generated code
computes thermodynamic properties c_p, s_r, h_rt, and k_eq
correctly by comparing against Cantera"""
# Create Cantera and pyrometheus objects
sol = ct.Solution(f"mechs/{mechname}.cti", "gas")
ptk_base_cls = pyro.get_thermochem_class(sol)
ptk = make_jax_pyro_class(ptk_base_cls, usr_np)
# Loop over temperatures
    temp = np.linspace(500.0, 3000.0, 10)
import librosa
import librosa.display
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from matplotlib.pyplot import specgram
import keras
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Embedding
from keras.layers import LSTM
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
from keras.layers import Input, Flatten, Dropout, Activation
from keras.layers import Conv1D, MaxPooling1D, AveragePooling1D
from keras.models import Model
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import confusion_matrix
from keras import regularizers
import pandas as pd
import os
import glob
import time
import numpy as np
import os
bookmark=0
y=[]
## Classification on the expanded RAVDESS dataset (converting audio-visual recordings to audio-only recordings)
####################################################################
##################################################################
###############################################################
#############################################################
############################################################
import os
import pandas as pd
import glob
import time
import numpy as np
path = 'Ravdess_Expanded'
lst = []
import librosa
start_time = time.time()
for subdir, dirs, files in os.walk(path):
for file in files:
try:
            # Load the audio with librosa, obtain MFCCs, and store the file label with the MFCC features in a new array
X, sample_rate = librosa.load(os.path.join(subdir,file), res_type='kaiser_fast')
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T,axis=0)
            # The instruction below converts the labels (from 1 to 8) to a series from 0 to 7.
            # This is because our predictor needs labels starting from 0; otherwise it would
            # also try to predict a nonexistent class 0.
file = int(file[7:8]) - 1
arr = mfccs, file
lst.append(arr)
# If the file is not valid, skip it
except ValueError:
continue
print("--- Data loaded. Loading time: %s seconds ---" % (time.time() - start_time))
X, y = zip(*lst)
X=np.asarray(X)
y=np.asarray(y)
y=np.reshape(y,(-1,1))
z=np.concatenate((X,y),axis=1)
dataset=pd.DataFrame(z)
dataset=dataset.dropna()
array=[i for i in range(1440,2878)]
df=dataset.drop(array,axis=0)
X=df.iloc[:,:40].values
y=df.iloc[:,40].values
import pickle
with open('X.pickle', 'wb') as f:
pickle.dump([X], f)
with open('y.pickle', 'wb') as f:
pickle.dump([y], f)
pickle_in = open("X.pickle","rb")
X = pickle.load(pickle_in)
pickle_in1 = open("y.pickle","rb")
y = pickle.load(pickle_in1)
X = np.asarray(X)
import clustering
import numpy as np
from sklearn import datasets
import matplotlib.pyplot as plt
import dionysus as dion
import random
def plot_all(data, diagrams):
fig = plt.figure(figsize=(20, 10))
for i in range(len(data)):
num = 241 + i
ax = plt.subplot(num)
plt.scatter(data[i][:, 0], data[i][:, 1])
ax = plt.subplot(num + 4)
plot_diagram(diagrams[i], ax, lims=[0, 1.5, 0, 1.75])
fig.suptitle("Datasets with corresponding persistence diagrams")
plt.show()
def compute_diagrams(data):
diagrams = []
for i in range(len(data)):
print("Processing data: " + str(i))
filtration = dion.fill_rips(data[i], 2, 3.0)
homology = dion.homology_persistence(filtration)
diagram = dion.init_diagrams(homology, filtration)
diagrams.append(diagram[1])
print()
return diagrams
def plot_clusters(M):
plt.scatter(M[0].T[0], M[0].T[1], c='r', label='Rings')
plt.scatter(M[1].T[0], M[1].T[1], c='b', label='Noise')
plt.xlim([0, 1.5])
plt.ylim([0, 1.75])
plt.plot([0.1, 1.2], [0.1, 1.2])
plt.legend()
plt.title("Persistence Diagram Cluster Centres")
plt.show()
def gen_data(seed, noise=0.05, n_samples=100):
print("\nGenerating data...\n")
np.random.seed(seed)
random.seed(seed)
data = []
data.append(datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed)[0])
data.append(datasets.make_circles(n_samples=n_samples, factor=0.99, noise=noise, random_state=seed + 1)[0])
data.append(np.random.normal(size=(100, 2), scale=0.5))
data.append(0.9 * np.random.normal(size=(100, 2), scale=0.5))
return data
def gen_data2(seed, noise, n_samples):
dataset = []
np.random.seed(seed)
random.seed(seed)
# Noise
    data = np.random.normal(size=(100, 2), scale=0.5)
import os
import json
import argparse
import logging
from pathlib import Path
import time
from typing import Tuple
import pandas as pd
import numpy as np
import tqdm
from melloddy_tuner.utils import hash_reference_set
from melloddy_tuner.utils.helper import (
load_config,
load_key,
make_dir,
read_input_file,
create_log_files,
sanity_check_assay_sizes,
sanity_check_assay_type,
sanity_check_uniqueness,
save_df_as_csv,
)
from melloddy_tuner.utils.config import ConfigDict
from multiprocessing import Pool
def init_arg_parser():
"""Argparser module to load commandline arguments.
Returns:
[Namespace]: Arguments from argparser
"""
parser = argparse.ArgumentParser(description="smiles standardization")
parser.add_argument(
"-assay",
"--assay_file",
type=str,
help="path of the assay metadata file T0",
required=True,
)
parser.add_argument(
"-a",
"--activity_file",
type=str,
help="path of the activity data file T1",
required=True,
)
parser.add_argument(
"-mt",
"--mapping_table",
type=str,
help="path of the mapping table T5",
required=True,
)
parser.add_argument(
"-c", "--config_file", type=str, help="path of the config file", required=True
)
parser.add_argument(
"-k", "--key_file", type=str, help="path of the key file", required=True
)
parser.add_argument(
"-o",
"--output_dir",
type=str,
help="path to the generated output directory",
required=True,
)
parser.add_argument(
"-r", "--run_name", type=str, help="name of your current run", required=True
)
parser.add_argument(
"-rh",
"--ref_hash",
type=str,
help="path to the reference hash key file provided by the consortium. (ref_hash.json)",
)
parser.add_argument(
"-ni",
"--non_interactive",
help="Enables an non-interactive mode for cluster/server usage",
action="store_true",
default=False,
)
parser.add_argument(
"-cpu", "--number_cpu", type=int, help="number of CPUs", default=1
)
args = parser.parse_args()
return args
def most_common_qualifier(qualifiers: list) -> str:
"""Determines the most common qualifier, in case of a tie including '=' returns '='
Input:
qualifiers - list of qualifiers, accepted values '<', '>' and '='
Output:
str: the most common qualifier. In case of a tie prefers '='. If a tie is between '<' and '>' - returns None
"""
counts = []
for qual in ["<", ">", "="]:
counts.append((qual, qualifiers.count(qual)))
counts.sort(key=lambda tup: tup[1], reverse=True)
if counts[0][1] > counts[1][1]:
return counts[0][0]
elif counts[0][0] == "=" or counts[1][0] == "=" or counts[2][1] == counts[0][1]:
return "="
else:
return None
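# A minimal usage sketch of the tie-breaking behaviour described above:
#   >>> most_common_qualifier(['<', '<', '='])
#   '<'
#   >>> most_common_qualifier(['<', '='])   # tie including '=' -> '='
#   '='
#   >>> most_common_qualifier(['<', '>'])   # '<'/'>' tie without '=' -> None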
def aggr_median(values, qualifiers) -> Tuple:
"""Identifies median of values and the most common qualifier"""
return np.median(values), most_common_qualifier(list(qualifiers))
def aggr_min(values, qualifiers) -> Tuple:
"""Identifies the minimum value and teh corresponding qualifier"""
return values[np.argmin(values)], qualifiers[np.argmin(values)]
def aggr_max(values, qualifiers) -> Tuple:
"""Identifies the maximum values and the corresponding qualifier
If '<' qualifier is present, only those elements ae considered
"""
if (">" in qualifiers) or ("=" in qualifiers):
mask = np.array([i for i in range(len(qualifiers)) if qualifiers[i] != "<"])
ind = mask[np.argmax(np.array(values)[mask])]
aggr_value = values[ind]
aggr_qualifier = qualifiers[ind]
else:
        aggr_value = values[np.argmax(values)]
        aggr_qualifier = qualifiers[np.argmax(values)]
    # reconstructed return, mirroring the if-branch above (both paths must
    # yield the (value, qualifier) tuple promised by the signature)
    return aggr_value, aggr_qualifier
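# A minimal usage sketch (hypothetical measurements) for the aggregator above:
#   >>> aggr_max([1.0, 5.0, 3.0], ['<', '<', '='])
#   (3.0, '=')
#   >>> aggr_max([1.0, 5.0, 3.0], ['<', '<', '<'])
#   (5.0, '<')
# The first call ignores the '<' entries because an '=' entry is present.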
# %%
import os
import sys
from collections import Counter
from datetime import datetime, timedelta
from glob import glob
from pathlib import Path
from zipfile import ZipFile
# data wrangling
import geopandas as gpd
import pandas as pd
import numpy as np
import requests
from urllib.error import HTTPError
# data maniuplation
from convertbng.util import convert_lonlat
# logging
from shapely.geometry import Point
import con_checks as con
import geo_checks as geo
# %%
timestr = datetime.now().strftime("%Y_%m_%d")
src_home = Path('./OpenNaPTAN/src/')
data_home = Path('./OpenNaPTAN/data/')
base_path = (f'{os.getcwd()}')
download_home = str(os.path.join(Path.home(), "Downloads"))
naptan_csv_url = 'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format=csv'
naptan_xml_url = 'http://naptan.app.dft.gov.uk/Datarequest/naptan.ashx'
# config options
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 5)
# %%
def intersect_list_to_masks(df, col_name, value_list):
"""[summary] This is a filter function, that performs an inner join when
given a list object and returns a filtered dataframe of the values in the
given list. You must pass a valid column name and a valid list of strings
expected in that column, will filter out all values not in the list
from the given column, returning a dataframe with only the found entries,
that match the list given values in the given column.
Arguments:
        col_name {[str]} -- [the pandas column name, as a string]
value_list {[list]} -- [the list of strings to filter the dataframe.]
Returns:
[gdf] -- [a filtered gdf, with only the found list values within. ]
"""
# uses numpy 1d intersection to filter an entire list of strings
mask = df[col_name].apply(lambda x: np.intersect1d(x, value_list).size > 0)
failed_nodes = df[mask]
return failed_nodes
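# A minimal usage sketch (hypothetical Naptan-style frame and stop type):
#   >>> df = pd.DataFrame({'StopType': ['BCT', 'RSE', 'BCT']})
#   >>> intersect_list_to_masks(df, 'StopType', ['BCT'])
# keeps only the rows whose 'StopType' value appears in the given list.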
# %%
def downloads_naptan_data(format='csv', local_authority_code=''):
"""[summary] Downloads naptan csv files from the app.dft.gov.uk site.
    Assumes nothing further is required for accessing the data via this route.
Args:
format (str, optional): [description]. Defaults to 'csv'.
local_authority_code (str, optional): [description]. Defaults to ''.
Raises:
NotImplementedError: [description]
NotImplementedError: [description]
ve: [description]
"""
dir = str(os.path.join(Path.home(), "Downloads"))
file = Path(f'{download_home}/{timestr}_Naptan_Data.zip')
try:
# let's check if the naptan zip file exists.
if not file.exists():
print('Downloading the entire Naptan Dataset.')
url = 'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format=csv'
response = requests.get(url)
with open(os.path.join(dir, file), 'wb') as f:
f.write(response.content)
response.close()
# the below isn't supported yet.
elif local_authority_code.isdigit():
url = (f'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format={format}&LA={local_authority_code}')
raise NotImplementedError
# the below isn't support yet.
elif format == 'xml':
url = (f'http://naptan.app.dft.gov.uk/DataRequest/Naptan.ashx?format={format}&LA={local_authority_code}')
raise NotImplementedError
else:
            return f'Naptan data for {timestr} has been downloaded.'
except ConnectionError as ce:
sys.exit(f' {ce} No internet connection was found.')
except ConnectionRefusedError as cre:
sys.exit(f'{cre} This system is not allowed to access the Naptan Site.')
except HTTPError as httperror:
sys.exit(f'{httperror} the Naptan download server is unavailable.')
    except ValueError:
        sys.exit('Site is not valid.')
# %%
def extract_naptan_files():
"""[summary] Extracts the downloaded naptan zip file.
    Reads the dated zip file from the Downloads folder and extracts it into
    the data directory; takes no arguments.
"""
zip_file = Path(f'{download_home}/{timestr}_Naptan_Data.zip')
destination = Path(f'{os.getcwd()}/data/{timestr}_Naptan_Data')
try:
if zip_file.is_file() and zip_file.suffix == '.zip':
with ZipFile(zip_file, "r") as zipobj:
# Extract all the contents of zip file in the working directory
zipobj.extractall(destination)
print(f'Extracting all {destination} files in archive.')
except FileNotFoundError:
sys.exit('Naptan archive file not found.')
except FileExistsError:
sys.exit('File already exists')
except Exception as e:
sys.exit(e)
# %%
def check_naptan_files():
"""[summary] Lists the Naptan files available at the specificed location.
If some files are missing/ or can't be open this should flag a warning.
Arguments:
file_list_location {[Path]} -- [description]
file_ext {[file extension]} -- [description]
Returns:
[type] -- [description]
"""
# TODO check if files are readable
file_list_location = (f'{os.getcwd()}/data/{timestr}_Naptan_Data/')
file_ext = 'csv'
expected_file_names = ['AirReferences',
'AlternativeDescriptors',
'AreaHierarchy',
'CoachReferences',
'FerryReferences',
'Flexible',
'HailRide',
'LocalityMainAccessPoints',
'MetroReferences',
'RailReferences',
'StopAreas',
'StopAvailability',
'StopLocalities',
'StopPlusbusZones',
'Stops',
'StopsInArea']
naptan_file_names = []
    # We print whether each expected file is found and, if so, where it
    # lives on the system.
for expected in expected_file_names:
npfile = Path(f'{file_list_location}{expected}.{file_ext}')
if npfile.is_file() and npfile.exists():
naptan_file_names.append(npfile.stem)
print(f'{npfile.name} as a {file_ext} has been found.')
else:
print(f'The {npfile} is missing or the file extension is wrong.')
# %%
def convert_xml_to_df(xml_doc):
    # TODO -- convert xml into a pandas dataframe for easier verification.
    """ Description: Converts a parsed Naptan XML document into a pandas
    dataframe for easier verification.
    Args: xml_doc
    Returns: returns a pandas dataframe of the xml document.
    """
    attr = xml_doc.attrib
    rows = []
    for xml in xml_doc.iter('document'):
        doc_dir = attr.copy()
        doc_dir.update(xml.attrib)
        doc_dir['data'] = xml.text
        rows.append(doc_dir)
    return pd.DataFrame(rows)
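# A hedged usage sketch, assuming an ElementTree-parsed Naptan XML export
# (the file name is illustrative):
#   >>> import xml.etree.ElementTree as ET
#   >>> root = ET.parse('naptan.xml').getroot()
#   >>> df = convert_xml_to_df(root)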
# %%
def file_pep8_cleaning(home, ext):
    """Description: takes a directory and a file extension, then renames the
    matching files to title-cased, underscore-separated names.
    Args:
        home: the target directory, only one at once
        ext: a file type extension, only one at once
    Returns:
    """
    import string  # needed for string.capwords below
    home = Path(home)
    os.chdir(home)
    flist = []
    for p in Path().iterdir():
        if (p.is_file() and p.suffix == ext):
            g = Path(p).stem
            h = string.capwords(g)
            i = h.title()
            j = '_'.join([s[0].upper() + s[1:] for s in i.split(' ')])
            # keep the original extension when renaming
            to_file = Path(home, j).with_suffix(ext)
            flist.append(str(to_file))
            p.rename(to_file)
    with open(Path(home, 'update_list.txt'), 'w+') as file:
        file.write('\n'.join(flist))
# %%
def convert_to_lat_long(df):
"""Descriptions: Converts 100,000+ coordinates in a dataframe
into accurate longitude and latitude adding the columns where they are
missing from a dataframe.
Arguments:
df {[type]} -- [description]
file_name {[type]} -- [description]
"""
easting_np = np.array(df.Easting)
northing_np = np.array(df.Northing)
res_list_np = convert_lonlat(easting_np, northing_np)
df['Longitude'], df['Latitude'] = res_list_np[0], res_list_np[1]
# drop the easting and northing columns, now we are done with them.
df = df.drop(columns=['Easting', 'Northing'], axis=1)
return df
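# A minimal usage sketch (hypothetical British National Grid coordinates):
#   >>> df = pd.DataFrame({'Easting': [530000], 'Northing': [180000]})
#   >>> convert_to_lat_long(df)
# adds 'Longitude'/'Latitude' columns and drops 'Easting'/'Northing'.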
# %%
def deactivated_nodes(df):
"""[summary] - Returns a dataframe of only active, pending, or new nodes
or deleted stops from the last 3 years, for representative sampling.
    Older deleted nodes are removed for the sake of this test. This function
    is not concerned with reporting errors, as it is a data cleaning function.
Arguments:
df {[geopanda dataframe]} -- [The Naptan master dataframe.]
Returns:
[type] -- [description]
"""
# TODO filter this to stops with a modification date time within the last 3
# years so that there is a represenative sample of deactived stops.
try:
exp_date = (datetime.now() - timedelta(days=365*3))
# we filter all the missing deleted stops that are older than 3 yrs.
mask = ~((df['Status'] == 'del') &
(df['ModificationDateTime'] <= exp_date))
active_nodes = df[mask]
# TODO needs to be integrated with reporting function.
# inactive_nodes = df[~mask]
# report.nodes_error_reporting('Inactive Nodes',
# inactive_nodes)
return active_nodes
    except FileNotFoundError as file_missing:
        sys.exit(f'{file_missing}')
# %%
def calculate_naptan_geometry(df):
"""[summary] Takes in a dataframe and returns a dataframe
with a geometry column calculate from using lat and lon CRS.
"""
try:
geom_list = [Point(lon, lat) for lon, lat in zip(df["Longitude"],
df["Latitude"])]
gdf = gpd.GeoDataFrame(df,
geometry=geom_list,
crs={"init": "EPSG:4326"})
return gdf
except ValueError:
print('Value Error could not be converted.')
pass
else:
print('Naptan Geometry conversion failed.')
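# A minimal usage sketch building on the converter above (hypothetical values):
#   >>> df = pd.DataFrame({'Longitude': [-0.1276], 'Latitude': [51.5072]})
#   >>> gdf = calculate_naptan_geometry(df)
#   >>> gdf.geometry.iloc[0]  # a shapely Point in EPSG:4326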
# %%
def get_centroid_naptan_area(df):
"""[summary] to determine where the folium map should be centred, when
generating an ipython view.
Arguments:
df_subframe {[type]} -- [description]
Returns:
[rep_centroid] -- [a fix within geometry point, representative of
all the points in the area.]
"""
length = df['Geometry'].shape[0]
sum_x = np.sum(df['Latitude'])
    sum_y = np.sum(df['Longitude'])
import numpy as np
import pandas as pd
from numba import njit
import pytest
import os
from collections import namedtuple
from itertools import product, combinations
from vectorbt import settings
from vectorbt.utils import checks, config, decorators, math, array, random, enum, data, params
from tests.utils import hash
seed = 42
# ############# config.py ############# #
class TestConfig:
def test_config(self):
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=False)
conf['b']['d'] = 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, frozen=True)
conf['a'] = 2
with pytest.raises(Exception) as e_info:
conf['d'] = 2
with pytest.raises(Exception) as e_info:
conf.update(d=2)
conf.update(d=2, force_update=True)
assert conf['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': 1}}, read_only=True)
with pytest.raises(Exception) as e_info:
conf['a'] = 2
with pytest.raises(Exception) as e_info:
del conf['a']
with pytest.raises(Exception) as e_info:
conf.pop('a')
with pytest.raises(Exception) as e_info:
conf.popitem()
with pytest.raises(Exception) as e_info:
conf.clear()
with pytest.raises(Exception) as e_info:
conf.update(a=2)
assert isinstance(conf.merge_with(dict(b=dict(d=2))), config.Config)
assert conf.merge_with(dict(b=dict(d=2)), read_only=True).read_only
assert conf.merge_with(dict(b=dict(d=2)))['b']['d'] == 2
conf = config.Config({'a': 0, 'b': {'c': [1, 2]}})
conf['a'] = 1
conf['b']['c'].append(3)
conf['b']['d'] = 2
assert conf == {'a': 1, 'b': {'c': [1, 2, 3], 'd': 2}}
conf.reset()
assert conf == {'a': 0, 'b': {'c': [1, 2]}}
def test_merge_dicts(self):
assert config.merge_dicts({'a': 1}, {'b': 2}) == {'a': 1, 'b': 2}
assert config.merge_dicts({'a': 1}, {'a': 2}) == {'a': 2}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'c': 3}}) == {'a': {'b': 2, 'c': 3}}
assert config.merge_dicts({'a': {'b': 2}}, {'a': {'b': 3}}) == {'a': {'b': 3}}
def test_configured(self):
class H(config.Configured):
def __init__(self, a, b=2, **kwargs):
super().__init__(a=a, b=b, **kwargs)
assert H(1).config == {'a': 1, 'b': 2}
assert H(1).copy(b=3).config == {'a': 1, 'b': 3}
assert H(1).copy(c=4).config == {'a': 1, 'b': 2, 'c': 4}
assert H(pd.Series([1, 2, 3])) == H(pd.Series([1, 2, 3]))
assert H(pd.Series([1, 2, 3])) != H(pd.Series([1, 2, 4]))
assert H(pd.DataFrame([1, 2, 3])) == H(pd.DataFrame([1, 2, 3]))
assert H(pd.DataFrame([1, 2, 3])) != H(pd.DataFrame([1, 2, 4]))
assert H(pd.Index([1, 2, 3])) == H(pd.Index([1, 2, 3]))
assert H(pd.Index([1, 2, 3])) != H(pd.Index([1, 2, 4]))
assert H(np.array([1, 2, 3])) == H(np.array([1, 2, 3]))
assert H(np.array([1, 2, 3])) != H(np.array([1, 2, 4]))
assert H(None) == H(None)
assert H(None) != H(10.)
# ############# decorators.py ############# #
class TestDecorators:
def test_class_or_instancemethod(self):
class G:
@decorators.class_or_instancemethod
def g(self_or_cls):
if isinstance(self_or_cls, type):
return True # class
return False # instance
assert G.g()
assert not G().g()
def test_custom_property(self):
class G:
@decorators.custom_property(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_custom_method(self):
class G:
@decorators.custom_method(some='key')
def cache_me(self): return np.random.uniform()
assert 'some' in G.cache_me.kwargs
assert G.cache_me.kwargs['some'] == 'key'
def test_cached_property(self):
np.random.seed(seed)
class G:
@decorators.cached_property
def cache_me(self): return np.random.uniform()
g = G()
cached_number = g.cache_me
assert g.cache_me == cached_number
class G:
@decorators.cached_property(hello="world", hello2="world2")
def cache_me(self): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# clear_cache method
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
G.cache_me.clear_cache(g)
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
# test blacklist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# test whitelist
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me == cached_number
assert g2.cache_me == cached_number2
assert g3.cache_me == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me
cached_number2 = g2.cache_me
cached_number3 = g3.cache_me
assert g.cache_me != cached_number
assert g2.cache_me != cached_number2
assert g3.cache_me != cached_number3
settings.caching.reset()
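# Added summary (inferred from the assertions above, not from library docs):
# blacklist/whitelist entries appear to match a cached attribute by instance,
# (instance, name) pair, bare name, class, (class, name) pair, dotted
# 'Class.name' string, or a dict of the decorator's kwargs; whitelist entries
# only take effect while caching is disabled globally, e.g.:
#
#   settings.caching['enabled'] = False
#   settings.caching['whitelist'].append('cache_me')  # re-enable just this name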
def test_cached_method(self):
np.random.seed(seed)
class G:
@decorators.cached_method
def cache_me(self, b=10): return np.random.uniform()
g = G()
cached_number = g.cache_me()
assert g.cache_me() == cached_number
class G:
@decorators.cached_method(hello="world", hello2="world2")
def cache_me(self, b=10): return np.random.uniform()
assert 'hello' in G.cache_me.kwargs
assert G.cache_me.kwargs['hello'] == 'world'
g = G()
g2 = G()
class G3(G):
pass
g3 = G3()
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# clear_cache method
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
G.cache_me.clear_cache(g)
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
# test blacklist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g.cache_me)
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['blacklist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# disabled globally
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# test whitelist
# function
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g.cache_me)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# instance + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((g, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
# instance
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(g)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append((G, 'cache_me'))
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append(G)
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name + name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G.cache_me')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('G')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# improper class name
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append('g')
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# kwargs
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() == cached_number
assert g2.cache_me() == cached_number2
assert g3.cache_me() == cached_number3
settings.caching.reset()
G.cache_me.clear_cache(g)
G.cache_me.clear_cache(g2)
G3.cache_me.clear_cache(g3)
settings.caching['enabled'] = False
settings.caching['whitelist'].append({'hello': 'world', 'hello2': 'world2', 'hello3': 'world3'})
cached_number = g.cache_me()
cached_number2 = g2.cache_me()
cached_number3 = g3.cache_me()
assert g.cache_me() != cached_number
assert g2.cache_me() != cached_number2
assert g3.cache_me() != cached_number3
settings.caching.reset()
# disabled by non-hashable args
G.cache_me.clear_cache(g)
cached_number = g.cache_me(b=np.zeros(1))
assert g.cache_me(b=np.zeros(1)) != cached_number
def test_traverse_attr_kwargs(self):
class A:
@decorators.custom_property(some_key=0)
def a(self): pass
class B:
@decorators.cached_property(some_key=0, child_cls=A)
def a(self): pass
@decorators.custom_method(some_key=1)
def b(self): pass
class C:
@decorators.cached_method(some_key=0, child_cls=B)
def b(self): pass
@decorators.custom_property(some_key=1)
def c(self): pass
assert hash(str(decorators.traverse_attr_kwargs(C))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key'))) == 16728515581653529580
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=1))) == 703070484833749378
assert hash(str(decorators.traverse_attr_kwargs(C, key='some_key', value=(0, 1)))) == 16728515581653529580
# ############# checks.py ############# #
class TestChecks:
def test_is_pandas(self):
assert not checks.is_pandas(0)
assert not checks.is_pandas(np.array([0]))
assert checks.is_pandas(pd.Series([1, 2, 3]))
assert checks.is_pandas(pd.DataFrame([1, 2, 3]))
def test_is_series(self):
assert not checks.is_series(0)
assert not checks.is_series(np.array([0]))
assert checks.is_series(pd.Series([1, 2, 3]))
assert not checks.is_series(pd.DataFrame([1, 2, 3]))
def test_is_frame(self):
assert not checks.is_frame(0)
assert not checks.is_frame(np.array([0]))
assert not checks.is_frame(pd.Series([1, 2, 3]))
assert checks.is_frame(pd.DataFrame([1, 2, 3]))
def test_is_array(self):
assert not checks.is_array(0)
assert checks.is_array(np.array([0]))
assert checks.is_array(pd.Series([1, 2, 3]))
assert checks.is_array(pd.DataFrame([1, 2, 3]))
def test_is_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
assert not checks.is_numba_func(test_func)
assert checks.is_numba_func(test_func_nb)
def test_is_hashable(self):
assert checks.is_hashable(2)
assert not checks.is_hashable(np.asarray(2))
def test_is_index_equal(self):
assert checks.is_index_equal(
pd.Index([0]),
pd.Index([0])
)
assert not checks.is_index_equal(
pd.Index([0]),
pd.Index([1])
)
assert not checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0])
)
assert checks.is_index_equal(
pd.Index([0], name='name'),
pd.Index([0]),
strict=False
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.Index([0])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]]),
pd.MultiIndex.from_arrays([[0], [1]])
)
assert checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2'])
)
assert not checks.is_index_equal(
pd.MultiIndex.from_arrays([[0], [1]], names=['name1', 'name2']),
pd.MultiIndex.from_arrays([[0], [1]], names=['name3', 'name4'])
)
def test_is_default_index(self):
assert checks.is_default_index(pd.DataFrame([[1, 2, 3]]).columns)
assert checks.is_default_index(pd.Series([1, 2, 3]).to_frame().columns)
assert checks.is_default_index(pd.Index([0, 1, 2]))
assert not checks.is_default_index(pd.Index([0, 1, 2], name='name'))
def test_is_equal(self):
assert checks.is_equal(np.arange(3), np.arange(3), np.array_equal)
assert not checks.is_equal(np.arange(3), None, np.array_equal)
assert not checks.is_equal(None, np.arange(3), np.array_equal)
assert checks.is_equal(None, None, np.array_equal)
def test_is_namedtuple(self):
assert checks.is_namedtuple(namedtuple('Hello', ['world'])(*range(1)))
assert not checks.is_namedtuple((0,))
def test_method_accepts_argument(self):
def test(a, *args, b=2, **kwargs):
pass
assert checks.method_accepts_argument(test, 'a')
assert not checks.method_accepts_argument(test, 'args')
assert checks.method_accepts_argument(test, '*args')
assert checks.method_accepts_argument(test, 'b')
assert not checks.method_accepts_argument(test, 'kwargs')
assert checks.method_accepts_argument(test, '**kwargs')
assert not checks.method_accepts_argument(test, 'c')
def test_assert_in(self):
checks.assert_in(0, (0, 1))
with pytest.raises(Exception) as e_info:
checks.assert_in(2, (0, 1))
def test_assert_numba_func(self):
def test_func(x):
return x
@njit
def test_func_nb(x):
return x
checks.assert_numba_func(test_func_nb)
with pytest.raises(Exception) as e_info:
checks.assert_numba_func(test_func)
def test_assert_not_none(self):
checks.assert_not_none(0)
with pytest.raises(Exception) as e_info:
checks.assert_not_none(None)
def test_assert_type(self):
checks.assert_type(0, int)
checks.assert_type(np.zeros(1), (np.ndarray, pd.Series))
checks.assert_type(pd.Series([1, 2, 3]), (np.ndarray, pd.Series))
with pytest.raises(Exception) as e_info:
checks.assert_type(pd.DataFrame([1, 2, 3]), (np.ndarray, pd.Series))
def test_assert_subclass(self):
class A:
pass
class B(A):
pass
class C(B):
pass
checks.assert_subclass(B, A)
checks.assert_subclass(C, B)
checks.assert_subclass(C, A)
with pytest.raises(Exception) as e_info:
checks.assert_subclass(A, B)
def test_assert_type_equal(self):
checks.assert_type_equal(0, 1)
checks.assert_type_equal(np.zeros(1), np.empty(1))
with pytest.raises(Exception) as e_info:
checks.assert_type(0, np.zeros(1))
def test_assert_dtype(self):
checks.assert_dtype(np.zeros(1), np.float64)
checks.assert_dtype(pd.Series([1, 2, 3]), np.int64)
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3, 4]}), np.int64)
with pytest.raises(Exception) as e_info:
checks.assert_dtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.int64)
def test_assert_subdtype(self):
checks.assert_subdtype([0], np.number)
checks.assert_subdtype(np.array([1, 2, 3]), np.number)
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.number)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(np.array([1, 2, 3]), np.floating)
with pytest.raises(Exception) as e_info:
checks.assert_subdtype(pd.DataFrame({'a': [1, 2], 'b': [3., 4.]}), np.floating)
def test_assert_dtype_equal(self):
checks.assert_dtype_equal([1], [1, 1, 1])
checks.assert_dtype_equal(pd.Series([1, 2, 3]), pd.DataFrame([[1, 2, 3]]))
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3.]]), pd.DataFrame([[1, 2, 3.]]))
with pytest.raises(Exception) as e_info:
checks.assert_dtype_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3.]]))
def test_assert_ndim(self):
checks.assert_ndim(0, 0)
checks.assert_ndim(np.zeros(1), 1)
checks.assert_ndim(pd.Series([1, 2, 3]), (1, 2))
checks.assert_ndim(pd.DataFrame([1, 2, 3]), (1, 2))
with pytest.raises(Exception) as e_info:
checks.assert_ndim(np.zeros((3, 3, 3)), (1, 2))
def test_assert_len_equal(self):
checks.assert_len_equal([[1]], [[2]])
checks.assert_len_equal([[1]], [[2, 3]])
with pytest.raises(Exception) as e_info:
checks.assert_len_equal([[1]], [[2], [3]])
def test_assert_shape_equal(self):
checks.assert_shape_equal(0, 1)
checks.assert_shape_equal([1, 2, 3], np.asarray([1, 2, 3]))
checks.assert_shape_equal([1, 2, 3], pd.Series([1, 2, 3]))
checks.assert_shape_equal(np.zeros((3, 3)), pd.Series([1, 2, 3]), axis=0)
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(1, 0))
with pytest.raises(Exception) as e_info:
checks.assert_shape_equal(np.zeros((2, 3)), pd.Series([1, 2, 3]), axis=(0, 1))
def test_assert_index_equal(self):
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_index_equal(pd.Index([1, 2, 3]), pd.Index([2, 3, 4]))
def test_assert_meta_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_meta_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_meta_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]], columns=columns), pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.Series([1, 2]), pd.DataFrame([1, 2]))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([1, 2]), pd.DataFrame([1, 2, 3]))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([1, 2, 3]), pd.DataFrame([1, 2, 3], index=index))
with pytest.raises(Exception) as e_info:
checks.assert_meta_equal(pd.DataFrame([[1, 2, 3]]), pd.DataFrame([[1, 2, 3]], columns=columns))
def test_assert_array_equal(self):
index = ['x', 'y', 'z']
columns = ['a', 'b', 'c']
checks.assert_array_equal(np.array([1, 2, 3]), np.array([1, 2, 3]))
checks.assert_array_equal(pd.Series([1, 2, 3], index=index), pd.Series([1, 2, 3], index=index))
checks.assert_array_equal(pd.DataFrame([[1, 2, 3]], columns=columns),
pd.DataFrame([[1, 2, 3]], columns=columns))
with pytest.raises(Exception) as e_info:
checks.assert_array_equal(np.array([1, 2]), np.array([1, 2, 3]))
def test_assert_level_not_exists(self):
i = pd.Index(['x', 'y', 'z'], name='i')
multi_i = pd.MultiIndex.from_arrays([['x', 'y', 'z'], ['x2', 'y2', 'z2']], names=['i', 'i2'])
checks.assert_level_not_exists(i, 'i2')
checks.assert_level_not_exists(multi_i, 'i3')
with pytest.raises(Exception) as e_info:
checks.assert_level_not_exists(i, 'i')
checks.assert_level_not_exists(multi_i, 'i')
def test_assert_equal(self):
checks.assert_equal(0, 0)
checks.assert_equal(False, False)
with pytest.raises(Exception) as e_info:
checks.assert_equal(0, 1)
def test_assert_dict_valid(self):
checks.assert_dict_valid(dict(a=2, b=3), [['a', 'b', 'c']])
with pytest.raises(Exception) as e_info:
checks.assert_dict_valid(dict(a=2, b=3, d=4), [['a', 'b', 'c']])
checks.assert_dict_valid(dict(a=2, b=3, c=dict(d=4, e=5)), [['a', 'b', 'c'], ['d', 'e']])
with pytest.raises(Exception) as e_info:
checks.assert_dict_valid(dict(a=2, b=3, c=dict(d=4, f=5)), [['a', 'b', 'c'], ['d', 'e']])
# ############# math.py ############# #
class TestMath:
def test_is_close(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert math.is_close_nb(a, a)
assert math.is_close_nb(a, b)
assert math.is_close_nb(-a, -b)
assert not math.is_close_nb(-a, b)
assert not math.is_close_nb(a, -b)
assert math.is_close_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_close_nb(np.nan, b)
assert not math.is_close_nb(a, np.nan)
# test np.inf
assert not math.is_close_nb(np.inf, b)
assert not math.is_close_nb(a, np.inf)
assert not math.is_close_nb(-np.inf, b)
assert not math.is_close_nb(a, -np.inf)
assert not math.is_close_nb(-np.inf, -np.inf)
assert not math.is_close_nb(np.inf, np.inf)
assert not math.is_close_nb(-np.inf, np.inf)
def test_is_close_or_less(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert math.is_close_or_less_nb(a, a)
assert math.is_close_or_less_nb(a, b)
assert math.is_close_or_less_nb(-a, -b)
assert math.is_close_or_less_nb(-a, b)
assert not math.is_close_or_less_nb(a, -b)
assert math.is_close_or_less_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_close_or_less_nb(np.nan, b)
assert not math.is_close_or_less_nb(a, np.nan)
# test np.inf
assert not math.is_close_or_less_nb(np.inf, b)
assert math.is_close_or_less_nb(a, np.inf)
assert math.is_close_or_less_nb(-np.inf, b)
assert not math.is_close_or_less_nb(a, -np.inf)
assert not math.is_close_or_less_nb(-np.inf, -np.inf)
assert not math.is_close_or_less_nb(np.inf, np.inf)
assert math.is_close_or_less_nb(-np.inf, np.inf)
def test_is_less(self):
a = 0.3
b = 0.1 + 0.2
# test scalar
assert not math.is_less_nb(a, a)
assert not math.is_less_nb(a, b)
assert not math.is_less_nb(-a, -b)
assert math.is_less_nb(-a, b)
assert not math.is_less_nb(a, -b)
assert not math.is_less_nb(1e10 + a, 1e10 + b)
# test np.nan
assert not math.is_less_nb(np.nan, b)
assert not math.is_less_nb(a, np.nan)
# test np.inf
assert not math.is_less_nb(np.inf, b)
assert math.is_less_nb(a, np.inf)
assert math.is_less_nb(-np.inf, b)
assert not math.is_less_nb(a, -np.inf)
assert not math.is_less_nb(-np.inf, -np.inf)
assert not math.is_less_nb(np.inf, np.inf)
assert math.is_less_nb(-np.inf, np.inf)
def test_is_addition_zero(self):
a = 0.3
b = 0.1 + 0.2
assert not math.is_addition_zero_nb(a, b)
assert math.is_addition_zero_nb(-a, b)
assert math.is_addition_zero_nb(a, -b)
assert not math.is_addition_zero_nb(-a, -b)
def test_add_nb(self):
a = 0.3
b = 0.1 + 0.2
assert math.add_nb(a, b) == a + b
assert math.add_nb(-a, b) == 0
assert math.add_nb(a, -b) == 0
assert math.add_nb(-a, -b) == -(a + b)
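# Added note: these tests rely on 0.1 + 0.2 != 0.3 in IEEE-754 doubles, so the
# *_nb helpers presumably compare with a small tolerance rather than ==, e.g.:
#
#   assert (0.1 + 0.2) != 0.3                 # exact comparison fails
#   assert abs((0.1 + 0.2) - 0.3) < 1e-9      # tolerance-based comparison holds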
# ############# array.py ############# #
class TestArray:
def test_is_sorted(self):
assert array.is_sorted(np.array([0, 1, 2, 3, 4]))
assert array.is_sorted(np.array([0, 1]))
assert array.is_sorted(np.array([0]))
assert not array.is_sorted(np.array([1, 0]))
assert not array.is_sorted(np.array([0, 1, 2, 4, 3]))
# nb
assert array.is_sorted_nb(np.array([0, 1, 2, 3, 4]))
assert array.is_sorted_nb(np.array([0, 1]))
assert array.is_sorted_nb(np.array([0]))
assert not array.is_sorted_nb(np.array([1, 0]))
assert not array.is_sorted_nb(np.array([0, 1, 2, 4, 3]))
def test_insert_argsort_nb(self):
a = np.random.uniform(size=1000)
A = a.copy()
I = np.arange(len(A))
array.insert_argsort_nb(A, I)
np.testing.assert_array_equal(np.sort(a), A)
np.testing.assert_array_equal(a[I], A)
def test_get_ranges_arr(self):
np.testing.assert_array_equal(
array.get_ranges_arr(0, 3),
np.array([0, 1, 2])
)
np.testing.assert_array_equal(
array.get_ranges_arr(0, [1, 2, 3]),
np.array([0, 0, 1, 0, 1, 2])
)
np.testing.assert_array_equal(
array.get_ranges_arr([0, 3], [3, 6]),
np.array([0, 1, 2, 3, 4, 5])
)
def test_uniform_summing_to_one_nb(self):
@njit
def set_seed():
np.random.seed(seed)
set_seed()
np.testing.assert_array_almost_equal(
array.uniform_summing_to_one_nb(10),
np.array([
5.808361e-02, 9.791091e-02, 2.412011e-05, 2.185215e-01,
2.241184e-01, 2.456528e-03, 1.308789e-01, 1.341822e-01,
8.453816e-02, 4.928569e-02
])
)
assert np.sum(array.uniform_summing_to_one_nb(10)) == 1
def test_renormalize(self):
assert array.renormalize(0, [0, 10], [0, 1]) == 0
assert array.renormalize(10, [0, 10], [0, 1]) == 1
np.testing.assert_array_equal(
array.renormalize(np.array([0, 2, 4, 6, 8, 10]), [0, 10], [0, 1]),
np.array([0, 0.2, 0.4, 0.6, 0.8, 1.0])
)
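# Added note: judging by the assertions above, renormalize maps x linearly
# from a source range [a, b] to a target range [c, d], i.e.
# c + (x - a) * (d - c) / (b - a), so renormalize(5, [0, 10], [0, 1]) == 0.5.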
import BeeVeeH.bvh as BVHLIB
import math
import copy
import numpy as np
from collections.abc import Iterable
class BVHChannel(object):
ChannelTransformMatrixMap = {
'Xposition': lambda x: np.array([[1, 0, 0, x],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]),
'Yposition': lambda x: np.array([[1, 0, 0, 0],
[0, 1, 0, x],
[0, 0, 1, 0],
[0, 0, 0, 1]]),
'Zposition': lambda x: np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, x],
[0, 0, 0, 1]]),
'Xrotation': lambda x: np.array([[1, 0, 0, 0],
[0, math.cos(math.radians(x)), -math.sin(math.radians(x)), 0],
[0, math.sin(math.radians(x)), math.cos(math.radians(x)), 0],
[0, 0, 0, 1]]),
'Yrotation': lambda x: np.array([[math.cos(math.radians(x)), 0, math.sin(math.radians(x)), 0],
[0, 1, 0, 0],
[-math.sin(math.radians(x)), 0, math.cos(math.radians(x)), 0],
[0, 0, 0, 1]]),
'Zrotation': lambda x: np.array([[math.cos(math.radians(x)), -math.sin(math.radians(x)), 0, 0],
[math.sin(math.radians(x)), math.cos(math.radians(x)), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
}
def __init__(self, name):
super().__init__()
self.name = name
self.value = 0.0
def set_value(self, value):
self.value = value
def matrix(self):
return BVHChannel.ChannelTransformMatrixMap[self.name](self.value)
def str(self):
return 'Channel({name}) = {value}'.format(name=self.name, value=self.value)
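# Illustrative sketch (added; BVHChannel as defined above): each channel maps a
# scalar to a 4x4 homogeneous transform, so a joint's local transform is the
# ordered product of its channel matrices, e.g.:
#
#   ch = BVHChannel('Xrotation')
#   ch.set_value(90.0)
#   m = ch.matrix()           # 4x4 rotation of 90 degrees about the X axis
#   assert m.shape == (4, 4)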
class BVHNode(object):
def __init__(self, key, name, offsets, channel_names, children, weight=1):
super().__init__()
self.key = key
self.name = name
self.children = children # []
self.channels = [BVHChannel(cn) for cn in channel_names] # []
self.offsets = offsets # x, y, z
# weight used when calculating frame-frame distance
self.weight = weight
self.coordinates = []
self.localTrans = []
def search_node(self, name):
if self.name == name:
return self
for child in self.children:
result = child.search_node(name)
if result:
return result
return None
def filter(self, key):
for child in self.children:
if child.key == key:
yield child
def __load_frame(self, frame_data_array):
'''
This function modifies frame_data_array in place, so
make sure you call load_frame() instead of calling this directly.
'''
for channel in self.channels:
channel.set_value(frame_data_array.pop(0))
for child in self.children:
child.__load_frame(frame_data_array)
def load_frame(self, frame_data_array):
frame_data_array = copy.copy(frame_data_array)
self.__load_frame(frame_data_array)
def apply_transformation(self, parent_tran_matrix=np.identity(4)):
# calculate local trans
self.localTrans = np.identity(4)
for channel in self.channels:
self.localTrans = np.dot(self.localTrans, channel.matrix())
# calculate total trans
tran_matrix = np.dot(parent_tran_matrix, self.localTrans)
# calculate coordinates
cor = np.array([self.offsets]).T
self.coordinates = np.dot(tran_matrix, np.append(cor, [[1]], axis=0))[:3]
# iterate the children
for child in self.children:
child.apply_transformation(tran_matrix)
def str(self, show_coordinates=False):
s = 'Node({name}), offset({offset})\n'\
.format(name=self.name,
offset=', '.join([str(o) for o in self.offsets]))
if show_coordinates:
try:
s = s + '\tWorld coordinates: (%.2f, %.2f, %.2f)\n' % (self.coordinates[0],
self.coordinates[1],
self.coordinates[2])
except Exception as e:
print('World coordinates are not available; call apply_transformation() first')
s = s + '\tChannels:\n'
for channel in self.channels:
s = s + '\t\t' + channel.str() + '\n'
for child in self.children:
lines = child.str(show_coordinates=show_coordinates).split('\n')
for line in lines:
s = s + '\t' + line + '\n'
return s
def distance(node_a, node_b):
assert(node_a.name == node_b.name and node_a.weight == node_b.weight)
distance = np.linalg.norm(node_a.coordinates - node_b.coordinates)
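# Hedged usage sketch (added; the distance() row above is truncated by the
# dataset dump): a typical per-frame update loads channel values depth-first,
# then propagates transforms from the root so world coordinates are available:
#
#   root.load_frame(frame_values)    # copies the list; pops one value per channel
#   root.apply_transformation()      # fills node.coordinates for every joint
#   d = distance(joint_a, joint_b)   # L2 distance between matching joints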
import numpy as np
from knapsack import knapsack_dp
import math
from scipy import stats
def generate_summary(ypred, cps, n_frames, nfps, positions, proportion=0.15, method='knapsack'):
"""Generate keyshot-based video summary i.e. a binary vector.
Args:
---------------------------------------------
- ypred: predicted importance scores.
- cps: change points, 2D matrix, each row contains a segment.
- n_frames: original number of frames.
- nfps: number of frames per segment.
- positions: positions of subsampled frames in the original video.
- proportion: length of video summary (compared to original video length).
- method: defines how shots are selected, ['knapsack', 'rank'].
"""
n_segs = cps.shape[0]
frame_scores = np.zeros((n_frames), dtype=np.float32)
if positions.dtype != int:
positions = positions.astype(np.int32)
if positions[-1] != n_frames:
positions = np.concatenate([positions, [n_frames]])
for i in range(len(positions) - 1):
pos_left, pos_right = positions[i], positions[i+1]
if i == len(ypred):
frame_scores[pos_left:pos_right] = 0
else:
frame_scores[pos_left:pos_right] = ypred[i]
seg_score = []
for seg_idx in range(n_segs):
start, end = int(cps[seg_idx,0]), int(cps[seg_idx,1]+1)
scores = frame_scores[start:end]
seg_score.append(float(scores.mean()))
limits = int(math.floor(n_frames * proportion))
if method == 'knapsack':
picks = knapsack_dp(seg_score, nfps, n_segs, limits)
elif method == 'rank':
order = np.argsort(seg_score)[::-1].tolist()
picks = []
total_len = 0
for i in order:
if total_len + nfps[i] < limits:
picks.append(i)
total_len += nfps[i]
else:
raise KeyError("Unknown method {}".format(method))
summary = np.zeros((1), dtype=np.float32) # this element should be deleted
for seg_idx in range(n_segs):
nf = nfps[seg_idx]
if seg_idx in picks:
tmp = np.ones((nf), dtype=np.float32)
else:
tmp = np.zeros((nf), dtype=np.float32)
summary = np.concatenate((summary, tmp))
summary = np.delete(summary, 0) # delete the first element
return summary
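# Minimal usage sketch (added; shapes and values are assumptions, not from the
# original file):
#
#   ypred = np.random.rand(20)               # importance of subsampled frames
#   cps = np.array([[0, 49], [50, 99]])      # two change-point segments
#   summary = generate_summary(ypred, cps, n_frames=100, nfps=[50, 50],
#                              positions=np.arange(0, 100, 5))
#   assert summary.shape == (100,)           # binary keyshot vector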
def kendaltau(machine_summary, user_summary):
machine_summary = machine_summary.astype(np.float32)
user_summary = user_summary.astype(np.float32)
n_users = user_summary.shape[0]
human_tau = []
# print("shape machinesumm:{} \t shape usersum:{}".format(machine_summary.shape, user_summary.shape))
for idx0 in range(n_users - 1):
u0_sort_order = np.argsort(user_summary[idx0])
u0_ranking = np.argsort(user_summary[idx0, u0_sort_order])
for idx1 in range(idx0+1, n_users):
u1_ranking = np.argsort(user_summary[idx1, u0_sort_order])
human_tau.append(stats.kendalltau(u0_ranking, u1_ranking))
human_avg_score = np.mean(human_tau)
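# Added note: scipy.stats.kendalltau returns a (correlation, pvalue) result, so
# averaging the raw results mixes taus with p-values; a sketch that keeps only
# the correlation would be:
#
#   tau, _ = stats.kendalltau(u0_ranking, u1_ranking)
#   human_tau.append(tau)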
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.layout as lay
import wisdem.drivetrainse.drive_structure as ds
from wisdem.commonse import gravity
npts = 12
class TestDirectStructure(unittest.TestCase):
def setUp(self):
self.inputs = {}
self.outputs = {}
self.discrete_inputs = {}
self.discrete_outputs = {}
self.opt = {}
self.discrete_inputs["upwind"] = True
self.inputs["L_12"] = 2.0
self.inputs["L_h1"] = 1.0
self.inputs["L_generator"] = 3.25
# self.inputs['L_2n'] = 1.5
# self.inputs['L_grs'] = 1.1
# self.inputs['L_gsn'] = 1.1
self.inputs["L_hss"] = 0.75
self.inputs["L_gearbox"] = 1.2
self.inputs["overhang"] = 6.25
self.inputs["drive_height"] = 4.875
self.inputs["tilt"] = 4.0
self.inputs["access_diameter"] = 0.9
myones = np.ones(5)
self.inputs["lss_diameter"] = 3.3 * myones
self.inputs["lss_wall_thickness"] = 0.45 * myones
self.inputs["hss_diameter"] = 1.6 * np.ones(3)
self.inputs["hss_wall_thickness"] = 0.25 * np.ones(3)
self.inputs["nose_diameter"] = 2.2 * myones
self.inputs["nose_wall_thickness"] = 0.1 * myones
self.inputs["bedplate_wall_thickness"] = 0.06 * np.ones(npts)
self.inputs["bedplate_flange_width"] = 1.5
self.inputs["bedplate_flange_thickness"] = 0.05
# self.inputs['bedplate_web_height'] = 1.0
self.inputs["bedplate_web_thickness"] = 0.05
self.inputs["D_top"] = 6.5
self.inputs["hub_diameter"] = 4.0
self.inputs["other_mass"] = 200e3
self.inputs["mb1_mass"] = 10e3
self.inputs["mb1_I"] = 10e3 * 0.5 * 2 ** 2 * np.ones(3)
self.inputs["mb2_mass"] = 10e3
self.inputs["mb2_I"] = 10e3 * 0.5 * 1.5 ** 2 * np.ones(3)
self.inputs["mb1_max_defl_ang"] = 0.008
self.inputs["mb2_max_defl_ang"] = 0.008
self.inputs["m_stator"] = 100e3
self.inputs["cm_stator"] = -0.3
self.inputs["I_stator"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_rotor_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_rotor_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_stator_mass"] = 100e3
self.inputs["cm_rotor"] = -0.3
self.inputs["generator_stator_I"] = np.array([1e6, 5e5, 5e5, 0.0, 0.0, 0.0])
self.inputs["generator_mass"] = 200e3
self.inputs["generator_I"] = np.array([2e6, 1e6, 1e6, 0.0, 0.0, 0.0])
self.inputs["gearbox_mass"] = 100e3
self.inputs["gearbox_I"] = np.array([1e6, 5e5, 5e5])
self.inputs["brake_mass"] = 10e3
self.inputs["brake_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["carrier_mass"] = 10e3
self.inputs["carrier_I"] = np.array([1e4, 5e3, 5e3])
self.inputs["gear_ratio"] = 1.0
self.inputs["F_mb1"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([2409.750e3, -1716.429e3, 74.3529e3]).reshape((3, 1))
self.inputs["M_mb1"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["M_mb2"] = np.array([-1.83291e7, 6171.7324e3, 5785.82946e3]).reshape((3, 1))
self.inputs["hub_system_mass"] = 100e3
self.inputs["hub_system_cm"] = 2.0
self.inputs["hub_system_I"] = np.array([2409.750e3, -1716.429e3, 74.3529e3, 0.0, 0.0, 0.0])
self.inputs["F_hub"] = np.array([2409.750e3, 0.0, 74.3529e2]).reshape((3, 1))
self.inputs["M_hub"] = np.array([-1.83291e4, 6171.7324e2, 5785.82946e2]).reshape((3, 1))
self.inputs["lss_E"] = self.inputs["hss_E"] = self.inputs["bedplate_E"] = 210e9
self.inputs["lss_G"] = self.inputs["hss_G"] = self.inputs["bedplate_G"] = 80.8e9
self.inputs["lss_rho"] = self.inputs["hss_rho"] = self.inputs["bedplate_rho"] = 7850.0
self.inputs["lss_Xy"] = self.inputs["hss_Xy"] = self.inputs["bedplate_Xy"] = 250e6
self.opt["gamma_f"] = 1.35
self.opt["gamma_m"] = 1.3
self.opt["gamma_n"] = 1.0
def compute_layout(self, direct=True):
myobj = lay.DirectLayout() if direct else lay.GearedLayout()
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
for k in self.outputs.keys():
self.inputs[k] = self.outputs[k]
def testBaseF_BaseM(self):
self.inputs["tilt"] = 0.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt(self):
self.inputs["tilt"] = 5.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(
self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
def testBaseF_BaseM_Downwind(self):
self.inputs["tilt"] = 0.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"], M0 + self.inputs["M_mb1"] + self.inputs["M_mb2"], decimal=-1)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 2 * self.inputs["F_mb2"][:2])
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity + 2 * 50e2)
def testBaseF_BaseM_withTilt_Downwind(self):
self.inputs["tilt"] = 5.0
self.discrete_inputs["upwind"] = False
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.compute_layout()
myobj = ds.Nose_Stator_Bedplate_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][-1], 0.0)
F0 = self.outputs["base_F"]
M0 = self.outputs["base_M"]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][0], 0.0)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
npt.assert_almost_equal(self.outputs["base_M"][2], 0.0)
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1], decimal=0)
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(
self.outputs["base_M"][1], M0[1] + self.inputs["M_mb1"][1] + self.inputs["M_mb2"][1], decimal=-1
)
self.inputs["F_mb1"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
self.inputs["F_mb2"] = np.array([30e2, 40e2, 50e2]).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][1], 2 * self.inputs["F_mb2"][1])
def testBaseF_BaseM_Geared(self):
self.inputs["tilt"] = 0.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
F0 = self.outputs["base_F"][:, 0]
M0 = self.outputs["base_M"][:, 0]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][1], M0[1])
self.inputs["M_mb1"] = 10e3 * np.arange(1, 4).reshape((3, 1))
self.inputs["M_mb2"] = 20e3 * np.arange(1, 4).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity, decimal=0)
# npt.assert_almost_equal(self.outputs['base_M'], M0+self.inputs['M_mb1']+self.inputs['M_mb2'], decimal=-1)
self.inputs["F_mb1"] = self.inputs["F_mb2"] = self.inputs["F_generator"] = self.inputs["F_torq"] = np.array(
[30e2, 40e2, 50e2]
).reshape((3, 1))
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 4 * self.inputs["F_mb1"][:2, 0], decimal=1)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity + 4 * 50e2, decimal=0)
def testBaseF_BaseM_withTilt_Geared(self):
self.inputs["tilt"] = 5.0
self.inputs["F_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["F_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["F_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["F_generator"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb1"] = np.zeros(3).reshape((3, 1))
self.inputs["M_mb2"] = np.zeros(3).reshape((3, 1))
self.inputs["M_torq"] = np.zeros(3).reshape((3, 1))
self.inputs["M_generator"] = np.zeros(3).reshape((3, 1))
self.compute_layout(False)
myobj = ds.Bedplate_IBeam_Frame(modeling_options=self.opt, n_dlcs=1)
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=2)
npt.assert_almost_equal(self.outputs["base_M"][[0, 2], 0], 0.0, decimal=2)
F0 = self.outputs["base_F"][:, 0]
M0 = self.outputs["base_M"][:, 0]
self.inputs["other_mass"] += 500e3
myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)
npt.assert_almost_equal(self.outputs["base_F"][:2, 0], 0.0, decimal=1)
npt.assert_almost_equal(self.outputs["base_F"][2, 0], F0[2] - 500e3 * gravity)
# Copyright 2021 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target tfq_inner_product_grad."""
import copy
import numpy as np
from absl.testing import parameterized
import tensorflow as tf
import cirq
from tensorflow_quantum.core.ops.math_ops import inner_product_op
from tensorflow_quantum.python import util
class InnerProductAdjGradTest(tf.test.TestCase, parameterized.TestCase):
"""Tests tfq_inner_product_grad."""
def test_inner_product_grad_inputs(self):
"""Makes sure that inner_product_adj_grad fails on bad inputs."""
n_qubits = 5
batch_size = 5
n_other_programs = 3
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
prev_grad = np.ones((batch_size, n_other_programs))
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
other_batch = [
util.random_circuit_resolver_batch(qubits, n_other_programs)[0]
for i in range(batch_size)
]
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# Circuit tensor has too many dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor([circuit_batch]),
symbol_names, symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1.'):
# symbol_names tensor has too many dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
np.array([symbol_names]), symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too many dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]),
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too few dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array[0],
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'other_programs must be rank 2.'):
# other_programs tensor has too few dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(circuit_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'other_programs must be rank 2.'):
# pauli_sums tensor has too many dimensions.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in other_batch]), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# circuit tensor has the right type but invalid values.
inner_product_op._inner_product_grad(
['junk'] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type but invalid values.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
['junk'], symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'not found in reference circuit'):
# other_programs tensor has the right type but operates on
# qubits that the reference circuit doesn't have.
new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
new_circuits, _ = util.random_circuit_resolver_batch(
new_qubits, batch_size)
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_circuits]), prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'not found in paired circuit'):
# other_programs tensor has the right type but operates on
# fewer qubits than the reference circuit has.
new_qubits = cirq.GridQubit.rect(1, n_qubits - 1)
new_circuits, _ = util.random_circuit_resolver_batch(
new_qubits, batch_size)
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_circuits]), prev_grad)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# circuits tensor has the wrong type.
inner_product_op._inner_product_grad(
[1.0] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
[0.1234], symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
symbol_names, [['junk']] * batch_size,
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# other_programs tensor has the wrong type.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [[1.0]] * batch_size, prev_grad)
with self.assertRaisesRegex(TypeError, 'missing'):
# we are missing an argument.
# pylint: disable=no-value-for-parameter
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, prev_grad)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(TypeError, 'positional arguments'):
# pylint: disable=too-many-function-args
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(other_batch), prev_grad, [])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
            # other_programs tensor has the wrong batch size.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor(other_batch[:int(batch_size * 0.5)]),
prev_grad)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
            # symbol_values tensor has the wrong batch size.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[::int(batch_size * 0.5)],
util.convert_to_tensor(other_batch), prev_grad)
with self.assertRaisesRegex(
tf.errors.InvalidArgumentError,
expected_regex='Found symbols in other_programs'):
# other_programs has symbols.
inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in circuit_batch]), prev_grad)
res = inner_product_op._inner_product_grad(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array.astype(np.float64),
util.convert_to_tensor(other_batch), prev_grad)
self.assertDTypeEqual(res, np.complex64)
@parameterized.parameters([
{
'n_qubits': 5,
'batch_size': 1,
'inner_dim_size': 5
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 1
},
{
'n_qubits': 10,
'batch_size': 10,
'inner_dim_size': 2
},
{
'n_qubits': 5,
'batch_size': 10,
'inner_dim_size': 5
},
])
def test_correctness_with_symbols(self, n_qubits, batch_size,
inner_dim_size):
"""Tests that inner_product works with symbols."""
symbol_names = ['alpha', 'beta', 'gamma']
n_params = len(symbol_names)
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch, resolver_batch = \
util.random_symbol_circuit_resolver_batch(
qubits, symbol_names, batch_size)
other_batch = [
util.random_circuit_resolver_batch(qubits, inner_dim_size)[0]
for i in range(batch_size)
]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
programs = util.convert_to_tensor(circuit_batch)
other_programs = util.convert_to_tensor(other_batch)
symbol_names_tensor = tf.convert_to_tensor(symbol_names,
dtype=tf.dtypes.string)
symbol_values = tf.convert_to_tensor(symbol_values_array)
prev_grad = tf.cast(tf.random.normal((batch_size, inner_dim_size)),
tf.complex64)
out = inner_product_op._inner_product_grad(programs,
symbol_names_tensor,
symbol_values,
other_programs, prev_grad)
        out_arr = np.zeros((batch_size, n_params), dtype=np.complex64)
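# --- Illustrative sketch (added; not part of the original test file). ---
# The op exercised above computes d<psi(values)|phi>/d(values) analytically.
# Below is a minimal numpy-only analogue of that idea for a one-parameter,
# one-qubit state |psi(t)> = (cos t, sin t), validated against a central
# finite difference. All names here are hypothetical.
import numpy as np


def _toy_inner_product_grad(theta, phi):
    """d/dt <psi(t)|phi> for psi(t) = (cos t, sin t)."""
    dpsi_dt = np.array([-np.sin(theta), np.cos(theta)])
    return np.vdot(dpsi_dt, phi)


_t, _phi, _eps = 0.3, np.array([0.6, 0.8]), 1e-6
_psi = lambda t: np.array([np.cos(t), np.sin(t)])
_fd = (np.vdot(_psi(_t + _eps), _phi)
       - np.vdot(_psi(_t - _eps), _phi)) / (2 * _eps)
assert abs(_toy_inner_product_grad(_t, _phi) - _fd) < 1e-5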
import numpy as np
class GoGame:
def __init__(self, game_size=19, handicap=None, rule='weiqi'):
# assert game_size in [9, 13, 19]
assert rule in ['weiqi', 'wuzi']
self.end_game = False
self.game_size = game_size
        # game_board represents the current state of the game: 1 is a black stone, 2 is a white stone
self.game_board = np.zeros(shape=(game_size, game_size), dtype=np.int8)
        # boardered is game_board with borders; border cells hold the value 3. It uses a shifted coordinate system in which (x, y) maps to (x + 1, y + 1)
self.boardered = None
        # map storing, for each stone, whether it has qi (liberties) or not
self.qi_map = np.zeros(shape=(game_size, game_size), dtype=np.int8)
        # group_map shows which group each stone belongs to
self.group_map = np.zeros(shape=(game_size, game_size), dtype=np.int8)
self.group_indexes = []
        # da_jie is a boolean indicating whether a jie (ko) is being fought
self.da_jie = False
self.game_history = []
self.player = 1 # black goes
self.rule = rule
if handicap:
self.handicap = []
self.player = 2 # white goes first when handicap
def add_boarder(self):
        # this method adds a border to the game_board matrix, generating the bordered map of all stones
vertical = np.ones(shape=(1, self.game_size), dtype=np.int8).T
self.boardered = np.column_stack((3 * vertical, self.game_board, 3 * vertical))
        horizontal = np.ones(shape=(1, self.game_size + 2))
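# --- Illustrative sketch (added; not part of the original class). ---
# How the bordered board could be completed, assuming the row border is
# stacked the same way as the column border built above:
import numpy as np

_board = np.zeros((3, 3), dtype=np.int8)
_v = np.ones(shape=(3, 1), dtype=np.int8)
_cols = np.column_stack((3 * _v, _board, 3 * _v))       # add left/right walls
_h = np.ones(shape=(1, _cols.shape[1]), dtype=np.int8)
_bordered = np.vstack((3 * _h, _cols, 3 * _h))          # add top/bottom walls
assert _bordered.shape == (5, 5) and _bordered[0, 0] == 3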
"""
miscellaneous functions and classes to extract connectivity metrics
Author: <NAME>, PhD [<EMAIL>], https://twitter.com/davemomi
"""
import numpy as np
import pandas as pd
from math import pi
import glob
import seaborn as sns
import matplotlib.pyplot as plt
import bct as bct
class Connectivity_metrics(object):
def __init__(self, matrices_files, net_label_txt, labels_dic):
self.matrices_files = matrices_files
self.net_label_txt = net_label_txt
self.labels_dic = labels_dic
def nodes_overall_conn(self, make_symmetric=True, upper_threshold=None,
lower_threshold=None):
'''
computing the overall connectivity of each node
regardless of network affiliation
Parameters
----------
        make_symmetric: Boolean|
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized
                        False indicates that the matrix is already a full matrix
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing a
                        percentage of the matrix maximum. Values below
                        that threshold will be set to 0 (default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing a
                        percentage of the matrix maximum. Values above
                        that threshold will be set to 0 (default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node regardless
of network affiliation
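        Examples
        --------
        Illustrative usage (file names and labels here are hypothetical)::

            cm = Connectivity_metrics(glob.glob('subj*_mat.txt'),
                                      'net_labels.txt', labels_dic)
            conn = cm.nodes_overall_conn(make_symmetric=True,
                                         upper_threshold=20)
            conn.shape  # -> (n_subjects, n_nodes)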
'''
self.nodes_conn = []
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
            self.max = np.max(self.matrix.flatten())
            if upper_threshold is not None:
                self.matrix[self.matrix < upper_threshold*self.max/100] = 0
            if lower_threshold is not None:
                self.matrix[self.matrix > lower_threshold*self.max/100] = 0
np.fill_diagonal(self.matrix,0)
for nodes in range(self.matrix.shape[0]):
self._node_conn = np.sum(self.matrix[nodes])
self.nodes_conn.append(self._node_conn)
self.nodes_conn = np.array(self.nodes_conn)
self.nodes_conn = self.nodes_conn.reshape(len(self.matrices_files), self.matrix.shape[0])
return self.nodes_conn
def node_inner_conn(self, sbj_number, nodes_number, make_symmetric=True,
upper_threshold=None, lower_threshold=None):
'''
computing the connectivity of each node with its own network
Parameters
----------
sbj_number: int |
number of subjects
nodes_number: int|
number of nodes
        make_symmetric: Boolean|
                        True indicates that the matrix is either upper
                        or lower triangular and needs to be symmetrized
                        False indicates that the matrix is already a full matrix
        upper_threshold: int |
                        an integer value ranging from 0 to 100 representing a
                        percentage of the matrix maximum. Values below
                        that threshold will be set to 0 (default is None)
        lower_threshold: int |
                        an integer value ranging from 0 to 100 representing a
                        percentage of the matrix maximum. Values above
                        that threshold will be set to 0 (default is None)
Returns
-------
float data : numpy array |
numpy array (dim number of subject X number of node)
representing the connectivity of each node with its own
network
'''
with open(self.net_label_txt) as f:
net=f.read().splitlines()
self.all_conn = np.zeros([sbj_number, nodes_number])
for subj in range(len(self.matrices_files)):
self.matrix = pd.read_csv(self.matrices_files[subj], sep= ' ', header=None)
self.matrix = np.array(self.matrix)
            if make_symmetric:
                self.matrix = self.matrix + self.matrix.T - np.diag(self.matrix.diagonal())
            self.max = np.max(self.matrix.flatten())
            if upper_threshold is not None:
                self.matrix[self.matrix < upper_threshold*self.max/100] = 0
            if lower_threshold is not None:
                self.matrix[self.matrix > lower_threshold*self.max/100] = 0
np.fill_diagonal(self.matrix,0)
for network in net:
for nodes in self.labels_dic[network]:
                    self.sub_matrix = self.matrix[nodes]
                    self.streamlines_sum = np.sum(self.sub_matrix[self.labels_dic[network]])
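# --- Illustrative note (added; not part of the original module). ---
# labels_dic is assumed to map network names to arrays of node indices,
# so matrix[node][labels_dic[network]] selects the within-network edges:
import numpy as np

_labels_dic = {'DMN': np.array([0, 2]), 'Visual': np.array([1, 3])}
_m = np.arange(16).reshape(4, 4)
_within = np.sum(_m[0][_labels_dic['DMN']])  # row 0 restricted to DMN nodes
assert _within == _m[0, 0] + _m[0, 2]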
import numpy as np
import scipy
import matplotlib.pyplot as plt
import statsmodels.api as sm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import matplotlib.gridspec as gridspec
from hydroDL import utils
import string
import os
# manually add package
# os.environ[
# 'PROJ_LIB'] = r'C:\pythonenvir\pkgs\proj4-5.2.0-ha925a31_1\Library\share'
from mpl_toolkits import basemap
def plotBoxFig(data,
label1=None,
label2=None,
colorLst='rbkgcmy',
title=None,
figsize=(8, 6),
sharey=True,
legOnly=False):
nc = len(data)
fig, axes = plt.subplots(ncols=nc, sharey=sharey, figsize=figsize)
for k in range(0, nc):
ax = axes[k] if nc > 1 else axes
temp = data[k]
if type(temp) is list:
for kk in range(len(temp)):
tt = temp[kk]
                if tt is not None and len(tt) > 0:
                    temp[kk] = tt[~np.isnan(tt)]
                else:
                    temp[kk] = []
else:
temp = temp[~np.isnan(temp)]
bp = ax.boxplot(temp, patch_artist=True, notch=True, showfliers=False)
for kk in range(0, len(bp['boxes'])):
plt.setp(bp['boxes'][kk], facecolor=colorLst[kk])
if label1 is not None:
ax.set_xlabel(label1[k])
else:
ax.set_xlabel(str(k))
ax.set_xticks([])
# ax.ticklabel_format(axis='y', style='sci')
if label2 is not None:
ax.legend(bp['boxes'], label2, loc='best')
if legOnly is True:
ax.legend(bp['boxes'], label2, bbox_to_anchor=(1, 0.5))
if title is not None:
fig.suptitle(title)
return fig
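# Example usage (added; illustrative, data are synthetic):
# fig = plotBoxFig([np.random.randn(100),
#                   [np.random.randn(50), np.random.randn(60)]],
#                  label1=['site A', 'site B'], label2=['model', 'obs'],
#                  title='demo')
# fig.savefig('box.png')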
def plotTS(t,
y,
*,
ax=None,
tBar=None,
figsize=(12, 4),
cLst='rbkgcmy',
markerLst=None,
legLst=None,
title=None,
linewidth=2):
newFig = False
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.subplots()
newFig = True
if type(y) is np.ndarray:
y = [y]
for k in range(len(y)):
tt = t[k] if type(t) is list else t
yy = y[k]
legStr = None
if legLst is not None:
legStr = legLst[k]
if markerLst is None:
if True in np.isnan(yy):
ax.plot(tt, yy, '*', color=cLst[k], label=legStr)
else:
ax.plot(
tt, yy, color=cLst[k], label=legStr, linewidth=linewidth)
else:
            if markerLst[k] == '-':
ax.plot(
tt, yy, color=cLst[k], label=legStr, linewidth=linewidth)
else:
ax.plot(
tt, yy, color=cLst[k], label=legStr, marker=markerLst[k])
# ax.set_xlim([np.min(tt), np.max(tt)])
if tBar is not None:
ylim = ax.get_ylim()
tBar = [tBar] if type(tBar) is not list else tBar
for tt in tBar:
ax.plot([tt, tt], ylim, '-k')
if legLst is not None:
ax.legend(loc='best')
if title is not None:
ax.set_title(title)
if newFig is True:
return fig, ax
else:
return ax
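# Example usage (added; illustrative):
# t = np.arange(100)
# fig, ax = plotTS(t, [np.sin(t / 5), np.cos(t / 5)],
#                  cLst='rb', legLst=['sin', 'cos'], title='demo')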
def plotVS(x,
y,
*,
ax=None,
title=None,
xlabel=None,
ylabel=None,
titleCorr=True,
plot121=True,
doRank=False,
figsize=(8, 6)):
if doRank is True:
x = scipy.stats.rankdata(x)
y = scipy.stats.rankdata(y)
corr = scipy.stats.pearsonr(x, y)[0]
pLr = np.polyfit(x, y, 1)
xLr = np.array([np.min(x), np.max(x)])
yLr = np.poly1d(pLr)(xLr)
if ax is None:
fig = plt.figure(figsize=figsize)
ax = fig.subplots()
else:
fig = None
if title is not None:
if titleCorr is True:
title = title + ' ' + r'$\rho$={:.2f}'.format(corr)
ax.set_title(title)
else:
if titleCorr is True:
ax.set_title(r'$\rho$=' + '{:.2f}'.format(corr))
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
# corr = np.corrcoef(x, y)[0, 1]
ax.plot(x, y, 'b.')
ax.plot(xLr, yLr, 'r-')
if plot121 is True:
plot121Line(ax)
return fig, ax
def plot121Line(ax, spec='k-'):
xlim = ax.get_xlim()
ylim = ax.get_ylim()
vmin = np.min([xlim[0], ylim[0]])
vmax = np.max([xlim[1], ylim[1]])
ax.plot([vmin, vmax], [vmin, vmax], spec)
def plotMap(data,
*,
ax=None,
lat=None,
lon=None,
title=None,
cRange=None,
shape=None,
pts=None,
figsize=(8, 4),
plotColorBar=True):
if cRange is not None:
vmin = cRange[0]
vmax = cRange[1]
else:
temp = flatData(data)
vmin = np.percentile(temp, 5)
vmax = np.percentile(temp, 95)
if ax is None:
        fig, ax = plt.subplots(figsize=figsize)
if len(data.squeeze().shape) == 1:
isGrid = False
else:
isGrid = True
mm = basemap.Basemap(
        llcrnrlat=np.min(lat),
import os
import cv2
import xml.etree.ElementTree as ET
import numpy as np
def argsProcessor():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dataPath", help="DataPath")
parser.add_argument("-o", "--outputFiles", help="outputFiles", default="bar")
return parser.parse_args()
if __name__ == '__main__':
args = argsProcessor()
dir = args.dataPath
output_dir = args.outputFiles
if (not os.path.isdir(output_dir)):
os.mkdir(output_dir)
import csv
with open(args.outputFiles+"/gt.csv", 'a') as csvfile:
spamwriter = csv.writer(csvfile, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for folder in os.listdir(dir):
a = 0
print (str(folder))
if (os.path.isdir(dir + "/" + folder)):
for file in os.listdir(dir + "/" + folder):
images_dir = dir + "/" + folder + "/" + file
if (os.path.isdir(images_dir)):
list_gt = []
tree = ET.parse(images_dir + "/" + file + ".gt")
root = tree.getroot()
for a in root.iter("frame"):
if a.attrib["rejected"] != "false":
print ("Some frames are not valid")
                                raise ValueError("Some frames are not valid")
list_gt.append(a)
#print list_gt
for image in os.listdir(images_dir):
if image.endswith(".jpg"):
try:
# Now we have opened the file and GT. Write code to create multiple files and scale gt
list_of_points = {}
img = cv2.imread(images_dir + "/" + image)
#print image[0:-4]
for point in list_gt[int(float(image[0:-4])) - 1].iter("point"):
myDict = point.attrib
list_of_points[myDict["name"]] = (
int(float(myDict['x'])), int(float(myDict['y'])))
doc_height = min(list_of_points["bl"][1] - list_of_points["tl"][1],
list_of_points["br"][1] - list_of_points["tr"][1])
doc_width = min(list_of_points["br"][0] - list_of_points["bl"][0],
list_of_points["tr"][0] - list_of_points["tl"][0])
ptr1 = (min(list_of_points["tl"][0], list_of_points["bl"][0], list_of_points["tr"][0], list_of_points["br"][0]),
min(list_of_points["tr"][1], list_of_points["tl"][1], list_of_points["br"][1], list_of_points["bl"][1]))
ptr2 = (max(list_of_points["tl"][0], list_of_points["bl"][0], list_of_points["tr"][0], list_of_points["br"][0]),max(list_of_points["tr"][1], list_of_points["tl"][1], list_of_points["br"][1], list_of_points["bl"][1]))
start_x = np.random.randint(0,ptr1[0]-2)
start_y = np.random.randint(0,ptr1[1]-2)
end_x = np.random.randint(ptr2[0]+2, img.shape[1])
end_y = np.random.randint(ptr2[1]+2, img.shape[0])
                            myGt = np.asarray((list_of_points["tl"], list_of_points["tr"], list_of_points["br"], list_of_points["bl"]))
import argparse
import glob
import os
import pickle
import sys
import time
from itertools import product
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.nonparametric.api as smnp
import swifter
import utils
import graphs
N_PROC = 10
BASE_DIR = '/home/johnmcbride/projects/Scales/Data_compare/'
RAW_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Raw/'
PRO_DIR = '/home/johnmcbride/projects/Scales/Toy_model/Data/Processed/'
REAL_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Samples')
DIST_DIR = os.path.join(BASE_DIR, 'Processed/Real', 'Sample_dist')
def calc_relative_entropy(pk, qk):
RE = 0.0
for i in range(len(pk)):
if pk[i] <= 0 or qk[i] <= 0:
pass
else:
RE += pk[i] * np.log(pk[i] / qk[i])
return RE
def calc_jensen_shannon_distance(pk, qk):
mk = 0.5 * (pk + qk)
return (0.5 * (calc_relative_entropy(pk, mk) + calc_relative_entropy(qk, mk))) ** 0.5
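# Added sanity check (illustrative): the distance of a distribution to
# itself is 0 and the measure is symmetric in its two arguments.
_p, _q = np.array([0.5, 0.5]), np.array([0.9, 0.1])
assert calc_jensen_shannon_distance(_p, _p) == 0.0
assert np.isclose(calc_jensen_shannon_distance(_p, _q),
                  calc_jensen_shannon_distance(_q, _p))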
def smooth_dist_kde(df, cat='pair_ints', hist=False, nbins=1202):
X = [float(x) for y in df.loc[:,cat] for x in y.split(';')]
    kde = smnp.KDEUnivariate(np.array(X))
from online_inference import SecondBackend, build_network, inference
import numpy as np
import csv
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.patches as patches
BOX_COLOUR_SCHEME = {
'Car': '#00FF00', # Green
'Pedestrian': '#00FFFF', # Teal
'Cyclist': '#FFFF00' # Yellow
}
fig_size = (16, 9)
gt_classes = ['Car', 'Pedestrian']
class ObjectLabel:
"""Object Label Class
1 type Describes the type of object: 'Car', 'Van', 'Truck',
'Pedestrian', 'Person_sitting', 'Cyclist', 'Tram',
'Misc' or 'DontCare'
1 truncated Float from 0 (non-truncated) to 1 (truncated), where
truncated refers to the object leaving image boundaries
1 occluded Integer (0,1,2,3) indicating occlusion state:
0 = fully visible, 1 = partly occluded
2 = largely occluded, 3 = unknown
1 alpha Observation angle of object, ranging [-pi..pi]
4 bbox 2D bounding box of object in the image (0-based index):
contains left, top, right, bottom pixel coordinates
3 dimensions 3D object dimensions: height, width, length (in meters)
3 location 3D object location x,y,z in camera coordinates (in meters)
1 rotation_y Rotation ry around Y-axis in camera coordinates [-pi..pi]
1 score Only for results: Float, indicating confidence in
detection, needed for p/r curves, higher is better.
"""
def __init__(self):
self.type = "" # Type of object
self.truncation = 0.
self.occlusion = 0.
self.alpha = 0.
self.x1 = 0.
self.y1 = 0.
self.x2 = 0.
self.y2 = 0.
self.h = 0.
self.w = 0.
self.l = 0.
self.t = (0., 0., 0.)
self.ry = 0.
self.score = 0.
def __eq__(self, other):
"""Compares the given object to the current ObjectLabel instance.
:param other: object to compare to this instance against
:return: True, if other and current instance is the same
"""
if not isinstance(other, ObjectLabel):
return False
if self.__dict__ != other.__dict__:
return False
else:
return True
def visualization(image, display=True):
"""Forms the plot figure and axis for the visualization
Keyword arguments:
    :param image -- the image array to display
    :param display -- display the image in non-blocking fashion
"""
def set_plot_limits(axes, image):
# Set the plot limits to the size of the image, y is inverted
axes.set_xlim(0, image.shape[1])
axes.set_ylim(image.shape[0], 0)
# Create the figure
fig, (ax1, ax2) = plt.subplots(2, 1, figsize=fig_size, sharex=True)
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0, hspace=0.0)
# plot images
ax1.imshow(image)
ax2.imshow(image)
set_plot_limits(ax1, image)
set_plot_limits(ax2, image)
if display:
plt.show(block=False)
return fig, ax1, ax2
def visualization_single_plot(image, display=True):
"""Forms the plot figure and axis for the visualization
Keyword arguments:
    :param image -- the image array to display
    :param display -- display the image in non-blocking fashion
"""
# Create the figure
fig, ax = plt.subplots(1, figsize=fig_size, facecolor='black')
fig.subplots_adjust(left=0.0, bottom=0.0, right=1.0, top=1.0,
hspace=0.0, wspace=0.0)
# Set axes settings
ax.set_axis_off()
ax.set_xlim(0, image.shape[1])
ax.set_ylim(image.shape[0], 0)
# plot images
ax.imshow(image)
if display:
plt.show(block=False)
return fig, ax
def project_to_image(point_cloud, p):
""" Projects a 3D point cloud to 2D points for plotting
:param point_cloud: 3D point cloud (3, N)
:param p: Camera matrix (3, 4)
:return: pts_2d: the image coordinates of the 3D points in the shape (2, N)
"""
pts_2d = np.dot(p, np.append(point_cloud,
np.ones((1, point_cloud.shape[1])),
axis=0))
pts_2d[0, :] = pts_2d[0, :] / pts_2d[2, :]
pts_2d[1, :] = pts_2d[1, :] / pts_2d[2, :]
pts_2d = np.delete(pts_2d, 2, 0)
return pts_2d
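# Added worked check (illustrative): with an identity camera matrix
# p = [I | 0], a 3D point (x, y, z) projects to (x/z, y/z).
_p_cam = np.hstack([np.eye(3), np.zeros((3, 1))])
_pts3d = np.array([[2.0], [4.0], [2.0]])
assert np.allclose(project_to_image(_pts3d, _p_cam), [[1.0], [2.0]])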
def compute_box_corners_3d(object_label):
"""Computes the 3D bounding box corner positions from an ObjectLabel
:param object_label: ObjectLabel to compute corners from
:return: a numpy array of 3D corners if the box is in front of the camera,
an empty array otherwise
"""
# Compute rotational matrix
rot = np.array([[+np.cos(object_label.ry), 0, +np.sin(object_label.ry)],
[0, 1, 0],
                    [-np.sin(object_label.ry), 0, +np.cos(object_label.ry)]])
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 17:08:13 2015
@author: jordan
"""
import os
import sys
import glob
from datetime import datetime, timedelta
import warnings
import numpy as np
from astropy.table import Table, Column, hstack
from astropy.stats import sigma_clipped_stats, gaussian_fwhm_to_sigma
from astropy.wcs import WCS
from astropy.wcs.utils import proj_plane_pixel_area
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.modeling import models, fitting
from astroquery.vizier import Vizier
import scipy.odr as odr
from skimage.measure import moments
from matplotlib import pyplot as plt
from photutils import daofind, aperture_photometry, CircularAperture, CircularAnnulus
import emcee
import corner
import pdb
# Add the AstroImage class
from astroimage.astroimage import AstroImage
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201501\\'
# Set the saturation limit for this image (a property of the detector)
# satLimit = 12e3
satLimit = 16e3
# satLimit = 60e3
# Setup new directory for polarimetry data
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
if (not os.path.isdir(polarimetryDir)):
os.mkdir(polarimetryDir, 0o755)
polAngDir = os.path.join(polarimetryDir, 'polAngImgs')
if (not os.path.isdir(polAngDir)):
os.mkdir(polAngDir, 0o755)
stokesDir = os.path.join(polarimetryDir, 'stokesImgs')
if (not os.path.isdir(stokesDir)):
os.mkdir(stokesDir, 0o755)
################################################################################
# Import the utility functions to be used here...
from utils_08b import *
# Build a dictionary contaning references to these transformation functions
USNOBtransforms = dict(zip(['V', 'R' , 'V-R'],
[USNOB_V, USNOB_R, USNOB_VR]))
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
fileList = fileIndex['Filename']
# Determine which parts of the fileIndex pertain to science images
useFiles = np.logical_and((fileIndex['Use'] == 1), (fileIndex['Dither'] == 'ABBA'))
# Cull the file index to only include files selected for use
fileIndex = fileIndex[np.where(useFiles)]
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
fileIndexByTarget = fileIndex.group_by(['Target', 'Dither'])
# Use the following data for final calibration
# Bands and zero point flux [in Jy = 10^(-26) W /(m^2 Hz)]
# Following table from Bessl, Castelli & Plez (1998)
# Passband Effective wavelength (microns) Zero point (Jy)
# U 0.366 1790
# B 0.438 4063
# V 0.545 3636
# R 0.641 3064
# I 0.798 2416
# J 1.22 1589
# H 1.63 1021
# K 2.19 640
zeroFlux = dict(zip(['U', 'B', 'V', 'R' , 'I' ],
[1790.0, 4063.0, 3636.0, 3064.0, 2416.0]))
wavelength = dict(zip(['U', 'B', 'V', 'R' , 'I' ],
                      [0.366, 0.438, 0.545, 0.641, 0.798]))
# Following table from Hu (2011)
# Data from Gaomeigu Observational Station
# Passband | K'(lambda) [mag/airmass] | K'' [mag/(color*airmass)]
# U 0.560 +/- 0.023 0.061 +/- 0.004
# B 0.336 +/- 0.021 0.012 +/- 0.003
# V 0.198 +/- 0.024 -0.015 +/- 0.004
# R 0.142 +/- 0.021 -0.067 +/- 0.005
# I 0.093 +/- 0.020 0.023 +/- 0.006
# Following table from Schmude (1994)
# Data from Texas A & M University Observatory
# Passband | K(lambda) [mag/airmass] | dispersion on K(lambda)
# U 0.60 +/- 0.05 0.120
# B 0.40 +/- 0.06 0.165
# V 0.26 +/- 0.03 0.084
# R 0.19 +/- 0.03 0.068
# I 0.16 +/- 0.02 0.055
kappa = dict(zip(['U', 'B', 'V', 'R' ],
[0.60, 0.40, 0.26, 0.19 ]))
# Loop through each group
groupKeys = fileIndexByTarget.groups.keys
for group in fileIndexByTarget.groups:
# Grab the current target information
thisTarget = str(np.unique(group['Target'].data)[0])
print('\nProcessing images for {0}'.format(thisTarget))
# Look for a photometric star catalog for this target
catFile = os.path.join(stokesDir, thisTarget + '_stars.csv')
if os.path.isfile(catFile):
starCatalog = Table.read(catFile, format='ascii.csv')
else:
print('Could not find catalog file for this target')
print('Please re-run script "08a_selectPhotStars.py"')
continue
# Search for all Stokes Intensity images
Ifile = os.path.join(stokesDir, thisTarget + '*I.fits')
Ifiles = glob.glob(Ifile)
# Search for all the Stokes U images
Ufile = os.path.join(stokesDir, thisTarget + '*U.fits')
Ufiles = glob.glob(Ufile)
# Search for all the Stokes Q images
Qfile = os.path.join(stokesDir, thisTarget + '*Q.fits')
Qfiles = glob.glob(Qfile)
# Read in all the Stokes Images found for this target, and strip the
# waveband from the header of each
stokesIimgs = [AstroImage(file1) for file1 in Ifiles]
waveBands = [img.header['FILTNME3'].strip() for img in stokesIimgs]
# Read in the Stokes U images
stokesUimgs = [AstroImage(file1) for file1 in Ufiles]
# Read in the Stokes Q images
stokesQimgs = [AstroImage(file1) for file1 in Qfiles]
# Compose a dictionary of stokes I, U, and Q images
stokesIdict = dict(zip(waveBands, stokesIimgs))
stokesUdict = dict(zip(waveBands, stokesUimgs))
stokesQdict = dict(zip(waveBands, stokesQimgs))
del stokesIimgs, stokesUimgs, stokesQimgs, waveBands
# Grab the WCS info from the header of the stokes Images
wcsDict = {}
yr2000 = datetime(2000,1,1)
deltaTime = timedelta(0)
for key, img in stokesIdict.items():
wcsDict[key] = WCS(img.header)
thisDate = img.header['DATE'].split('T')[0]
thisDate = datetime.strptime(thisDate, '%Y-%m-%d')
deltaTime += (thisDate - yr2000)
# Divide accumulated time vectors by number of measurements
secPerYr = 365.25*24*60*60
deltaTime = deltaTime.total_seconds()/(float(len(stokesIdict))*secPerYr)
# Form a "catalog" of position entries for matching
ra1 = starCatalog['_RAJ2000'].data.data*u.deg
dec1 = starCatalog['_DEJ2000'].data.data*u.deg
# Propagate proper motions into ra1 and dec1 positions
pmRA = starCatalog['pmRA'].data.data*(1e-3)*u.arcsec
    pmDE = starCatalog['pmDE'].data.data*(1e-3)*u.arcsec
ra = ra1 + pmRA*deltaTime
dec = dec1 + pmDE*deltaTime
# Determine PSF properties for each image
# Initalize a 2D gaussian model and fitter
g_init = models.Gaussian2D(amplitude = 2e2,
x_mean = 8.0,
y_mean = 8.0,
x_stddev = 3.0,
y_stddev = 3.0,
theta = 0.0)
fit_g = fitting.LevMarLSQFitter()
#####
#####
# PERHAPS I NEED TO ALIGN THE IMAGES *BEFORE* I PERFORM PHOTOMETRY.
# THAT WAY, THERE ARE NO EXTRA TRANSFORMATIONS APPLIED TO THE IMAGE BETWEEN
# CALIBRATION AND SAVING TO DISK.
#####
#####
# 1) Loop through all the images
# 2) Determine more accurate star pixel positions (store in dictionary)
# 3) Determine star PSF properties (store in dictionary)
PSF_FWHMs = []
xyStarsDict = {}
keepStarsDict = {}
for key, img in stokesIdict.items():
# Convert the stellar celestial coordinates to pixel positions
xStars, yStars = wcsDict[key].wcs_world2pix(ra, dec, 0)
# Grab the image shape
ny, nx = img.arr.shape
# Loop through each star
starFWHMs = []
xStars1 = []
yStars1 = []
keepStars = []
for xs, ys in zip(xStars, yStars):
# Cut out a 16x16 pixel region around this estimated location
x0 = np.int(np.round(xs)) - 8 if np.int(np.round(xs)) - 8 >= 1 else 1
y0 = np.int(np.round(ys)) - 8 if np.int(np.round(ys)) - 8 >= 1 else 1
# Compute upper bounds based on lower bounds
x1 = x0 + 16
y1 = y0 + 16
# Double check that upper bounds don't break the rules either
if x1 > nx - 2:
x1 = nx - 2
x0 = x1 - 16
if y1 > ny - 2:
y1 = ny - 2
y0 = y1 - 16
# Cut out the actual star patch
patch = img.arr[y0:y1, x0:x1]
# Estimate the local sky value
bigPatch = img.arr[y0-1:y1+1, x0-1:x1+1]
padPatch = np.pad(patch, ((1,1), (1,1)), mode='constant')
skyPatch = bigPatch - padPatch
skyPix = (np.abs(skyPatch) > 1e-3)
if np.sum(skyPix) > 0:
skyInds = np.where(skyPix)
skyVals = skyPatch[skyInds]
else:
print('Cannot find sky')
pdb.set_trace()
skyVal = np.median(skyVals)
# Do a centroid estimate to find the star position
m = moments(patch - skyVal, 1)
xcen = (m[1, 0]/m[0, 0]) + x0
ycen = (m[0, 1]/m[0, 0]) + y0
# Re-cut a 16x16 pixel region around this corrected star position
x0 = np.int(np.round(xcen)) - 8 if np.int(np.round(xcen)) - 8 >= 0 else 0
y0 = np.int(np.round(ycen)) - 8 if np.int(np.round(ycen)) - 8 >= 0 else 0
# Compute upper bounds based on lower bounds
x1 = x0 + 16
y1 = y0 + 16
# Double check that upper bounds don't break the rules either
if x1 > nx - 1:
x1 = nx - 1
x0 = x1 - 16
if y1 > ny - 1:
y1 = ny - 1
y0 = y1 - 16
# Cut out the actual star patch
patch = img.arr[y0:y1, x0:x1]
# Redo a centroid estimate to find the star position.
# Use this value to test whether or not the Gaussian fit is good.
m = moments(patch - skyVal, 1)
xcen = (m[1, 0]/m[0, 0])
ycen = (m[0, 1]/m[0, 0])
xcen1 = xcen + x0
ycen1 = ycen + y0
# Fit a Gaussian to the star cutout
with warnings.catch_warnings():
# Ignore model linearity warning from the fitter
warnings.simplefilter('ignore')
yy, xx = np.mgrid[0:patch.shape[0], 0:patch.shape[1]]
g_fit = fit_g(g_init, xx, yy, patch - skyVal)
            # Test whether the fitted gaussian is close to the expected location
xFit, yFit = (g_fit.x_mean.value, g_fit.y_mean.value)
fitDist = np.sqrt((xcen - xFit)**2 + (ycen - yFit)**2)
# Test for fitting and saturation problems
fitBool = (fitDist < 2.5)
satBool = (patch.max() < satLimit) and (patch.min() > -100)
thisKeepBool = fitBool and satBool
if thisKeepBool == True:
keepStars.append(True)
xStars1.append(xcen1)
yStars1.append(ycen1)
else:
# Build the problem analysis string
probString = ''
                if not fitBool:
                    probString = probString + 'fitting '
                if not satBool:
                    if len(probString) > 0:
                        probString = probString + 'and saturation'
                    else:
                        probString = probString + 'saturation '
probString = probString + 'problems'
print('skipping star at ({0:4d}, {1:4d}): for {2}'.format(
np.int(xs.round()), np.int(ys.round()), probString))
keepStars.append(False)
xStars1.append(-1)
yStars1.append(-1)
continue
# Store the Gaussian fitted PSF properties in the starFWHMs list
thisSigma = np.sqrt(np.abs(g_fit.x_stddev.value*g_fit.y_stddev.value))
thisFWHM = thisSigma/gaussian_fwhm_to_sigma
starFWHMs.append(thisFWHM)
# Store the mean PSF value in the FWHMlist
mean, med, std = sigma_clipped_stats(starFWHMs)
# mode = 3.0*med - 2.0*mean
# mode = 2.5*med - 1.5*mean
# PSF_FWHMs.append(mode)
PSF_FWHMs.append(mean)
# Store star positions in the xyStarsDict
xyStars = np.array([(xs, ys) for xs, ys in zip(xStars1, yStars1)])
xyStarsDict[key] = xyStars
# Store the star test booleans in the keepStarDict
keepStarsDict[key] = keepStars
# Grab maximum stellar PSF width and use apRad = 2.5*FWHM for photometry
maxFWHM = np.max(PSF_FWHMs)
apRad = 2.5*maxFWHM
anInRad = apRad + 2.0
anOutRad = apRad + 4.0
# Cull the starCatalog entry to only include non-saturated stars.
# First check which stars passed the test in ALL bands.
keepStars = np.ones(len(starCatalog), dtype=bool)
for key, val in keepStarsDict.items():
# Make sure the keep tests are all passed
keepStars = np.logical_and(keepStars, val)
# Make sure the stars are also far enough from the edge using the newly
# determined aperture radius to compute the edge criterion.
ny, nx = stokesIdict[key].arr.shape
edgeCut = np.ceil(anOutRad)
edgeBool = xyStarsDict[key][:,0] > edgeCut
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,0] < nx - 1 - edgeCut)
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,1] > edgeCut)
edgeBool = np.logical_and(edgeBool,
xyStarsDict[key][:,1] < ny - 1 - edgeCut)
# Combine the edge test with the previously determined photometry test
keepStars = np.logical_and(keepStars, edgeBool)
# Test if at least 4 stars passed the tests in all bands
if np.sum(keepStars) >= 4:
# Cull the star catalog to only include detected stars
keepInds = np.where(keepStars)
starCatalog = starCatalog[keepInds]
# Also cull the list of star positions to match between all bands
xyStarsDict1 = xyStarsDict.copy()
for key, val in xyStarsDict.items():
xyStarsDict1[key] = val[keepInds]
# Delete temporary variables
xyStarsDict = xyStarsDict1.copy()
del xyStarsDict1
else:
print('Fewer than 4 stars passed the quality tests in all bands.')
print('Color photometry for this target is impossible')
pdb.set_trace()
# Separate out O, J, E, F magnitudes for predicting V and R bands
# Surveys used for USNO-B1.0:
# ----------------------------------------------------------
# # Name Emuls B/R Wavelen. Zones Fld Dates Epoch
# (nm) (Dec) Obs.
# ----------------------------------------------------------
# 0 = POSS-I 103a-O (B) 350-500 -30..+90 936 1949-1965 (1st)
# 1 = POSS-I 103a-E (R) 620-670 -30..+90 936 1949-1965 (1st)
# 2 = POSS-II IIIa-J (B) 385-540 +00..+87 897 1985-2000 (2nd)
# 3 = POSS-II IIIa-F (R) 610-690 +00..+87 897 1985-1999 (2nd)
# 4 = SERC-J IIIa-J (B) 385-540 -90..-05 606 1978-1990 (2nd)
# 5 = ESO-R IIIa-F (R) 630-690 -90..-05 624 1974-1994 (1st)
# 6 = AAO-R IIIa-F (R) 590-690 -90..-20 606 1985-1998 (2nd)
# 7 = POSS-II IV-N (I) 730-900 +05..+87 800 1989-2000 (N/A)
# 8 = SERC-I IV-N (I) 715-900 -90..+00 892 1978-2002 (N/A)
# 9 = SERC-I* IV-N (I) 715-900 +05..+20 25 1981-2002 (N/A)
# --------------------------------------------------
# Note: Check that the confirmed sources all come from the expected
# surveys. If not, then stop and re-evaluate.
# First grab all the sources used in this data (minus masked points)
B1src = np.unique(starCatalog['B1S'].data.filled(255))[-2::-1]
R1src = np.unique(starCatalog['R1S'].data.filled(255))[-2::-1]
B2src = np.unique(starCatalog['B2S'].data.filled(255))[-2::-1]
R2src = np.unique(starCatalog['R2S'].data.filled(255))[-2::-1]
# Now test if all the specified sources are the expected ones
B1eqO = all([src in [0] for src in B1src])
R1eqE = all([src in [1] for src in R1src])
B2eqJ = all([src in [2, 4] for src in B2src])
R2eqF = all([src in [3, 5, 6] for src in R2src])
if (B1eqO and R1eqE and B2eqJ and R2eqF):
# If the sources are all the expected ones, then parse the emulsions
Omags = starCatalog['B1mag'].data.data
Emags = starCatalog['R1mag'].data.data
Jmags = starCatalog['B2mag'].data.data
Fmags = starCatalog['R2mag'].data.data
# Build a dictionary of USNO-B1.0 magnitudes
USNOBmagDict = dict(zip(['O', 'E', 'J', 'F' ],
[Omags, Emags, Jmags, Fmags]))
else:
# Insert a pause if one of the sources is wrong...
print('There are some unexpected sources for the magnitudes')
print('...stopping...')
pdb.set_trace()
# 1) Loop through all the images.
# 2) Do aperture photometry on the stars
# 3) Store photometry in photDict
# Initalize a dictionary to store the airmass corrected (AMC) stokes I imgs
stokesIdict_AMC = {}
# Initalize a dictionary to store the photometry tables
photDict = {}
for key, img in stokesIdict.items():
# Now that all the pre-requisites for photometry have been met, it is time
# to apply a waveband based airmass correction and normalize by the exposure
# time. The result, stored in the img1 variable, should be used for all
# subsequent photometry
atmExtMag = kappa[key]*img.header['AIRMASS']
expTime = img.header['EXPTIME']
img1 = img*((10.0**(0.4*atmExtMag))/expTime)
# Store corrected image in the stokesIdict_AMC dictionary
stokesIdict_AMC[key] = img1
# Grab the star positions
xyStars = xyStarsDict[key]
xStars, yStars = xyStars[:,0], xyStars[:,1]
# Establish circular apertures for photometry
apertures = CircularAperture(xyStars, r = apRad)
annulus_apertures = CircularAnnulus(xyStars,
r_in = anInRad, r_out = anOutRad)
# Perform the basic photometry
rawflux_table = aperture_photometry(img1.arr, apertures,
error=img1.sigma)
bkgflux_table = aperture_photometry(img1.arr, annulus_apertures,
error=img1.sigma)
phot_table = hstack([rawflux_table, bkgflux_table],
table_names=['raw', 'bkg'])
# Compute background contribution and subtract from raw photometry
bkg_mean = phot_table['aperture_sum_bkg'] / annulus_apertures.area()
bkg_sig = phot_table['aperture_sum_err_bkg'] / annulus_apertures.area()
bkg_sum = bkg_mean * apertures.area()
bkg_sig = bkg_sig * apertures.area()
# Compute the variance in the background pixels for each star
ny, nx = img1.arr.shape
yy, xx = np.mgrid[0:ny, 0:nx]
bkg_var = []
# Loop through each star and add the local variance to the uncertainty
for xy in xyStars:
xs, ys = xy
distFromStar = np.sqrt((xx - xs)**2 + (yy - ys)**2)
skyPixInds = np.where(np.logical_and(
(distFromStar > anInRad), (distFromStar < anOutRad)))
bkg_var.append(np.var(img1.arr[skyPixInds]))
# Convert the background variance into an array
bkg_var = np.array(bkg_var)
# Compute the final photometry and its uncertainty
final_sum = phot_table['aperture_sum_raw'] - bkg_sum
final_sig = np.sqrt(phot_table['aperture_sum_err_raw']**2
+ bkg_sig**2
+ bkg_var)
phot_table['residual_aperture_sum'] = final_sum
phot_table['residual_aperture_sum_err'] = final_sig
# Compute the signal-to-noise ratio and find the stars with SNR < 3.0
SNR = final_sum/final_sig
# Now estimate the photometry from USNO-B1.0 and store it for later use
catMags, sigCatMags = USNOBtransforms[key](USNOBmagDict)
phot_table[key+'_catMag'] = catMags
phot_table[key+'_sigCatMag'] = sigCatMags
# Loop through all the stars and detect any duplicate entries. Mark each
# entry with a semi-unique 'Star ID'
# Extract the star positions from the photometry table
# (this is redundant but a nice confirmation that these will be right)
xStars = phot_table['xcenter_raw']
yStars = phot_table['ycenter_raw']
# Initalize an empty list to store the starIDs
starIDs = -1*np.ones(len(phot_table), dtype=int)
for ind, row in enumerate(phot_table):
# Skip over any rows that have been previously treated
if starIDs[ind] > 0: continue
# Compute the distance between the current star and all other stars
xs, ys = row['xcenter_raw'], row['ycenter_raw']
dist = np.sqrt((xs - xStars)**2 + (ys - yStars)**2).value
if np.sum(dist < 2.0) > 0:
# Mark all stars within 2.0 pixels of the current star with an
# identical ID.
IDinds = np.where(dist < 2.0)
starIDs[IDinds] = ind
# Add the StarID column to the phot_table
phot_table.add_column(Column(name='star_id', data=starIDs), index=0)
# plt.ion()
# plt.imshow(stokesIdict[key].arr, vmin=0,vmax=800,cmap='gray_r')
# plt.scatter(phot_table['xcenter_raw'], phot_table['ycenter_raw'],
# marker='x', color='red')
# pdb.set_trace()
# Sort the phot_table by starID
sortInds = phot_table['star_id'].data.argsort()
phot_table = phot_table[sortInds]
# Store this photometry table in the dictionary for later use
photDict[key] = phot_table
# ###########################################################################
# # PRINT OUT THE PHOTOMETRY TO CHECK FOR CONSISTENCY
# ###########################################################################
# xFmtStr = '{x[0]:>6}.{x[1]:<3}'
# yFmtStr = '{y[0]:>6}.{y[1]:<3}'
# starFmtStr = '{star[0]:>9}.{star[1]:<3}'
# bkgFmtStr = '{bkg[0]:>9}.{bkg[1]:<3}'
# snrFmtStr = '{snr[0]:>9}.{snr[1]:<3}'
# print('final photometry is...')
# print(' x y Star Flux Bkg Flux SNR')
# print('===========================================================')
# printStr = xFmtStr + yFmtStr + starFmtStr + bkgFmtStr + snrFmtStr
# for i in range(len(SNR)):
# xVal = str(xStars[i]).split('.')
# xVal[1] = (xVal[1])[0:3]
# yVal = str(yStars[i]).split('.')
# yVal[1] = (yVal[1])[0:3]
# starVal = str(final_sum[i]).split('.')
# starVal[1] = (starVal[1])[0:3]
# bkgVal = str(bkg_sum[i]).split('.')
# bkgVal[1] = (bkgVal[1])[0:3]
# snrVal = str(SNR[i]).split('.')
# snrVal[1] = (snrVal[1])[0:3]
# print(printStr.format(x = xVal, y = yVal, star = starVal,
# bkg = bkgVal, snr = snrVal))
# I need to simultaneously solve a set of linear regressions for photometric
# zero-point magnitudes and color correction terms
#
# E.g.
# (V_corrected - V_apparent) = a_0 + a_1 * (V_apparent - R_apparent)
# and
# (R_corrected - R_apparent) = a_2 + a_3 * (V_apparent - R_apparent)
# and
# (V_corrected - R_corrected) = a_4 + a_5 * (V_apparent - R_apparent)
#
# Grab all the successfully measured bandpasses
bandKeys1 = [key for key in stokesIdict.keys()]
# Ensure that they're in wavelength order
# Start by constructing an array with
# Column 0: list of wavebands
# Column 1: list of wavelengths for that bands
bandLamArr = np.array([[key, val] for key, val in wavelength.items()])
# Figure our how to sort this array by increasing wavelength, and create a
# list of possible wavebands in that sorted order
sortArr = bandLamArr[:,1].argsort()
bandList = (bandLamArr[:,0].flatten())[sortArr]
# Loop through the wavebands and construct a wavelength ordered list of
# observed waveband keys in the stokesIdict dictionary.
bandKeys = []
for band in bandList:
if band in bandKeys1:
bandKeys.append(band)
# Loop through the bands and construct keys for a "color dictionary"
colorKeys = []
for ind, band1 in enumerate(bandKeys[0:len(bandKeys)-1]):
# Only worry about colors from adjacent wavebands, one index over
band2 = bandKeys[ind+1]
colorKeys.append('-'.join([band1, band2]))
# Prepare for the linear regressions to be done on each band and color
# Define the model to be used in the fitting
def lineFunc(B, x):
return B[1] + B[0]*x
# Set up ODR with the model and data.
lineModel = odr.Model(lineFunc)
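    # Added note (illustrative): lineModel would be used with scipy.odr as
    #   _data = odr.RealData(x, y, sx=sx, sy=sy)
    #   _out = odr.ODR(_data, lineModel, beta0=[1.0, 0.0]).run()
    #   _out.beta  # -> [slope, intercept]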
# loop through each linear regression
for colorKey in colorKeys:
        print('Pruning model outliers with MCMC')
# Setup the walker count, burn-in steps, and production steps
n_walkers = 100
n_burn_in_steps = 1000
n_steps = 2000
# Treat the photometric regressions for this set of bands...
# Establish the boundaries of acceptable parameters for the prior
labels = [
r"$\theta$",
r"$b_p$",
r"$P_b$",
r"$M_x$",
r"$\ln V_x$",
r"$M_y$",
r"$\ln V_y$"]
# Create a separate set of labels and indices for those parameters which
# will be plotted in the posterior distribution "corner plot"
plotLabels = [
r"$\theta$",
r"$b_p$",
r"$P_b$",
r"$\ln V_y$"]
plotInds = np.array([0,1,2,6])
bounds1 = [(-1.0, 1.0), # Theta (angle of the line slope)
(18.0, 28.0), # b_perp (min-dist(line-origin))
(0.0, 1.0), # Pb (Probability of sampling an outliers)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
bounds2 = [(+0.0, +1.5), # Theta (angle of the line slope)
(18.0, 28.0), # b_perp (min-dist(line-origin))
(0.0, 1.0), # Pb (Probability of sampling an outliers)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
boundsC = [(-0.5, +1.0), # Theta (angle of the line slope)
(-0.4, +0.75), # b_perp (min-dist(line-origin))
(0.0, 1.0), # Pb (Probability of sampling an outliers)
(-8.0, +8.0), # Mx (<x> of outlier distribution)
(-2.0, 5.0), # lnVx (log-x-variance of outlier distribution)
(-8.0, +8.0), # My (<y> of outlier distribution)
(-2.0, 5.0)] # lnVy (log-y-variance of outlier distribution)
# Parse the bands used in this color
band1, band2 = colorKey.split('-')
# Grab the photometry table for these two bands
phot_table1 = photDict[band1]
phot_table2 = photDict[band2]
# Double check that the star IDs are all matched up
if len(phot_table1) != len(phot_table2):
print('Photometry tables do not match!')
pdb.set_trace()
totalMatch = np.sum(phot_table1['star_id'].data == phot_table2['star_id'].data)
if totalMatch < len(phot_table1):
print('Photometry tables do not match!')
pdb.set_trace()
# Since we have confirmed that all the starIDs match up, we will store
# the values from the first phot_table
    starIDs = phot_table1['star_id'].data
# Grab the fluxes for the calibration stars for these two bands
flux1 = phot_table1['residual_aperture_sum'].data
flux2 = phot_table2['residual_aperture_sum'].data
sigFlux1 = phot_table1['residual_aperture_sum_err'].data
sigFlux2 = phot_table2['residual_aperture_sum_err'].data
# Compute the instrumental magnitudes for these two bands
instMags1 = -2.5*np.log10(flux1)
instMags2 = -2.5*np.log10(flux2)
sigInstMags1 = 2.5*np.abs(sigFlux1/(flux1*np.log(10)))
sigInstMags2 = 2.5*np.abs(sigFlux2/(flux2*np.log(10)))
# Now grab the catalog magnitudes for the calibration stars
catMags1 = phot_table1[band1+'_catMag'].data
catMags2 = phot_table2[band2+'_catMag'].data
sigCatMags1 = phot_table1[band1+'_sigCatMag'].data
sigCatMags2 = phot_table2[band2+'_sigCatMag'].data
# Begin by culling any data from extremely unexpected regions
# Compute the catalog colors for these stars
catColors, sig_catColors = USNOBtransforms[colorKey](USNOBmagDict)
# Compute the band1 - band2 color
xTest = instMags1 - instMags2
yTest = catColors
# Set some boundaries for acceptable color-color data
# slope1, intercept1 = np.tan(0.561), 0.0055/np.cos(0.561)
# slope2, intercept2 = np.tan(0.658), 0.233/np.cos(0.658)
slope1, intercept1 = np.tan(0.45), 0.00/np.cos(0.45)
slope2, intercept2 = np.tan(0.70), 0.25/np.cos(0.70)
keepPts = (yTest > (slope1*xTest + intercept1 - 0.25))
keepPts = np.logical_and(keepPts,
(yTest < slope2*xTest + intercept2 + 0.25))
keepInds = np.where(keepPts)
# Now perform the actual data cuts
starIDs = starIDs[keepInds]
instMags1 = instMags1[keepInds]
instMags2 = instMags2[keepInds]
sigInstMags1 = sigInstMags1[keepInds]
sigInstMags2 = sigInstMags2[keepInds]
catMags1 = catMags1[keepInds]
catMags2 = catMags2[keepInds]
sigCatMags1 = sigCatMags1[keepInds]
sigCatMags2 = sigCatMags2[keepInds]
catColors = catColors[keepInds]
sig_catColors = sig_catColors[keepInds]
########################################################################
############################# COLOR-COLOR ##############################
########################################################################
print('Running initial Color-Color regression')
# Compute the colors for these stars
xC = instMags1 - instMags2
yC = catColors
sxC = np.sqrt(sigInstMags1**2 + sigInstMags2**2)
syC = sig_catColors
### THIS CODE SIMPLY DISPLAYS THE DATA TO THE USER TO SEE IF
### THE SELECTED "GOOD-DATA" REGION IS ACCEPTABLE.
###
# slope1, intercept1 = np.tan(0.45), 0.00/np.cos(0.45)
# slope2, intercept2 = np.tan(0.70), 0.25/np.cos(0.70)
# plt.errorbar(xC, yC, xerr=sxC, yerr=syC, fmt='None', ecolor='k')
# plt.plot(xC, slope1*xC + intercept1 - 0.25, color='k')
# plt.plot(xC, slope2*xC + intercept2 + 0.25, color='k')
# pdb.set_trace()
# plt.close('all')
# continue
# Perform the MCMC sampling of the posterior
data = (xC, yC, sxC, syC)
samplerC = MCMCfunc(data, boundsC,
n_walkers=n_walkers,
n_burn_in_steps=n_burn_in_steps,
n_steps=n_steps)
# Plot the posteriors to see if a reasonable result was obtained.
# plotSamples = samplerC.flatchain[:,plotInds]
# plotBounds = np.array(boundsC)[plotInds]
# corner.corner(plotSamples, bins=100,
# range=plotBounds,
# labels=plotLabels)
#
# # Save the figure to disk
# fname = os.path.join(stokesDir, thisTarget + '_MCMC.png')
# plt.savefig(fname, dpi=300)
# plt.close('all')
# Compute the posterior probability that each data-point is "good"
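    # (the MCMC model is a foreground line + background outlier mixture;
    #  exp(ll_fg - logaddexp(ll_fg, ll_bg)) is the per-point foreground
    #  fraction p_fg / (p_fg + p_bg), averaged over all samples)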
norm = 0.0
post_probC = np.zeros(len(data[0]))
for i in range(samplerC.chain.shape[1]):
for j in range(samplerC.chain.shape[0]):
ll_fg, ll_bg = samplerC.blobs[i][j]
post_probC += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_probC /= norm
# Loop through all entries and eliminate the less probable of all
# *PAIRED* entries.
keepBool = np.zeros(len(post_probC), dtype=bool)
for ind, idNum in enumerate(starIDs):
# Skip over already treated indices
if keepBool[ind] == True: continue
# Test which starIDs equal *THIS* starID
testBool = (starIDs == idNum)
if np.sum(testBool) > 1:
# If this ID number is shared by more than one entry, then
# figure out which entry is more probable and keep only that one
testIDs = np.where(starIDs == idNum)
testProbs = post_probC[testIDs]
testBool = testProbs == testProbs.max()
testInds = np.where(testBool)[0]
keepBool[testIDs] = testBool
else:
keepBool[ind] = True
# Now that we've eliminated duplicate data-points, let's eliminate data
# with less than a 50% posterior probability of being "good"
keepBool = np.logical_and(keepBool, (post_probC > 0.50))
keepInds = np.where(keepBool)
# Now cull the data and re-do the fit
print('Culling duplicate and probable outlier data')
starIDs = starIDs[keepInds]
instMags1 = instMags1[keepInds]
instMags2 = instMags2[keepInds]
sigInstMags1 = sigInstMags1[keepInds]
sigInstMags2 = sigInstMags2[keepInds]
catMags1 = catMags1[keepInds]
catMags2 = catMags2[keepInds]
sigCatMags1 = sigCatMags1[keepInds]
sigCatMags2 = sigCatMags2[keepInds]
catColors = catColors[keepInds]
sig_catColors = sig_catColors[keepInds]
########################################################################
################################ BAND 1 ################################
########################################################################
print('Running {0}-band regression'.format(band1))
x1 = instMags1 - instMags2
y1 = catMags1 - instMags1
sx1 = np.sqrt(sigInstMags1**2 + sigInstMags2**2)
sy1 = np.sqrt(sigCatMags1**2 + sigInstMags1**2)
# Perform the MCMC sampling of the posterior
data = (x1, y1, sx1, sy1)
sampler1 = MCMCfunc(data, bounds1,
n_walkers=n_walkers,
n_burn_in_steps=n_burn_in_steps,
n_steps=n_steps)
# # Plot the posteriors to see if a reasonable result was obtained.
# plt.ion()
# plotSamples = sampler1.flatchain[:,plotInds]
# plotBounds = np.array(bounds1)[plotInds]
# corner.corner(plotSamples, bins=100,
# range=plotBounds,
# labels=plotLabels)
# pdb.set_trace()
# plt.close('all')
# Compute the posterior probability that each data-point is "good"
norm = 0.0
post_prob1 = np.zeros(len(data[0]))
for i in range(sampler1.chain.shape[1]):
for j in range(sampler1.chain.shape[0]):
ll_fg, ll_bg = sampler1.blobs[i][j]
post_prob1 += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_prob1 /= norm
# Track the outliers from the band-1 MCMC fit
keepBool = (post_prob1 > 0.5)
########################################################################
################################ BAND 2 ################################
########################################################################
print('Running {0}-band regression'.format(band2))
x2 = instMags1 - instMags2
y2 = catMags2 - instMags2
sx2 = np.sqrt(sigInstMags1**2 + sigInstMags2**2)
sy2 = np.sqrt(sigCatMags2**2 + sigInstMags2**2)
# Perform the MCMC sampling of the posterior
data = (x2, y2, sx2, sy2)
sampler2 = MCMCfunc(data, bounds2,
n_walkers=n_walkers,
n_burn_in_steps=n_burn_in_steps,
n_steps=n_steps)
# # Plot the posteriors to see if a reasonable result was obtained.
# plotSamples = sampler2.flatchain[:,plotInds]
# plotBounds = np.array(bounds1)[plotInds]
# corner.corner(plotSamples, bins=100,
# range=plotBounds,
# labels=plotLabels)
# pdb.set_trace()
# plt.close('all')
# Compute the posterior probability that each data-point is "good"
norm = 0.0
post_prob2 = np.zeros(len(data[0]))
for i in range(sampler2.chain.shape[1]):
for j in range(sampler2.chain.shape[0]):
ll_fg, ll_bg = sampler2.blobs[i][j]
post_prob2 += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_prob2 /= norm
# Track the outliers from the band-2 MCMC fit
keepBool = np.logical_and(keepBool, (post_prob2 > 0.5))
########################################################################
############################# COLOR-COLOR ##############################
########################################################################
# Begin by culling any data marked as outliers in band1 or band2 MCMC.
keepInds = np.where(keepBool)
# Now cull the data and perform ODR fits
print('Culling probable outlier data')
starIDs = starIDs[keepInds]
instMags1 = instMags1[keepInds]
instMags2 = instMags2[keepInds]
sigInstMags1 = sigInstMags1[keepInds]
sigInstMags2 = sigInstMags2[keepInds]
catMags1 = catMags1[keepInds]
catMags2 = catMags2[keepInds]
sigCatMags1 = sigCatMags1[keepInds]
sigCatMags2 = sigCatMags2[keepInds]
catColors = catColors[keepInds]
sig_catColors = sig_catColors[keepInds]
# Make sure to identically cull the posterior probabilities too!
post_prob1 = post_prob1[keepInds]
post_prob2 = post_prob2[keepInds]
print('Running final color-color regression')
# Compute the colors for these stars
xC = instMags1 - instMags2
yC = catColors
sxC = np.sqrt(sigInstMags1**2 + sigInstMags2**2)
syC = sig_catColors
# Perform the MCMC sampling of the posterior
data = (xC, yC, sxC, syC)
samplerC = MCMCfunc(data, boundsC,
n_walkers=n_walkers,
n_burn_in_steps=n_burn_in_steps,
n_steps=n_steps)
# Compute the posterior probability that each data-point is "good"
norm = 0.0
post_probC = np.zeros(len(data[0]))
for i in range(samplerC.chain.shape[1]):
for j in range(samplerC.chain.shape[0]):
ll_fg, ll_bg = samplerC.blobs[i][j]
post_probC += np.exp(ll_fg - np.logaddexp(ll_fg, ll_bg))
norm += 1
post_probC /= norm
# # Track the outliers from the color-color MCMC fit
# keepBool = np.logical_and(keepBool, (post_probC > 0.5))
if np.sum(post_probC < 0.5) > 0:
print('Color-Color fit still has some outliers?!')
pdb.set_trace()
# Grab the "confidence intervals" for each parameter
truthRanges = [(v[1], v[2]-v[1], v[1]-v[0]) for v in
zip(*np.percentile(samplerC.flatchain, [16, 50, 84], axis=0))]
truths = [t[0] for t in truthRanges]
# Convert these data to slope-intercept space
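    # (the sampler works in (theta, b_perp); slope m = tan(theta) and
    #  intercept b = b_perp / cos(theta), since b_perp is the perpendicular
    #  distance of the line from the origin)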
tmpData = np.array([
np.tan(samplerC.flatchain[:,0]),
samplerC.flatchain[:,1]/np.cos(samplerC.flatchain[:,0])])
# Compute the median slope and intercept and the covariance matrix
truthsC = np.percentile(tmpData, 50, axis=1)
    covC = np.cov(tmpData)
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
cie.py: converts wavelength spectra into XYZ and RGB colorspaces
===================================================================
Conversion of the binned wavelength spectra into XYZ (using
CIE weighting functions) and then RGB produces a spectrum
[FIXED] Unphysical color repetition
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Uniform scaling by maximal single X,Y,Z or R,G,B
prior to clipping gets rid of the unphysical color repetition
but there is a kind of gap between the green and the blue, where
cyan should be
#hRGB_raw /= hRGB_raw[0,:,0].max() # scaling by maximal red, results in muted spectrum
#hRGB_raw /= hRGB_raw[0,:,1].max() # scaling by maximal green, OK
#hRGB_raw /= hRGB_raw[0,:,2].max() # scaling by maximal blue, similar to green by pumps the blues and nice yellow
The entire spectral locus is outside sRGB gamut (the triangle),
so all bins are being clipped.
Not clipping produces a psychedelic mess.
[ISSUE] Blue/Green transition looks unphysical
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Need better way to handle out of gamut ?
Raw numbers show that green ramps up through 430..480 nm, but
it is all negative, so that information is clipped.
::
In [68]: np.set_printoptions(linewidth=150)
In [75]: np.hstack([wd[:-1,None],c.raw[0],c.xyz[0],c.rgb[0]])
Out[75]:
array([[ 350. , 0. , 0.016, 0.102, 0. , 0. , 0. , -0. , 0. , 0. ],
[ 370. , 0.015, 0.105, 1.922, 0. , 0. , 0.001, -0.001, 0. , 0.001],
[ 390. , 1.873, 0.582, 20.444, 0.001, 0. , 0.011, -0.003, 0. , 0.012],
[ 410. , 49.306, 2.691, 205.061, 0.028, 0.002, 0.115, 0.03 , -0.019, 0.123],
[ 430. , 273.393, 10.384, 1386.823, 0.153, 0.006, 0.779, 0.1 , -0.105, 0.83 ],
[ 450. , 343.75 , 33.415, 1781.385, 0.193, 0.019, 1. , 0.098, -0.11 , 1.064],
[ 470. , 191.832, 89.944, 1294.473, 0.108, 0.05 , 0.727, -0.091, 0.021, 0.764],
[ 490. , 32.012, 213.069, 465.525, 0.018, 0.12 , 0.261, -0.256, 0.218, 0.253],
[ 510. , 16.48 , 500.611, 155.962, 0.009, 0.281, 0.088, -0.446, 0.522, 0.036],
[ 530. , 159.607, 869.052, 43.036, 0.09 , 0.488, 0.024, -0.472, 0.829, -0.069],
[ 550. , 433.715, 994.463, 8.758, 0.243, 0.558, 0.005, -0.072, 0.812, -0.095],
[ 570. , 772.904, 950.107, 1.308, 0.434, 0.533, 0.001, 0.586, 0.58 , -0.084],
[ 590. , 1021.039, 762.587, 0.143, 0.573, 0.428, 0. , 1.199, 0.248, -0.055],
[ 610. , 1000.205, 500.338, 0.012, 0.561, 0.281, 0. , 1.388, -0.017, -0.026],
[ 630. , 656.21 , 263.667, 0.001, 0.368, 0.148, 0. , 0.966, -0.079, -0.01 ],
[ 650. , 283.632, 110.045, 0. , 0.159, 0.062, 0. , 0.421, -0.038, -0.004],
[ 670. , 80.766, 36.117, 0. , 0.045, 0.02 , 0. , 0.116, -0.006, -0.002],
[ 690. , 17.024, 11.172, 0. , 0.01 , 0.006, 0. , 0.021, 0.003, -0.001]])
Chromatic Adaption
~~~~~~~~~~~~~~~~~~~~
* http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html

Refs
~~~~~

* https://github.com/colour-science/colour/issues/191
* http://www.scipy-lectures.org/advanced/image_processing/index.html
* http://www.scipy-lectures.org/packages/scikit-image/index.html#scikit-image
* http://www.scipy.org/scikits.html (scikits are separate from scipy, but live under the same "brand")
"""
import os, logging, numpy as np
log = logging.getLogger(__name__)
np.set_printoptions(linewidth=150)
import matplotlib.pyplot as plt
import ciexyz.ciexyz as _cie
from env.graphics.ciexyz.XYZ import Spectrum
from env.graphics.ciexyz.RGB import RGB
class CIE(object):
def __init__(self, colorspace="sRGB/D65", whitepoint=None):
cs = RGB(colorspace)
self.x2r = cs.x2r
self.whitepoint = whitepoint
def hist0d_XYZ(self,w, nb=100):
X = np.sum(_cie.X(w))
Y = np.sum(_cie.Y(w))
Z = np.sum(_cie.Z(w))
hX = np.repeat(X, nb)
hY = np.repeat(Y, nb)
hZ = np.repeat(Z, nb)
raw = np.dstack([hX,hY,hZ])
self.raw = np.copy(raw)
return raw
def hist1d_XYZ(self,w,x,xb):
hX, hXx = np.histogram(x,bins=xb, weights=_cie.X(w))
hY, hYx = np.histogram(x,bins=xb, weights=_cie.Y(w))
hZ, hZx = np.histogram(x,bins=xb, weights=_cie.Z(w))
assert np.all(hXx == xb) & np.all(hYx == xb ) & np.all(hZx == xb)
raw = np.dstack([hX,hY,hZ])
self.raw = np.copy(raw)
return raw
def hist2d_XYZ(self,w,x,y,xb,yb):
bins = [xb,yb]
hX, hXx, hXy = np.histogram2d(x,y,bins=bins, weights=_cie.X(w))
hY, hYx, hYy = np.histogram2d(x,y,bins=bins, weights=_cie.Y(w))
hZ, hZx, hZy = np.histogram2d(x,y,bins=bins, weights=_cie.Z(w))
assert np.all(hXx == xb) & np.all(hYx == xb ) & np.all(hZx == xb)
        assert np.all(hXy == yb) & np.all(hYy == yb) & np.all(hZy == yb)
        raw = np.dstack([hX, hY, hZ])
        self.raw = np.copy(raw)
        return raw
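
# A hedged usage sketch (not part of the original file): bin hypothetical
# wavelengths `w` recorded at positions `x` into XYZ, then map to RGB via
# the colorspace matrix `x2r` and clip to the gamut.
#
#   c = CIE(colorspace="sRGB/D65")
#   w = np.linspace(350., 700., 10000)       # wavelengths in nm
#   x = np.random.rand(10000)                # positions to histogram over
#   xb = np.linspace(0., 1., 11)             # bin edges
#   raw = c.hist1d_XYZ(w, x, xb)             # shape (1, nbins, 3), XYZ per bin
#   rgb = np.dot(raw, c.x2r.T)               # XYZ -> linear RGB
#   rgb = np.clip(rgb / rgb.max(), 0., 1.)   # normalize and clip to gamut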
"""
In this module, we implement forward stepwise model selection for $K$ steps.
The main goal of this is to produce a set of linear inequality constraints satisfied by
$y$ after $K$ steps.
"""
import warnings
from copy import copy
import numpy as np
from scipy.stats import norm as ndist
# local imports
from ..constraints.affine import (constraints,
gibbs_test,
stack as stack_con,
gaussian_hit_and_run)
from ..distributions.chain import parallel_test, serial_test
from ..distributions.chisq import quadratic_test
from ..distributions.discrete_family import discrete_family
DEBUG = False
class forward_step(object):
"""
Forward stepwise model selection.
"""
def __init__(self, X, Y,
subset=None,
fixed_regressors=None,
intercept=True,
covariance=None):
"""
Parameters
----------
X : ndarray
Shape (n,p) -- the design matrix.
Y : ndarray
Shape (n,) -- the response.
subset : ndarray (optional)
Shape (n,) -- boolean indicator of which cases to use.
            Defaults to np.ones(n, bool).
fixed_regressors: ndarray (optional)
Shape (n, *) -- fixed regressors to regress out before
computing score.
intercept : bool
            Remove intercept -- this effectively appends np.ones(n) to fixed_regressors.
covariance : ndarray (optional)
Covariance matrix of errors. Defaults to np.identity(n).
Returns
-------
FS : `selection.algorithms.forward_step.forward_step`
Notes
-----
"""
self.subset = subset
self.X, self.Y = X, Y
n, p = self.X.shape
if fixed_regressors is not None:
fixed_regressors = np.asarray(fixed_regressors).reshape((n,-1))
if intercept:
if fixed_regressors is not None:
fixed_regressors = np.hstack([fixed_regressors, np.ones((n, 1))])
else:
fixed_regressors = np.ones((n, 1))
if fixed_regressors is not None:
self.fixed_regressors = np.hstack(fixed_regressors)
if self.fixed_regressors.ndim == 1:
self.fixed_regressors = self.fixed_regressors.reshape((-1,1))
# regress out the fixed regressors
# TODO should be fixed for subset
# should we adjust within the subset or not?
self.fixed_pinv = np.linalg.pinv(self.fixed_regressors)
self.Y = self.Y - np.dot(self.fixed_regressors,
np.dot(self.fixed_pinv, self.Y))
self.X = self.X - np.dot(self.fixed_regressors,
np.dot(self.fixed_pinv, self.X))
else:
self.fixed_regressors = None
if self.subset is not None:
self.working_X = self.X.copy()[subset]
self.subset_X = self.X.copy()[subset]
self.subset_Y = self.Y.copy()[subset]
self.subset_selector = np.identity(self.X.shape[0])[subset]
self.subset_fixed = self.fixed_regressors[subset]
else:
self.working_X = self.X.copy()
self.subset_Y = self.Y.copy()
self.subset_X = self.X.copy()
self.subset_fixed = self.fixed_regressors
# scale columns of X to have length 1
self.working_X /= np.sqrt((self.working_X**2).sum(0))[None, :]
self.variables = [] # the sequence of selected variables
self.Z = [] # the achieved Z scores
self.Zfunc = [] # the linear functionals of Y that achieve the Z scores
self.signs = [] # the signs of the achieved Z scores
self.covariance = covariance # the covariance of errors
self._resid_vector = self.subset_Y.copy() # the current residual -- already adjusted for fixed regressors
# setup for iteration
self.identity_constraints = [] # this will store linear functionals that identify the variables
        self.inactive = np.ones(p, bool)  # current inactive set
self.maxZ_offset = np.array([np.ones(p) * np.inf, np.ones(p) * np.inf]) # stored for computing
# the limits of maxZ selected test
self.maxZ_constraints = []
def step(self,
compute_maxZ_pval=False,
use_identity=False,
ndraw=8000,
burnin=2000,
sigma_known=True,
accept_reject_params=(100, 15, 2000)):
"""
Parameters
----------
compute_maxZ_pval : bool
Compute a p-value for this step? Requires MCMC sampling.
use_identity : bool
If computing a p-value condition on the identity of the variable?
        ndraw : int (optional)
            Defaults to 8000.
        burnin : int (optional)
            Defaults to 2000.
sigma_known : bool
Is $\sigma$ assumed known?
accept_reject_params : tuple
If not () should be a tuple (num_trial, min_accept, num_draw).
In this case, we first try num_trial accept-reject samples,
if at least min_accept of them succeed, we just draw num_draw
accept_reject samples.
"""
working_X, Y = self.working_X, self.subset_Y
resid_vector = self._resid_vector
n, p = working_X.shape
# up to now inactive
inactive = self.inactive
# compute Z scores
scale = self.scale = np.sqrt(np.sum(working_X**2, 0))
scale[~inactive] = np.inf # should never be used in any case
Zfunc = working_X.T # [inactive]
Zstat = np.dot(Zfunc, Y) / scale # [inactive]
winning_var = np.argmax(np.fabs(Zstat))
winning_sign = np.sign(Zstat[winning_var])
winning_func = Zfunc[winning_var] / scale[winning_var] * winning_sign
realized_maxZ = Zstat[winning_var] * winning_sign
self.Z.append(realized_maxZ)
if self.subset is not None:
self.Zfunc.append(winning_func.dot(self.subset_selector))
else:
self.Zfunc.append(winning_func)
# keep track of identity for testing
# variables other than the last one added
# this adds a constraint to self.identity_constraints
# losing_vars are variables that are inactive (i.e. not in self.variables)
# and did not win in this step
losing_vars = inactive.copy()
losing_vars[winning_var] = False
identity_linpart = np.vstack([
working_X[:,losing_vars].T / scale[losing_vars,None] -
winning_func,
-working_X[:,losing_vars].T / scale[losing_vars,None] -
winning_func,
- winning_func.reshape((1,-1))])
if self.subset is not None:
identity_linpart = np.dot(identity_linpart,
self.subset_selector)
identity_con = constraints(identity_linpart,
np.zeros(identity_linpart.shape[0]))
if not identity_con(self.Y):
raise ValueError('identity fail!')
self.identity_constraints.append(identity_linpart)
# form the maxZ constraint
XI = self.subset_X[:,self.inactive]
linear_part = np.vstack([XI.T, -XI.T])
if self.subset is not None:
linear_part = np.dot(linear_part,
self.subset_selector)
inactive_offset = self.maxZ_offset[:, self.inactive]
        maxZ_con = constraints(linear_part, np.hstack(inactive_offset))
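
# A minimal numeric sketch (not part of the module above) of the step-one
# selection event that `identity_linpart` encodes: choosing
# jstar = argmax_j |X_j'y| over unit-norm columns, with sign s, is equivalent
# to the linear inequalities +/- X_j'y <= s * X_jstar'y for every losing j.

import numpy as np

rng = np.random.default_rng(0)            # hypothetical data
X = rng.standard_normal((50, 5))
X /= np.sqrt((X ** 2).sum(0))[None, :]    # unit-norm columns, as in __init__
y = rng.standard_normal(50)

Z = X.T.dot(y)
jstar = np.argmax(np.fabs(Z))
s = np.sign(Z[jstar])
eta = s * X[:, jstar]                     # the winning linear functional

losers = np.ones(5, bool)
losers[jstar] = False
A = np.vstack([X[:, losers].T - eta,      #  X_j'y - eta'y <= 0
               -X[:, losers].T - eta,     # -X_j'y - eta'y <= 0
               -eta[None, :]])            # eta'y >= 0
assert np.all(A.dot(y) <= 0)              # the selection event holds for y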
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# Modified by <NAME> (https://github.com/xingyizhou/CenterTrack/blob/master/src/lib/utils/image.py)
# Then modified by <NAME>
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from lib.utils.ddd_utils import compute_box_3d, project_to_image, alpha2rot_y
from lib.utils.ddd_utils import draw_box_3d, unproject_2d_to_3d
from tools.convert_pixset import box3d_from_loc_dim_rot
import numpy as np
import cv2
import random
import torch
from matplotlib.patches import Polygon
from pioneer.common import linalg
from matplotlib import pyplot as plt
from pioneer.das.api.samples import Image
from pioneer.das.api.platform import Platform
def flip(img):
return img[:, :, ::-1].copy()
# @numba.jit(nopython=True, nogil=True)
def transform_preds_with_trans(coords, trans):
# target_coords = np.concatenate(
# [coords, np.ones((coords.shape[0], 1), np.float32)], axis=1)
target_coords = np.ones((coords.shape[0], 3), np.float32)
target_coords[:, :2] = coords
target_coords = np.dot(trans, target_coords.transpose()).transpose()
return target_coords[:, :2]
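
# Example with hypothetical values: applying a 2x3 affine matrix (here a
# pure translation by (+5, -2)) to two points via homogeneous coordinates.
#
#   trans = np.array([[1., 0., 5.],
#                     [0., 1., -2.]], dtype=np.float32)
#   pts = np.array([[0., 0.], [10., 20.]], dtype=np.float32)
#   transform_preds_with_trans(pts, trans)
#   # -> array([[ 5., -2.], [15., 18.]], dtype=float32)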
def transform_preds(coords, center, scale, output_size):
target_coords = np.zeros(coords.shape)
trans = get_affine_transform(center, scale, 0, output_size, inv=1)
for p in range(coords.shape[0]):
target_coords[p, 0:2] = affine_transform(coords[p, 0:2], trans)
return target_coords
def get_affine_transform(
    center, scale, rot, output_size, shift=np.array([0, 0], dtype=np.float32), inv=0):
#!/usr/bin/env python
"""
Unit tests for Interval Data Class
- test_create_interval
- test_interval_properties
- test_interval_conversion
- test_interval_hash
- test_interval_contains
- test_interval_overlaps
- test_interval_intersect
- test_interval_union
- test_interval_random_values
- test_interval_random_intervals
- test_interval_from_intersect
- test_interval_from_union
- test_interval_from_object
- test_interval_from_text
"""
from dataclasses import asdict, astuple
from typing import List
from unittest import TestCase
from numpy import mean
from sources.core import Interval
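
# For context, a hedged minimal sketch of the Interval dataclass these tests
# assume (the real implementation lives in sources.core and may differ):
#
#   @dataclass
#   class Interval:
#       lower: float
#       upper: float
#
#       def __init__(self, lower, upper):
#           # coerce strings like '10.5' to float and normalize ordering,
#           # matching the behavior exercised in test_create_interval
#           self.lower, self.upper = sorted(map(float, (lower, upper)))
#
#       @property
#       def length(self) -> float:
#           return self.upper - self.lower
#
#       @property
#       def midpoint(self) -> float:
#           return (self.lower + self.upper) / 2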
class TestInterval(TestCase):
test_intervals: List[Interval]
overlaps: List[List[bool]]
def setUp(self):
self.test_intervals = []
self.test_intervals.append(Interval(-10, 25))
self.test_intervals.append(Interval(10.5, 45))
self.test_intervals.append(Interval(-10.5, 45))
self.test_intervals.append(Interval(10.5, -45))
self.test_intervals.append(Interval(-10.5, -45))
self.test_intervals.append(Interval(5, 5))
self.test_intervals.append(Interval(5, 6))
self.overlaps = [
[True, True, True, True, False, True, True],
[True, True, True, False, False, False, False],
[True, True, True, True, False, True, True],
[True, False, True, True, True, True, True],
[False, False, False, True, True, False, False],
[True, False, True, True, False, True, False],
[True, False, True, True, False, False, True]
]
def test_create_interval(self):
test_intervals = []
test_intervals.append(Interval(10.5, 20))
test_intervals.append(Interval(20, '10.5'))
test_intervals.append(Interval(**{'lower': 10.5, 'upper': 20}))
test_intervals.append(Interval(*('20', '10.5')))
for interval in test_intervals:
self.assertEqual(interval.lower, 10.5)
self.assertEqual(interval.upper, 20)
self.assertTrue(isinstance(interval.lower, float))
self.assertTrue(isinstance(interval.upper, float))
def test_interval_properties(self):
for interval in self.test_intervals:
#print(f'{interval}: length={interval.length} midpoint={interval.midpoint}')
self.assertEqual(interval.length, interval.upper - interval.lower)
            self.assertEqual(interval.midpoint, mean([interval.lower, interval.upper]))