prompt
stringlengths 19
879k
| completion
stringlengths 3
53.8k
| api
stringlengths 8
59
|
---|---|---|
import math
import numpy as np
from bigml.laminar.constants import NUMERIC, CATEGORICAL
MODE_CONCENTRATION = 0.1
MODE_STRENGTH = 3
MEAN = "mean"
STANDARD_DEVIATION = "stdev"
ZERO = "zero_value"
ONE = "one_value"
def index(alist, value):
    """Return the position of the first occurrence of *value* in *alist*,
    or None when *value* is absent."""
    for position, item in enumerate(alist):
        if item == value:
            return position
    return None
def one_hot(vector, possible_values):
    """Return a float32 one-hot matrix encoding *vector* over *possible_values*.

    Row k gets a 1 in the column of vector[k]'s position within
    *possible_values*; values not present leave an all-zero row.
    """
    rows = list(vector)
    encoded = np.zeros((len(rows), len(possible_values)), dtype=np.float32)
    for row, value in enumerate(rows):
        if value in possible_values:
            encoded[row, possible_values.index(value)] = 1
    return encoded
def standardize(vector, mn, stdev):
newvec = vector - mn
if stdev > 0:
newvec = newvec / stdev
fill_dft = lambda x: 0.0 if math.isnan(x) else x
newvec = | np.vectorize(fill_dft) | numpy.vectorize |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Reliability calibration plugins."""
import operator
import warnings
import iris
import numpy as np
import scipy
from improver import BasePlugin, PostProcessingPlugin
from improver.calibration.utilities import (
check_forecast_consistency,
create_unified_frt_coord,
filter_non_matching_cubes,
)
from improver.metadata.probabilistic import (
find_threshold_coordinate,
probability_is_above_or_below,
)
from improver.metadata.utilities import generate_mandatory_attributes
from improver.utilities.cube_manipulation import MergeCubes, collapsed
class ConstructReliabilityCalibrationTables(BasePlugin):
"""A plugin for creating and populating reliability calibration tables."""
def __init__(
    self,
    n_probability_bins=5,
    single_value_lower_limit=False,
    single_value_upper_limit=False,
):
    """
    Initialise class for creating reliability calibration tables. These
    tables include data columns entitled observation_count,
    sum_of_forecast_probabilities, and forecast_count, defined below.

    Args:
        n_probability_bins (int):
            The total number of probability bins required in the reliability
            tables. If single value limits are turned on, these are included in
            this total.
        single_value_lower_limit (bool):
            Mandates that the lowest bin should be single valued,
            with a small precision tolerance, defined as 1.0E-6.
            The bin is thus 0 to 1.0E-6.
        single_value_upper_limit (bool):
            Mandates that the highest bin should be single valued,
            with a small precision tolerance, defined as 1.0E-6.
            The bin is thus (1 - 1.0E-6) to 1.
    """
    # Width of the single-value bins at the extremes of the [0, 1] range.
    self.single_value_tolerance = 1.0e-6
    # Non-overlapping [lower, upper] bounds for each probability bin.
    self.probability_bins = self._define_probability_bins(
        n_probability_bins, single_value_lower_limit, single_value_upper_limit
    )
    # Names of the data columns held in each reliability table.
    self.table_columns = np.array(
        ["observation_count", "sum_of_forecast_probabilities", "forecast_count"]
    )
    self.expected_table_shape = (len(self.table_columns), n_probability_bins)
def __repr__(self):
    """Represent the configured plugin instance as a string."""
    # Render each probability bin as "[lower --> upper]" with 2 decimal places.
    bin_values = ", ".join(
        ["[{:1.2f} --> {:1.2f}]".format(*item) for item in self.probability_bins]
    )
    result = "<ConstructReliabilityCalibrationTables: " "probability_bins: {}>"
    return result.format(bin_values)
def _define_probability_bins(
self, n_probability_bins, single_value_lower_limit, single_value_upper_limit
):
"""
Define equally sized probability bins for use in a reliability table.
The range 0 to 1 is divided into ranges to give n_probability bins.
If single_value_lower_limit and / or single_value_upper_limit are True,
additional bins corresponding to values of 0 and / or 1 will be created,
each with a width defined by self.single_value_tolerance.
Args:
n_probability_bins (int):
The total number of probability bins desired in the
reliability tables. This number includes the extrema bins
(equals 0 and equals 1) if single value limits are turned on,
in which case the minimum number of bins is 3.
single_value_lower_limit (bool):
Mandates that the lowest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus 0 to 1.0E-6.
single_value_upper_limit (bool):
Mandates that the highest bin should be single valued,
with a small precision tolerance, defined as 1.0E-6.
The bin is thus (1 - 1.0E-6) to 1.
Returns:
numpy.ndarray:
An array of 2-element arrays that contain the bounds of the
probability bins. These bounds are non-overlapping, with
adjacent bin boundaries spaced at the smallest representable
interval.
Raises:
ValueError: If trying to use both single_value_lower_limit and
single_value_upper_limit with 2 or fewer probability bins.
"""
if single_value_lower_limit and single_value_upper_limit:
if n_probability_bins <= 2:
msg = (
"Cannot use both single_value_lower_limit and "
"single_value_upper_limit with 2 or fewer "
"probability bins."
)
raise ValueError(msg)
n_probability_bins = n_probability_bins - 2
elif single_value_lower_limit or single_value_upper_limit:
n_probability_bins = n_probability_bins - 1
bin_lower = np.linspace(0, 1, n_probability_bins + 1, dtype=np.float32)
bin_upper = np.nextafter(bin_lower, 0, dtype=np.float32)
bin_upper[-1] = 1.0
bins = np.stack([bin_lower[:-1], bin_upper[1:]], 1).astype(np.float32)
if single_value_lower_limit:
bins[0, 0] = np.nextafter(self.single_value_tolerance, 1, dtype=np.float32)
lowest_bin = np.array([0, self.single_value_tolerance], dtype=np.float32)
bins = np.vstack([lowest_bin, bins]).astype(np.float32)
if single_value_upper_limit:
bins[-1, 1] = np.nextafter(
1.0 - self.single_value_tolerance, 0, dtype=np.float32
)
highest_bin = np.array(
[1.0 - self.single_value_tolerance, 1], dtype=np.float32
)
bins = | np.vstack([bins, highest_bin]) | numpy.vstack |
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import metrics
class Options(object):
    """Options used by the model."""

    # NOTE(review): consumers (e.g. ctr_batch_generator) also read
    # train_path, test_path and sequence_length from instances of this
    # class — they must be set on the instance before use; confirm where.
    def __init__(self):
        # Model options.
        # Embedding dimension.
        self.embedding_size = 32
        # The initial learning rate.
        self.learning_rate = 1.
        # Number of epochs to train. After these many epochs, the learning
        # rate decays linearly to zero and the training stops.
        self.epochs_to_train = 100
        # Number of examples for one training step.
        self.batch_size = 128
        # File where training progress is logged.
        self.log_path = './ctr.log'
def read_file(path, infinite=True):
    """Yield one ``map`` of ints per line of the space-separated file at *path*.

    When *infinite* is True the file is re-read forever (for streaming
    training); otherwise the file is read once and a final ``None`` sentinel
    is yielded so consumers can detect the end of the data.

    Args:
        path (str): path to a text file of space-separated integers.
        infinite (bool): loop over the file endlessly when True.

    Yields:
        map: lazy integer sequence for each line, then None once (finite mode).
    """
    while True:
        # `with` guarantees the handle is closed each pass — the original
        # reopened the file every iteration without ever closing it.
        with open(path, 'r') as fi:
            for line in fi:
                yield map(int, line.replace('\n', '').split(' '))
        if not infinite:
            # End-of-data sentinel expected by downstream batch generators.
            yield None
            break
def ctr_batch_generator(opts, train=True):
if train:
file_reader = read_file(opts.train_path, True)
else:
file_reader = read_file(opts.test_path, False)
while True:
batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
labels = np.ndarray(shape=(opts.batch_size))
for i in xrange(opts.batch_size):
single_sample = file_reader.next()
if single_sample is None:
break
target = single_sample[0]
temp = single_sample[1:opts.sequence_length]
if len(temp) < opts.sequence_length:
gap = opts.sequence_length - len(temp)
temp = np.array(temp + [0] * gap)
assert len(temp) == opts.sequence_length
batch[i] = temp
labels[i] = target
if len(labels) == opts.batch_size and single_sample is not None:
yield | np.array(batch) | numpy.array |
"""
@author: <NAME> (University of Sydney)
-------------------------------------------------------------------------
AMICAL: Aperture Masking Interferometry Calibration and Analysis Library
-------------------------------------------------------------------------
Matched filter pipeline method.
All AMI related function, the most important are:
- make_mf: compute splodge positions for a given mask,
- tri_pix: compute unique closing triangle for a given splodge.
--------------------------------------------------------------------
"""
import os
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
from munch import munchify as dict2class
from termcolor import cprint
from amical.dpfit import leastsqFit
from amical.get_infos_obs import get_mask
from amical.get_infos_obs import get_pixel_size
from amical.get_infos_obs import get_wavelength
from amical.mf_pipeline.idl_function import array_coords
from amical.mf_pipeline.idl_function import dist
from amical.tools import gauss_2d_asym
from amical.tools import linear
from amical.tools import norm_max
from amical.tools import plot_circle
def _plot_mask_coord(xy_coords, maskname, instrument):
    """Plot the aperture positions of the mask within the telescope pupil.

    Each aperture is drawn at its (x, y) position [m] and labelled with its
    index. Returns the created matplotlib figure (not shown or saved here).
    """
    # D is the pupil diameter [m] used to scale the axes; NIRISS apertures
    # are drawn as hexagons, others as circles.
    if instrument == "NIRISS":
        marker = "H"
        D = 6.5
    else:
        D = 8.0
        marker = "o"
    fig = plt.figure(figsize=(6, 5.5))
    plt.title(f"{instrument} - mask {maskname}", fontsize=14)
    for i in range(xy_coords.shape[0]):
        plt.scatter(
            xy_coords[i][0],
            xy_coords[i][1],
            s=1e2,
            c="None",
            edgecolors="navy",
            marker=marker,
        )
        # Label each hole with its index, slightly offset from the marker.
        plt.text(xy_coords[i][0] + 0.1, xy_coords[i][1] + 0.1, i)
    plt.xlabel("Aperture x-coordinate [m]", fontsize=12)
    plt.ylabel("Aperture y-coordinate [m]", fontsize=12)
    plt.axis([-D / 2.0, D / 2.0, -D / 2.0, D / 2.0])
    plt.tight_layout()
    return fig
def _compute_uv_coord(
xy_coords, index_mask, filt, pixelSize, npix, round_uv_to_pixel=False
):
"""Compute the expected u-v coordinated on the detector. If `round_uv_to_pixel`
is True, the closest integer position is used."""
n_baselines = index_mask.n_baselines
bl2h_ix = index_mask.bl2h_ix
u_real = np.zeros(n_baselines)
v_real = np.zeros(n_baselines)
for i in range(n_baselines):
if not round_uv_to_pixel:
u_real[i] = (
xy_coords[bl2h_ix[0, i], 0] - xy_coords[bl2h_ix[1, i], 0]
) / filt[0]
v_real[i] = (
xy_coords[bl2h_ix[0, i], 1] - xy_coords[bl2h_ix[1, i], 1]
) / filt[0]
else:
onepix = 1.0 / (npix * pixelSize)
onepix_xy = onepix * filt[0]
new_xy = (xy_coords / onepix_xy).astype(int) * onepix_xy
u_real[i] = (new_xy[bl2h_ix[0, i], 0] - new_xy[bl2h_ix[1, i], 0]) / filt[0]
v_real[i] = (new_xy[bl2h_ix[0, i], 1] - new_xy[bl2h_ix[1, i], 1]) / filt[0]
return u_real, v_real
def _peak_fft_method(
    i, npix, xy_coords, wl, index_mask, pixelsize, innerpix, innerpix_center
):
    """Sample the u-v splodge of baseline *i* by cross-correlating the FFTs of
    its two aperture maps, accumulated over the wavelength channels in *wl*.

    Returns a dict with the flattened matched filter ("flat") and its
    fft-shifted counterpart ("centered"), both max-normalized with the
    central pixels listed in *innerpix*/*innerpix_center* zeroed.
    """
    mf = np.zeros([npix, npix])
    n_holes = index_mask.n_holes
    bl2h_ix = index_mask.bl2h_ix
    npix = mf.shape[0]
    # Recenter the mask on its barycentre before projecting onto the detector.
    sum_xy = np.sum(xy_coords, axis=0) / n_holes
    shift_fact = np.ones([n_holes, 2])
    shift_fact[:, 0] = sum_xy[0]
    shift_fact[:, 1] = sum_xy[1]
    xy_coords2 = xy_coords.copy()
    xy_coords2 -= shift_fact
    for j in range(len(wl)):
        # Aperture map of the baseline's second hole: bilinear spread of
        # unit flux over the 4 pixels around its sub-pixel position.
        xyh = xy_coords2[bl2h_ix[1, i], :] / wl[j] * pixelsize * npix + npix // 2
        delta = xyh - np.floor(xyh)
        ap1 = np.zeros([npix, npix])
        x1 = int(xyh[1])
        y1 = int(xyh[0])
        ap1[x1, y1] = (1.0 - delta[0]) * (1.0 - delta[1])
        ap1[x1, y1 + 1] = delta[0] * (1.0 - delta[1])
        ap1[x1 + 1, y1] = (1.0 - delta[0]) * delta[1]
        ap1[x1 + 1, y1 + 1] = delta[0] * delta[1]
        # Same bilinear spread for the baseline's first hole.
        xyh = xy_coords2[bl2h_ix[0, i], :] / wl[j] * pixelsize * npix + npix // 2
        delta = xyh - np.floor(xyh)
        ap2 = np.zeros([npix, npix])
        x2 = int(xyh[1])
        y2 = int(xyh[0])
        ap2[x2, y2] = (1.0 - delta[0]) * (1.0 - delta[1])
        ap2[x2, y2 + 1] = delta[0] * (1.0 - delta[1])
        ap2[x2 + 1, y2] = (1.0 - delta[0]) * delta[1]
        ap2[x2 + 1, y2 + 1] = delta[0] * delta[1]
        n_elts = npix**2
        # Cross-spectrum of the two apertures, transformed back; accumulate
        # the real part over wavelength channels.
        tmf = np.fft.fft2(ap1) / n_elts * np.conj(np.fft.fft2(ap2) / n_elts)
        tmf = np.fft.fft2(tmf)
        mf = mf + np.real(tmf)
    mf_flat = norm_max(mf.ravel())
    mf_centered = norm_max(np.fft.fftshift(mf).ravel())
    # Mask out the central (low-frequency) splodge in both layouts.
    mf_centered[innerpix_center] = 0.0
    mf_flat[innerpix] = 0.0
    dic_mf = {"flat": mf_flat, "centered": mf_centered}
    return dic_mf
def _peak_square_method(i, npix, u, v, pixelsize, innerpix, innerpix_center):
    """Sample the splodge of baseline *i* as bilinear weights over the 4
    pixels surrounding its (v, u) position (square method).

    Returns a dict with the flattened matched filter ("flat") and its
    fft-shifted counterpart ("centered"), both max-normalized with the
    central pixels listed in *innerpix*/*innerpix_center* zeroed.
    """
    mf = np.zeros([npix, npix])
    # Continuous (v, u) position in pixels, wrapped into the FFT convention.
    uv = np.array([v[i], u[i]]) * pixelsize * npix
    uv = (uv + npix) % npix
    uv_int = np.array(np.floor(uv), dtype=int)
    uv_frac = uv - uv_int
    # Bilinear weights spread over the 4 neighbouring pixels (wrapping at
    # the image edges).
    mf[uv_int[0], uv_int[1]] = (1 - uv_frac[0]) * (1 - uv_frac[1])
    mf[uv_int[0], (uv_int[1] + 1) % npix] = (1 - uv_frac[0]) * uv_frac[1]
    mf[(uv_int[0] + 1) % npix, uv_int[1]] = uv_frac[0] * (1 - uv_frac[1])
    mf[(uv_int[0] + 1) % npix, (uv_int[1] + 1) % npix] = uv_frac[0] * uv_frac[1]
    # The original `mf = np.roll(mf, [0, 0])` was a zero-shift no-op; removed.
    mf_flat = norm_max(mf.ravel())
    mf_centered = norm_max(np.fft.fftshift(mf).ravel())
    mf_flat[innerpix] = 0.0
    mf_centered[innerpix_center] = 0.0
    dic_mf = {"flat": mf_flat, "centered": mf_centered}
    return dic_mf
def _peak_one_method(i, npix, u, v, pixelsize, innerpix, innerpix_center):
    """Sample the splodge of baseline *i* as a single pixel at the rounded
    (v, u) position (unique-pixel method).

    Returns a dict with the flattened matched filter ("flat") and its
    fft-shifted counterpart ("centered"), both max-normalized with the
    central pixels listed in *innerpix*/*innerpix_center* zeroed.
    """
    mf = np.zeros([npix, npix])
    uv = np.array([v[i], u[i]]) * pixelsize * npix
    uv = (uv + npix) % npix
    # NOTE(review): np.round can yield npix for uv just below npix, which
    # would overflow the index — confirm inputs stay away from the wrap point.
    uv_int = np.array(np.round(uv), dtype=int)
    mf[uv_int[0], uv_int[1]] = 1
    # The original `mf = np.roll(mf, [0, 0])` was a zero-shift no-op; removed.
    mf_flat = norm_max(mf.ravel())
    mf_centered = norm_max(np.fft.fftshift(mf).ravel())
    mf_flat[innerpix] = 0.0
    mf_centered[innerpix_center] = 0.0
    dic_mf = {"flat": mf_flat, "centered": mf_centered}
    return dic_mf
def _peak_gauss_method(
    i,
    npix,
    u,
    v,
    filt,
    index_mask,
    pixelsize,
    innerpix,
    innerpix_center,
    fw_splodge=0.7,
    hole_diam=0.8,
):
    """Sample the splodge of baseline *i* as a disk of fixed radius plus a
    2-D Gaussian gain map, so every baseline gets the same splodge size.

    Returns a munch-style class with the max-normalized matched filter
    ("flat"/"centered") and the Gaussian gain maps ("gain_f"/"gain_c").
    """
    mf = np.zeros([npix, npix])
    n_holes = index_mask.n_holes
    # Shortest baseline [m] sets the splodge radius so splodges never overlap.
    l_B = np.sqrt(u**2 + v**2)
    minbl = np.min(l_B) * filt[0]
    if n_holes >= 15:
        sampledisk_r = minbl / 2 / filt[0] * pixelsize * npix * 0.9
    else:
        sampledisk_r = minbl / 2.0 / filt[0] * pixelsize * npix * fw_splodge
    # Pixel position of the splodge centre in the fft-shifted image.
    xspot = float(np.round(v[i] * pixelsize * npix + npix / 2.0))
    yspot = float(np.round(u[i] * pixelsize * npix + npix / 2.0))
    mf = plot_circle(mf, xspot, yspot, sampledisk_r, display=False)
    # Move back to the unshifted (FFT) pixel convention.
    mf = np.roll(mf, npix // 2, axis=0)
    mf = np.roll(mf, npix // 2, axis=1)
    # Gaussian weighting of the splodge, FWHM set by the hole diameter.
    X = [np.arange(npix), np.arange(npix), 1]
    splodge_fwhm = hole_diam / filt[0] * pixelsize * npix / 1.9
    param = {
        "A": 1,
        "x0": -npix // 2 + yspot,
        "y0": -npix // 2 + xspot,
        "fwhm_x": splodge_fwhm,
        "fwhm_y": splodge_fwhm,
        "theta": 0,
    }
    gauss = gauss_2d_asym(X, param)
    gauss = np.roll(gauss, npix // 2, axis=0)
    gauss = np.roll(gauss, npix // 2, axis=1)
    # Normalize the Gaussian to unit total flux before flattening.
    mfg = gauss / np.sum(gauss)
    mf_gain_flat = mfg.ravel()
    mf_gain_centered = norm_max(np.fft.fftshift(mfg).ravel())
    mf_flat = norm_max(mf.ravel())
    mf_centered = norm_max(np.fft.fftshift(mf).ravel())
    # Mask out the central (low-frequency) splodge in all layouts.
    mf_flat[innerpix] = 0.0
    mf_gain_flat[innerpix] = 0.0
    mf_centered[innerpix_center] = 0.0
    mf_gain_centered[innerpix_center] = 0.0
    mf = {
        "flat": mf_flat,
        "centered": mf_centered,
        "gain_f": mf_gain_flat,
        "gain_c": mf_gain_centered,
    }
    return dict2class(mf)
def _normalize_gain(
mf_flat, mf_centered, pixelvector, pixelvector_c, normalize_pixelgain=True
):
if normalize_pixelgain:
pixelgain = mf_flat[pixelvector] / np.sum(mf_flat[pixelvector])
pixelgain_c = mf_centered[pixelvector_c] / np.sum(mf_centered[pixelvector_c])
else:
pixelgain = (
mf_flat[pixelvector]
* np.max(mf_flat[pixelvector])
/ np.sum(mf_flat[pixelvector] ** 2)
)
pixelgain_c = (
mf_centered[pixelvector_c]
* np.max(mf_centered[pixelvector_c])
/ np.sum(mf_centered[pixelvector_c] ** 2)
)
return pixelgain, pixelgain_c
def _compute_center_splodge(
    npix,
    pixelsize,
    filt,
    hole_diam=0.8,
):
    """Compute the pixel indices of the central (low-frequency) splodge, in
    both the flat (FFT) layout and the fft-shifted (centered) layout, so
    callers can zero them out of the matched filters.
    """
    # Radial distance map from the corner (FFT convention).
    tmp = dist(npix)
    # Pixels closer than ~the single-hole cutoff frequency; the * 0.6 shrink
    # of the index values mirrors the original code — intent unclear.
    innerpix = np.array(
        np.array(np.where(tmp < (hole_diam / filt[0] * pixelsize * npix) * 0.9)) * 0.6,
        dtype=int,
    )
    # NOTE(review): np.meshgrid is called on the scalars (npix, npix), which
    # yields 1x1 arrays rather than an npix x npix grid — this looks like it
    # was intended to be np.meshgrid(np.arange(npix), np.arange(npix));
    # confirm against the upstream implementation before changing.
    x, y = np.meshgrid(npix, npix)
    dist_c = np.sqrt((x - npix // 2) ** 2 + (y - npix // 2) ** 2)
    inner_pos = np.array(
        np.where(dist_c < (hole_diam / filt[0] * pixelsize * npix) * 0.9)
    )
    innerpix_center = np.array(inner_pos * 0.6, dtype=int)
    return innerpix, innerpix_center
def _make_overlap_mat(mf, n_baselines, display=False):
    """Build and invert the baseline-overlap matrices of the matched filter.

    For every baseline pair (i, j) the overlap of filter i with the normal and
    conjugate filters of j is accumulated into real/imag matrices, which are
    then inverted, noise-floored and clipped to [-2, 2]. Returns
    (mf_rmat, mf_imat). Optionally displays the imaginary-part matrix.
    """
    overmat = np.zeros(
        [n_baselines, n_baselines], dtype=[("real", float), ("imag", float)]
    )
    # Now find the overlap matrices
    for i in range(n_baselines):
        # Only pixels where filter i is non-zero contribute to its overlaps.
        pix_on = np.where(mf["norm"][:, :, i] != 0.0)
        for j in range(n_baselines):
            t1 = np.sum(mf["norm"][:, :, i][pix_on] * mf["norm"][:, :, j][pix_on])
            t2 = np.sum(mf["norm"][:, :, i][pix_on] * mf["conj"][:, :, j][pix_on])
            overmat["real"][i, j] = t1 + t2
            overmat["imag"][i, j] = t1 - t2
    mf_rmat = np.linalg.inv(overmat["real"])
    mf_imat = np.linalg.inv(overmat["imag"])
    # Zero numerically-tiny entries, then clip extreme weights to +/-2.
    mf_rmat[np.where(mf_rmat < 1e-6)] = 0.0
    mf_imat[np.where(mf_imat < 1e-6)] = 0.0
    mf_rmat[mf_rmat >= 2] = 2
    mf_imat[mf_imat >= 2] = 2
    mf_imat[mf_imat <= -2] = -2
    if display:
        plt.figure(figsize=(6, 6))
        plt.title("Overlap matrix", fontsize=14)
        plt.imshow(mf_imat, cmap="gray", origin="upper")
        plt.ylabel("# baselines", fontsize=12)
        plt.xlabel("# baselines", fontsize=12)
        plt.tight_layout()
    return mf_rmat, mf_imat
def make_mf(
    maskname,
    instrument,
    filtname,
    npix,
    i_wl=None,
    peakmethod="fft",
    n_wl=3,
    theta_detector=0,
    cutoff=1e-4,
    hole_diam=0.8,
    fw_splodge=0.7,
    scaling=1,
    diag_plot=False,
    verbose=False,
    display=True,
    save_to=None,
    filename=None,
):
    """
    Summary:
    --------
    Compute the match filter mf which give the indices of the peak positions (mf.pvct)
    and the associated gains (mf.gvct) in the image. Contains also the u-v coordinates,
    wavelengths informations, holes mask positions (mf.xy_coords), centered mf (mf.cpvct,
    mf.gpvct), etc.

    Parameters:
    -----------
    `maskname`: str
        Name of the mask (number of holes),\n
    `instrument`: str
        Instrument used (default = jwst),\n
    `filtname`: str
        Name of the filter,\n
    `npix`: int
        Size of the image,\n
    `peakmethod` {str}:
        3 methods are used to sample the u-v space: 'fft' uses fft between individual holes to compute
        the expected splodge positions; 'square' compute the splodge in a square using the expected
        fraction of pixel to determine its weight; 'gauss' considers a gaussian splodge (with a gaussian
        weight) to get the same splodge side for each n(n-1)/2 baselines,\n
    `n_wl`: int
        number of wavelengths to use to simulate bandwidth,\n
    `theta_detector`: float
        Angle [deg] to rotate the mask compare to the detector (if the mask is not
        perfectly aligned with the detector, e.g.: VLT/VISIR) ,\n
    `cutoff`: float
        cutoff limit between noise and signal pixels in simulated transforms,\n
    `hole_diam`: float
        Diameter of a single aperture (0.8 for JWST),\n
    `fw_splodge` {float}:
        Relative size of the splodge used to compute multiple triangle indices and the fwhm
        of the 'gauss' technique,\n

    Returns:
    --------
    A munch-style class holding the matched-filter vectors (pvct/gvct and
    centered cpvct/cgvct), their index table (ix), u-v coordinates, overlap
    matrices (rmat/imat) and wavelength/pixel metadata, or None when the
    instrument or peakmethod is unknown.
    """
    # Get detector, filter and mask informations
    # ------------------------------------------
    pixelsize = get_pixel_size(instrument)  # Pixel size of the detector [rad]
    if pixelsize is np.nan:
        cprint("Error: Pixel size unknown for %s." % instrument, "red")
        return None
    # Wavelength of the filter (filt[0]: central, filt[1]: width)
    filt = get_wavelength(instrument, filtname)
    if instrument == "SPHERE-IFS":
        # IFS data: select a single channel (int i_wl) or average a range.
        if isinstance(i_wl, (int, np.integer)):
            filt = [filt[i_wl], 0.001 * filt[i_wl]]
        else:
            filt = [np.mean(filt[i_wl[0] : i_wl[1]]), filt[i_wl[1]] - filt[i_wl[0]]]
    xy_coords = get_mask(instrument, maskname)  # mask coordinates
    # Scale and rotate the mask by theta_detector to match the detector.
    x_mask = xy_coords[:, 0] * scaling
    y_mask = xy_coords[:, 1] * scaling
    x_mask_rot = x_mask * np.cos(np.deg2rad(theta_detector)) + y_mask * np.sin(
        np.deg2rad(theta_detector)
    )
    y_mask_rot = -x_mask * np.sin(np.deg2rad(theta_detector)) + y_mask * np.cos(
        np.deg2rad(theta_detector)
    )
    xy_coords_rot = []
    for i in range(len(x_mask)):
        xy_coords_rot.append([x_mask_rot[i], y_mask_rot[i]])
    xy_coords = np.array(xy_coords_rot)
    if display:
        _plot_mask_coord(xy_coords, maskname, instrument)
        if save_to is not None:
            figname = os.path.join(save_to, Path(filename).stem)
            plt.savefig(f"{figname}_{1}.pdf")
    n_holes = xy_coords.shape[0]
    # Index bookkeeping between holes, baselines and bispectrum elements.
    index_mask = compute_index_mask(n_holes)
    n_baselines = index_mask.n_baselines
    n_bispect = index_mask.n_bispect
    ncp_i = int((n_holes - 1) * (n_holes - 2) / 2)
    if verbose:
        cprint("---------------------------", "cyan")
        cprint(
            "%s (%s): %i holes masks" % (instrument.upper(), filtname, n_holes), "cyan"
        )
        cprint("---------------------------", "cyan")
        cprint(
            "nbl = %i, nbs = %i, ncp_i = %i, ncov = %i"
            % (n_baselines, n_bispect, ncp_i, index_mask.n_cov),
            "cyan",
        )
    # Consider the filter to be made up of n_wl wavelengths
    wl = np.arange(n_wl) / n_wl * filt[1]
    wl = wl - np.mean(wl) + filt[0]
    # Running offsets into the flat (Sum) and centered (Sum_c) gain vectors.
    Sum, Sum_c = 0, 0
    mf_ix = np.zeros([2, n_baselines], dtype=int)  # matched filter
    mf_ix_c = np.zeros([2, n_baselines], dtype=int)  # matched filter
    if verbose:
        print("\n- Calculating sampling of", n_holes, "holes array...")
    innerpix, innerpix_center = _compute_center_splodge(
        npix, pixelsize, filt, hole_diam=hole_diam
    )
    u, v = _compute_uv_coord(
        xy_coords, index_mask, filt, pixelsize, npix, round_uv_to_pixel=False
    )
    mf_pvct = mf_gvct = mfc_pvct = mfc_gvct = None
    for i in range(n_baselines):
        # Arguments shared by all splodge-sampling methods.
        args = {
            "i": i,
            "npix": npix,
            "pixelsize": pixelsize,
            "innerpix": innerpix,
            "innerpix_center": innerpix_center,
        }
        if peakmethod == "fft":
            ind_peak = _peak_fft_method(
                xy_coords=xy_coords, wl=wl, index_mask=index_mask, **args
            )
        elif peakmethod == "square":
            ind_peak = _peak_square_method(u=u, v=v, **args)
        elif peakmethod == "unique":
            ind_peak = _peak_one_method(u=u, v=v, **args)
        elif peakmethod == "gauss":
            ind_peak = _peak_gauss_method(
                u=u,
                v=v,
                filt=filt,
                index_mask=index_mask,
                fw_splodge=fw_splodge,
                **args,
                hole_diam=hole_diam,
            )
        else:
            cprint(
                "Error: choose the extraction method 'gauss', 'fft' or 'square'.", "red"
            )
            return None
        # Compute the cutoff limit before saving the gain map
        pixelvector = np.where(ind_peak["flat"] >= cutoff)[0]
        pixelvector_c = np.where(ind_peak["centered"] >= cutoff)[0]
        # Now normalise the pixel gain, so that using the matched filter
        # on an ideal splodge is equivalent to just looking at the peak...
        if peakmethod == "gauss":
            pixelgain, pixelgain_c = _normalize_gain(
                ind_peak["gain_f"], ind_peak["gain_c"], pixelvector, pixelvector_c
            )
        else:
            pixelgain, pixelgain_c = _normalize_gain(
                ind_peak["flat"], ind_peak["centered"], pixelvector, pixelvector_c
            )
        # Record the [start, stop) span of this baseline in the flat vectors.
        mf_ix[0, i] = Sum
        Sum = Sum + len(pixelvector)
        mf_ix[1, i] = Sum
        mf_ix_c[0, i] = Sum_c
        Sum_c = Sum_c + len(pixelvector_c)
        mf_ix_c[1, i] = Sum_c
        if i == 0:
            mf_pvct = list(pixelvector)
            mf_gvct = list(pixelgain)
            mfc_pvct = list(pixelvector_c)
            mfc_gvct = list(pixelgain_c)
        else:
            mf_pvct.extend(list(pixelvector))
            mf_gvct.extend(list(pixelgain))
            mfc_pvct.extend(list(pixelvector_c))
            mfc_gvct.extend(list(pixelgain_c))
    # Rebuild per-baseline 2-D matched filters (and conjugates) from the
    # concatenated flat vectors.
    mf = np.zeros(
        [npix, npix, n_baselines],
        dtype=[("norm", float), ("conj", float), ("norm_c", float), ("conj_c", float)],
    )
    for i in range(n_baselines):
        mf_tmp = np.zeros([npix, npix])
        mf_tmp_c = np.zeros([npix, npix])
        ind = mf_pvct[mf_ix[0, i] : mf_ix[1, i]]
        ind_c = mfc_pvct[mf_ix_c[0, i] : mf_ix_c[1, i]]
        mf_tmp.ravel()[ind] = mf_gvct[mf_ix[0, i] : mf_ix[1, i]]
        mf_tmp_c.ravel()[ind_c] = mfc_gvct[mf_ix_c[0, i] : mf_ix_c[1, i]]
        mf_tmp = mf_tmp.reshape([npix, npix])
        mf_tmp_c = mf_tmp_c.reshape([npix, npix])
        mf["norm"][:, :, i] = np.roll(mf_tmp, 0, axis=1)
        mf["norm_c"][:, :, i] = np.roll(mf_tmp_c, 0, axis=1)
        # Conjugate splodge: 180-degree rotation with a 1-pixel roll to stay
        # on the FFT grid.
        mf_temp_rot = np.roll(np.roll(np.rot90(np.rot90(mf_tmp)), 1, axis=0), 1, axis=1)
        mf_temp_rot_c = np.roll(
            np.roll(np.rot90(np.rot90(mf_tmp_c)), 1, axis=0), 1, axis=1
        )
        mf["conj"][:, :, i] = mf_temp_rot
        mf["conj_c"][:, :, i] = mf_temp_rot_c
        # L2-normalize the filter; conjugate/centered share the same norm.
        norm = np.sqrt(np.sum(mf["norm"][:, :, i] ** 2))
        mf["norm"][:, :, i] = mf["norm"][:, :, i] / norm
        mf["conj"][:, :, i] = mf["conj"][:, :, i] / norm
        mf["norm_c"][:, :, i] = mf["norm_c"][:, :, i] / norm
        mf["conj_c"][:, :, i] = mf["conj_c"][:, :, i] / norm
    rmat, imat = _make_overlap_mat(mf, n_baselines, display=diag_plot)
    # Summed u-v coverage maps (sum and difference of normal/conjugate parts).
    mf_tot = np.sum(mf["norm"], axis=2) + np.sum(mf["conj"], axis=2)
    mf_tot_m = np.sum(mf["norm"], axis=2) - np.sum(mf["conj"], axis=2)
    im_uv = np.roll(np.fft.fftshift(mf_tot), 1, axis=1)
    if display:
        plt.figure(figsize=(9, 7))
        plt.title("(u-v) plan - mask %s" % (maskname), fontsize=14)
        plt.imshow(im_uv, origin="lower")
        plt.plot(npix // 2 + 1, npix // 2, "r+")
        plt.ylabel("Y [pix]")  # , fontsize=12)
        plt.xlabel("X [pix]")  # , fontsize=12)
        plt.tight_layout()
    out = {
        "cube": mf["norm"],
        "imat": imat,
        "rmat": rmat,
        "uv": im_uv,
        "tot": mf_tot,
        "tot_m": mf_tot_m,
        "pvct": mf_pvct,
        "gvct": mf_gvct,
        "cpvct": mfc_pvct,
        "cgvct": mfc_gvct,
        "ix": mf_ix,
        "u": u * filt[0],
        "v": v * filt[0],
        "wl": filt[0],
        "e_wl": filt[1],
        "pixelSize": pixelsize,
        "xy_coords": xy_coords,
    }
    return dict2class(out)
def compute_index_mask(n_holes, verbose=False):
    """
    This function generates index arrays for an N-hole mask.

    Parameters:
    -----------
    `n_holes`: int
        number of holes in the array.

    Returns:
    --------
    `n_baselines`: int
        The number of different baselines (n_holes*(n_holes-1)/2),\n
    `n_bispect`: int
        The number of bispectrum elements (n_holes*(n_holes-1)*(n_holes-2)/6),\n
    `n_cov`: int
        The number of bispectrum covariance
        (n_holes*(n_holes-1)*(n_holes-2)*(n_holes-3)/4),\n
    `h2bl_ix`: numpy.array
        Holes to baselines index,\n
    `bl2h_ix`: numpy.array
        Baselines to holes index,\n
    `bs2bl_ix`: numpy.array
        Bispectrum to baselines index,\n
    `bl2bs_ix` : numpy.array
        Baselines to bispectrum index,\n
    `bscov2bs_ix`: numpy.array,
        Bispectrum covariance to bispectrum index.

    All of the above are returned bundled in a single munch-style class.
    """
    n_baselines = int(n_holes * (n_holes - 1) / 2)
    n_bispect = int(n_holes * (n_holes - 1) * (n_holes - 2) / 6)
    n_cov = int(n_holes * (n_holes - 1) * (n_holes - 2) * (n_holes - 3) / 4)
    # Given a pair of holes i,j h2bl_ix(i,j) gives the number of the baseline
    h2bl_ix = np.zeros([n_holes, n_holes], dtype=int)
    count = 0
    for i in range(n_holes - 1):
        for j in np.arange(i + 1, n_holes):
            h2bl_ix[i, j] = int(count)
            count = count + 1
    if verbose:
        print(h2bl_ix.T)  # transpose to display as IDL
    # Given a baseline, bl2h_ix gives the 2 holes that go to make it up
    bl2h_ix = np.zeros([2, n_baselines], dtype=int)
    count = 0
    for i in range(n_holes - 1):
        for j in np.arange(i + 1, n_holes):
            bl2h_ix[0, count] = int(i)
            bl2h_ix[1, count] = int(j)
            count = count + 1
    if verbose:
        print(bl2h_ix.T)  # transpose to display as IDL
    # Given a point in the bispectrum, bs2bl_ix gives the 3 baselines which
    # make the triangle. bl2bs_ix gives the index of all points in the
    # bispectrum containing a given baseline.
    bs2bl_ix = np.zeros([3, n_bispect], dtype=int)
    temp = np.zeros([n_baselines], dtype=int)  # N_baselines * a count variable
    if verbose:
        print("Indexing bispectrum...")
    bl2bs_ix = np.zeros([n_baselines, n_holes - 2], dtype=int)
    count = 0
    for i in range(n_holes - 2):
        for j in np.arange(i + 1, n_holes - 1):
            for k in np.arange(j + 1, n_holes):
                # The three baselines forming triangle (i, j, k).
                bs2bl_ix[0, count] = int(h2bl_ix[i, j])
                bs2bl_ix[1, count] = int(h2bl_ix[j, k])
                bs2bl_ix[2, count] = int(h2bl_ix[i, k])
                # Register this bispectrum point against each of its baselines;
                # temp tracks how many points each baseline has so far.
                bl2bs_ix[bs2bl_ix[0, count], temp[bs2bl_ix[0, count]]] = count
                bl2bs_ix[bs2bl_ix[1, count], temp[bs2bl_ix[1, count]]] = count
                bl2bs_ix[bs2bl_ix[2, count], temp[bs2bl_ix[2, count]]] = count
                temp[bs2bl_ix[0, count]] = temp[bs2bl_ix[0, count]] + 1
                temp[bs2bl_ix[1, count]] = temp[bs2bl_ix[1, count]] + 1
                temp[bs2bl_ix[2, count]] = temp[bs2bl_ix[2, count]] + 1
                count += 1
    if verbose:
        print(bl2bs_ix.T)  # transpose to display as IDL
        print("Indexing the bispectral covariance...")
    bscov2bs_ix = np.zeros([2, n_cov], dtype=int)
    count = 0
    for i in range(n_bispect - 1):
        for j in np.arange(i + 1, n_bispect):
            # Two bispectrum points covary when their triangles share at
            # least one baseline.
            if (
                (bs2bl_ix[0, i] == bs2bl_ix[0, j])
                or (bs2bl_ix[1, i] == bs2bl_ix[0, j])
                or (bs2bl_ix[2, i] == bs2bl_ix[0, j])
                or (bs2bl_ix[0, i] == bs2bl_ix[1, j])
                or (bs2bl_ix[1, i] == bs2bl_ix[1, j])
                or (bs2bl_ix[2, i] == bs2bl_ix[1, j])
                or (bs2bl_ix[0, i] == bs2bl_ix[2, j])
                or (bs2bl_ix[1, i] == bs2bl_ix[2, j])
                or (bs2bl_ix[2, i] == bs2bl_ix[2, j])
            ):
                bscov2bs_ix[0, count] = i
                bscov2bs_ix[1, count] = j
                count += 1
    if verbose:
        print(bscov2bs_ix.T)
    indices_mask = dict2class(
        {
            "n_baselines": n_baselines,
            "n_bispect": n_bispect,
            "n_cov": n_cov,
            "h2bl_ix": h2bl_ix,
            "bl2h_ix": bl2h_ix,
            "bs2bl_ix": bs2bl_ix,
            "bl2bs_ix": bl2bs_ix,
            "bscov2bs_ix": bscov2bs_ix,
            "n_holes": n_holes,
        }
    )
    return indices_mask
def give_peak_info2d(mf, n_baselines, dim1, dim2):
    """
    Transform mf.pvct indices from flatten 1-D array to 2-D coordinates and the
    associated gains.

    Parameters:
    -----------
    `mf` {object class}:
        Match filter class (see make_mf function),\n
    `n_baselines` {int}:
        Number of baselines,\n
    `dim1`, `dim2` {int}:
        Size of the 2-D image.\n

    Returns:
    --------
    `l_peak` {list}:
        List of the n_baselines peak positions (2-D) and gains.
    """
    grid_x, grid_y = np.meshgrid(np.arange(dim1), np.arange(dim2))
    flat_x = grid_x.ravel()
    flat_y = grid_y.ravel()
    all_peaks = []
    for j in range(n_baselines):
        start, stop = mf.ix[0, j], mf.ix[1, j]
        flat_idx = mf.pvct[start:stop]
        gains = mf.gvct[start:stop]
        xs = flat_x[flat_idx]
        ys = flat_y[flat_idx]
        # One [y, x, gain] row per selected pixel of this baseline.
        peak = [[int(ys[k]), int(xs[k]), gains[k]] for k in range(len(xs))]
        all_peaks.append(np.array(peak))
    return np.array(all_peaks, dtype=object)
def clos_unique(closing_tri_pix):
    """Return the columns of *closing_tri_pix* holding unique pixel triplets.

    Two columns are duplicates when they contain the same three values in any
    order. The original implementation concatenated the sorted values as
    decimal strings, so distinct triplets could collide (e.g. (1, 11, 11) and
    (1, 1, 111) both produced "11111") and be wrongly merged; a tuple key in a
    set is unambiguous.

    Args:
        closing_tri_pix (numpy.ndarray): (3, n) array of pixel triplets.

    Returns:
        numpy.ndarray: the (3, m) sub-array keeping the first occurrence of
        each unique triplet, in original column order.
    """
    seen = set()
    keep = []
    for i in range(closing_tri_pix.shape[1]):
        triplet = tuple(np.sort(closing_tri_pix[:, i]))
        if triplet not in seen:
            seen.add(triplet)
            keep.append(i)
    return closing_tri_pix[:, keep]
def tri_pix(array_size, sampledisk_r, verbose=True, display=True):
"""Compute all combination of triangle for a given splodge size"""
if array_size % 2 == 1:
cprint("\n! Warnings: image dimension must be even (%i)" % array_size, "red")
cprint("Possible triangle inside the splodge should be incorrect.\n", "red")
d = np.zeros([array_size, array_size])
d = plot_circle(d, array_size // 2, array_size // 2, sampledisk_r, display=False)
pvct_flat = np.where(d.ravel() > 0)
npx = len(pvct_flat[0])
for px1 in range(npx):
thispix1 = np.array(array_coords(pvct_flat[0][px1], array_size))
roll1 = np.roll(d, int(array_size // 2 - thispix1[0]), axis=0)
roll2 = np.roll(roll1, int(array_size // 2 - thispix1[1]), axis=1)
xcor_12 = d + roll2
valid_b12_vct = np.where(xcor_12.T.ravel() > 1)
thisntriv = len(valid_b12_vct[0])
thistrivct = | np.zeros([3, thisntriv]) | numpy.zeros |
# call this script with `python -m evaluation.evaluate_poselines_globalaction`
import numpy as np
import cv2
import pandas as pd
import datetime
import torch
from tqdm import tqdm
from tqdm.std import trange
from . import eval_utils
from itertools import combinations
from compoelem.detect.openpose.lib.utils.common import BodyPart, Human, CocoPart
def neg_cos_dist(r_tick, s_tick):
    """Cosine distance (1 - cosine similarity) between two flattened keypoint
    arrays."""
    u = np.ravel(r_tick)
    v = np.ravel(s_tick)
    similarity = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return 1 - similarity
def flipped_cosine_min_dist(r_tick, s_tick):
    """Cosine distance between two poses, invariant to horizontal mirroring
    of the second pose (the smaller of the plain and x-flipped distances)."""
    mirrored = np.array([[-kp[0], kp[1]] for kp in s_tick])
    d_plain = neg_cos_dist(r_tick, s_tick)
    d_flipped = neg_cos_dist(r_tick, mirrored)
    return d_plain if d_plain < d_flipped else d_flipped
def openpose_to_nparray(human: Human):
    """Convert an openpose Human into an (18, 2) array of keypoint positions;
    parts missing from human.body_parts become the (0, 0) placeholder."""
    parts = human.body_parts
    keypoints = []
    for part_idx in range(0, 18):
        if part_idx in parts:
            keypoints.append([parts[part_idx].x, parts[part_idx].y])
        else:
            keypoints.append(np.array([0, 0]))
    return np.array(keypoints)
def isNoneKp(kp):
    """True when *kp* is the (0, 0) placeholder used for missing keypoints."""
    x, y = kp[0], kp[1]
    return x == 0 and y == 0
def neck_norm_poses(r, s):
    """Translate two poses so that their neck keypoints sit at the origin.

    Keypoints missing in BOTH poses (encoded as (0, 0)) are zeroed in both
    outputs so they do not contribute to the distance computation.

    Args:
        r, s: (18, 2) arrays of OpenPose keypoints.

    Returns:
        Tuple (r_tick, s_tick) of neck-normalized (18, 2) arrays.

    Raises:
        ValueError: if a pose has neither a neck point nor both shoulder
            points, so no normalization root can be estimated.
    """
    ROOT_POINT = CocoPart.Neck.value
    r_root = r[ROOT_POINT]
    s_root = s[ROOT_POINT]
    RSHOULDER = CocoPart.RShoulder.value
    LSHOULDER = CocoPart.LShoulder.value
    # Extension to the paper: if the neck point is missing we try to
    # estimate it with the midpoint of the left and right shoulder.
    if isNoneKp(r_root):
        if isNoneKp(r[RSHOULDER]) or isNoneKp(r[LSHOULDER]):
            raise ValueError("neck point and shoulder point missing, normalization not possible, skipping that pose")
        r_root = [(r[RSHOULDER][0] + r[LSHOULDER][0]) / 2, (r[RSHOULDER][1] + r[LSHOULDER][1]) / 2]
    if isNoneKp(s_root):
        if isNoneKp(s[RSHOULDER]) or isNoneKp(s[LSHOULDER]):
            raise ValueError("neck point and shoulder point missing, normalization not possible, skipping that pose")
        # BUG FIX: this branch previously overwrote r_root instead of
        # s_root, so pose s was normalized with its missing (0, 0) neck.
        s_root = [(s[RSHOULDER][0] + s[LSHOULDER][0]) / 2, (s[RSHOULDER][1] + s[LSHOULDER][1]) / 2]
    r_tick = []
    s_tick = []
    for r_i, s_i in zip(r, s):
        if not isNoneKp(r_i) or not isNoneKp(s_i):  # i in I_r,s: keypoint present in at least one pose
            r_tick.append(r_i - r_root)
            s_tick.append(s_i - s_root)
        else:
            # keypoint missing in both poses -> contribute nothing
            r_tick.append(np.array([0, 0]))
            s_tick.append(np.array([0, 0]))
    return np.array(r_tick), np.array(s_tick)
def compare_dist_min(poses_i1, poses_i2):
    """dist_min(i1, i2) from the paper, computed on precomputed poses.

    Returns the smallest flipped-cosine distance over all pose pairs
    together with the (idx_r, idx_s) pair attaining it, or (2, []) when
    no pair could be normalized (2 is the maximum possible distance).
    """
    arr_1 = np.array([openpose_to_nparray(h) for h in poses_i1])  # each item: (18, 2) for the 18-keypoint OpenPose model
    arr_2 = np.array([openpose_to_nparray(h) for h in poses_i2])
    distances = []
    pairings = []  # renamed from "combinations" so the itertools import is not shadowed
    for idx_r, r in enumerate(arr_1):
        for idx_s, s in enumerate(arr_2):
            try:
                r_tick, s_tick = neck_norm_poses(r, s)
            except ValueError:
                # Neck normalization impossible for this pair; skip it.
                # (Edge case not mentioned in the paper.)
                continue
            distances.append(flipped_cosine_min_dist(r_tick, s_tick))
            pairings.append((idx_r, idx_s))
    if not distances:
        return (2, [])  # maximum possible negative cosine distance
    best = np.argmin(np.array(distances))
    return (distances[best], pairings[best])
def compare_dist_bipart(poses_i1, poses_i2):
    """dist_t(i1, i2) from the paper: truncated bipartite pose distance.

    For every pose r of image 1 the best match in image 2 is found; its
    distance is truncated at threshold t before summing, so a pose with
    no good (or no valid) match contributes exactly t.
    """
    t = 0.05
    arr_1 = np.array([openpose_to_nparray(h) for h in poses_i1])  # (18, 2) per pose, 18-keypoint OpenPose model
    arr_2 = np.array([openpose_to_nparray(h) for h in poses_i2])
    all_dist = []
    all_pairings = []  # renamed from "combinations" so the itertools import is not shadowed
    for idx_r, r in enumerate(arr_1):
        candidates = []
        candidate_pairs = []
        for idx_s, s in enumerate(arr_2):
            try:
                r_tick, s_tick = neck_norm_poses(r, s)
            except ValueError:
                # normalization impossible (e.g. missing neck); skip pair
                continue
            candidates.append(flipped_cosine_min_dist(r_tick, s_tick))
            candidate_pairs.append((idx_r, idx_s))
        if not candidates:
            # r had no valid match at all (e.g. missing neck point) -> contribute t
            all_dist.append(t)
            continue
        best = np.argmin(np.array(candidates))
        if candidates[best] <= t:
            all_dist.append(candidates[best])
            all_pairings.append(candidate_pairs[best])
        else:
            all_dist.append(t)
    return (np.sum(all_dist), all_pairings)
def verify_inliers(r, s, transformation):
    """Return indices of keypoint pairs consistent with a transformation.

    Every keypoint of s is projected into the query image with the
    homogeneous ``transformation`` matrix; a pair counts as an inlier when
    the projected point lies within the pose-relative threshold of the
    query keypoint and neither point is the (0, 0) placeholder.
    """
    threshold = calc_ransac_inlier_threshold(r, s)
    # Project every keypoint of s into the query image.
    projected = np.array([([*si, 1] @ transformation)[0:2] for si in s])
    mask = []
    for ri, si in zip(r, projected):
        close_enough = np.linalg.norm(ri - si) < threshold
        both_detected = (ri[0] != 0 or ri[1] != 0) and (si[0] != 0 or si[1] != 0)
        mask.append(close_enough and both_detected)
    # indices of consistent keypoints
    return np.array(range(0, len(r)))[mask]
def calc_ransac_inlier_threshold(r, s):
    """Inlier threshold for the RANSAC keypoint verification.

    The paper derives this from the relative query pose size: the median
    ratio between distances of connected keypoints in the query pose and
    the corresponding distances of a canonical pose.  What that canonical
    pose is remains unresolved, so a fixed value is returned for now.
    """
    # TODO implement the pose-relative estimate; this constant is for testing only
    return 0.01
def calc_geometric_transformation(r, s):
    """Least-squares affine transformation mapping keypoints s onto r.

    Args:
        r: (n, 2) array of query-image keypoints (n >= 2); also used with
            the inlier subsets when re-estimating a transformation.
        s: (n, 2) array of candidate-image keypoints.

    Returns:
        Tuple (A, residual_sum): A is the 3x3 matrix such that
        ``[x, y, 1] @ A`` projects a point of s into the query image;
        residual_sum is the summed squared residual of the fit (0 when
        np.linalg.lstsq returns no residuals, e.g. for an exactly- or
        under-determined system).

    Note:
        The intended model (scale + translation + horizontal flip) has
        three degrees of freedom, but the fit below estimates a full
        unconstrained affine matrix; restricting A to scale/translation
        only is still an open TODO.  A leftover debug ``print(A)`` was
        removed here without any behavioral change to the return value.
    """
    # Homogeneous coordinates so translation can be expressed as a matrix product.
    R = np.hstack([r, np.ones((r.shape[0], 1))])
    S = np.hstack([s, np.ones((s.shape[0], 1))])
    # Solve S @ A ~= R in the least-squares sense: `s @ A => r'`.
    A, residuals, rank, singular_values = np.linalg.lstsq(S, R, rcond=None)
    # residuals is an empty array unless the system is overdetermined and
    # full-rank; sum() then yields 0.
    return A, sum(residuals)
# def reestimate_geometric_transformation_least_square(r_inliers, s_inliers): => is the same as calc_geometric_transformation
# # Once a transformation with a sufficient number of inliers is found, all keypoint correspondences consistent with it are used to re-estimate the transformation in terms of least squares
# pass
def estimate_geometric_transformation_ransac(r,s):
s_star = | np.array([[-kp[0], kp[1]] for kp in s]) | numpy.array |
import pickle
import numpy as np
import pandas as pd
from src.src_vvCV_MDMP.vv_CV_MDMP import *
from South_Function.South_function_trainer import *
##
# Example for vv_CV_MDMP
def my_func_1(X):
    """First test integrand: quadratic trend plus a Gaussian-damped sine bump."""
    bump = torch.sin(X * math.pi) * torch.exp(-1. * X.pow(2))
    return 1 + X + X ** 2 + bump
def my_func_2(X):
    """Second test integrand: rescaled trend and bump of my_func_1's shape."""
    bump = 1.75 * torch.sin(X * math.pi) * torch.exp(-1. * X.pow(2))
    return 1.5 + X + 1.5 * (X ** 2) + bump
## Varying one of distributions -- check the effect of the closeness of target distributions
# Five settings (sit0..sit4): the first target distribution is fixed at N(0, 1)
# while the second target's variance grows from 1.0 to 1.25, making the two
# integration problems progressively less similar.
# Setting 0: both targets are N(0, 1).
mu_1_sit0 = torch.zeros(1,1)
cov_1_sit0= torch.eye(1)
mu_2_sit0 = torch.zeros(1,1)
cov_2_sit0= torch.eye(1)
means_tuple_sit0 = (mu_1_sit0, mu_2_sit0)
covs_tuple_sit0 = (cov_1_sit0, cov_2_sit0)
# Setting 1: second target N(0, 1.1).
mu_1_sit1 = torch.zeros(1,1)
cov_1_sit1= torch.eye(1)
mu_2_sit1 = torch.zeros(1,1)
cov_2_sit1= torch.eye(1) * 1.1
means_tuple_sit1 = (mu_1_sit1, mu_2_sit1)
covs_tuple_sit1 = (cov_1_sit1, cov_2_sit1)
# Setting 2: second target N(0, 1.15).
mu_1_sit2 = torch.zeros(1,1)
cov_1_sit2= torch.eye(1)
mu_2_sit2 = torch.zeros(1,1)
cov_2_sit2= torch.eye(1) * 1.15
means_tuple_sit2 = (mu_1_sit2, mu_2_sit2)
covs_tuple_sit2 = (cov_1_sit2, cov_2_sit2)
# Setting 3: second target N(0, 1.2).
mu_1_sit3 = torch.zeros(1,1)
cov_1_sit3= torch.eye(1)
mu_2_sit3 = torch.zeros(1,1)
cov_2_sit3= torch.eye(1) * 1.2
means_tuple_sit3 = (mu_1_sit3, mu_2_sit3)
covs_tuple_sit3 = (cov_1_sit3, cov_2_sit3)
# Setting 4: second target N(0, 1.25).
mu_1_sit4 = torch.zeros(1,1)
cov_1_sit4= torch.eye(1)
mu_2_sit4 = torch.zeros(1,1)
cov_2_sit4= torch.eye(1) * 1.25
means_tuple_sit4 = (mu_1_sit4, mu_2_sit4)
covs_tuple_sit4 = (cov_1_sit4, cov_2_sit4)
# All five ((means, covs)) settings in the order they are evaluated below.
tuple_of_meanscovstuple = ((means_tuple_sit0, covs_tuple_sit0), (means_tuple_sit1, covs_tuple_sit1),(means_tuple_sit2, covs_tuple_sit2), (means_tuple_sit3, covs_tuple_sit3), (means_tuple_sit4, covs_tuple_sit4))
#
# Ground-truth integral values per setting: one row per setting, one column
# per integrand (my_func_1, my_func_2).
true_vals = torch.Tensor([[2, 3],[2, 3.15], [2, 3.225], [2, 3.3], [2, 3.375]])
true_vals.size() # 2
true_vals[0].size()
# Initialize the class
# Experiment size: 100 independent replications, 50 samples per distribution,
# over the 5 distribution settings defined above.
no_replica = 100
set_of_ss = 50
no_sets = 5
# Trainer wrapping the vv-CV model/objective with its tuning and fitting
# hyper-parameters (learning rates, epochs, regularization constants).
my_example = toy_example_MDMP(funcs= (my_func_1, my_func_2), sample_size_per_dist = set_of_ss, num_rep = no_replica, \
                              vv_CV_model=VV_CV_vectorvaluedfuncs_model_MDMP, \
                              vv_CV_obj = penalized_ls_objective_vectorvaluedfunc_MDMP, \
                              prior_kernel = stein_matrix_valued_kernel , base_kernel=rbf_kernel, \
                              batch_size_tune = 5, flag_if_use_medianheuristic=False, beta_cstkernel=0, lr_tune=0.05,\
                              epochs_tune=30, verbose_tune=False, \
                              regularizer_const = 1e-3, regularizer_const_FB=1, batch_size=5, lr=1e-3, epochs=400, \
                              verbose=False)
# Run the algorithm and save outputs
# Each result tensor has shape (no_sets, no_replica, num_funcs).
MyvvCV_ests, MysvCV_ests, MysvCV_closed_form_sols = my_example.varying_distrbutions_multiruns(tuple_of_meanscovstuple)
#
# Per-setting mean squared errors for the three estimators.
MSE_MyvvCV_ests = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_ests = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_closed_form_sols = torch.zeros(len(tuple_of_meanscovstuple))
#
# Matching standard errors (std of squared errors / sqrt(#settings)).
MSE_MyvvCV_ests_std = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_ests_std = torch.zeros(len(tuple_of_meanscovstuple))
MSE_MysvCV_closed_form_sols_std = torch.zeros(len(tuple_of_meanscovstuple))
#
for i in range(len(tuple_of_meanscovstuple)):
    # Ground truth for setting i, broadcast against the (no_replica, 2) estimates.
    cur_task_true_vals = true_vals[i].unsqueeze(dim=0)
    assert cur_task_true_vals.size() == torch.Size([1, len(tuple_of_meanscovstuple[0][0])])
    MSE_MyvvCV_ests[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MyvvCV_ests_std[i] = (MyvvCV_ests[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
    MSE_MysvCV_ests[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MysvCV_ests_std[i] = (MysvCV_ests[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
    MSE_MysvCV_closed_form_sols[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).mean()
    MSE_MysvCV_closed_form_sols_std[i] = (MysvCV_closed_form_sols[i,:,:] - cur_task_true_vals).pow(2).std()/((len(tuple_of_meanscovstuple) * torch.ones(1)).sqrt())
# Stack into a (3, no_sets) array: rows = vv-CV, sv-CV, closed-form sv-CV.
MSE_dat = torch.stack((MSE_MyvvCV_ests, MSE_MysvCV_ests, MSE_MysvCV_closed_form_sols), dim=0).detach().numpy()
MSE_dat
# Plot
# Form a pd.dataframe
for i in range(no_sets):
# vv-CV
VV_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MyvvCV_ests[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, np.repeat('vv-CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
cur_vv_CV_est_f1_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
vv_CV_est_f1_df = cur_vv_CV_est_f1_df
if i >= 1:
vv_CV_est_f1_df = vv_CV_est_f1_df.append(cur_vv_CV_est_f1_df)
VV_cvest_funcidx_methodidx_f2 = list(zip(np.abs(MyvvCV_ests[i, :, 1].detach().numpy() - true_vals[i, 1].detach().numpy())**2, np.repeat('vv-CV', no_replica), np.repeat("Set {}".format(i), no_replica)))
cur_vv_CV_est_f2_df = pd.DataFrame(data=VV_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
vv_CV_est_f2_df = cur_vv_CV_est_f2_df
if i >= 1:
vv_CV_est_f2_df = vv_CV_est_f2_df.append(cur_vv_CV_est_f2_df)
vv_CV_est_giant_f1f2 = vv_CV_est_f1_df.append(vv_CV_est_f2_df)
# CF -- should use sv-CV_closed form sols
CF_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MysvCV_closed_form_sols[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, np.repeat('CF', no_replica), np.repeat("Set {}".format(i), no_replica)))
cur_CF_est_f1_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f1, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
CF_est_f1_df = cur_CF_est_f1_df
if i >= 1:
CF_est_f1_df = CF_est_f1_df.append(cur_CF_est_f1_df)
CF_cvest_funcidx_methodidx_f2 = list(zip(np.abs(MysvCV_closed_form_sols[i, :, 1].detach().numpy() - true_vals[i, 1].detach().numpy())**2, np.repeat('CF', no_replica), np.repeat("Set {}".format(i), no_replica)))
cur_CF_est_f2_df = pd.DataFrame(data=CF_cvest_funcidx_methodidx_f2, columns=['cv_est', 'method_idx', 'setting'])
if i == 0:
CF_est_f2_df = cur_CF_est_f2_df
if i >= 1:
CF_est_f2_df = CF_est_f2_df.append(cur_CF_est_f2_df)
CF_est_giant_f1f2 = CF_est_f1_df.append(CF_est_f2_df)
# sv-CV
SV_cvest_funcidx_methodidx_f1 = list(zip(np.abs(MysvCV_ests[i, :, 0].detach().numpy() - true_vals[i, 0].detach().numpy())**2, | np.repeat('CV', no_replica) | numpy.repeat |
import cv2
import numpy as np
import argparse
import os
'''
Script to apply color transfer from a source reference image to a target input image
Theory:
https://www.scss.tcd.ie/Rozenn.Dahyot/pdf/pitie08bookchapter.pdf
https://www.cse.cuhk.edu.hk/leojia/all_final_papers/color_cvpr05.PDF
http://www.inf.ed.ac.uk/teaching/courses/vis/lecture_notes/lecture6.pdf
'''
def read_image(image):
    """Return a BGR numpy image from either a file path or an existing array.

    Args:
        image: filesystem path (str) or an already-loaded np.ndarray.

    Raises:
        ValueError: for any other input type.
    """
    if isinstance(image, np.ndarray):
        # already a numpy image - pass it through untouched
        return image
    if isinstance(image, str):
        # load from disk; OpenCV reads images as BGR
        return cv2.imread(image, cv2.IMREAD_COLOR)
    # PIL.Image support could be added here later
    raise ValueError("Unexpected image type. Either a path or a np.ndarray are supported")
def scale_img(source=None, target=None):
    """Resize ``source`` to the same width/height as ``target``."""
    height = int(target.shape[0])
    width = int(target.shape[1])
    # cv2.resize takes (width, height), i.e. the transposed shape order
    return cv2.resize(source, (width, height), interpolation=cv2.INTER_AREA)
def expand_img(image=None):
    """Ensure the image has a trailing channel axis (grayscale -> HxWx1)."""
    if image.ndim < 3:
        return image[:, :, np.newaxis]
    return image
def _imstats(image, calc='direct'):
"""
Calculate mean and standard deviation of an image along each channel.
Using individual channels there's a very small difference with array forms,
doesn't change the results
Parameters:
-------
image: NumPy array OpenCV image
calc: how to perform the canculation (differences are minimal,
only included for completion)
Returns:
-------
Mean (mu) and standard deviations (sigma)
"""
if calc == 'reshape':
# reshape image from (H x W x 3) to (3 x HW) for vectorized operations
image = image.astype("float32").reshape(-1, 3).T
# calculate mean
mu = np.mean(image, axis=1, keepdims=False)
# calculate standard deviation
sigma = np.std(image, axis=1, keepdims=False)
elif calc == 'direct':
# calculate mean
mu = np.mean(image, axis=(0, 1), keepdims=True)
# calculate standard deviation
sigma = np.std(image, axis=(0, 1), keepdims=True)
elif calc == 'split':
# compute the mean and standard deviation of each channel independently
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
mu = [lMean, aMean, bMean]
sigma = [lStd, aStd, bStd]
# return the color statistics
return (mu, sigma)
def _scale_array(arr, clip=True, new_range=(0, 255)):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to new_range (default: [0, 255] range)
clip: if True, array will be limited with np.clip.
if False then input array will be min-max scaled to
range [max([arr.min(), 0]), min([arr.max(), 255])]
by default
new_range: range to be used for scaling
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
# scaled = arr.copy()
# scaled[scaled < 0] = 0
# scaled[scaled > 255] = 255
scaled = np.clip(arr, new_range[0], new_range[1])
# scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), new_range[0]]), min([arr.max(), new_range[1]]))
scaled = _min_max_scale(arr, new_range=new_range)
return scaled
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
def im2double(im):
    """Convert an image to float values in [0, 1].

    uint8 and uint16 inputs are divided by their type maximum; any
    floating dtype (float16/32/64) is accepted as-is.  The output is
    always clipped to [0, 1].

    Raises:
        TypeError: for unsupported dtypes.  (Previously a bare
            ``assert False``, which is stripped under ``python -O`` and
            gave no diagnostic.)
    """
    if im.dtype == 'uint8':
        out = im.astype('float') / 255
    elif im.dtype == 'uint16':
        out = im.astype('float') / 65535
    elif np.issubdtype(im.dtype, np.floating):
        # BUG FIX: the old check ``im.dtype == 'float'`` only matched
        # float64, so float32 images crashed on the assert below.
        out = im
    else:
        raise TypeError("im2double: unsupported image dtype %s" % im.dtype)
    return np.clip(out, 0, 1)
def bgr2ycbcr(img, only_y=True):
    '''BGR version of matlab rgb2ycbcr.

    OpenCV's cv2.COLOR_BGR2YCrCb uses different coefficients than
    MATLAB's conversion, hence this re-implementation.

    only_y: return only the Y channel when True.

    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    work = img.astype(np.float32)
    if src_dtype != np.uint8:
        # float input is in [0, 1]; bring it to [0, 255] for the matrix
        work *= 255.
    if only_y:
        result = np.dot(work, [24.966, 128.553, 65.481]) / 255.0 + 16.0
    else:
        coeffs = [[24.966, 112.0, -18.214],
                  [128.553, -74.203, -93.786],
                  [65.481, -37.797, 112.0]]
        result = np.matmul(work, coeffs) / 255.0 + [16, 128, 128]
    if src_dtype == np.uint8:
        result = result.round()
    else:
        result /= 255.
    return result.astype(src_dtype)
def ycbcr2rgb_(img):
    '''Same as matlab ycbcr2rgb.

    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    work = img.astype(np.float32)
    if src_dtype != np.uint8:
        work *= 255.
    inv_coeffs = [[0.00456621, 0.00456621, 0.00456621],
                  [0, -0.00153632, 0.00791071],
                  [0.00625893, -0.00318811, 0]]
    offset = [-222.921, 135.576, -276.836]
    result = np.matmul(work, inv_coeffs) * 255.0 + offset
    # clamp into the valid 8-bit range
    np.putmask(result, result > 255, 255)
    np.putmask(result, result < 0, 0)
    if src_dtype == np.uint8:
        result = result.round()
    else:
        result /= 255.
    return result.astype(src_dtype)
def ycbcr2rgb(img, only_y=True):
    '''
    BGR version of matlab ycbcr2rgb (cv2.COLOR_YCrCb2BGR uses different
    coefficients than MATLAB's conversion).

    only_y is accepted for signature symmetry with bgr2ycbcr but is not
    used by this function.

    Input:
        uint8, [0, 255]
        float, [0, 1]
    '''
    src_dtype = img.dtype
    work = img.astype(np.float32)
    if src_dtype != np.uint8:
        work *= 255.
    forward = np.array([[24.966, 128.553, 65.481],
                        [112, -74.203, -37.797],
                        [-18.214, -93.786, 112.0]])
    # invert the forward (bgr -> ycbcr) matrix to go back
    backward = np.linalg.inv(forward.T) * 255
    offset = np.array([[[16, 128, 128]]])
    result = np.clip(np.dot(work - offset, backward), 0, 255)
    if src_dtype == np.uint8:
        result = result.round()
    else:
        result /= 255.
    return result.astype(src_dtype)
def replace_channels(source=None, target=None, ycbcr = True, hsv = False, transfersv = False):
    """
    Extracts channels from source img and replaces the same channels
    from target, then returns the converted image.
    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
        ycbcr: replace the color channels (Cb and Cr)
        hsv: replace the hue channel
        transfersv: if using hsv option, can also transfer the
            mean/std of the S and V channels
    Returns:
        target: transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    if source.shape != target.shape:
        source = scale_img(source, target)
    if ycbcr:
        # keep the target's luma, take chroma (Cb/Cr) from the reference
        tgt_ycc = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
        y_tgt, _, _ = cv2.split(tgt_ycc)
        src_ycc = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
        _, cb_src, cr_src = cv2.split(src_ycc)
        merged = cv2.merge([y_tgt, cb_src, cr_src])
        target = cv2.cvtColor(merged, cv2.COLOR_YCR_CB2BGR)
    if hsv:
        # take hue from the reference; optionally also match S/V statistics
        tgt_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
        _, s_tgt, v_tgt = cv2.split(tgt_hsv)
        src_hsv = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
        h_src, _, _ = cv2.split(src_hsv)
        if transfersv:
            matched = stats_transfer(target=tgt_hsv, source=src_hsv)
            _, s_matched, v_matched = cv2.split(matched)
            merged = cv2.merge([h_src, s_matched, v_matched])
        else:
            merged = cv2.merge([h_src, s_tgt, v_tgt])
        target = cv2.cvtColor(merged, cv2.COLOR_HSV2BGR)
    return target.astype('uint8')
def hue_transfer(source=None, target=None):
    """Transfer the hue statistics of ``source`` onto ``target``.

    The target's S and V channels are preserved; the hue channel is
    replaced by the mean/std-matched hue produced by stats_transfer.
    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
    Returns:
        transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    tgt_hsv = cv2.cvtColor(target, cv2.COLOR_BGR2HSV)
    _, sat, val = cv2.split(tgt_hsv)
    ref_hsv = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
    matched = stats_transfer(target=tgt_hsv, source=ref_hsv)
    hue, _, _ = cv2.split(matched)
    out_hsv = cv2.merge([hue, sat, val])
    return cv2.cvtColor(out_hsv, cv2.COLOR_HSV2BGR).astype('uint8')
def luminance_transfer(source=None, target=None):
    """Transfer the luminance (Y) statistics of ``source`` onto ``target``.

    Cb/Cr of the target are preserved; Y is replaced by the
    mean/std-matched Y channel from stats_transfer.
    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
    Returns:
        transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    tgt_ycc = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
    _, cb, cr = cv2.split(tgt_ycc)
    ref_ycc = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
    matched = stats_transfer(target=tgt_ycc, source=ref_ycc)
    luma, _, _ = cv2.split(matched)
    out_ycc = cv2.merge([luma, cb, cr])
    return cv2.cvtColor(out_ycc, cv2.COLOR_YCR_CB2BGR).astype('uint8')
def ycbcr_transfer(source=None, target=None, keep_y=True, histo_match=False):
    """Mean/std transfer performed in YCbCr space.

    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
        keep_y: keep the original target Y channel unchanged.
        histo_match: histogram-match the reference to the target before
            transferring the statistics (combined with keep_y, only the
            color channels end up modified).
    Returns:
        transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    tgt_ycc = cv2.cvtColor(target, cv2.COLOR_BGR2YCR_CB)
    if keep_y:
        y_original, _, _ = cv2.split(tgt_ycc)
    ref_ycc = cv2.cvtColor(source, cv2.COLOR_BGR2YCR_CB)
    if histo_match:
        ref_ycc = histogram_matching(reference=ref_ycc, image=tgt_ycc)
    out_ycc = stats_transfer(target=tgt_ycc, source=ref_ycc)
    if keep_y:
        _, cb, cr = cv2.split(out_ycc)
        out_ycc = cv2.merge([y_original, cb, cr])
    return cv2.cvtColor(out_ycc, cv2.COLOR_YCR_CB2BGR).astype('uint8')
def lab_transfer(source=None, target=None):
    """Mean/std transfer performed in CIELAB space.

    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
    Returns:
        transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    tgt_lab = cv2.cvtColor(target, cv2.COLOR_BGR2LAB)
    ref_lab = cv2.cvtColor(source, cv2.COLOR_BGR2LAB)
    out_lab = stats_transfer(target=tgt_lab, source=ref_lab)
    return cv2.cvtColor(out_lab, cv2.COLOR_LAB2BGR).astype('uint8')
def stats_transfer(source=None, target=None):
    """Adapt target's per-channel (mean, std) to source's (mean, std).

    img_o = (img_i - mean(img_i)) / std(img_i) * std(img_r) + mean(img_r)
    Args:
        target: bgr numpy array of input image.
        source: bgr numpy array of reference image.
    Returns:
        transfered bgr numpy array of input image.
    """
    target = read_image(target)
    source = read_image(source)
    mean_tgt, std_tgt = _imstats(target)
    mean_ref, std_ref = _imstats(source)
    normalized = (target - mean_tgt) / std_tgt
    transferred = normalized * std_ref + mean_ref
    # clamp into the valid [0, 255] range before casting back
    return _scale_array(transferred).astype('uint8')
def _match_cumulative_cdf(source, template):
"""
Return modified source array so that the cumulative density function of
its values matches the cumulative density function of the template.
"""
src_values, src_unique_indices, src_counts = np.unique(source.ravel(),
return_inverse=True,
return_counts=True)
tmpl_values, tmpl_counts = np.unique(template.ravel(), return_counts=True)
# calculate normalized quantiles for each array
src_quantiles = np.cumsum(src_counts) / source.size
tmpl_quantiles = np.cumsum(tmpl_counts) / template.size
# use linear interpolation of cdf to find new pixel values = interp(image, bins, cdf)
interp_a_values = np.interp(src_quantiles, tmpl_quantiles, tmpl_values)
# reshape to original image shape and return
return interp_a_values[src_unique_indices].reshape(source.shape)
def histogram_matching(reference=None, image=None, clip=None):
    """
    Adjust an image so that its cumulative histogram matches that of another.
    The adjustment is applied separately for each channel.
    (https://en.wikipedia.org/wiki/Histogram_matching)

    Parameters
    ----------
    image : ndarray
        Input image. Can be gray-scale or in color.
    reference : ndarray
        Image to match histogram of. Must have the same number of channels
        as image.
    clip : when truthy, the matched result is additionally passed through
        _scale_array with this clip setting.

    Returns
    -------
    matched : ndarray
        Transformed input image (uint8).

    Raises
    ------
    ValueError
        Thrown when the number of channels in the input image and the
        reference differ.

    References
    ----------
    .. [1] http://paulbourke.net/miscellaneous/equalisation/
    .. [2] https://github.com/scikit-image/scikit-image/blob/master/skimage/exposure/histogram_matching.py
    """
    image = read_image(image)          # target
    reference = read_image(reference)  # reference
    # add a channel axis to grayscale inputs
    image = expand_img(image)
    reference = expand_img(reference)
    if image.ndim != reference.ndim:
        raise ValueError('Image and reference must have the same number '
                         'of channels.')
    if image.shape[-1] != reference.shape[-1]:
        raise ValueError('Number of channels in the input image and '
                         'reference image must match!')
    matched = np.empty(image.shape, dtype=image.dtype)
    for ch in range(image.shape[-1]):
        matched[..., ch] = _match_cumulative_cdf(image[..., ch],
                                                 reference[..., ch])
    if clip:
        matched = _scale_array(matched, clip=clip)
    return matched.astype("uint8")
def SOTransfer(source, target, steps=10, batch_size=5, reg_sigmaXY=16.0, reg_sigmaV=5.0, clip=False):
    """
    Color Transform via Sliced Optimal Transfer, ported by @iperov
    https://dcoeurjo.github.io/OTColorTransfer

    source - any float range any channel image
    target - any float range any channel image, same shape as src
    steps - number of solver steps
    batch_size - solver batch size
    reg_sigmaXY - apply regularization and sigmaXY of filter, otherwise set to 0.0
    reg_sigmaV - sigmaV of filter

    Note: the result is stochastic (random projection directions).
    """
    source = read_image(source).astype("float32")
    target = read_image(target).astype("float32")
    if not np.issubdtype(source.dtype, np.floating):
        raise ValueError("source value must be float")
    if not np.issubdtype(target.dtype, np.floating):
        raise ValueError("target value must be float")
    # expand dimensions if grayscale
    target = expand_img(image=target)
    source = expand_img(image=source)
    # expand source to target size if smaller
    if source.shape != target.shape:
        source = scale_img(source, target)
    target_dtype = target.dtype
    h, w, c = target.shape
    new_target = target.copy()
    for _ in range(steps):
        advect = np.zeros((h * w, c), dtype=target_dtype)
        for _ in range(batch_size):
            # random unit direction to project both color clouds onto
            # (renamed from "dir", which shadowed the builtin)
            direction = np.random.normal(size=c).astype(target_dtype)
            direction /= np.linalg.norm(direction)
            proj_current = np.sum(new_target * direction, axis=-1).reshape((h * w))
            proj_source = np.sum(source * direction, axis=-1).reshape((h * w))
            order_current = np.argsort(proj_current)
            order_source = np.argsort(proj_source)
            # 1-D optimal transport along the projection: match sorted values
            delta = proj_source[order_source] - proj_current[order_current]
            for channel in range(c):
                advect[order_current, channel] += delta * direction[channel]
        new_target += advect.reshape((h, w, c)) / batch_size
    new_target = _scale_array(new_target, clip=clip)
    if reg_sigmaXY != 0.0:
        # regularize the color change, not the image, to preserve edges
        target_diff = new_target - target
        new_target = target + cv2.bilateralFilter(target_diff, 0, reg_sigmaV, reg_sigmaXY)
    return new_target.astype("uint8")
class Regrain:
def __init__(self, smoothness=1):
'''
Regraining post-process to match color of resulting image and
gradient of the source image.
Automated colour grading using colour distribution transfer.
<NAME> , <NAME> and <NAME> (2007) Computer Vision and Image
Understanding.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
Parameters:
smoothness (default=1, smoothness>=0): sets the fidelity of the
original gradient field. e.g. smoothness = 0 implies resulting
image = graded image.
'''
self.nbits = [4, 16, 32, 64, 64, 64]
self.smoothness = smoothness
self.level = 0
# self.eps = 2.2204e-16
def regrain(self, source=None, target=None):
'''
Keep gradient of target and color of source.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
Resulting image = regrain(I_original, I_graded, [self.smoothness])
'''
source = read_image(source) # ref
target = read_image(target) # target
#expand source to target size if smaller
if source.shape != target.shape:
source = scale_img(source, target)
target = target / 255.
source = source / 255.
img_arr_out = np.copy(target)
img_arr_out = self.regrain_rec(img_arr_out, target, source, self.nbits, self.level)
# clip
img_arr_out = _scale_array(img_arr_out, new_range=(0,1))
img_arr_out = (255. * img_arr_out).astype('uint8')
return img_arr_out
def regrain_rec(self, img_arr_out, target, source, nbits, level):
'''
Direct translation of matlab code.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
'''
[h, w, _] = target.shape
h2 = (h + 1) // 2
w2 = (w + 1) // 2
if len(nbits) > 1 and h2 > 20 and w2 > 20:
#Note: could use matlab-like bilinear imresize instead, cv2 has no antialias
resize_arr_in = cv2.resize(target, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_col = cv2.resize(source, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_out = cv2.resize(img_arr_out, (w2, h2), interpolation=cv2.INTER_LINEAR)
resize_arr_out = self.regrain_rec(resize_arr_out, resize_arr_in, resize_arr_col, nbits[1:], level+1)
img_arr_out = cv2.resize(resize_arr_out, (w, h), interpolation=cv2.INTER_LINEAR)
img_arr_out = self.solve(img_arr_out, target, source, nbits[0], level)
return img_arr_out
def solve(self, img_arr_out, target, source, nbit, level, eps=1e-6):
'''
Direct translation of matlab code.
https://github.com/frcs/colour-transfer/blob/master/regrain.m
'''
[width, height, c] = target.shape
first_pad_0 = lambda arr : np.concatenate((arr[:1, :], arr[:-1, :]), axis=0)
first_pad_1 = lambda arr : np.concatenate((arr[:, :1], arr[:, :-1]), axis=1)
last_pad_0 = lambda arr : | np.concatenate((arr[1:, :], arr[-1:, :]), axis=0) | numpy.concatenate |
#!/usr/bin/env python
# FormatCBFMultiTileHierarchy.py
#
# Reads a multi-tile CBF image, discovering it's detector geometery
# automatically, and builds a hierarchy if present
#
# $Id:
#
from __future__ import absolute_import, division, print_function
import pycbf
from dxtbx.format.FormatCBFMultiTile import FormatCBFMultiTile
from dxtbx.format.FormatStill import FormatStill
from dxtbx.model import Detector
from libtbx.utils import Sorry
from scitbx.matrix import col, sqr
class FormatCBFMultiTileHierarchy(FormatCBFMultiTile):
'''An image reading class multi-tile CBF files'''
@staticmethod
def understand(image_file):
    '''Check to see if this looks like a CBF format image, i.e. we can
    make sense of it: a multi-array file carrying the optional
    equipment_component column needed to build a hierarchy.'''
    handle = pycbf.cbf_handle_struct()
    handle.read_widefile(image_file, pycbf.MSG_DIGEST)
    # A hierarchy only makes sense when there are multiple arrays.
    if handle.count_elements() <= 1:
        return False
    # The optional equipment_component column is required for the hierarchy.
    try:
        handle.find_category("axis")
        handle.find_column("equipment_component")
    except Exception as e:
        if "CBF_NOTFOUND" not in str(e):
            raise e
        return False
    return True
def __init__(self, image_file, **kwargs):
    '''Initialise the image structure from the given file.

    Raises IncorrectFormatError when understand() rejects the file;
    otherwise all work is delegated to FormatCBFMultiTile.
    '''
    from dxtbx import IncorrectFormatError
    if not self.understand(image_file):
        raise IncorrectFormatError(self, image_file)
    FormatCBFMultiTile.__init__(self, image_file, **kwargs)
def _start(self):
    '''Parent class will open the image file as a cbf file handle, and keep
    the handle somewhere safe.'''
    # All handle management is delegated to FormatCBFMultiTile.
    FormatCBFMultiTile._start(self)
def _get_change_of_basis(self, axis_id):
    """ Get the 4x4 homogeneous coordinate matrix for a given axis. Assumes
    the cbf handle has been initialized.
    @param axis_id axis name of basis to get
    @return 4x4 change-of-basis matrix (scitbx sqr) combining the axis
    rotation/translation with its offset
    """
    cbf = self._get_cbf_handle()
    axis_type = cbf.get_axis_type(axis_id)
    offset = col(cbf.get_axis_offset(axis_id))
    vector = col(cbf.get_axis_vector(axis_id)).normalize()
    setting, increment = cbf.get_axis_setting(axis_id)
    # change of basis matrix in homogeneous coordinates
    if axis_type == "rotation":
        r3 = vector.axis_and_angle_as_r3_rotation_matrix(setting + increment, deg = True)
        cob = sqr((r3[0], r3[1], r3[2], offset[0],
                   r3[3], r3[4], r3[5], offset[1],
                   r3[6], r3[7], r3[8], offset[2],
                   0, 0, 0, 1))
    elif axis_type == "translation":
        translation = offset + vector * (setting + increment)
        cob = sqr((1,0,0,translation[0],
                   0,1,0,translation[1],
                   0,0,1,translation[2],
                   0,0,0,1))
    else:
        # BUGFIX: axis_type is a string, so the old "%d" format raised a
        # TypeError instead of reporting the unrecognized type; use %s.
        raise Sorry("Unrecognized vector type: %s" % axis_type)
    return cob
def _get_cummulative_change_of_basis(self, axis_id):
    """ Get the 4x4 homogenous coordinate matrix for a given axis, combining it with the change of
    basis matrices of parent axes with the same equipment component as the given axis. Assumes
    the cbf handle has been intialized
    @param axis_id axis name of basis to get
    @return (parent, change of basis matrix), where parent is None if the parent in the cbf file
    is ".". Parent is the axis that the top level axis in this chain of dependent axis depends on
    """
    cbf = self._get_cbf_handle()
    cob = self._get_change_of_basis(axis_id)
    parent_id = cbf.get_axis_depends_on(axis_id)
    # A "." parent terminates the dependency chain.
    if parent_id == ".":
        return None, cob
    # Only fold in parents that belong to the same equipment component.
    if cbf.get_axis_equipment_component(axis_id) != cbf.get_axis_equipment_component(parent_id):
        return parent_id, cob
    ancestor, parent_cob = self._get_cummulative_change_of_basis(parent_id)
    return ancestor, parent_cob * cob
def _add_panel_group(self, group_id, d):
    """ Adds a panel group to the detector d. If the group's parent hasn't been
    added yet, recursively add parents to the detector until the detector itself
    is reached.
    @param group_id name of a cbf axis
    @param d detector object
    @return the dxtbx panel group for group_id (created if necessary)
    """
    # group_id will only be "." if the panel being worked on has the same equipment_component name as the
    # last axis in the hierarchy, which isn't really sensible
    assert group_id != "."
    name = group_id
    # Re-use the group if it was already added to the detector tree.
    for subobj in d.iter_preorder():
        if subobj.get_name() == name:
            return subobj
    parent, cob = self._get_cummulative_change_of_basis(group_id)
    if parent is None:
        pg = d.hierarchy() # root object for the detector
        try:
            pg.get_D_matrix() # test to see if we've initialized the detector basis yet
        except RuntimeError as e:
            assert "DXTBX_ASSERT(D_)" in str(e)
        else:
            assert False # shouldn't be reached. Detector should be initialized only once.
    else:
        # Ensure all ancestor groups exist before attaching this one.
        parent_pg = self._add_panel_group(parent, d)
        pg = parent_pg.add_group()
    # set up the dxtbx d matrix. Note use of homogenous coordinates.
    origin = col((cob * col((0,0,0,1)))[0:3])
    fast = col((cob * col((1,0,0,1)))[0:3]) - origin
    slow = col((cob * col((0,1,0,1)))[0:3]) - origin
    pg.set_local_frame(
        fast.elems,
        slow.elems,
        origin.elems)
    pg.set_name(name)
    return pg
def _detector(self):
    '''Return a working detector instance, building the panel hierarchy
    from the axis equipment_component information in the cbf file.'''
    cbf = self._get_cbf_handle()
    d = Detector()
    # find the panel element names. Either array ids or section ids
    cbf.find_category("array_structure_list")
    try:
        cbf.find_column("array_section_id")
    except Exception as e:
        if "CBF_NOTFOUND" not in str(e): raise e
        cbf.find_column("array_id")
    panel_names = []
    # range instead of the Python 2-only xrange for py2/py3 compatibility
    for i in range(cbf.count_rows()):
        cbf.select_row(i)
        if cbf.get_typeofvalue() == 'null':
            continue
        val = cbf.get_value()
        if val not in panel_names:
            panel_names.append(val)
    # the cbf detector objects are not guaranteed to be in the same order
    # as this array of panel names. re-iterate, associating root axes of
    # detector objects with panel names
    detector_axes = []
    for i in range(len(panel_names)):
        cbf_detector = cbf.construct_detector(i)
        axis0 = cbf_detector.get_detector_surface_axes(0)
        detector_axes.append(axis0)
        cbf_detector.__swig_destroy__(cbf_detector)
    panel_names_detectororder = []
    cbf.find_category("array_structure_list")
    for detector_axis in detector_axes:
        cbf.find_column("axis_set_id")
        cbf.find_row(detector_axis)
        try:
            cbf.find_column("array_section_id")
        except Exception as e:
            if "CBF_NOTFOUND" not in str(e): raise e
            cbf.find_column("array_id")
        panel_names_detectororder.append(cbf.get_value())
    for panel_name in panel_names:
        cbf_detector = cbf.construct_detector(panel_names_detectororder.index(panel_name))
        # code adapted below from dxtbx.model.detector.DetectorFactory.imgCIF_H
        pixel = (cbf_detector.get_inferred_pixel_size(1),
                 cbf_detector.get_inferred_pixel_size(2))
        axis0 = cbf_detector.get_detector_surface_axes(0)
        axis1 = cbf_detector.get_detector_surface_axes(1)
        assert cbf.get_axis_depends_on(axis0) == axis1
        try:
            # NOTE(review): 'i' here is left over from the loop above, so every
            # panel queries the last element's size — likely it should be this
            # panel's detector index; confirm against detectors with mixed sizes.
            size = tuple(cbf.get_image_size_fs(i))
        except Exception as e:
            if "CBF_NOTFOUND" in str(e):
                # no array data in the file, it's probably just a cbf header. Get the image size elsewhere
                size = [0,0]
                cbf.find_category("array_structure_list")
                for axis in [axis0, axis1]:
                    cbf.find_column("axis_set_id")
                    cbf.find_row(axis)
                    cbf.find_column("precedence")
                    idx = int(cbf.get_value()) - 1
                    cbf.find_column("dimension")
                    size[idx] = int(cbf.get_value())
                assert size[0] != 0 and size[1] != 0
                # normalize to a tuple for consistency with the try branch
                size = tuple(size)
            else:
                raise e
        parent, cob = self._get_cummulative_change_of_basis(axis0)
        pg = self._add_panel_group(parent, d)
        p = pg.add_panel()
        fast = cbf.get_axis_vector(axis0)
        slow = cbf.get_axis_vector(axis1)
        origin = (cob * col((0,0,0,1)))[0:3]
        p.set_local_frame(fast, slow, origin)
        try:
            cbf.find_category('array_intensities')
            cbf.find_column('undefined_value')
            underload = cbf.get_doublevalue()
            overload = cbf.get_overload(0)
            trusted_range = (underload, overload)
        except Exception:
            trusted_range = (0.0, 0.0)
        p.set_pixel_size(tuple(map(float, pixel)))
        p.set_image_size(size)
        p.set_trusted_range(tuple(map(float, trusted_range)))
        p.set_name(panel_name)
        #p.set_px_mm_strategy(px_mm) FIXME
        cbf_detector.__swig_destroy__(cbf_detector)
        del cbf_detector
    return d
def _beam(self):
    '''Return a working beam instance.'''
    # Beam construction is fully delegated to the imgCIF handle reader.
    return self._beam_factory.imgCIF_H(self._get_cbf_handle())
def get_raw_data(self):
if self._raw_data is None:
import numpy
from scitbx.array_family import flex
from libtbx.containers import OrderedDict
self._raw_data = []
cbf = self._get_cbf_handle()
cbf.find_category('array_structure')
cbf.find_column('encoding_type')
cbf.select_row(0)
types = []
for i in xrange(cbf.count_rows()):
types.append(cbf.get_value())
cbf.next_row()
assert len(types) == cbf.count_rows()
# read the data
data = OrderedDict()
cbf.find_category("array_data")
for i in xrange(cbf.count_rows()):
cbf.find_column("array_id")
name = cbf.get_value()
cbf.find_column("data")
assert cbf.get_typeofvalue().find('bnry') > -1
if types[i] == 'signed 32-bit integer':
array_string = cbf.get_integerarray_as_string()
array = flex.int(numpy.fromstring(array_string, numpy.int32))
parameters = cbf.get_integerarrayparameters_wdims_fs()
array_size = (parameters[11], parameters[10], parameters[9])
elif types[i] == 'signed 64-bit real IEEE':
array_string = cbf.get_realarray_as_string()
array = flex.double( | numpy.fromstring(array_string, numpy.float) | numpy.fromstring |
import data_generation
import galsim
import numpy as np
import metacal
import pickle
import sys
from multiprocessing import Pool
import pandas as pd
import matplotlib.pyplot as plt
# matplotlib.rc('xtick', labelsize=20)
# matplotlib.rc('ytick', labelsize=20)
plt.rcParams.update({'font.size': 18})
import os.path
import seaborn as sns
# TABLE-MODIFYING FUNCTIONS
def element_columns(dataframe):
    """
    Add the four entries of each 2x2 shear response matrix in
    ``dataframe['R']`` as scalar columns 'R_11', 'R_12', 'R_21', 'R_22'
    (in place) and return the dataframe.
    """
    for row in (0, 1):
        for column in (0, 1):
            label = 'R_%d%d' % (row + 1, column + 1)
            dataframe[label] = [mat[row][column] for mat in dataframe['R']]
    return dataframe
def generate_df(results):
    """
    Load the raw results array into a pandas DataFrame with one named column
    per simulation parameter, then expand the shear response matrix into
    per-element columns.
    """
    column_names = ['original_gal', 'oshear_g1', 'oshear_g2', 'true_psf',
                    'deconv_psf', 'reconv_psf', 'shear_estimation_psf',
                    'cshear_dg1', 'cshear_dg2', 'shear_estimator',
                    'pixel_scale', 'R', 'reconvolved_noshear',
                    'reconvolved_noshear_e1', 'reconvolved_noshear_e2']
    return element_columns(pd.DataFrame(results, columns=column_names))
# INDIVIDUAL PLOTTING FUNCTIONS
def save_fig_to_plots(figname):
    """
    Save the current matplotlib figure as plots/<figname>.png without
    clobbering existing files: when the name is taken, append "(1)", "(2)",
    ... until a free name is found.
    """
    base = 'plots/' + figname
    if not os.path.exists(base + '.png'):
        plt.savefig(base + '.png')
        return
    # Find the first unused "(version)" suffix.
    version = 1
    while os.path.exists('%s(%d).png' % (base, version)):
        version += 1
    plt.savefig('%s(%d).png' % (base, version))
def plot_R_elements(dataframe, xaxis_column, color_column, filename, x_units='', color_units='arcseconds'):
    """
    Generates a plot of each element of the shear response matrix as a function of the parameter
    "xaxis_column"

    Parameters:
        dataframe: table with columns 'R_11', 'R_12', 'R_21', 'R_22' plus
            xaxis_column and color_column
        xaxis_column: column plotted on the x axis of every panel
        color_column: column used to colour the scatter points
        filename: base name handed to save_fig_to_plots
        x_units, color_units: unit strings for the axis / colorbar labels

    Side effects: saves the figure via save_fig_to_plots and shows it.
    """
    fig, axs = plt.subplots(2, 2, figsize=(8, 8))
    # fixing plotting scales
    # Shared y-limits: one range for the diagonal panels, one for the off-diagonal.
    diagmax = np.max([np.max(dataframe['R_11']), np.max(dataframe['R_22'])])
    # print(diagmax)
    diagmin = np.min([np.min(dataframe['R_11']), np.min(dataframe['R_22'])])
    offdiagmax = np.max([np.max(dataframe['R_21']), np.max(dataframe['R_12'])])
    # print(offdiagmax)
    offdiagmin = np.min([np.min(dataframe['R_21']), np.min(dataframe['R_12'])])
    scaling_factor = 1.01
    im = None
    for i in range(2):
        for j in range(2):
            element_string = 'R_' + str(i + 1) + str(j + 1)
            axs[i][j].set_title(element_string)
            im = axs[i][j].scatter(dataframe[xaxis_column], dataframe[element_string], c=dataframe[color_column], cmap='viridis', vmin=np.min(dataframe[color_column]), vmax=np.max(dataframe[color_column]))
            axs[i][j].tick_params(labelright=True)
            axs[i][j].set_xlabel(f"{xaxis_column} [{x_units}]")
            if i == j:
                # Diagonal limits are anchored at 2 and expanded slightly past the max.
                axs[i][j].set_ylim(top=2 + scaling_factor * (diagmax - 2), bottom=diagmin)
            else:
                axs[i][j].set_ylim(top=scaling_factor * offdiagmax, bottom=offdiagmin)
    # One shared horizontal colorbar below all four panels.
    cbaxes = fig.add_axes([0.2, 0.1, 0.6, 0.01])
    cb = fig.colorbar(im, ax=axs[:], orientation='horizontal', shrink=0.5, cax=cbaxes)
    plt.subplots_adjust(hspace=0.3, wspace=0.4, bottom=0.2)
    cb.set_label(f"{color_column} [{color_units}]")
    fig.suptitle(f"Shear response matrix element values vs {xaxis_column}")
    save_fig_to_plots(filename)
    plt.show()
def all_gaussian(dataframe):
    """
    Plot the shear response matrix elements against the ratio of Gaussian
    galaxy size to Gaussian PSF size for a master dataframe of Gaussian
    original galaxies and Gaussian PSFs.
    """
    galaxy_sigmas = [galaxy.sigma for galaxy in dataframe['original_gal']]
    psf_sigmas = [psf.sigma for psf in dataframe['true_psf']]
    dataframe['gal_sigma'] = galaxy_sigmas
    dataframe['psf_sigma'] = psf_sigmas
    dataframe['gal_psf_ratio'] = dataframe['gal_sigma'] / dataframe['psf_sigma']
    plot_R_elements(dataframe, 'gal_psf_ratio', 'gal_sigma',
                    'all_gaussian_gal_psf_ratio')
def all_moffat(dataframe):
    """
    Plot the shear response matrix elements against the ratio of galaxy size
    to PSF size for a master dataframe of Gaussian original galaxies and
    Moffat PSFs.
    """
    galaxy_fwhms = [galaxy.fwhm for galaxy in dataframe['original_gal']]
    psf_fwhms = [psf.fwhm for psf in dataframe['true_psf']]
    dataframe['gal_fwhm'] = galaxy_fwhms
    dataframe['moffat_psf_fwhm'] = psf_fwhms
    dataframe['gal_psf_ratio'] = dataframe['gal_fwhm'] / dataframe['moffat_psf_fwhm']
    plot_R_elements(dataframe, 'gal_psf_ratio', 'gal_fwhm',
                    'moffat_psfs_gal_psf_ratio')
def all_gaussian_different_ellipticies(dataframe, plotname):
    """
    Plot the fractional shear bias (estimated_gi - true_gi) / true_gi for
    both shear components, coloured by the galaxy-to-PSF size ratio.

    The estimated shear is recovered by applying the inverse of each shear
    response matrix R to the measured ellipticity of the reconvolved,
    unsheared image, then converting with g = e / 2.
    """
    dataframe['gal_sigma'] = [gal.sigma for gal in dataframe['original_gal']]
    dataframe['psf_sigma'] = [psf.sigma for psf in dataframe['true_psf']]
    dataframe['gal_psf_ratio'] = dataframe['gal_sigma'] / dataframe['psf_sigma']
    # Invert every response matrix and apply it to the measured ellipticities.
    R_inv_array = np.asarray([np.linalg.inv(R) for R in dataframe['R']])
    estimated_ellip_vec_list = []
    for i in range(len(dataframe['R'])):
        e1 = dataframe['reconvolved_noshear_e1'][i]
        e2 = dataframe['reconvolved_noshear_e2'][i]
        estimated_ellip_vec_list.append(np.array([[e1], [e2]]))
    estimated_shear_array = R_inv_array @ np.asarray(estimated_ellip_vec_list)
    # g = e / 2 converts ellipticity to shear.
    estimated_g1 = estimated_shear_array[:, 0, 0] / 2
    estimated_g2 = estimated_shear_array[:, 1, 0] / 2
    true_g1 = dataframe['oshear_g1'].to_numpy()[:]
    true_g2 = dataframe['oshear_g2'].to_numpy()[:]
    fig, axs = plt.subplots(1, 2, figsize=(15, 9))
    axs[0].set_xlabel('true_g1')
    # BUGFIX: the y labels previously read "est - est" in the numerator and a
    # duplicate set_ylabel call shadowed the first label; it is est - true.
    axs[0].set_ylabel(r'$(\frac{{g_1}_{est} - {g_1}_{true}}{{g_1}_{true}})$')
    axs[0].set_title('g1')
    axs[1].set_xlabel('true_g2')
    axs[1].set_ylabel(r'$(\frac{{g_2}_{est} - {g_2}_{true}}{{g_2}_{true}})$')
    axs[1].set_title('g2')
    y1 = (estimated_g1 - true_g1) / true_g1
    y2 = (estimated_g2 - true_g2) / true_g2
    im = axs[0].scatter(true_g1, y1, c=dataframe['gal_psf_ratio'][:], cmap='cividis')
    im = axs[1].scatter(true_g2, y2, c=dataframe['gal_psf_ratio'][:], cmap='cividis')
    plt.subplots_adjust(hspace=1.5, wspace=0.3)
    for ax in axs:
        ax.set_ylim(bottom=1e-3, top=1e-1)
        ax.set_yscale('log')
    cb = fig.colorbar(im, ax=axs[:], orientation='horizontal', shrink=0.45)
    cb.set_label('galaxy size to psf size ratio')
    fig.suptitle(r'$(\frac{{g_i,}_{est} - {g_i,}_{true}}{{g_i,}_{true}})$ by element')
    # save_fig_to_plots(plotname)
    plt.show()
def all_gaussian_varying_cshear_oshear_pixelscale(dataframe, filename, pixel_scale=0.2, cshear_dg=0.01):
    """
    INCOMPLETE

    Restrict the master dataframe to a single pixel scale and calibration
    shear step, then plot the fractional shear bias for the remaining rows.
    """
    print(dataframe.columns)
    print(dataframe.shape)
    # Keep only rows at the requested pixel scale and calibration shear step.
    keep = (dataframe['pixel_scale'] == pixel_scale) & (dataframe['cshear_dg1'] == cshear_dg)
    all_gaussian_different_ellipticies(dataframe[keep], filename)
def generate_images(dataframe):
"""
Generates images of one of the cases where R11 and R22 were the highest
"""
# find the row with the parameters that generated the highest R11
max_R_11 = np.max(dataframe['R_11'])
max_R_11_combo = dataframe[dataframe['R_11'] == max_R_11]
image_dict = {}
true_galaxy = max_R_11_combo['original_gal'].values[0]
image_dict['true_galaxy'] = true_galaxy
true_psf = max_R_11_combo['true_psf'].values[0]
image_dict['true_psf'] = true_psf
convolved_galaxy = galsim.Convolve(true_galaxy, true_psf)
image_dict['convolved_galaxy'] = convolved_galaxy
deconvolved_galaxy = galsim.Convolve(convolved_galaxy, galsim.Convolve(max_R_11_combo['deconv_psf'].values[0])) # TODO could be a possible problem line
image_dict['deconvolved_galaxy'] = deconvolved_galaxy
reconvolved_galaxy = galsim.Convolve(deconvolved_galaxy, max_R_11_combo['reconv_psf'].values[0])
image_dict['reconvolved_galaxy'] = reconvolved_galaxy
# important parameters
print('\n' * 4)
print('original galaxy sigma: ', true_galaxy.sigma)
print('true psf sigma: ', true_psf.sigma)
print('\n' * 4)
pixel_scale = 0.2
maximum_list = []
minimum_list = []
for name, obj in image_dict.items():
image_array = obj.drawImage(scale=pixel_scale).array
image_dict[name] = image_array
maximum_list.append(np.max(image_array))
minimum_list.append(np.min(image_array))
vmax = | np.max(maximum_list) | numpy.max |
# Copyright (c) 2021. <NAME>, Ghent University
from typing import List
import numpy as np
from matplotlib import pyplot as plt
plt.rcParams.update({"figure.max_open_warning": 0}) # ignore warning for too many open figures
__all__ = [
"grid_parameters",
"block_shaped",
"refine_axis",
"rc_from_blocks",
"blocks_from_rc",
"blocks_from_rc_3d",
"get_centroids",
"contour_extract",
"contours_vertices",
"refine_machine",
]
def grid_parameters(
x_lim: list = None, y_lim: list = None, grf: float = 1
) -> (np.array, int, int):
"""Generates grid parameters given dimensions.
:param x_lim: X limits
:param y_lim: Y limits
:param grf: Cell dimension
:return: (cell centers, number of rows, number of columns)
"""
if y_lim is None:
y_lim = [0, 1000]
else:
y_lim = y_lim
if x_lim is None:
x_lim = [0, 1500]
else:
x_lim = x_lim
grf = grf # Cell dimension
nrow = int(np.diff(y_lim) / grf) # Number of rows
ncol = int( | np.diff(x_lim) | numpy.diff |
import numpy as np
from constants import cgs_constants
from numpy import exp, sin, einsum
pi = np.pi
q = cgs_constants['q']
c = cgs_constants['c']
## Convert units to cgs from mks
class field_solver_2D(object):
def __init__(self):
    # Human-readable identifier for this solver implementation.
    self.name = '2-d electrostatic field solver'
def compute_mode_coefficients(self, fields, particles):
    """Deposit the particle distribution onto the field modes.

    Stores the spectral coefficients of the electrostatic potential on
    ``fields.mode_coefficients`` [statC s / cm].
    """
    # Phase of every (mode, particle) pair: k_m * x_p and k_n * y_p.  [no units]
    phase_x = np.outer(fields.k_x_vector, particles.x)
    phase_y = np.outer(fields.k_y_vector, particles.y)
    # Mode wavenumbers broadcast against the particle list.  [1/cm]
    _, kx_mat = np.meshgrid(particles.x, fields.k_x_vector)
    _, ky_mat = np.meshgrid(particles.y, fields.k_y_vector)
    # Per-particle mode weights, normalised by the bunch form factors.  [no units]
    weight_x = np.exp(1j * phase_x) * particles.lambda_twiddle(kx_mat, particles.x_extent) / fields.lambda_x_0
    weight_y = np.exp(1j * phase_y) * particles.lambda_twiddle(ky_mat, particles.y_extent) / fields.lambda_y_0
    deposited = np.einsum('mp, np -> mn', weight_x, weight_y)
    scaled = np.einsum('mn, mn -> mn', deposited, fields.k_sq_inv)
    # Poisson scaling with the relativistic 1/gamma^2 suppression.  [statC s / cm]
    fields.mode_coefficients = -scaled * particles.weight * 4. * pi * q * np.sqrt(2) / (particles.gamma ** 2)
    return
def compute_phi_mesh(self, fields, **kwargs):
    """Evaluate the potential on a uniform mesh.

    Keyword args:
        xmax, ymax: half-extent of the mesh (defaults fields.lambda_x_0 / _y_0)
        n_grid: number of mesh points per axis (default 10)

    Stores fields.phi_grid (shifted so its minimum is zero, [statC s / cm])
    together with the mesh coordinates fields.x_grid and fields.y_grid.
    """
    xmax = kwargs.get("xmax", fields.lambda_x_0)
    ymax = kwargs.get("ymax", fields.lambda_y_0)
    n_grid = kwargs.get("n_grid", 10)
    xarray = np.linspace(-xmax, xmax, n_grid)
    yarray = np.linspace(-ymax, ymax, n_grid)
    mesh_x, mesh_y = np.meshgrid(xarray, yarray)
    # Mode functions exp(-i(kx x + ky y)) evaluated on the mesh axes.  [no units]
    exp_x = np.exp(-1j * np.outer(fields.k_x_vector, xarray))
    exp_y = np.exp(-1j * np.outer(fields.k_y_vector, yarray))
    basis = np.einsum('mi, nj -> mnij', exp_x, exp_y)
    # Sum the modes weighted by the stored coefficients.  [statC s / cm]
    phi_vals = np.einsum('mn, mnij -> ij', fields.mode_coefficients, basis)
    fields.phi_grid = phi_vals - np.min(phi_vals)
    fields.x_grid = mesh_x
    fields.y_grid = mesh_y
    return
def compute_psi_particles(self, fields, particles):
    """Evaluate the potential at each particle position.

    Stores the per-particle values on ``fields.psi_vals`` [statC s / cm].
    """
    # exp(-i k x) factors for every (mode, particle) pair.  [no units]
    exp_x = np.exp(-1j * np.outer(fields.k_x_vector, particles.x))
    exp_y = np.exp(-1j * np.outer(fields.k_y_vector, particles.y))
    modes = np.einsum('mp, np -> mnp', exp_x, exp_y)
    # Sum the mode expansion at each particle position.
    fields.psi_vals = np.einsum('mn, mnp -> p', fields.mode_coefficients, modes)
    return
def compute_grad_psi(self, fields, particles):
phi = fields.mode_coefficients
## statC s / cm
kx4 = | np.einsum('m,i -> mi', fields.k_x_vector, particles.x) | numpy.einsum |
from .units import dimension, dimension_name, SI_symbol, pg_units
from .interfaces.astra import write_astra
from .interfaces.opal import write_opal
from .readers import particle_array
from .writers import write_pmd_bunch, pmd_init
from h5py import File
import numpy as np
import scipy.constants
# Rest mass of each supported species, in eV.
mass_of = {'electron': 0.51099895000e6 # eV/c
          }
# Speed of light in m/s.
c_light = 299792458.
# Elementary charge in C (from scipy.constants).
e_charge = scipy.constants.e
# Signed charge of each species in C.
charge_of = {'electron': e_charge, 'positron':-e_charge}
# Integer charge state (sign) of each species.
charge_state = {'electron': -1}
#-----------------------------------------
# Classes
class ParticleGroup:
"""
Particle Group class
Initialized on on openPMD beamphysics particle group:
h5 = open h5 handle, or str that is a file
data = raw data
The fundamental bunch data is stored in __dict__ with keys
np.array: x, px, y, py, z, pz, t, status, weight
str: species
where:
x, y, z are positions in units of [m]
px, py, pz are momenta in units of [eV/c]
t is time in [s]
weight is the macro-charge weight in [C], used for all statistical calulations.
species is a proper species name: 'electron', etc.
Derived data can be computed as attributes:
.gamma, .beta, .beta_x, .beta_y, .beta_z: relativistic factors [1].
.r, .theta: cylidrical coordinates [m], [1]
.pr, .ptheta: cylindrical momenta [1]
.energy : total energy [eV]
.kinetic_energy: total energy - mc^2 in [eV].
.p: total momentum in [eV/c]
.mass: rest mass in [eV]
.xp, .yp: Slopes x' = dx/dz = dpx/dpz and y' = dy/dz = dpy/dpz [1].
Statistics of any of these are calculated with:
.min(X)
.max(X)
.ptp(X)
.avg(X)
.std(X)
.cov(X, Y, ...)
with a string X as the name any of the properties above.
Useful beam physics quantities are given as attributes:
.norm_emit_x
.norm_emit_y
.higher_order_energy_spread
.average_current
All attributes can be accessed with brackets:
[key]
Additional keys are allowed for convenience:
['min_prop'] will return .min('prop')
['max_prop'] will return .max('prop')
['ptp_prop'] will return .ptp('prop')
['mean_prop'] will return .avg('prop')
['sigma_prop'] will return .std('prop')
['cov_prop1__prop2'] will return .cov('prop1', 'prop2')[0,1]
Units for all attributes can be accessed by:
.units(key)
Particles are often stored at the same time (i.e. from a t-based code),
or with the same z position (i.e. from an s-based code.)
Routines:
drift_to_z(z0)
drift_to_t(t0)
help to convert these. If no argument is given, particles will be drifted to the mean.
"""
def __init__(self, h5=None, data=None):
if h5:
# Allow filename
if isinstance(h5, str) and os.path.exists(h5):
with File(h5, 'r') as hh5:
pp = particle_paths(hh5)
assert len(pp) == 1, f'Number of particle paths in {h5}: {len(pp)}'
data = load_bunch_data(hh5[pp[0]])
else:
# Try dict
data = load_bunch_data(h5)
else:
# Fill out data. Exclude species.
data = full_data(data)
species = list(set(data['species']))
# Allow for empty data (len=0). Otherwise, check species.
if len(species) >= 1:
assert len(species) == 1, f'mixed species are not allowed: {species}'
data['species'] = species[0]
self._settable_array_keys = ['x', 'px', 'y', 'py', 'z', 'pz', 't', 'status', 'weight']
self._settable_scalar_keys = ['species']
self._settable_keys = self._settable_array_keys + self._settable_scalar_keys
for key in self._settable_keys:
self.__dict__[key] = data[key]
@property
def n_particle(self):
"""Total number of particles. Same as len """
return len(self)
@property
def n_alive(self):
"""Number of alive particles, defined by status == 1"""
return len(np.where(self.status==1)[0])
@property
def n_dead(self):
"""Number of alive particles, defined by status != 1"""
return self.n_particle - self.n_alive
def units(self, key):
"""Returns the units of any key"""
return pg_units(key)
@property
def mass(self):
"""Rest mass in eV"""
return mass_of[self.species]
@property
def species_charge(self):
"""Species charge in C"""
return charge_of[self.species]
@property
def charge(self):
return np.sum(self.weight)
# Relativistic properties
@property
def p(self):
"""Total momemtum in eV/c"""
return np.sqrt(self.px**2 + self.py**2 + self.pz**2)
@property
def energy(self):
"""Total energy in eV"""
return np.sqrt(self.px**2 + self.py**2 + self.pz**2 + self.mass**2)
@property
def kinetic_energy(self):
"""Kinetic energy in eV"""
return self.energy - self.mass
# Slopes. Note that these are relative to pz
@property
def xp(self):
return self.px/self.pz
@property
def yp(self):
return self.py/self.pz
# Cylindrical coordinates. Note that these are ali
@property
def r(self):
return np.hypot(self.x, self.y)
@property
def theta(self):
return np.arctan2(self.y, self.x)
@property
def pr(self):
return np.hypot(self.px, self.py)
@property
def ptheta(self):
return np.arctan2(self.py, self.px)
# Relativistic quantities
@property
def gamma(self):
"""Relativistic gamma"""
return self.energy/self.mass
@property
def beta(self):
"""Relativistic beta"""
return self.p/self.energy
@property
def beta_x(self):
"""Relativistic beta, x component"""
return self.px/self.energy
@property
def beta_y(self):
"""Relativistic beta, y component"""
return self.py/self.energy
@property
def beta_z(self):
"""Relativistic beta, z component"""
return self.pz/self.energy
def delta(self, key):
"""Attribute (array) relative to its mean"""
return getattr(self, key) - self.avg(key)
# Statistical property functions
def min(self, key):
"""Minimum of any key"""
return np.min(getattr(self, key))
def max(self, key):
"""Maximum of any key"""
return np.max(getattr(self, key))
def ptp(self, key):
"""Peak-to-Peak = max - min of any key"""
return np.ptp(getattr(self, key))
def avg(self, key):
"""Statistical average"""
dat = getattr(self, key) # equivalent to self.key for accessing properties above
if np.isscalar(dat):
return dat
return np.average(dat, weights=self.weight)
def std(self, key):
"""Standard deviation (actually sample)"""
dat = getattr(self, key)
if np.isscalar(dat):
return 0
avg_dat = self.avg(key)
return np.sqrt(np.average( (dat - avg_dat)**2, weights=self.weight))
def cov(self, *keys):
"""
Covariance matrix from any properties
Example:
P = ParticleGroup(h5)
P.cov('x', 'px', 'y', 'py')
"""
dats = np.array([ getattr(self, key) for key in keys ])
return np.cov(dats, aweights=self.weight)
# Beam statistics
@property
def norm_emit_x(self):
"""Normalized emittance in the x plane"""
mat = self.cov('x', 'px')
return np.sqrt(mat[0,0]*mat[1,1]-mat[0,1]**2)/self.mass
@property
def norm_emit_y(self):
mat = self.cov('y', 'py')
"""Normalized emittance in the y plane"""
return np.sqrt(mat[0,0]*mat[1,1]-mat[0,1]**2)/self.mass
@property
def higher_order_energy_spread(self, order=2):
"""
Fits a quadratic (order=2) to the Energy vs. time, subtracts it, finds the rms of the residual in eV.
If all particles are at the same
"""
if self.std('z') < 1e-12:
# must be at a screen. Use t
t = self.t
else:
# All particles at the same time. Use z to calc t
t = self.z/c_light
energy = self.energy
best_fit_coeffs = np.polynomial.polynomial.polyfit(t, energy, order)
best_fit = np.polynomial.polynomial.polyval(t, best_fit_coeffs)
return np.std(energy - best_fit)
@property
def average_current(self):
"""
Simple average current in A: charge / dt, with dt = (max_t - min_t)
If particles are in t coordinates, will try dt = (max_z - min_z)*c_light*beta_z
"""
dt = self.t.ptp() # ptp 'peak to peak' is max - min
if dt == 0:
# must be in t coordinates. Calc with
dt = self.z.ptp() / (self.avg('beta_z')*c_light)
return self.charge / dt
def __getitem__(self, key):
"""
Returns a property or statistical quantity that can be computed:
P['x'] returns the x array
P['sigmx_x'] returns the std(x) scalar
P['norm_emit_x'] returns the norm_emit_x scalar
Parts can also be given. Example: P[0:10] returns a new ParticleGroup with the first 10 elements.
"""
# Allow for non-string operations:
if not isinstance(key, str):
return particle_parts(self, key)
if key.startswith('cov_'):
subkeys = key[4:].split('__')
assert len(subkeys) == 2, f'Too many properties in covariance request: {key}'
return self.cov(*subkeys)[0,1]
elif key.startswith('delta_'):
return self.delta(key[6:])
elif key.startswith('sigma_'):
return self.std(key[6:])
elif key.startswith('mean_'):
return self.avg(key[5:])
elif key.startswith('min_'):
return self.min(key[4:])
elif key.startswith('max_'):
return self.max(key[4:])
elif key.startswith('ptp_'):
return self.ptp(key[4:])
else:
return getattr(self, key)
def where(self, x):
return self[np.where(x)]
# TODO: should the user be allowed to do this?
#def __setitem__(self, key, value):
# assert key in self._settable_keyes, 'Error: you cannot set:'+str(key)
#
# if key in self._settable_array_keys:
# assert len(value) == self.n_particle
# self.__dict__[key] = value
# elif key ==
# print()
# Simple 'tracking'
def drift(self, delta_t):
"""
Drifts particles by time delta_t
"""
self.x = self.x + self.beta_x * c_light * delta_t
self.y = self.y + self.beta_y * c_light * delta_t
self.z = self.z + self.beta_z * c_light * delta_t
self.t = self.t + delta_t
def drift_to_z(self, z=None):
if not z:
z = self.avg('z')
dt = (z - self.z) / (self.beta_z * c_light)
self.drift(dt)
# Fix z to be exactly this value
self.z = np.full(self.n_particle, z)
def drift_to_t(self, t=None):
"""
Drifts all particles to the same t
If no z is given, particles will be drifted to the average t
"""
if not t:
t = self.avg('t')
dt = t - self.t
self.drift(dt)
# Fix t to be exactly this value
self.t = np.full(self.n_particle, t)
# Writers
def write_astra(self, filePath, verbose=False):
write_astra(self, filePath, verbose=verbose)
def write_opal(self, filePath, verbose=False):
write_opal(self, filePath, verbose=verbose)
# openPMD
def write(self, h5, name=None):
"""
Writes to an open h5 handle, or new file if h5 is a str.
"""
if isinstance(h5, str):
g = File(h5, 'w')
pmd_init(g, basePath='/', particlesPath='/' )
else:
g = h5
write_pmd_bunch(g, self, name=name)
# New constructors
def split(self, n_chunks = 100, key='z'):
return split_particles(self, n_chunks=n_chunks, key=key)
# Resample
def resample(self, n):
"""
Resamples n particles.
"""
return resample(self, n)
# Internal sorting
def _sort(self, key):
    """Sorts internal arrays by key"""
    order = np.argsort(self[key])
    for name in self._settable_array_keys:
        self.__dict__[name] = self[name][order]
# Operator overloading
def __add__(self, other):
    """
    Overloads the + operator to join particle groups.
    Simply calls join_particle_groups and returns its result.
    """
    return join_particle_groups(self, other)
def __len__(self):
    """Number of particles, taken from the first settable array."""
    first = self._settable_array_keys[0]
    return len(self[first])
def __str__(self):
s = f'ParticleGroup with {self.n_particle} particles with total charge {self.charge} C'
return s
def __repr__(self):
memloc = hex(id(self))
return f'<ParticleGroup with {self.n_particle} particles at {memloc}>'
#-----------------------------------------
# helper funcion for ParticleGroup class
def load_bunch_data(h5):
    """
    Load particles from an open openPMD-style h5 group.

    Returns a dict with 'species', 'total_charge', the phase-space
    arrays ('x', 'px', 'y', 'py', 'z', 'pz', 't'), 'status', and a
    per-particle 'weight' array.
    """
    attrs = dict(h5.attrs)
    data = {}
    # NOTE(review): assumes speciesType is stored as bytes -- confirm writer.
    data['species'] = attrs['speciesType'].decode('utf-8') # String
    n_particle = int(attrs['numParticles'])
    data['total_charge'] = attrs['totalCharge']*attrs['chargeUnitSI']
    for key in ['x', 'px', 'y', 'py', 'z', 'pz', 't']:
        data[key] = particle_array(h5, key)
    if 'particleStatus' in h5:
        data['status'] = particle_array(h5, 'particleStatus')
    else:
        # No status stored: mark every particle alive.
        data['status'] = np.full(n_particle, 1)
    # Make sure weight is populated
    if 'weight' in h5:
        weight = particle_array(h5, 'weight')
        if len(weight) == 1:
            # Scalar weight: broadcast to every particle.
            weight = np.full(n_particle, weight[0])
    else:
        # No weights stored: share the total charge evenly.
        weight = np.full(n_particle, data['total_charge']/n_particle)
    data['weight'] = weight
    return data
def full_data(data, exclude=None):
    """
    Expand keyed data into np arrays, assuring all items share one length.

    Scalar or length-1 entries are broadcast with np.full to the common
    array length. If only scalars are present, length-1 arrays are
    returned (single-particle case).
    """
    # NOTE(review): `exclude` is accepted but never used here -- confirm intent.
    arrays = {}
    scalars = {}
    for key, value in data.items():
        if np.isscalar(value):
            scalars[key] = value
        elif len(value) == 1:
            scalars[key] = value[0]
        else:
            # must be array
            arrays[key] = np.array(value)
    # Check for single particle
    if not arrays:
        return {key: np.array([value]) for key, value in scalars.items()}
    # Array data should all have the same length
    lengths = [len(value) for value in arrays.values()]
    assert len(set(lengths)) == 1, f'arrays must have the same length. Found len: { {k:len(v) for k, v in arrays.items()} }'
    for key, value in scalars.items():
        arrays[key] = np.full(lengths[0], value)
    return arrays
def split_particles(particle_group, n_chunks = 100, key='z'):
"""
Splits a particle group into even chunks. Returns a list of particle groups.
Useful for creating slice statistics.
"""
# Sorting
zlist = getattr(particle_group, key)
iz = np.argsort(zlist)
# Split particles into chunks
plist = []
for chunk in | np.array_split(iz, n_chunks) | numpy.array_split |
# astar implementation with some inspiration from https://medium.com/@nicholas.w.swift/easy-a-star-pathfinding-7e6689c7f7b2
import os
import gflags
import sys
import numpy as np
from numpy import linalg as LA
import math
from operator import attrgetter
import cv2
import load_params_demo
import utils_demo
argv = gflags.FLAGS(sys.argv)
#colors
blue = [255, 0, 0]
red = [0, 0, 255]
green = [0, 255, 0]
white = [255, 255, 255]
black = [0, 0, 0]
class Measure_params:
    """Bundle of geometry parameters for a range/slice measurement."""

    def __init__(self, range, n_seg, centroid, slice_tol, ray_seg):
        # Store the constructor arguments verbatim, in declaration order.
        (self.range,
         self.n_seg,
         self.centroid,
         self.slice_tol,
         self.ray_seg) = range, n_seg, centroid, slice_tol, ray_seg
class Measurement:
def __init__(self):
    """Create an empty measurement; fields are populated later by init()."""
    self.geometry = self.Geometry()
    self.coords = self.Coords()
    self.ray = self.Ray()
    self.slice_tol = None
class Geometry:
    """Geometric description of the measurement region (filled by init)."""

    def __init__(self):
        # Every field starts unset; init() assigns them from the params.
        for field in ('range', 'n_seg', 'centroid', 'slice_tol', 'ray_seg'):
            setattr(self, field, None)
class Coords:
    """Point/slice coordinate buffers computed during a measurement."""

    def __init__(self):
        # All buffers start unset; they are computed during init().
        for field in ('point_coords', 'slice_coords', 'slice_keep_coords',
                      'slice_obs_coords', 'see_obs_list', 'pts_obs_list'):
            setattr(self, field, None)
class Ray:
    """Ray-casting grids: circle-edge endpoints and per-ray sample points."""

    def __init__(self):
        # Unset until init() computes the ray geometry.
        for field in ('edge_x', 'edge_y', 'ray_x', 'ray_y'):
            setattr(self, field, None)
def init(self, params, time_step, seq, img_coords, output_dir, resize=None):
# img = cv2.imread(output_dir + '{}_s{}_output.png'.format(time_step, seq))
img = cv2.imread(os.path.join(output_dir, '{}_s{}_output.png'.format(time_step, seq)))
obs_map = utils_demo.process_for_astar(img)
if resize is not None:
obs_map = cv2.resize(obs_map, resize.dim, interpolation=cv2.INTER_AREA)
obs_map = refine_map(obs_map)
self.geometry.range = params.range
self.geometry.n_seg = params.n_seg
self.geometry.centroid = params.centroid
self.geometry.ray_seg = params.ray_seg
self.geometry.slice_tol = params.slice_tol
#get all points from truth map that are within the circle
img_coords = np.asarray(img_coords)
X = img_coords[:, 1] #column
Y = img_coords[:, 0] #row
cx = self.geometry.centroid[1]
cy = self.geometry.centroid[0]
check = np.square((cx - X)) + np.square((cy - Y))
check = np.where(check < self.geometry.range ** 2)[0]
self.coords.point_coords = img_coords[check] #this is a numpy array
X = self.coords.point_coords[:, 1]
Y = self.coords.point_coords[:, 0]
sliceno = np.int32((math.pi + np.arctan2(Y - cy, X - cx)) * (self.geometry.n_seg / (2 * math.pi)) - \
self.geometry.slice_tol)
slice_coords = []
for un in np.arange(self.geometry.n_seg):
slice_coords.append(self.coords.point_coords[np.where(sliceno == un)[0]])
self.coords.slice_coords = slice_coords
#unit vectors for rays
thetas = np.linspace(0, 2*math.pi, self.geometry.n_seg, endpoint=False)
thetas = thetas + (thetas[1] - thetas[0]) / 2.
thetas = np.flip(thetas) + math.pi
ux = np.cos(thetas)
uy = np.sin(thetas)
if len(slice_coords) != thetas.shape[0]:
print("centroid", self.geometry.centroid)
assert len(slice_coords) == thetas.shape[0]
# exit(1)
self.ray.edge_x = np.int32(ux * self.geometry.range + cx)
self.ray.edge_y = np.int32(uy * self.geometry.range + cy)
#calculate rays
ray_disc = np.linspace(0, self.geometry.range, self.geometry.ray_seg, endpoint=True)
ray_disc = ray_disc.reshape((1, ray_disc.shape[0]))
ux = ux.reshape((ux.shape[0], 1))
uy = uy.reshape((uy.shape[0], 1))
self.ray.ray_x = np.multiply(ux, ray_disc).astype(np.int32) + cx
self.ray.ray_y = np.multiply(uy, ray_disc).astype(np.int32) + cy
slice_keep_coords = []
slice_obs_coords = []
see_obs_list = [] #"viewed" pixels which are obstacles
pts_obs_list = [] #list of points which are detected to hit an obstacle first
slice_coords = slice_coords[::-1]
for row_x, row_y, slice in zip(self.ray.ray_x, self.ray.ray_y, slice_coords):
if len(slice) == 0:
continue
x_keep = np.where((0 < row_x) & (row_x < obs_map.shape[1]))[0] # which indexes to keep from that row for x
row_x = row_x[x_keep]
row_y = row_y[x_keep]
y_keep = | np.where((0 < row_y) & (row_y < obs_map.shape[0])) | numpy.where |
from .ops import pTA1Op
from .compat import tt
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import colors
from matplotlib.animation import FuncAnimation
from mpl_toolkits.axes_grid1 import make_axes_locatable
import os
try:
from IPython.display import HTML
except:
pass
def RAxisAngle(axis, theta):
    """Rotation matrix for a rotation by angle `theta` about `axis`.

    `axis` is normalized internally; `theta` may be a scalar (returns a
    3x3 matrix) or an array (the trig terms broadcast, giving a 3x3xN
    stack, as used by latlon_to_xyz).
    """
    # Copy to float: an integer axis (e.g. [0, 0, 1]) would make the
    # in-place true-division below raise a casting error.
    axis = np.array(axis, dtype=float)
    axis /= np.sqrt(np.sum(axis ** 2))
    cost = np.cos(theta)
    sint = np.sin(theta)
    # Standard axis-angle (Rodrigues) rotation matrix.
    return np.array(
        [
            [
                cost + axis[0] * axis[0] * (1 - cost),
                axis[0] * axis[1] * (1 - cost) - axis[2] * sint,
                axis[0] * axis[2] * (1 - cost) + axis[1] * sint,
            ],
            [
                axis[1] * axis[0] * (1 - cost) + axis[2] * sint,
                cost + axis[1] * axis[1] * (1 - cost),
                axis[1] * axis[2] * (1 - cost) - axis[0] * sint,
            ],
            [
                axis[2] * axis[0] * (1 - cost) - axis[1] * sint,
                axis[2] * axis[1] * (1 - cost) + axis[0] * sint,
                cost + axis[2] * axis[2] * (1 - cost),
            ],
        ]
    )
def latlon_to_xyz(lat, lon):
    """Convert lat-lon points in radians to Cartesian points."""
    lat_arr = np.atleast_1d(lat)
    lon_arr = np.atleast_1d(lon)
    # Rotate the pole vector by -lat about x, then by lon about y.
    rot_lat = RAxisAngle([1.0, 0.0, 0.0], -lat_arr)
    rot_lon = RAxisAngle([0.0, 1.0, 0.0], lon_arr)
    pole = np.array([0.0, 0.0, 1.0])
    return np.einsum("ij...,jl...,l->i...", rot_lon, rot_lat, pole)
def compute_moll_grid(my, mx):
    """Compute Cartesian sky coordinates on a my-by-mx Mollweide grid."""
    root2 = np.sqrt(2)
    u, v = np.meshgrid(
        root2 * np.linspace(-2, 2, mx),
        root2 * np.linspace(-1, 1, my),
    )
    # Points outside the Mollweide ellipse become NaN.
    semi_minor = root2
    semi_major = 2 * root2
    v[(v / semi_minor) ** 2 + (u / semi_major) ** 2 > 1] = np.nan
    # Invert the projection:
    # https://en.wikipedia.org/wiki/Mollweide_projection
    aux = np.arcsin(v / root2)
    lat = np.arcsin((2 * aux + np.sin(2 * aux)) / np.pi)
    lon = 3 * np.pi / 2 + np.pi * u / (2 * root2 * np.cos(aux))
    # Back to Cartesian, this time on the *sky*
    cx = np.reshape(np.cos(lat) * np.cos(lon), [1, -1])
    cy = np.reshape(np.cos(lat) * np.sin(lon), [1, -1])
    cz = np.reshape(np.sin(lat), [1, -1])
    rot = RAxisAngle([1.0, 0.0, 0.0], -np.pi / 2)
    return rot @ np.concatenate((cx, cy, cz))
def mollweide_transform(my=150, mx=300):
    """Pixel-transform matrix evaluated on a my-by-mx Mollweide grid."""
    # Evaluate the polynomial basis at the grid's Cartesian sky points.
    x, y, z = compute_moll_grid(my=my, mx=mx)
    M = np.pi * pTA1Op()(x, y, z).eval()
    return M
def latlon_transform(lat, lon):
    """Pixel-transform matrix evaluated at the given lat/lon points (radians)."""
    # Flatten each Cartesian component before evaluating the basis op.
    flat = [axis.reshape(-1) for axis in latlon_to_xyz(lat, lon)]
    return np.pi * pTA1Op()(*flat).eval()
def get_moll_latitude_lines(dlat=np.pi / 6, npts=1000, niter=100):
res = []
latlines = np.arange(-np.pi / 2, np.pi / 2, dlat)[1:]
for lat in latlines:
theta = lat
for n in range(niter):
theta -= (2 * theta + np.sin(2 * theta) - np.pi * np.sin(lat)) / (
2 + 2 * np.cos(2 * theta)
)
x = np.linspace(-2 * np.sqrt(2), 2 * | np.sqrt(2) | numpy.sqrt |
import numpy as np
import math
def parse_POSCAR(POSCAR="POSCAR"):
""" Parse the POSCAR (or CONTCAR) file to extract crystal strucure
information. Currently only support VASP 5 format.
Return lattice matrix, lattic constants, angles between lattice vectors,
dictionary of atom numbers and dictionary of atomc coordinates.
Arguments:
-------------------
POSCAR : str
Input file name. Must be VASP 5 format.
Returns:
-------------------
latt_mat : array(float), dim = (3, 3)
Matrix consisting of lacttice vectors a, b and c.
latt_consts : list[float], dim = (1, 3)
a, b, c directions lattice constants of the cell.
angles : list[float], dim = (1, 3)
alpa (between b and c), beta (between a and c) and gamma (between a and b)
crystal angles (deg).
atomNum_Dict : dict['str': int]
zip atomNames and atomNums to form the dictionary.
Each key represents one atomic species.
atomCoor_Dict : dict['str': 2D array]
Each key represents one atomic species. Values are 2D arrays
of atomic coordinates. Dimension of 2D array is contingent to atomNums.
"""
fin = open(POSCAR, 'r')
poscar = fin.read().splitlines()
scaling_para = float(poscar[1])
abc = np.array([[float(i) for i in line.split()] for line in poscar[2:5]])
# lattice constants in angstrom
latt_mat = abc * scaling_para
length_a = np.linalg.norm(latt_mat[0, :], 2)
length_b = | np.linalg.norm(latt_mat[1, :], 2) | numpy.linalg.norm |
import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Upsample(nn.Module):
    """Anti-aliased upsampling with a fixed separable binomial blur kernel.

    The 1-D binomial row for `filt_size` is expanded to a 2-D kernel via
    an outer product, normalized, scaled by stride**2 to preserve signal
    magnitude, and applied with a grouped transposed convolution.
    """

    # Binomial (Pascal's triangle) filter rows, keyed by filter size.
    _FILTERS = {
        1: [1.],
        2: [1., 1.],
        3: [1., 2., 1.],
        4: [1., 3., 3., 1.],
        5: [1., 4., 6., 4., 1.],
        6: [1., 5., 10., 10., 5., 1.],
        7: [1., 6., 15., 20., 15., 6., 1.],
    }

    def __init__(self, channels, pad_type='repl', filt_size=4, stride=2):
        super().__init__()
        self.filt_size = filt_size
        self.filt_odd = np.mod(filt_size, 2) == 1
        self.pad_size = int((filt_size - 1) / 2)
        self.stride = stride
        self.off = int((self.stride - 1) / 2.)
        self.channels = channels
        if filt_size not in self._FILTERS:
            # The original elif chain left `a` unbound for other sizes.
            raise ValueError('filt_size must be in 1..7, got %s' % filt_size)
        a = np.array(self._FILTERS[filt_size])
        # Separable 2-D kernel via outer product. The original kept the
        # 1-D tensor, so `filt[None, None, :, :]` raised an IndexError.
        filt = torch.Tensor(a[:, None] * a[None, :])
        filt = filt / torch.sum(filt)
        filt = filt * (stride ** 2)
        self.register_buffer('filt', filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
        self.pad = get_pad_layer(pad_type)([1, 1, 1, 1])

    def forward(self, inp):
        """Upsample `inp` (N, C, H, W) by `stride` with the blur kernel."""
        ret_val = F.conv_transpose2d(self.pad(inp), self.filt, stride=self.stride, padding=1 + self.pad_size, groups=inp.shape[1])[:, :, 1:, 1:]
        if(self.filt_odd):
            return ret_val
        else:
            # Even filters produce one extra row/column; trim it.
            return ret_val[:, :, :-1, :-1]
def get_pad_layer(pad_type):
    """Map a padding-type string to the matching torch.nn padding layer class.

    Accepts 'refl'/'reflect', 'repl'/'replicate', and 'zero'.
    Raises ValueError for anything else.
    """
    if pad_type in ['refl', 'reflect']:
        return nn.ReflectionPad2d
    if pad_type in ['repl', 'replicate']:
        return nn.ReplicationPad2d
    if pad_type == 'zero':
        return nn.ZeroPad2d
    # The original printed a message and then hit an UnboundLocalError
    # on `return PadLayer`; raise a clear error instead.
    raise ValueError('Pad type [%s] not recognized' % pad_type)
class Upsample1D(nn.Module):
def __init__(self, pad_type='reflect', filt_size=3, stride=2, channels=None, pad_off=0):
super(Upsample1D, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [int(1. * (filt_size - 1) / 2), int(np.ceil(1. * (filt_size - 1) / 2))]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.)
self.channels = channels
# print('Filter size [%i]' % filt_size)
if(self.filt_size == 1):
a = np.array([1., ])
elif(self.filt_size == 2):
a = np.array([1., 1.])
elif(self.filt_size == 3):
a = np.array([1., 2., 1.])
elif(self.filt_size == 4):
a = np.array([1., 3., 3., 1.])
elif(self.filt_size == 5):
a = | np.array([1., 4., 6., 4., 1.]) | numpy.array |
from brian2 import *
from brian2.equations import refractory
from brian2.monitors import spikemonitor
#import random
import matplotlib.pyplot as plt
import numpy as np
def visualise_connectivity(S):
    """Plot a synapse group's source-target wiring and its index scatter."""
    n_src = len(S.source)
    n_tgt = len(S.target)
    figure(figsize=(10, 4))
    # Left panel: sources in one column, targets in another, with a line
    # drawn for every synapse.
    subplot(121)
    plot(zeros(n_src), arange(n_src), 'ok', ms=10)
    plot(ones(n_tgt), arange(n_tgt), 'ok', ms=10)
    for src, tgt in zip(S.i, S.j):
        plot([0, 1], [src, tgt], '-k')
    xticks([0, 1], ['Source', 'Target'])
    ylabel('Neuron index')
    xlim(-0.1, 1.1)
    ylim(-1, max(n_src, n_tgt))
    # Right panel: source index vs target index scatter.
    subplot(122)
    plot(S.i, S.j, 'ok')
    xlim(-1, n_src)
    ylim(-1, n_tgt)
    xlabel('Source neuron index')
    ylabel('Target neuron index')
#This code uses the Brian2 neuromorphic simulator code to implement
# a version of cyclic shift binding and unbinding based on the
# paper :High-Dimensional Computing with Sparse Vectors" by Laiho et al 2016.
# The vector representation is a block structure comprising slots
# where the number of slots is the vector dimension. In each slot there are a
# number of possible bit positions with one bit set per slot.
# In this implementation we implement the cyclic shift binding and unbinding
# operations in Brian2 by representing each slot as a neuron and the time delay
# of the neuron's spike as the bit position.
# To ensure that the Brian2 network is performing correctly the first section of the code
# computes the expected sparse bound vector.
# The neuromorphic equivalent is implemented as two Brian2 networks. The first network (net1) implements
# the cyclic binding and the second netwok (net2) implements the cyclic shift unbinding and the clean-up memory
# operation which compares the unbound vector with all the memory vectors to find the best match.
# The sparse bound vector resulting from net1 is passed to net2.
# Initialise the network parameters
slots_per_vector = 500 # This is the number of neurons used to represent a vector
bits_per_slot = 512 # This is the number of bit positions
mem_size = 1000 # The number of vectors against which the resulting unbound vector is compared
Num_bound = 20 # The number of vectors that are to be bound
input_delay = bits_per_slot # Time delay between adding cyclically shifted vectors to construct the bound vector is set to 'bits_per_slot' milliseconds.
#NB all timings use milliseconds and we can use a random seed if required.
#np.random.seed(654321)
y_low=0 # This is used to select the lowest index of the range of neurons that are to be displayed
y_high=slots_per_vector-1 # This is used to select the highest index of the range of neurons that are to be displayed
delta = (Num_bound)*bits_per_slot #This determins the time period over which the Brian2 simulation is to be run.
# Generate a random matrix (P_matrix) which represents all of the sparse vectors that are to be used.
# This matrix has columns equal to the number of slots in each vector with the number of rows equal to the memory size (mem_size)
P_matrix = np.random.randint(0, bits_per_slot, size=(mem_size,slots_per_vector))
#print(P_matrix)
'''
for n in range(0,Num_bound):
print(P_matrix[n])
print()
'''
#This section of the code implements the cyclic shift binding in the Brian2 network (net1)
net1=Network()
#We first create an array of time delays which will be used to select the first Num_bound vectors from
# the P_matrix with a time delay (input_delay) between each vector.
array1 = np.ones(mem_size)*slots_per_vector*bits_per_slot
for b in range(0,Num_bound):
array1[b] = (Num_bound-b-1)*input_delay
# print (array1[b])
#We use the array1 timedelay matrix to trigger a SpikeGeneratorGroup of neurons that generates the
# required spike triggers and add this to the network.
P = SpikeGeneratorGroup(mem_size,np.arange(mem_size), (array1)*ms)
net1.add(P)
#We now define the set of equation and reset definitions that will be used to generate the neuron action
#potentials and spike reset operations. Note that we make use of the Brian2 refractory operation.
equ1 = '''
dv/dt = -v/tau : 1
tau : second
'''
# The G1 neuron group are the neurons that generate the sparse vectors tht will be bound. To do this each neuron represents
# one slot of the sparse vector and the synaptic connections (SP1) on the dendrite represent the time delay of the corresponding spike.
# The time delays are obtained from the P_matrix (SP1.delay). The input to this part of the neuromorphic circuit are the
# sequence of spikes from the 'P' spike generator group. A 'P' spike excites an axon which is connected to all the G1 neurons
# (SP1.connect). The output from the group is recursively fed to the next neuron to provide the cyclic shift. This then gives two
# possible spikes in the next cycle. Using the refractory property of the neuron only the first of these generates a spike.
G1 = NeuronGroup(slots_per_vector, equ1,
threshold='v >= 0.5', reset='v=0.0', method='euler',refractory = 't%(bits_per_slot*ms)')
G1.v = 0.0
G1.tau = 1.0*ms
net1.add(G1)
SP1 = Synapses(P, G1, 'w : 1', on_pre='v = 1.0' )
range_array1 = range(0,slots_per_vector)
for n in range(0,mem_size):
SP1.connect(i=n,j=range_array1)
SP1.delay = np.reshape(P_matrix,mem_size*slots_per_vector)*ms
net1.add(SP1)
# To perform the cyclic shift and superposition operations the output from G2 is recurrently fed back such that the output from neuron_0
# feeds to the input of neuron_1 etc. Because Brian2 introduces a time delay of 0.1ms when performing this operation the delay for this
# feedback is the input_delay minus 0.1ms (S3.delay)
S11 = Synapses(G1, G1, 'w : 1', on_pre='v +=1.0' )
for n in range(0,slots_per_vector):
S11.connect(i=n,j=(n+1)%(slots_per_vector))
S11.delay = (input_delay-0.1)*ms
net1.add(S11)
# The following spike and state monitors are defined.
SMP = SpikeMonitor(P)
net1.add(SMP)
M1 = StateMonitor(G1, 'v', record=True)
net1.add(M1)
SM1= SpikeMonitor(G1)
net1.add(SM1)
# Network 1 is now run for delta milliseconds.
net1.run(delta*ms)
# Obtain the sparse vector timings from the SM5 monitor and print the timings so that they can be compared with the theoretical values.
array2 = | np.array([SM1.i,SM1.t/ms]) | numpy.array |
"""Calculate reflected line profiles in the galactic center."""
import fractions
import math

import matplotlib.pyplot as plt
import numpy as np
from astropy import constants as c
from astropy import units as u
from astropy.table import Table
from matplotlib import cm
from matplotlib.gridspec import GridSpec
from PyAstronomy.modelSuite import KeplerEllipseModel
from pylab import (arccos, axis, clf, copy, cos, exp, figure, hist, plot, rand,
                   savefig, scatter, show, sin, sqrt, subplot, transpose,
                   xlabel, ylabel)
from scipy.integrate import quad
plt.rcParams['text.usetex'] = True
plt.rcParams['text.latex.unicode'] = True
# unit conversion: (use astropy for this?)
radians = np.pi / 180. # deg to radians
meters = 1.0 / (((1.0 * u.meter).si.value / (
1.0 * u.lyr / 365.25).si.value)) # light days to meters
kg = c.M_sun.si.value # kg/solar mass
grav = c.G.si.value # m^3/kg/s^2 gravitational constant
eV = (1.0 * u.eV).si.value # electron volt
h = c.h.si.value # Planck's constant
kb = c.k_B.si.value # Boltzmann's constant
cc = c.c.si.value # Speed of light
day = 86400. # seconds in a day
year = 3.154 * 10**7. # seconds in a year
m_to_km = 1.0e-3 # meters to km
pc = (1.0 * u.pc).si.value / (1.0 * u.meter).si.value # pc to meters
gc_dist = 8.0e3 * pc # in light days
def ionizing_luminosity_fraction(temp, cutoff=13.6):
    """Calculate the total ionizing luminosity given a temp/cutoff energy."""
    # Integrate the Planck spectrum from the cutoff frequency upward
    # (100x the cutoff is effectively infinity for the exponential tail).
    nu_min = cutoff * eV / h
    nu_max = 100 * nu_min
    planck_tail = quad(
        lambda nu: nu ** 3 / np.expm1(h * nu / (kb * temp)), nu_min, nu_max)[0]
    # Normalize by the total (Stefan-Boltzmann) integral.
    total = 2 * (np.pi * kb * temp) ** 4 / (15 * h ** 3 * cc ** 2)
    return (2.0 * h / cc ** 2) * planck_tail / total
def find_nearest_idx(array, value):
    """Index of the element of `array` closest to `value`."""
    return np.abs(array - value).argmin()
def lcm(a, b):
    """Lowest common multiple of two integers (0 if either is 0).

    Uses math.gcd: fractions.gcd was removed in Python 3.9, and exact
    integer floor division avoids the float round-trip of the original.
    """
    return abs(a * b) // math.gcd(a, b) if a and b else 0
def get_cmap(n, name='hsv'):
    """Return a function that maps 0, 1, ..., n-1 to a distinct RGB color.

    The keyword argument name must be a standard mpl colormap name.
    """
    # NOTE(review): plt.cm.get_cmap was deprecated in Matplotlib 3.7 and
    # removed in 3.9; matplotlib.colormaps[name].resampled(n) is the
    # replacement -- confirm the project's pinned Matplotlib version.
    return plt.cm.get_cmap(name, n)
def star_position(kems, times):
    """Return the star positions for the given time(s).

    Evaluates each Kepler-ellipse model at the requested epoch(s),
    scales to meters, and rotates the sky-plane (x, y) components by
    90 degrees.
    """
    # (An earlier stub drew random positions in a 20x20x20 box; the
    # Kepler models replaced it.)
    epochs = np.array(times if isinstance(times, list) else [times])
    positions = np.array(
        [model.evaluate(epochs).tolist() for model in kems]) * 1.9e14
    # 90-degree rotation in the x-y plane; z is left untouched.
    rot = [[0, -1], [1, 0]]
    positions = np.array([np.matmul(rot, pos[:2]).tolist() + [pos[2]]
                          for pos in positions])
    return positions
def star_luminosity(star_data):
    """Return the ionizing luminosities of the stars in star_data."""
    # Reference star: the dimmest one with Habibi data (entry[2] set).
    dimmest_logl = np.inf
    for entry in star_data:
        habibi = entry[2]
        if habibi is not None and habibi['log_l'] < dimmest_logl:
            dimmest_logl = habibi['log_l']
            dimmest_kmag = habibi['k_magnitude']
            dimmest_temp = habibi['temperature']
    dimmest_l = 10.0 ** dimmest_logl
    # Luminosities in solar luminosities; stars without Habibi data are
    # scaled from the reference star's K magnitude.
    luminosities = [
        dimmest_l * 10.0 ** ((float(e[1]['kmag']) - float(dimmest_kmag)) / 2.5)
        if e[2] is None else 10.0 ** e[2]['log_l']
        for e in star_data]
    temps = [dimmest_temp if e[2] is None else e[2]['temperature']
             for e in star_data]
    # Keep only the ionizing fraction of each star's output.
    luminosities *= np.array(
        [ionizing_luminosity_fraction(t) for t in temps])
    return luminosities
def rotate(x, y, co, si):
    """Rotate (x, y) by the angle whose cosine/sine are co/si."""
    return [co * x + si * y, co * y - si * x]
def rotate_vecs(u, aa, bb, transpose=False):
    """Rotate vector(s) `u` by the rotation carrying direction `aa` onto `bb`.

    `u` may be a single vector or a list of vectors; with transpose=True
    the input (and output) are transposed component lists.
    """
    vecs = [u] if isinstance(u[0], (int, float)) else u
    if transpose:
        vecs = [list(col) for col in zip(*vecs)]
    a_hat = np.array(aa) / np.linalg.norm(aa)
    b_hat = np.array(bb) / np.linalg.norm(bb)
    # Rodrigues' rotation built from the cross/dot of the unit vectors.
    # NOTE: degenerate when aa is anti-parallel to bb (axis -> 0, rot -> I).
    axis = np.cross(a_hat, b_hat)
    skew = np.array([
        [0, -axis[2], axis[1]],
        [axis[2], 0, -axis[0]],
        [-axis[1], axis[0], 0]
    ])
    cos_ang = np.dot(a_hat, b_hat)
    ang = np.arccos(cos_ang)
    rot = np.eye(3) + np.sin(ang) * skew + np.matmul(skew, skew) * (1.0 - cos_ang)
    out = [np.matmul(rot, np.array(vec)) for vec in vecs]
    if transpose:
        out = [list(col) for col in zip(*out)]
    return tuple(out)
def gas_model(num_clouds, params, other_params, lambdaCen, plot_flag=True):
    """Draw gas-cloud positions, velocities, and Doppler-shifted wavelengths.

    Parameters
    ----------
    num_clouds : int
        Number of point particles to draw.
    params : sequence
        [mu, F, beta, theta_o, kappa, mbh, f_ellip, f_flow, theta_e].
    other_params : sequence
        [angular_sd_orbiting, radial_sd_orbiting,
         angular_sd_flowing, radial_sd_flowing].
    lambdaCen : float
        Passed to relativity() with r and the Schwarzschild radius --
        presumably the line's rest wavelength (TODO confirm).
    plot_flag : bool
        NOTE(review): currently unused; the plotting code below is
        commented out.

    Returns
    -------
    list
        [x, y, z, vx, vy, vz, wavelength_values].
    """
    [mu, F, beta, theta_o,
     kappa, mbh, f_ellip, f_flow, theta_e] = params
    [angular_sd_orbiting, radial_sd_orbiting,
     angular_sd_flowing, radial_sd_flowing] = other_params
    # Schwarzschild radius
    Rs = 2. * grav * mbh / cc**2.
    # First calculate the geometry of the emission:
    # Radii: minimum radius mu*F plus a gamma-distributed shell
    # (shape beta**-2 keeps the mean at mu).
    r = mu * F + (1. - F) * mu * beta**2. * \
        np.random.gamma(beta**(-2.), 1, num_clouds)
    phi = 2. * np.pi * rand(num_clouds)
    x = r * cos(phi)
    y = r * sin(phi)
    z = r * 0.
    # *pow(u3[i], openingBendPower));
    # Puff the disk up to a half-opening angle theta_o.
    angle = arccos(cos(theta_o) + (1. - cos(theta_o)) * rand(num_clouds))
    cos1 = cos(angle)
    sin1 = sin(angle)
    u1 = rand(num_clouds)
    cos2 = cos(2. * np.pi * u1)
    sin2 = sin(2. * np.pi * u1)
    # rotate to puff up:
    [x, z] = rotate(x, z, cos1, sin1)
    # rotate to restore axisymmetry:
    [x, y] = rotate(x, y, cos2, sin2)
    # rotate to observer plane:
    x, y, z = rotate_vecs([x, y, z], [0, 0, 1], disk_ang_mom, transpose=True)
    # weights for the different points
    # w = 0.5 + kappa * x / sqrt(x * x + y * y + z * z)
    # w /= sum(w)
    # if plot_flag:
    #     # larger points correspond to more emission from the point
    #     ptsize = 5
    #     shade = 0.5
    #     clf()
    #     subplot(2, 2, 1)  # edge-on view 1, observer at +infinity of x-axis
    #     scatter(x / meters, y / meters, ptsize, alpha=shade)
    #     xlabel('x')
    #     ylabel('y')
    #     subplot(2, 2, 2)  # edge-on view 2, observer at +infinity of x-axis
    #     scatter(x / meters, z / meters, ptsize, alpha=shade)
    #     xlabel('x')
    #     ylabel('z')
    #     subplot(2, 2, 3)  # view of observer looking at plane of sky
    #     scatter(y / meters, z / meters, ptsize, alpha=shade)
    #     xlabel('y')
    #     ylabel('z')
    #     subplot(2, 2, 4)  # plot the radial distribution of emission
    #     hist(r / meters, 100)
    #     xlabel("r")
    #     ylabel("p(r)")
    #     show()
    # Now calculate velocities of the emitting gas:
    # radius1 is the local escape speed, radius2 the circular speed.
    radius1 = sqrt(2. * grav * mbh / r)
    radius2 = sqrt(grav * mbh / r)
    vr = copy(x) * 0.
    vphi = copy(x) * 0.
    u5 = rand(num_clouds)
    n1 = np.random.normal(size=num_clouds)
    n2 = np.random.normal(size=num_clouds)
    for i in range(0, num_clouds):
        if u5[i] < f_ellip:
            # we give this point particle a near-circular orbit
            theta = 0.5 * np.pi + angular_sd_orbiting * n1[i]
            vr[i] = radius1[i] * cos(theta) * exp(radial_sd_orbiting * n2[i])
            vphi[i] = radius2[i] * sin(theta) * exp(radial_sd_orbiting * n2[i])
        else:
            if f_flow < 0.5:
                # we give this point particle an inflowing orbit
                theta = np.pi - theta_e + angular_sd_flowing * n1[i]
                vr[i] = radius1[i] * cos(theta) * \
                    exp(radial_sd_flowing * n2[i])
                vphi[i] = radius2[i] * \
                    sin(theta) * exp(radial_sd_flowing * n2[i])
            else:
                # we give this point particle an outflowing orbit
                theta = 0. + theta_e + angular_sd_flowing * n1[i]
                vr[i] = radius1[i] * cos(theta) * \
                    exp(radial_sd_flowing * n2[i])
                vphi[i] = radius2[i] * \
                    sin(theta) * exp(radial_sd_flowing * n2[i])
    # Convert vr, vphi to Cartesians:
    vx = vr * cos(phi) - vphi * sin(phi)
    vy = vr * sin(phi) + vphi * cos(phi)
    vz = vr * 0.
    # apply rotations
    vx, vy, vz = rotate_vecs(
        [vx, vy, vz], [0, 0, 1], disk_ang_mom, transpose=True)
    vx = np.array(vx)
    vy = np.array(vy)
    vz = np.array(vz)
    # Sign of vz depends on whether disk is co- or counter-rotating.
    # Positive vz corresponds to counter-clockwise rotation about the +y axis.
    wavelength_values = relativity(vz, r, Rs, lambdaCen)
    return [x, y, z, vx, vy, vz, wavelength_values]
def compute_gas_flux(gas_coords, star_data, times, params, bins, fig_name,
plot_flag=True):
"""Calculate the flux contribution from each point particle.
Assumptions: light travel time from stars to gas plus the
recombination time is shorter than the time it takes the
stars to move in their orbits.
"""
[stellar_wind_radius, kappa] = params
# gas_flux = np.zeros((np.size(gas_coords[0]), np.size(times)))
# load in the star luminosities (if they are constant)
star_luminosities = star_luminosity(star_data)
# loop over times we want spectra
star_pos_models = [x[0] for x in star_data]
num_stars = len(star_position(star_pos_models, times[0]))
gas_flux = np.zeros((np.size(gas_coords[0]), np.size(times)))
star_gas_flux = np.zeros(
(np.size(gas_coords[0]), np.size(times), num_stars))
for i in range(np.size(times)):
star_positions = star_position(star_pos_models, times[i])
gas_flux_values = np.zeros(
(np.size(gas_coords[0]), np.size(star_positions)))
# loop over the stars
for j in range(len(star_positions)):
r = sqrt((star_positions[j, 0] - gas_coords[0])**2. +
(star_positions[j, 1] - gas_coords[1])**2. +
(star_positions[j, 2] - gas_coords[2])**2.)
exclude = np.zeros(len(gas_coords[0]))
exclude[r >= stellar_wind_radius * meters] = 1.0
# weights for the different points
w = 0.5 + kappa * (gas_coords[0] - star_positions[j, 0]) / r
# w /= sum(w)
gas_flux_values[:, j] = w * exclude * \
star_luminosities[j] / (r * r)
star_gas_flux[:, i, j] = gas_flux_values[:, j]
gas_flux[:, i] = np.sum(gas_flux_values, axis=1)
[spectra, wavelength_bins] = make_spectrum(gas_coords, gas_flux, times,
bins, plot_flag=False)
# time2 = time.clock()
# print(time2, time2-time1)
# exit()
current_star_positions = star_position(star_pos_models, 2018.0)
current_star_distances = [
np.linalg.norm(x) for x in current_star_positions]
csd_js = np.argsort(current_star_distances)
star_colors = np.array([
cm.plasma(2.0 * float(j) / (len(star_data) - 1))
for j in range(len(star_data))])
# make a spectrum for each star
star_spectra = make_star_spectrum(gas_coords, star_gas_flux, times,
bins, num_stars, plot_flag=False)
# make a light curve (integrate over wavelength) for each star
# sorted by color scheme!
star_lightcurve = np.sum(star_spectra[:, :, csd_js], axis=1)
full_lightcurve = np.sum(spectra, axis=1)
# normalize
star_lightcurve /= np.max(full_lightcurve)
full_lightcurve /= np.max(full_lightcurve)
###################################################################
# set up the plot first
if plot_flag:
shade = 0.5
min_ptsize = 1.0
max_ptsize = 8.0
ssize = 5.0
boxsize = 2.0
fig = figure(figsize=(9, 9), facecolor='white')
grid_width = lcm(2, len(selected_times))
h_width = int(np.round(grid_width / len(selected_times)))
gs = GridSpec(4, grid_width)
# edge-on view 1, observer at +infinity of x-axis
axy = subplot(gs[:2, :3], autoscale_on=False, aspect='equal')
for si, star in enumerate(np.array(star_data)[csd_js]):
elpts = [star_position([
np.array(star_pos_models)[csd_js][si]], t)[
0] / meters for t in np.linspace(0, star[1]['period'],
400)]
plot([x[0] for x in elpts], [x[1] for x in elpts],
lw=0.5, c=star_colors[si])
sxy = scatter([0.0], [0.0], alpha=shade,
edgecolors='black', linewidths=0.5)
pxy = scatter([0.0], [0.0], c='r', s=ssize ** 2,
edgecolors='black', linewidths=0.5)
axis('equal')
xlabel('$x$')
ylabel('$y$')
# edge-on view 2, observer at +infinity of x-axis
axz = subplot(gs[:2, 3:], autoscale_on=False, aspect='equal')
for si, star in enumerate(np.array(star_data)[csd_js]):
elpts = [star_position([
np.array(star_pos_models)[csd_js][si]], t)[
0] / meters for t in np.linspace(0, star[1]['period'],
150)]
plot([x[0] for x in elpts], [x[2] for x in elpts],
lw=0.5, c=star_colors[si])
sxz = scatter([0.0], [0.0], alpha=shade,
edgecolors='black', linewidths=0.5)
pxz = scatter([0.0], [0.0], c='r', s=ssize ** 2,
edgecolors='black', linewidths=0.5)
axis('equal')
xlabel('$x$')
ylabel('$z$')
# view of observer looking at plane of sky
# ayz = subplot(2, 3, 3, autoscale_on=False, aspect='equal')
# for si, star in enumerate(np.array(star_data)[csd_js]):
# elpts = [star_position([
# np.array(star_pos_models)[csd_js][si]], t)[
# 0] / meters for t in np.linspace(0, star[1]['period'],
# 150)]
# plot([x[1] for x in elpts], [x[2] for x in elpts],
# lw=0.5, c=star_colors[si])
# syz = scatter([0.0], [0.0], alpha=shade,
# edgecolors='black', linewidths=0.5)
# pyz = scatter([0.0], [0.0], c='r', s=ssize ** 2,
# edgecolors='black', linewidths=0.5)
# axis('equal')
# xlabel('$y$')
# ylabel('$z$')
# avpl = subplot(2, 3, 4) # plot the vx vs. gas flux
# vpl = scatter([0.0], [0.0], alpha=shade, s=min_ptsize,
# edgecolors='black', linewidths=0.5)
# xlabel("$v_x \\,\\,\\, {\\rm (10,000 km/s)}$")
# ylabel("$\\rm Gas \\,\\,\\, Flux \\,\\,\\, (normalized)$")
ahpl = subplot(gs[2, :]) # light curve of star fluxes
plot(times, np.log10(full_lightcurve), '--', color='k', lw=2)
for j in range(0, num_stars):
plot(times, np.log10(star_lightcurve[:, j]), '-',
color=star_colors[j])
# vl = axvline(x=times[0], color='r')
ahpl.set_xlim(np.min(selected_times), np.max(selected_times))
ahpl.set_ylim(-2, 0.2)
xlabel("$\\rm Time \\,\\,\\, (years)$")
ylabel("$\\rm Gas \\,\\,\\, Flux \\,\\,\\, (normalized)$")
sppl = np.empty(len(selected_is), dtype=object)
for i in range(len(sppl)):
sppl[i] = subplot(
gs[3, (i * h_width):(
(i + 1) * h_width)]) # histogram of gas flux
sline, = plot([0.0, 0.0])
xlabel("$\\lambda \\,\\,\\, (\\AA )$")
ylabel("$\\rm Line \\,\\,\\, Flux \\,\\,\\, (normalized)$")
fig.tight_layout()
# loop over times we want spectra
mid_i = int(np.floor(len(selected_is) / 2.0))
if plot_flag:
i = selected_is[mid_i]
star_positions = star_position(star_pos_models, times[i])
# larger points correspond to more emission from the point
gas_flux_norm = gas_flux[:, i] / sum(gas_flux[:, i])
ptsizes = 2 * num_clouds * gas_flux_norm ** 2
ptsizes = min_ptsize ** 2 + (max_ptsize ** 2 - min_ptsize ** 2) * (
ptsizes - min(ptsizes)) / (max(ptsizes) - min(ptsizes))
xy = transpose(np.array(gas_coords[:2]) / meters)
sxy.set_sizes(ptsizes)
sxy.set_offsets(xy)
xy = star_positions[csd_js, :2] / meters
pxy.set_offsets(xy)
pxy.set_facecolors(star_colors)
axy.set_xlim(-boxsize, boxsize)
axy.set_ylim(-boxsize, boxsize)
for xi, xxyy in enumerate(xy):
axy.text(xxyy[0] + 0.05 * boxsize, xxyy[1] + 0.05 * boxsize,
star_data[csd_js[xi]][1]['name'], clip_on=True)
xz = transpose(np.array(gas_coords[:3:2]) / meters)
sxz.set_sizes(ptsizes)
sxz.set_offsets(xz)
xz = star_positions[csd_js, :3:2] / meters
pxz.set_offsets(xz)
pxz.set_facecolors(star_colors)
axz.set_xlim(-boxsize, boxsize)
axz.set_ylim(-boxsize, boxsize)
for xi, xxzz in enumerate(xz):
axz.text(xxzz[0] + 0.05 * boxsize, xxzz[1] + 0.05 * boxsize,
star_data[csd_js[xi]][1]['name'], clip_on=True)
# yz = transpose(np.array(gas_coords[1:3]) / meters)
# syz.set_sizes(ptsizes)
# syz.set_offsets(yz)
# yz = star_positions[csd_js, 1:3] / meters
# pyz.set_offsets(yz)
# pyz.set_facecolors(star_colors)
# ayz.set_xlim(-boxsize, boxsize)
# ayz.set_ylim(-boxsize, boxsize)
# vx = transpose(
# np.array([gas_coords[4] / 10000000.,
# gas_flux_norm * 1000.]))
# vpl.set_offsets(vx)
# maxx = np.max(np.abs(vx[:, 0]))
# maxy = np.max(vx[:, 1])
# avpl.set_xlim(-maxx, maxx)
# avpl.set_ylim(0, maxy)
# No `set_data` for `hist`. The only reason we need to replot this
# is to get the colors right... change this?
# ahpl.cla()
# vl.set_xdata(times[i])
# axvline(x=times[i], color='r')
# hist(gas_coords[4] / 10000000., weights=gas_flux_norm,
# bins=int(num_clouds / 100))
# ahpl.relim()
# ahpl.autoscale_view(True, True, True)
# sline.set_data(wavelength_bins, spectra[i])
# minx = np.min(wavelength_bins)
# maxx = np.max(wavelength_bins)
# miny = np.min(spectra[i])
# maxy = np.max(spectra[i])
# sppl.set_xlim(minx, maxx)
# sppl.set_ylim(miny, maxy)
for tii, i in enumerate(selected_is):
sppl[tii].cla()
aspectra = star_spectra[i, :, csd_js]
aspectra = np.add.accumulate(aspectra, axis=0)
aspectra /= | np.max(aspectra) | numpy.max |
# Utility Functions
# Authors: <NAME>
# Edited by: <NAME>
'''
Used by the user to define channels that are hard coded for analysis.
'''
# Imports necessary for this function
import numpy as np
import re
from itertools import combinations
def splitpatient(patient):
    """Split a combined identifier like 'pt1sz2' into (patient id, seizure/recording id).

    The first matching marker among 'seiz', 'sz', 'aw', 'aslp', '_' (searched
    in that order) separates the two parts; underscores are stripped from both.

    Raises:
        ValueError: if no known marker is found. (The original printed a
        warning and then crashed with NameError on the return statement.)
    """
    marker_pos = -1
    for marker in ('seiz', 'sz', 'aw', 'aslp', '_'):
        marker_pos = patient.find(marker)
        if marker_pos != -1:
            break
    if marker_pos == -1:
        raise ValueError(
            "Not sz, seiz, aslp, or aw! Please add additional naming possibilities, "
            "or tell data gatherers to rename datasets.")
    # remove any underscores
    pat_id = re.sub('_', '', patient[0:marker_pos])
    seiz_id = re.sub('_', '', patient[marker_pos:])
    return pat_id, seiz_id
def returnindices(pat_id, seiz_id=None):
    """Look up the hard-coded channel info for a patient.

    Tries each center's lookup table in turn (NIH, LA, UMMC, JHU, TNG) until
    one returns a non-empty index array.

    Returns:
        (included_indices, onsetelecs, clinresult) from the first matching table.
    """
    lookup_tables = (returnnihindices, returnlaindices, returnummcindices,
                     returnjhuindices, returntngindices)
    included_indices, onsetelecs, clinresult = lookup_tables[0](pat_id, seiz_id)
    for lookup in lookup_tables[1:]:
        if included_indices.size != 0:
            break
        included_indices, onsetelecs, clinresult = lookup(pat_id, seiz_id)
    return included_indices, onsetelecs, clinresult
def returntngindices(pat_id, seiz_id):
    """Hard-coded channel indices for TNG-center patients.

    Returns (included_indices, onsetelecs, clinresult). For this center only
    the channel indices are recorded: onsetelecs stays None and clinresult
    stays -1 for every patient. An unknown pat_id yields an empty index
    array, which signals the caller to try another center's table.
    """
    included_indices = np.array([])
    onsetelecs = None
    clinresult = -1
    if pat_id == 'id001ac':
        # included_indices = np.concatenate((np.arange(0,4), np.arange(5,55),
        #                     np.arange(56,77), np.arange(78,80)))
        included_indices = np.array([0, 1, 5, 6, 7, 8, 9, 10, 11, 12, 13,
                                     15, 16, 17, 18, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
                                     32, 33, 35, 36, 37, 38, 39, 40, 41, 42, 43, 45, 46, 47, 48,
                                     49, 50, 51, 52, 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 68,
                                     69, 70, 71, 72, 73, 74, 75, 76, 78, 79])
    elif pat_id == 'id002cj':
        # included_indices = np.array(np.arange(0,184))
        included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
                                     15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
                                     30, 31, 32, 33, 34, 35, 36, 37, 38,
                                     45, 46, 47, 48, 49, 50, 51, 52, 53,
                                     60, 61, 62, 63, 64, 65, 66, 67, 70, 71, 72, 73, 74, 75, 76, 85, 86, 87, 88, 89,
                                     90, 91, 92, 93, 100, 101, 102, 103, 104, 105,
                                     106, 107, 108, 115, 116, 117, 118, 119,
                                     120, 121, 122, 123, 129, 130, 131, 132, 133,
                                     134, 135, 136, 137,
                                     # np.arange(143, 156)
                                     143, 144, 145, 146, 147,
                                     148, 149, 150, 151, 157, 158, 159, 160, 161,
                                     162, 163, 164, 165, 171, 172, 173, 174, 175,
                                     176, 177, 178, 179, 180, 181, 182])
    elif pat_id == 'id003cm':
        included_indices = np.concatenate((np.arange(0,13), np.arange(25,37),
                np.arange(40,50), np.arange(55,69), np.arange(70,79)))
    elif pat_id == 'id004cv':
        # removed OC'10, SC'5, CC'14/15
        included_indices = np.concatenate((np.arange(0,23), np.arange(25,39),
                np.arange(40,59), np.arange(60,110)))
    elif pat_id == 'id005et':
        included_indices = np.concatenate((np.arange(0,39), np.arange(39,47),
                np.arange(52,62), np.arange(62,87)))
    elif pat_id == 'id006fb':
        included_indices = np.concatenate((np.arange(10,19), np.arange(40,50),
                np.arange(115,123)))
    elif pat_id == 'id008gc':
        included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16, 17,
            18, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 36, 37, 38, 39, 40,
            41, 42, 43, 44, 45, 46, 48, 49, 50, 51, 52, 53, 54, 56, 57, 58, 61, 62, 63, 64, 65,
            71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 84, 85, 86, 87, 88, 89, 90, 92, 93,
            94, 95, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 110, 111])
    elif pat_id == 'id009il':
        included_indices = np.concatenate((np.arange(0,10), np.arange(10,152)))
    elif pat_id == 'id010js':
        included_indices = np.concatenate((np.arange(0,14),
                np.arange(15,29), np.arange(30,42), np.arange(43,52),
                np.arange(53,65), np.arange(66,75), np.arange(76,80),
                np.arange(81,85), np.arange(86,94), np.arange(95,98),
                np.arange(99,111),
                np.arange(112,124)))
    elif pat_id == 'id011ml':
        included_indices = np.concatenate((np.arange(0,18), np.arange(21,68),
                np.arange(69,82), np.arange(82,125)))
    elif pat_id == 'id012pc':
        included_indices = np.concatenate((np.arange(0,4), np.arange(9,17),
                np.arange(18,28), np.arange(31,41), np.arange(44,56),
                np.arange(57,69), np.arange(70,82), np.arange(83,96),
                np.arange(97,153)))
    elif pat_id == 'id013pg':
        included_indices = np.array([2, 3, 4, 5, 15, 18, 19, 20, 21, 23, 24,
            25, 30, 31, 32, 33, 34, 35, 36, 37, 38, 50, 51, 52, 53, 54, 55, 56,
            57, 58, 60, 61, 62, 63, 64, 65, 66, 67, 68, 70, 71, 72, 73, 74, 75,
            76, 77, 78])
    elif pat_id == 'id014rb':
        included_indices = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
            14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
            33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
            51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
            68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
            85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
            102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
            116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
            130, 131, 132, 133, 135, 136, 140, 141, 142, 143, 144, 145, 146,
            147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
            160, 161, 162, 163, 164])
    elif pat_id == 'id015sf':
        included_indices = np.concatenate((np.arange(0,37), np.arange(38,77),
                np.arange(78,121)))
    return included_indices, onsetelecs, clinresult
def returnnihindices(pat_id, seiz_id):
included_indices = np.array([])
onsetelecs = None
clinresult = -1
if pat_id == 'pt1':
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 69), np.arange(71, 95)))
onsetelecs = set(['ATT1', 'ATT2', 'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4'])
resectelecs = set(['ATT1', 'ATT2', 'ATT3', 'ATT4', 'ATT5', 'ATT6', 'ATT7', 'ATT8',
'AST1', 'AST2', 'AST3', 'AST4',
'PST1', 'PST2', 'PST3', 'PST4',
'AD1', 'AD2', 'AD3', 'AD4',
'PD1', 'PD2', 'PD3', 'PD4',
'PLT5', 'PLT6', 'SLT1'])
clinresult = 1
elif pat_id == 'pt2':
# [1:14 16:19 21:25 27:37 43 44 47:74]
included_indices = np.concatenate((np.arange(0, 14), np.arange(15, 19),
np.arange(
20, 25), np.arange(
26, 37), np.arange(
42, 44),
np.arange(46, 74)))
onsetelecs = set(['MST1', 'PST1', 'AST1', 'TT1'])
resectelecs = set(['TT1', 'TT2', 'TT3', 'TT4', 'TT6', 'TT6',
'G1', 'G2', 'G3', 'G4', 'G9', 'G10', 'G11', 'G12', 'G18', 'G19',
'G20', 'G26', 'G27',
'AST1', 'AST2', 'AST3', 'AST4',
'MST1', 'MST2', 'MST3', 'MST4'])
clinresult = 1
elif pat_id == 'pt3':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 37),
np.arange(41, 43), np.arange(45, 69), np.arange(70, 107)))
onsetelecs = set(['SFP1', 'SFP2', 'SFP3',
'IFP1', 'IFP2', 'IFP3',
'MFP2', 'MFP3',
'OF1', 'OF2', 'OF3', 'OF4'])
resectelecs = set(['FG1', 'FG2', 'FG9', 'FG10', 'FG17', 'FG18', 'FG25',
'SFP1', 'SFP2', 'SFP3', 'SFP4', 'SFP5', 'SFP6', 'SFP7', 'SFP8',
'MFP1', 'MFP2', 'MFP3', 'MFP4', 'MFP5', 'MFP6',
'IFP1', 'IFP2', 'IFP3', 'IFP4',
'OF3', 'OF4'])
clinresult = 1
elif pat_id == 'pt4':
# [1:19 21:37 42:43 46:69 71:107]
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt5':
included_indices = np.concatenate((np.arange(0, 19), np.arange(20, 26),
np.arange(28, 36)))
onsetelecs = set([])
resectelecs = set([])
clinresult = -1
elif pat_id == 'pt6':
# [1:36 42:43 46 52:56 58:71 73:95]
included_indices = np.concatenate((np.arange(0, 36), np.arange(41, 43),
np.arange(45, 46), np.arange(51, 56), | np.arange(57, 71) | numpy.arange |
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate
import pandas as pd
import math
#===INITIALISE===
pltno = 100 # how many gillespie sims do you want?
endTime = 1000 # When should Gillespie stop? 25k seconds is ca 20 cycles (24000)
k0 = 0.2 #s^-1  production (birth) rate
k1 = 0.01 #s^-1  per-molecule degradation (death) rate
m0 = 20 # initial condition
t0 = 0 # initial time
# Scratch/result lists shared (as module globals) between gil() and megagil():
# *store collect one run, *megastore collect all runs.
mnewmegastore = []
mnewstore = []
mstore = []
mmegastore = [m0]
tstore = []
tmegastore = [t0]
cycle = 1200  # seconds per division cycle; gil() binomially halves m each cycle
def gil(t0,tmax,y0):
    """Run one Gillespie simulation of birth (rate k0) / death (rate k1*m).

    Every `cycle` seconds the molecule count m is binomially halved (cell
    division). The trajectory is appended to the module-level mstore/tstore
    lists and the post-division counts to mnewstore; returns None.
    """
    # get stuff in the correct data format
    t0 = float(t0)
    tmax = float(tmax)
    y0 = float(y0)
    t = t0
    m = y0 # initialise substance
    nthCycle = 0
    # main loop
    while t < tmax:
        newt = t - (nthCycle*cycle)
        if newt >= cycle:
            # division: each molecule is kept with probability 0.5
            m = np.random.binomial(m,0.5)
            nthCycle += 1
            mnewstore.append(m)
            #print('new mnewstore:',mnewstore)
        r0 = k0
        r1 = k1*m
        t0step = np.random.exponential(1/r0)
        if r1 == 0:
            # no molecules left, so the death reaction can never fire
            t1step = math.inf
        else:
            t1step = np.random.exponential(1/r1)
        # first-reaction method: whichever event fires first wins
        if t0step < t1step:
            m += 1
            t += t0step
        else:
            m -= 1
            t += t1step
        '''
        r0 = k0
        r1 = k1*m
        rtot = r0+r1
        p0 = r0/rtot
        p1 = r1/rtot
        tstep = np.random.exponential(1/rtot)
        t += tstep
        rand0 = np.random.uniform()
        rand1 = np.random.uniform()
        if rand0 < p0:
            m += 1
        if rand1 < p1:
            m -= 1
        if m < 0:
            m = 0
        '''
        # STORE DATA
        global mstore
        global tstore
        mstore.append(m)
        tstore.append(t)
def megagil(n,t0,tmax,y0):
    """Run gil() n times and collect each run's output in the *megastore lists.

    Slot i of mmegastore/tmegastore/mnewmegastore holds run i's trajectory;
    the scratch lists (mstore/tstore/mnewstore) are reset between runs.
    """
    # initialise correct size of store nested list
    global mmegastore
    global tmegastore
    global mnewmegastore
    mmegastore = [m0] * n
    tmegastore = [None] * n
    mnewmegastore = [None] * n
    for i in range(n):
        #print("\n \n \n \n \n \n \n \n \n \n \n") #if you want to clear the console in a hacky way.
        megagilprog = (float(i+1)/float(n))*100
        print("Progress: "+str(megagilprog)+"%")
        if megagilprog > 99.9999:
            print("Gillespie completed. Now generating stats and plots...")
        gil(t0,tmax,y0)
        # gil() appended into the scratch lists; move them into slot i and reset
        global mstore
        mmegastore[i] = mstore
        mstore=[]
        global tstore
        tmegastore[i] = tstore
        tstore=[]
        global mnewstore
        mnewmegastore[i] = mnewstore
        mnewstore=[]
# HOW WOULD AVERAGING WORK? MAYBE NOT THE SAME AMOUNT OF TIME POINTS. ONLY AVERAGE FIRST 100s??
# to average: unfold megagil listed list. create a list per iteration point. average out these points. save stats about those too.
timevec = []  # NOTE(review): populated elsewhere; usage not visible in this chunk
linter = []   # NOTE(review): populated elsewhere; usage not visible in this chunk
def stats(llist):
global avglist
global stduplist
global stddownlist
global maxlist
global minlist
avglist=[]
stduplist=[]
stddownlist=[]
maxlist=[]
minlist=[]
actlist = llist
actlist = [list(i) for i in zip(*llist)] # unfold the llist (with * operator), then zip it. last timesteps if lists are different length
for i in range(len(actlist)):
"""
statsprog = (float(i+1)/float(len(actlist)))*100
print("\n \n \n \n \n \n \n \n \n \n \n") #if you want to clear the console in a hacky way.
print("Progress: "+str(statsprog)+"%")
if statsprog > 99.9999:
print("Stats calc completed.")
"""
avglist.append(np.average(actlist[i]))
stduplist.append(np.average(actlist[i])+ | np.std(actlist[i]) | numpy.std |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pandas as pd
from sklearn.model_selection import train_test_split
import numpy as np
from pathlib import Path
from torch.autograd import Variable
from tqdm import tqdm
import matplotlib.pyplot as plt
from data_utils import LanesDataset
from model import UNet
from loss import DiscriminativeLoss, CrossEntropyLoss2d
from utils import gen_color_img
# Dataset location (hard-coded to the author's machine)
root_dir = '/home/anudeep/lane-detection/dataset/'
# CSV listing matching image / mask file path pairs
df = pd.read_csv(os.path.join(root_dir,'data/paths.csv'))
# Deterministic 80/20 train/test split; random_state pins the partition
X_train, X_test, y_train, y_test = train_test_split(
    df.img_paths, df.mask_paths, test_size=0.2, random_state=42)
# loading data
test_dataset = LanesDataset( | np.array(X_test[0:3]) | numpy.array |
"""Various VR utilities."""
import queue
import threading
import time
from asyncio.streams import StreamReader
from typing import Sequence, Dict
import struct
import numpy as np
import serial.threaded
from pykalman import KalmanFilter
from copy import copy
try:
import cv2
from displayarray import read_updates
from displayarray import display
HAVE_VOD = True
except Exception as e:
print(f"failed to import displayarray and/or opencv, reason: {e}")
print("camera based tracking methods will not be available")
HAVE_VOD = False
from itertools import islice, takewhile
import re
from typing import Optional
def format_str_for_write(input_str: str) -> bytes:
    """Encode *input_str* as UTF-8 with a guaranteed trailing newline.

    An empty string maps to empty bytes (no newline is added).
    """
    if not input_str:
        return b""
    terminated = input_str if input_str.endswith("\n") else input_str + "\n"
    return terminated.encode("utf-8")
async def read(reader: StreamReader, read_len: int = 20) -> str:
    """Read from *reader* until a chunk contains a newline (or EOF); return text.

    Bytes are accumulated and decoded once at the end, so multi-byte UTF-8
    sequences split across chunk boundaries decode correctly (the previous
    per-chunk decode raised UnicodeDecodeError in that case). No explicit
    sleep is needed: awaiting the read already yields to the event loop.
    """
    data = bytearray()
    temp = b" "
    while b"\n" not in temp and temp != b"":
        temp = await reader.read(read_len)
        data.extend(temp)
    return data.decode("utf-8")
def read2(reader, read_len=20):
    """Read from a socket-like *reader* (recv) until a newline arrives (or EOF).

    Bytes are accumulated and decoded once at the end, so multi-byte UTF-8
    sequences split across recv() boundaries decode correctly (the previous
    per-chunk decode raised UnicodeDecodeError in that case).
    """
    data = bytearray()
    temp = b" "
    while b"\n" not in temp and temp != b"":
        temp = reader.recv(read_len)
        data.extend(temp)
        time.sleep(0)  # allows thread switching
    return data.decode("utf-8")
async def read3(reader: StreamReader, read_len: int = 20) -> bytearray:
    """Read raw bytes from *reader* until a chunk contains b"\\n" (or EOF).

    Unlike read()/read2() the result is returned undecoded; the previous
    ``-> str`` annotation was wrong — a bytearray is returned.
    """
    data = bytearray()
    temp = b" "
    while b"\n" not in temp and temp != b"":
        temp = await reader.read(read_len)
        data.extend(temp)
    # time.sleep(0)  # allows thread switching
    return data
def make_rotmat(angls, dtype=np.float64):
"""
Rotate a set of points around the x, y, then z axes.
:param points: a point dictionary, such as: [[0, 0, 0], [1, 0, 0]]
:param angles: the degrees to rotate on the x, y, and z axis
"""
rotx = np.array(
[
[1, 0, 0],
[0, np.cos(angls[0]), -np.sin(angls[0])],
[0, np.sin(angls[0]), | np.cos(angls[0]) | numpy.cos |
import sys, datetime, os, math
import numpy as np
import classifiers
import data_processing as data_proc
from keras.models import model_from_json
from sklearn import metrics
from sklearn.feature_extraction import DictVectorizer
import matplotlib.pyplot as plt
from keras.preprocessing.text import Tokenizer
import keras.backend as K
from collections import Counter, OrderedDict
from pandas import read_csv
from numpy.random import seed, shuffle
# Project root: parent of the current working directory; resource loaders below
# build their file paths relative to this.
path = os.getcwd()[:os.getcwd().rfind('/')]
def load_file(filename):
    """Read *filename* and return its contents split into a list of lines."""
    with open(filename, 'r') as source:
        return source.read().split("\n")
def save_file(lines, filename):
    """Write *lines* to *filename*, newline-separated (no trailing newline)."""
    with open(filename, 'w') as sink:
        sink.write('\n'.join(lines))
def load_data_panda(filename, shuffle_sets=False):
    """Load a tab-separated data set with columns (Set, Label, Text).

    Returns (texts, labels) as numpy arrays, optionally shuffled in unison
    with a fixed seed for reproducibility.
    """
    print("Reading data from file %s..." % filename)
    data = read_csv(filename, sep="\t+", header=None, engine='python')
    data.columns = ["Set", "Label", "Text"]
    print('The shape of this data set is: ', data.shape)
    x_train = np.array(data["Text"])
    labels_train = np.array(data["Label"])
    if shuffle_sets:
        np.random.seed(12346598)
        permutation = np.arange(len(x_train))
        np.random.shuffle(permutation)
        x_train = x_train[permutation]
        labels_train = labels_train[permutation]
    return x_train, labels_train
def save_as_dataset(data, labels, filename):
    """Write (set, label, text) rows to *filename*, skipping None text entries.

    The first column is "TrainSet" when 'train' appears in the filename,
    otherwise "TestSet".
    """
    first_word = "TrainSet" if "train" in filename else "TestSet"
    rows = ['\t'.join((first_word, str(labels[i]), str(data[i])))
            for i in range(len(labels)) if data[i] is not None]
    with open(filename, 'w') as sink:
        sink.write('\n'.join(rows))
def save_dictionary(dictionary, filename):
    """Write a dictionary to *filename* as tab-separated key/value lines."""
    rows = [key + '\t' + str(value) for key, value in dictionary.items()]
    with open(filename, 'w') as sink:
        sink.write('\n'.join(rows))
def load_dictionary(filename):
    """Read tab-separated key/value lines back into a dict (values stay strings)."""
    with open(filename, 'r') as source:
        content = source.read()
    dictionary = {}
    for line in content.split("\n"):
        key, value = line.split("\t")
        dictionary[key] = value
    return dictionary
def save_model(model, json_name, h5_weights_name):
    """Persist a Keras model: architecture as JSON plus weights as HDF5."""
    with open(json_name, "w") as json_file:
        json_file.write(model.to_json())
    model.save_weights(h5_weights_name)
    print("Saved model with json name %s, and weights %s" % (json_name, h5_weights_name))
def load_model(json_name, h5_weights_name, verbose=False):
    """Rebuild a Keras model from its JSON architecture and load its weights."""
    # In case of saved model (not to json or yaml)
    # model = models.load_model(model_path, custom_objects={'f1_score': f1_score})
    with open(json_name, 'r') as json_file:
        model = model_from_json(json_file.read())
    model.load_weights(h5_weights_name)
    if verbose:
        print("Loaded model with json name %s, and weights %s" % (json_name, h5_weights_name))
    return model
# Given any number of dicts, shallow copy and merge into a new dict,
# precedence goes to key value pairs in latter dicts.
# This is in case a Python3.5 version is NOT used. (needed for my access to the zCSF cluster)
def merge_dicts(*dict_args):
    """Shallow-merge any number of dicts into a new one.

    Key/value pairs in later dicts take precedence over earlier ones.
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
# Just a primitive batch generator
def batch_generator(x, y, batch_size):
    """Endlessly yield shuffled (x, y) mini-batches of exactly batch_size rows.

    The data are reshuffled at the start of every epoch; the incomplete
    trailing batch of an epoch is dropped. Seeds numpy's global RNG once
    (side effect on other random consumers).
    """
    seed(1655483)
    size = x.shape[0]
    x_copy = x.copy()
    y_copy = y.copy()
    indices = np.arange(size)
    np.random.shuffle(indices)
    x_copy = x_copy[indices]
    y_copy = y_copy[indices]
    i = 0
    while True:
        if i + batch_size <= size:
            yield x_copy[i:i + batch_size], y_copy[i:i + batch_size]
            i += batch_size
        else:
            # epoch exhausted: reshuffle and restart (remainder rows skipped)
            i = 0
            indices = np.arange(size)
            np.random.shuffle(indices)
            x_copy = x_copy[indices]
            y_copy = y_copy[indices]
            continue
def shuffle_data(labels, n):
    """Return a shuffled list of indices: n positive-label plus n negative-label.

    Seeds numpy's global RNG with a fixed value, so the selection repeats
    across calls (side effect on other random consumers).
    """
    seed(532908)
    indices = range(len(labels))
    pos_indices = [i for i in indices if labels[i] == 1]
    neg_indices = [i for i in indices if labels[i] == 0]
    shuffle(pos_indices)
    shuffle(neg_indices)
    top_n = pos_indices[0:n] + neg_indices[0:n]
    shuffle(top_n)
    return top_n
# Get some idea about the max and mean length of the tweets (useful for deciding on the sequence length)
def get_max_len_info(tweets, average=False):
    """Report the mean and maximum tweet length, measured in words.

    Fixes a bug: the old code took the longest tweet by *characters*
    (``max(tweets, key=len)``) and counted its words, which is not the
    maximum word count.

    Returns the mean length if *average* is True, else the max length.
    """
    word_counts = [len(t.split()) for t in tweets]
    avg_tweet_len = sum(word_counts) / float(len(tweets))
    print("Mean of train tweets: ", avg_tweet_len)
    max_tweet_len = max(word_counts)
    print("Max tweet length is = ", max_tweet_len)
    if average:
        return avg_tweet_len
    return max_tweet_len
def get_classes_ratio(labels):
    """Class weights [w_negative, w_positive] for binary 0/1 labels.

    Each weight is majority-class count divided by that class's count.
    """
    positives = sum(labels)
    negatives = len(labels) - positives
    majority = max(positives, negatives)
    ratio = [majority / float(negatives), majority / float(positives)]
    print("Class ratio: ", ratio)
    return ratio
def get_classes_ratio_as_dict(labels):
    """Class weights {0: w0, 1: w1}: majority-class count over each class count."""
    counts = Counter(labels)
    majority = max(counts[0], counts[1])
    ratio_dict = {0: float(majority / counts[0]), 1: float(majority / counts[1])}
    print('Class ratio: ', ratio_dict)
    return ratio_dict
def extract_features_from_dict(train_features, test_features):
    """Vectorize feature-name -> value mappings into dense lists.

    The vocabulary is learned from the train split only; feature names never
    seen during fitting are silently dropped from the test split.
    """
    vectorizer = DictVectorizer(sparse=False)
    x_train_features = vectorizer.fit_transform(train_features).tolist()
    x_test_features = vectorizer.transform(test_features).tolist()
    print('Size of the feature sets: train = ', len(x_train_features[0]), ', test = ', len(x_test_features[0]))
    return x_train_features, x_test_features
def feature_scaling(features):
    """Scale every column of *features* to [-1, 1] by its maximum absolute value.

    Columns whose max absolute value is 0 are divided by 1 (left unchanged).
    Fixes a crash on empty input: the old code indexed ``features[0]``.
    """
    if not features:
        return []
    col_scale = []
    for column in zip(*features):
        peak = max(abs(value) for value in column)
        # avoid division by zero for all-zero columns
        col_scale.append(peak if peak != 0.0 else 1.0)
    return [[float(value) / float(scale) for value, scale in zip(row, col_scale)]
            for row in features]
def run_supervised_learning_models(train_features, train_labels, test_features, test_labels,
                                   make_feature_analysis=False, feature_names=None, top_features=0, plot_name="coeff"):
    """Train and evaluate the grid-searched linear classifiers on these features."""
    class_ratio = get_classes_ratio_as_dict(train_labels)  # alternatively, can be set class_ratio = 'balanced'
    for grid_search in (classifiers.linear_svm_grid, classifiers.logistic_regression_grid):
        grid_search(train_features, train_labels, test_features, test_labels, class_ratio,
                    make_feature_analysis, feature_names, top_features, plot_name)
    # classifiers.nonlinear_svm(train_features, train_labels, test_features, test_labels, class_ratio,
    #                          make_feature_analysis, feature_names, top_features, plot_name)
# Convert tweets into an array of indices of shape (m, max_tweet_length)
def tweets_to_indices(tweets, word_to_index, max_tweet_len):
    """Map each tweet to a zero-padded row of vocabulary indices.

    Words are lower-cased before lookup; a KeyError propagates for words
    missing from *word_to_index*. Result shape: (num_tweets, max_tweet_len).
    """
    tweet_indices = np.zeros((tweets.shape[0], max_tweet_len))
    for row, tweet in enumerate(tweets):
        for col, word in enumerate(w.lower() for w in tweet.split()):
            tweet_indices[row, col] = word_to_index[word]
    return tweet_indices
def encode_text_as_matrix(train_tweets, test_tweets, mode, max_num_words=None, lower=False, char_level=False):
    """Fit a Keras Tokenizer on the train split and encode both splits as matrices.

    *mode* is one of the Tokenizer scoring modes: count, binary, freq, tf-idf.
    Returns (tokenizer, train_matrix, test_matrix).
    """
    tokenizer = Tokenizer(num_words=max_num_words, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n',
                          lower=lower, split=" ", char_level=char_level)
    tokenizer.fit_on_texts(train_tweets)
    return (tokenizer,
            tokenizer.texts_to_matrix(train_tweets, mode=mode),
            tokenizer.texts_to_matrix(test_tweets, mode=mode))
def encode_text_as_word_indexes(train_tweets, test_tweets, max_num_words=None, lower=False, char_level=False):
    """Fit a Keras Tokenizer on the train split; encode both splits as index sequences.

    Returns (tokenizer, train_sequences, test_sequences).
    """
    tokenizer = Tokenizer(num_words=max_num_words, filters='', lower=lower, split=" ", char_level=char_level)
    tokenizer.fit_on_texts(train_tweets)
    return (tokenizer,
            tokenizer.texts_to_sequences(train_tweets),
            tokenizer.texts_to_sequences(test_tweets))
# Build random vector mappings of a vocabulary
def build_random_word2vec(tweets, embedding_dim=100, variance=1):
    """Build a word -> random-vector map over the vocabulary of *tweets*.

    Vectors are sampled uniformly from [-variance, variance]. Removes the
    old dead ``word2vec_map.get(word)`` check: the map starts empty and the
    vocabulary is a set, so the lookup could never return a vector.
    (The RNG is seeded, but set iteration order can vary between runs.)
    """
    print("\nBuilding random vector of mappings with dimension %d..." % embedding_dim)
    word2vec_map = {}
    seed(1457873)
    words = set((' '.join(tweets)).split())
    for word in words:
        word2vec_map[word] = np.random.uniform(-variance, variance, size=(embedding_dim,))
    return word2vec_map
# Load a set of pre-trained embeddings (can be GLoVe or emoji2vec)
def load_vectors(filename='glove.6B.100d.txt'):
    """Load pre-trained embeddings: emoji2vec if 'emoji' is in the name, else GLoVe.

    Returns a dict mapping each token to its float32 numpy vector.
    """
    print("\nLoading vector mappings from %s..." % filename)
    word2vec_map = {}
    subdir = '/models/emoji2vec/' if 'emoji' in filename else '/res/glove/'
    f = open(path + subdir + filename)
    for line in f:
        values = line.split()
        word2vec_map[values[0]] = np.asarray(values[1:], dtype='float32')
    f.close()
    print('Found %s word vectors and with embedding dimmension %s'
          % (len(word2vec_map), next(iter(word2vec_map.values())).shape[0]))
    return word2vec_map
# Compute the word-embedding matrix
def get_embedding_matrix(word2vec_map, word_to_index, embedding_dim, init_unk=True, variance=None):
    """Build a (len(word_to_index) + 1, embedding_dim) matrix aligned with word_to_index.

    Known words copy their vector from word2vec_map; unknown words are either
    left as zeros (init_unk=False) or sampled uniformly from [-variance, variance].
    """
    # Get the variance of the embedding map
    if init_unk and variance is None:
        variance = embedding_variance(word2vec_map)
        print("Word vectors have variance ", variance)
    # Initialize the embedding matrix as a numpy array of zeros of shape (vocab_len, dimensions of word vectors)
    embedding_matrix = np.zeros((len(word_to_index) + 1, embedding_dim))
    for word, i in word_to_index.items():
        embedding_vector = word2vec_map.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector
        elif init_unk:
            # Unknown tokens are initialized randomly by sampling from a uniform distribution [-var, var]
            # NOTE(review): the seed is reset for every unknown word, so all
            # unknown words receive the SAME random vector — confirm intended.
            seed(1337603)
            embedding_matrix[i] = np.random.uniform(-variance, variance, size=(1, embedding_dim))
        # else:
        #     print("Not found: ", word)
    return embedding_matrix
# Get the vec representation of a set of tweets based on a specified embedding (can be a word or emoji mapping)
def get_tweets_embeddings(tweets, vec_map, embedding_dim=100, init_unk=False, variance=None, weighted_average=True):
    """Embed each tweet as the (optionally tf-idf weighted) average of its word vectors.

    Returns an array of shape (len(tweets), embedding_dim); tweets whose words
    are all unknown keep their accumulated value divided by 1.
    """
    # Get the variance of the embedding map
    if init_unk and variance is None:
        variance = embedding_variance(vec_map)
        print("Vector mappings have variance ", variance)
    # If set, calculate the tf-idf weight of each embedding, otherwise, no weighting (all weights are 1.0)
    if weighted_average:
        weights = get_tf_idf_weights(tweets, vec_map)
    else:
        weights = {k: 1.0 for k in vec_map.keys()}
    tw_emb = np.zeros((len(tweets), embedding_dim))
    for i, tw in enumerate(tweets):
        total_valid = 0
        for word in tw.split():
            embedding_vector = vec_map.get(word)
            if embedding_vector is not None:
                tw_emb[i] = tw_emb[i] + embedding_vector * weights[word]
                total_valid += 1
            elif init_unk:
                # NOTE(review): the seed is reset per unknown word, so every
                # unknown word yields the same vector, and the assignment
                # OVERWRITES (not adds to) the tweet's accumulated sum —
                # confirm this is intended.
                seed(1337603)
                tw_emb[i] = np.random.uniform(-variance, variance, size=(1, embedding_dim))
            # else:
            #     print("Not found: ", word)
        # Get the average embedding representation for this tweet
        tw_emb[i] /= float(max(total_valid, 1))
    return tw_emb
# Based on the deepmoji project, predicting emojis for each tweet -- done using their pre-trained weights
# Here we extract the relevant emojis (with an individual probability of being accurate over teh set threshold)
def get_deepmojis(filename, threshold=0.05):
    """Collect, per tweet, the deepmoji predictions whose probability >= threshold.

    Reads the prediction table (columns Emoji_1..5 / Pct_1..5) and maps each
    confident prediction through the wanted-emojis list.
    """
    print("\nGetting deep-mojis for each tweet in %s..." % filename)
    df = read_csv(path + "/res/deepmoji/" + filename, sep='\t')
    pred_mappings = load_file(path + "/res/emoji/wanted_emojis.txt")
    emoji_pred = []
    for _, row in df.iterrows():
        confident = [row['Emoji_%d' % (top + 1)] for top in range(5)
                     if row['Pct_%d' % (top + 1)] >= threshold]
        emoji_pred.append([pred_mappings[t] for t in confident])
    print("Couldn't find a strong emoji prediction for %d emojis" % len([pred for pred in emoji_pred if pred == []]))
    return emoji_pred
# Just a dummy function that I used for the demo (printing the predicted deepmojis for each tweet in my demo set)
def get_demo_emojis(filename, data):
    """Demo helper: merge each tweet's actual emojis with its deepmoji predictions.

    Prints tweets 20-40 with their merged emoji sets and returns the full list.
    """
    deepmojis = load_file(path + "/res/datasets/demo/deepmoji_" + filename)
    emojis = data_proc.extract_emojis(data)
    all_emojis = []
    for i in range(len(emojis)):
        merged = deepmojis[i] if emojis[i] == '' else emojis[i] + ' ' + deepmojis[i]
        all_emojis.append(' '.join(set(merged.split())))
    for d, e in zip(data[20:40], all_emojis[20:40]):
        print("Tweet: ", d)
        print("Predicted emojis: ", e, "\n")
    return all_emojis
# Calculate the variance of an embedding (like glove, word2vec, emoji2vec, etc)
# Used to sample new uniform distributions of vectors in the interval [-variance, variance]
def embedding_variance(vec_map):
    """Average per-vector variance over an embedding map (used to sample unknowns)."""
    per_vector = [np.var(vec) for vec in vec_map.values()]
    return np.sum(per_vector) / len(vec_map)
# Shuffle the words in all tweets
def shuffle_words(tweets):
    """Return a copy of *tweets* with the words of each tweet randomly reordered."""
    shuffled = []
    for tweet in tweets:
        tokens = tweet.split()
        np.random.shuffle(tokens)
        shuffled.append(' '.join(tokens))
    return shuffled
# Get the tf-idf weighting scheme (used to measure the contribution of a word in a tweet => weighted sum of embeddings)
def get_tf_idf_weights(tweets, vec_map):
    """Smoothed inverse-document-frequency weight for every word in *vec_map*.

    Each word scores log(N / (1 + doc_freq)), where doc_freq counts the
    tweets containing it; the result preserves vec_map's key order.
    """
    doc_freq = Counter()
    for tw in tweets:
        doc_freq.update(set(tw.split()))
    idf = OrderedDict()
    for word in vec_map.keys():
        idf[word] = math.log(len(tweets) / float(1.0 + doc_freq[word]))
    return idf
# Compute the similarity of 2 vectors, both of shape (n, )
def cosine_similarity(u, v):
    """Cosine similarity between two vectors of shape (n,)."""
    norm_u = np.sqrt(np.sum(u ** 2))
    norm_v = np.sqrt(np.sum(v ** 2))
    return np.dot(u, v) / (norm_u * norm_v)
# Convert emojis to unicode representations by removing any variation selectors
# Info: http://www.unicode.org/charts/PDF/UFE00.pdf
def convert_emoji_to_unicode(emoji):
    """Unicode-escape an emoji, truncating at any variation selector (U+FE0F/U+FE0E).

    Info: http://www.unicode.org/charts/PDF/UFE00.pdf
    """
    unicode_emoji = emoji.encode('unicode-escape')
    for selector in (b"\\ufe0f", b"\\ufe0e"):
        cut = unicode_emoji.find(selector)
        if cut != -1:
            unicode_emoji = unicode_emoji[:cut]
    return unicode_emoji
# Performs the word analogy task: a is to b as c is to ____.
def make_analogy(a, b, c, vec_map):
    """Solve the analogy a : b :: c : ? over *vec_map* and print the results.

    Keys are compared in unicode-escape byte form (variation selectors
    stripped); the winner maximises cos(e_b - e_a, e_w - e_c) over all
    candidates except the three inputs. Prints the top-5 list and the
    analogy equation; returns None.
    """
    a = convert_emoji_to_unicode(a)
    b = convert_emoji_to_unicode(b)
    c = convert_emoji_to_unicode(c)
    e_a, e_b, e_c = vec_map[a], vec_map[b], vec_map[c]
    max_cosine_sim = -100
    best = None
    best_list = {}
    for v in vec_map.keys():
        # The best match shouldn't be one of the inputs, so pass on them.
        if v in [a, b, c]:
            continue
        # Compute cosine similarity between the vector (e_b - e_a) and the vector ((w's vector representation) - e_c)
        cosine_sim = cosine_similarity(e_b - e_a, vec_map[v] - e_c)
        best_list[v] = cosine_sim
        if cosine_sim > max_cosine_sim:
            max_cosine_sim = cosine_sim
            best = v
    sorted_keys = sorted(best_list, key=best_list.get, reverse=True)
    print("Top 5 most similar emojis: ", [r.decode('unicode-escape') for r in sorted_keys[:5]])
    print(str.format('{} - {} + {} = {}', a.decode('unicode-escape'), b.decode('unicode-escape'),
                     c.decode('unicode-escape'), best.decode('unicode-escape')), "\n\n")
# Get the Euclidean distance between two vectors
def euclidean_distance(u_vector, v_vector):
    """Euclidean (L2) distance between two vectors, paired element-wise via zip."""
    squared_diffs = [(u - v) ** 2 for u, v in zip(u_vector, v_vector)]
    return np.sqrt(np.sum(squared_diffs))
# Given a tweet, return the scores of the most similar/dissimilar pairs of words
def get_similarity_measures(tweet, vec_map, weighted=False, verbose=True):
    """Return (max, min) pairwise similarity between the tweet's content words.

    Words are filtered to lower-cased, alphanumeric, non-stopword tokens
    present in vec_map; similarity is cosine, optionally divided by the
    pair's Euclidean distance when *weighted*. With fewer than two
    candidate words the sentinels (-100, 100) are returned unchanged.
    """
    # Filter a bit the tweet so that no punctuation and no stopwords are included
    stopwords = data_proc.get_stopwords_list()
    filtered_tweet = list(set([w.lower() for w in tweet.split()
                               if w.isalnum() and w not in stopwords and w.lower() in vec_map.keys()]))
    # Compute similarity scores between any 2 words in filtered tweet
    similarity_scores = []
    max_words = []
    min_words = []
    max_score = -100
    min_score = 100
    for i in range(len(filtered_tweet) - 1):
        wi = filtered_tweet[i]
        for j in range(i + 1, len(filtered_tweet)):
            wj = filtered_tweet[j]
            similarity = cosine_similarity(vec_map[wi], vec_map[wj])
            if weighted:
                similarity /= euclidean_distance(vec_map[wi], vec_map[wj])
            similarity_scores.append(similarity)
            if max_score < similarity:
                max_score = similarity
                max_words = [wi, wj]
            if min_score > similarity:
                min_score = similarity
                min_words = [wi, wj]
    if verbose:
        print("Filtered tweet: ", filtered_tweet)
        if max_score != -100:
            print("Maximum similarity is ", max_score, " between words ", max_words)
        else:
            print("No max! Scores are: ", similarity_scores)
        if min_score != 100:
            print("Minimum similarity is ", min_score, " between words ", min_words)
        else:
            print("No min! Scores are: ", similarity_scores)
    return max_score, min_score
# Custom metric function adjusted from https://stackoverflow.com/questions/43547402/how-to-calculate-f1-macro-in-keras
def f1_score(y_true, y_pred):
    """Batch-wise F1 metric for Keras (multi-label classification).

    Precision and recall are computed from rounded, clipped predictions;
    adapted from https://stackoverflow.com/questions/43547402
    """
    # Shared numerator: relevant items that were also selected
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    # Recall: how many relevant items are selected
    recall = true_positives / (possible_positives + K.epsilon())
    # Precision: how many selected items are relevant
    precision = true_positives / (predicted_positives + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall))
# This code allows you to see the mislabelled examples
def analyse_mislabelled_examples(x_test, y_test, y_pred):
    """Print every test example whose argmax prediction disagrees with its gold label."""
    for i, gold in enumerate(y_test):
        predicted = np.argmax(y_pred[i])
        if predicted != gold:
            print('Expected:', gold, ' but predicted ', predicted)
            print(x_test[i])
def print_statistics(y, y_pred):
    """Print weighted accuracy/precision/recall/F1 plus the full classification
    report, and return the four scores as a tuple."""
    accuracy = metrics.accuracy_score(y, y_pred)
    precision, recall, f_score = (
        scorer(y, y_pred, average='weighted')
        for scorer in (metrics.precision_score, metrics.recall_score, metrics.f1_score))
    print('Accuracy: %.3f\nPrecision: %.3f\nRecall: %.3f\nF_score: %.3f\n'
          % (accuracy, precision, recall, f_score))
    print(metrics.classification_report(y, y_pred))
    return accuracy, precision, recall, f_score
def plot_training_statistics(history, plot_name, also_plot_validation=False, acc_mode='acc', loss_mode='loss'):
    """Plot and save training accuracy and loss curves from a Keras History.

    Args:
        history: Keras ``History`` object; its ``history`` dict must contain
            ``acc_mode``/``loss_mode`` keys (and ``'val_' + key`` variants
            when ``also_plot_validation`` is True).
        plot_name: file-name stem; two files are written,
            ``<plot_name>_acc.png`` and ``<plot_name>_loss.png``.
        also_plot_validation: overlay the validation curves when True.
        acc_mode, loss_mode: history keys for accuracy/loss (Keras 2.3+
            renamed 'acc' to 'accuracy', hence the parameters).

    NOTE(review): output files are written under the module-level global
    ``path`` (defined elsewhere in this file) — confirm it ends with a
    separator, since it is concatenated with plot_name directly.
    """
    # Plot Accuracy
    plt.figure()
    plt.plot(history.history[acc_mode], 'k-', label='Training Accuracy')
    if also_plot_validation:
        plt.plot(history.history['val_' + acc_mode], 'r--', label='Validation Accuracy')
        plt.title('Training vs Validation Accuracy')
    else:
        plt.title('Training Accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(loc='center right')
    plt.ylim([0.0, 1.0])
    plt.savefig(path + plot_name + "_acc.png")
    print("Plot for accuracy saved to %s" % (path + plot_name + "_acc.png"))
    # Plot Loss (new figure so the curves are not drawn over the accuracy axes)
    plt.figure()
    plt.plot(history.history[loss_mode], 'k-', label='Training Loss')
    if also_plot_validation:
        plt.plot(history.history['val_' + loss_mode], 'r--', label='Validation Loss')
        plt.title('Training vs Validation Loss')
    else:
        plt.title('Training Loss ')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(loc='center right')
    plt.savefig(path + plot_name + "_loss.png")
    print("Plot for loss saved to %s" % (path + plot_name + "_loss.png"))
# This is used to plot the coefficients that have the greatest impact on a classifier like SVM
# feature_names = a dictionary of indices/feature representations to words (or whatever you're extracting features from)
def plot_coefficients(classifier, feature_names, top_features=20, plot_name="/bow_models/bow_binary_"):
# Get the top most positive/negative coefficients
coef = classifier.coef_.ravel()
top_positive_coefficients = np.argsort(coef)[-top_features:]
top_negative_coefficients = np.argsort(coef)[:top_features]
top_coefficients = np.hstack([top_negative_coefficients, top_positive_coefficients])
x_names = [feature_names[feature] for feature in top_coefficients]
# Plot the coefficients
plt.figure(figsize=(15, 5))
colors = ['red' if c < 0 else 'blue' for c in coef[top_coefficients]]
plt.bar(np.arange(2 * top_features), coef[top_coefficients], color=colors)
plt.xticks( | np.arange(0, 2 * top_features) | numpy.arange |
'''
Independent Component Analysis (ICA):
This script computes ICA using the INFOMAX criteria.
The preprocessing steps include demeaning and whitening.
'''
import numpy as np
from numpy import dot
from numpy.linalg import matrix_rank, inv
from numpy.random import permutation
from scipy.linalg import eigh
# Theano Imports
import theano.tensor as T
import theano
from theano import shared
# Global constants
EPS = 1e-18
MAX_W = 1e8
ANNEAL = 0.9
MAX_STEP = 500
MIN_LRATE = 1e-6
W_STOP = 1e-6
class ica_gpu(object):
"""
Infomax ICA for one data modality
"""
    def __init__(self, n_comp=10, verbose=False):
        """Build the compiled Theano graphs for infomax weight updates and
        covariance computation.

        Args:
            n_comp: number of independent components to estimate.
            verbose: if True, optimization progress may be printed.
        """
        # Theano initialization: shared unmixing weights (identity) and bias.
        self.T_weights = shared(np.eye(n_comp, dtype=np.float32))
        self.T_bias = shared(np.ones((n_comp, 1), dtype=np.float32))
        T_p_x_white = T.fmatrix()
        T_lrate = T.fscalar()
        T_block = T.fscalar()
        # Unmixed signals and the logistic nonlinearity used by infomax.
        T_unmixed = T.dot(self.T_weights, T_p_x_white) + T.addbroadcast(self.T_bias, 1)
        T_logit = 1 - 2 / (1 + T.exp(-T_unmixed))
        # Natural-gradient style weight update (scaled by the block size).
        T_out = self.T_weights + T_lrate * \
            T.dot(T_block * T.identity_like(self.T_weights) + T.dot(T_logit, T.transpose(T_unmixed)), self.T_weights)
        T_bias_out = self.T_bias + T_lrate * T.reshape(T_logit.sum(axis=1), (-1, 1))
        # Diagnostics returned per update: max weight and NaN flag (blow-up detection).
        T_max_w = T.max(self.T_weights)
        T_isnan = T.any(T.isnan(self.T_weights))
        self.w_up_fun = theano.function([T_p_x_white, T_lrate, T_block],
                                        [T_max_w, T_isnan],
                                        updates=[(self.T_weights, T_out),
                                                 (self.T_bias, T_bias_out)],
                                        allow_input_downcast=True)
        # Separate compiled function for the (scaled) covariance X X^T / block.
        T_matrix = T.fmatrix()
        T_cov = T.dot(T_matrix, T.transpose(T_matrix))/T_block
        self.cov_fun = theano.function([T_matrix, T_block], T_cov, allow_input_downcast=True)
        # Results populated by fit/transform methods.
        self.loading = None
        self.sources = None
        self.weights = None
        self.n_comp = n_comp
        self.verbose = verbose
def __pca_whiten(self, x2d):
""" data Whitening
*Input
x2d : 2d data matrix of observations by variables
n_comp: Number of components to retain
*Output
Xwhite : Whitened X
white : whitening matrix (Xwhite = np.dot(white,X))
dewhite : dewhitening matrix (X = np.dot(dewhite,Xwhite))
"""
NSUB, NVOX = x2d.shape
x2d_demean = x2d - x2d.mean(axis=1).reshape((-1, 1))
# cov = dot(x2d_demean, x2d_demean.T) / ( NVOX -1 )
cov = self.cov_fun(x2d_demean, NVOX-1)
w, v = eigh(cov, eigvals=(NSUB-self.n_comp, NSUB-1))
D = np.diag(1./(np.sqrt(w)))
white = dot(D, v.T)
D = np.diag(np.sqrt(w))
dewhite = dot(v, D)
x_white = dot(white, x2d_demean)
return (x_white, white, dewhite)
    def __w_update(self, x_white, lrate1):
        """One infomax sweep over the whitened data, updating the shared
        GPU weights/bias in random mini-blocks.

        * Input
        x_white: whitened data (components x voxels)
        lrate1: current learning rate

        * Output
        lrate1: updated learning rate (annealed if the update blew up;
                0 on unrecoverable failure)
        error: 1 if the weights blew up and were reset, else 0
        """
        error = 0
        NVOX = x_white.shape[1]
        NCOMP = x_white.shape[0]
        # Heuristic mini-block size; data columns are visited in random order.
        block1 = int(np.floor(np.sqrt(NVOX / 3)))
        permute1 = permutation(NVOX)
        p_x_white = x_white[:, permute1].astype(np.float32)
        for start in range(0, NVOX, block1):
            if start + block1 < NVOX:
                tt2 = start + block1
            else:
                tt2 = NVOX
                block1 = NVOX - start
            max_w, isnan = self.w_up_fun(p_x_white[:, start:tt2], lrate1, block1)
            # Checking if W blows up
            if isnan or max_w > MAX_W:
                # print("Numeric error! restarting with lower learning rate")
                # Anneal the learning rate and restart from identity weights.
                # NOTE(review): bias restarts at zeros while __init__ used
                # ones — confirm this asymmetry is intentional.
                lrate1 = lrate1 * ANNEAL
                self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
                self.T_bias.set_value(np.zeros((NCOMP, 1), dtype=np.float32))
                error = 1
                if lrate1 > 1e-6 and \
                   matrix_rank(x_white) < NCOMP:
                    # print("Data 1 is rank defficient"
                    #       ". I cannot compute " +
                    #       str(NCOMP) + " components.")
                    return (0, 1)
                if lrate1 < 1e-6:
                    # print("Weight matrix may"
                    #       " not be invertible...")
                    return (0, 1)
        return(lrate1, error)
def __infomax(self, x_white):
"""Computes ICA infomax in whitened data
Decomposes x_white as x_white=AS
*Input
x_white: whitened data (Use PCAwhiten)
verbose: flag to print optimization updates
*Output
A : mixing matrix
S : source matrix
W : unmixing matrix
"""
NCOMP = self.n_comp
# Initialization
self.T_weights.set_value(np.eye(NCOMP, dtype=np.float32))
weights = | np.eye(NCOMP) | numpy.eye |
import numpy as np
import matplotlib.pyplot as plt
# Read forward Euler data
class ConvergenceData(object):
def __init__(self, filename=None):
self.data = {"cfl": [],
"f_ee error": [],
"f_xx error": [],
"f_eebar error": [],
"f_xxbar error": []}
if filename:
self.readfrom(filename)
def readfrom(self, filename):
f = open(filename, "r")
while True:
entry = [f.readline().strip() for i in range(9)]
if not entry[0]:
break
for line in entry:
ls = line.split(":")
name = ls[0].strip()
value = ls[-1].strip()
for k in self.data.keys():
if name == k:
self.data[k].append(float(value))
f.close()
for k in self.data.keys():
self.data[k] = np.array(self.data[k])
    def get(self, key):
        """Return the stored samples for *key* (raises KeyError if unknown)."""
        return self.data[key]
    def keys(self):
        """Return a view of all tracked quantity names (including 'cfl')."""
        return self.data.keys()
def error_keys(self):
return [k for k in self.data.keys() if k != "cfl"]
def average_convergence(self, key):
# get the average convergence order for the keyed quantity
err = self.get(key)
cfl = self.get("cfl")
orders = []
for i in range(len(err)-1):
order = np.log10(err[i+1]/err[i]) / np.log10(cfl[i+1]/cfl[i])
orders.append(order)
orders = np.array(orders)
order_average = np.average(orders)
return order_average
def plot_on_axis(self, axis, key, label, color):
log_cfl = np.log10(self.get("cfl"))
log_err = np.log10(self.get(key))
axis.plot(log_cfl, log_err, label=label, marker="o", linestyle="None", color=color)
order = self.average_convergence(key)
iMaxErr = | np.argmax(log_err) | numpy.argmax |
"""
Monitoring algorithms for Quicklook pipeline
"""
import numpy as np
import scipy.ndimage
import yaml
from lvmspec.quicklook.qas import MonitoringAlg, QASeverity
from lvmspec.quicklook import qlexceptions
from lvmspec.quicklook import qllogger
import os,sys
import datetime
from astropy.time import Time
from lvmspec.qa import qalib
from lvmspec.io import qa
qlog=qllogger.QLLogger("QuickLook",0)
log=qlog.getlog()
def qlf_post(qadict):
    """
    A general function to HTTP post the QA output dictionary, intended for QLF
    requires environmental variables: QLF_API_URL, QLF_USER, QLF_PASSWD

    Args:
        qadict: returned dictionary from a QA

    Failures are logged (with traceback) and never raised: posting to QLF is
    best-effort and must not abort the pipeline.
    """
    #- Check for environment variables and set them here
    if "QLF_API_URL" in os.environ:
        qlf_url=os.environ.get("QLF_API_URL")
        if "QLF_USER" not in os.environ or "QLF_PASSWD" not in os.environ:
            log.warning("Environment variables are not set for QLF. Set QLF_USER and QLF_PASSWD.")
        else:
            qlf_user=os.environ.get("QLF_USER")
            qlf_passwd=os.environ.get("QLF_PASSWD")
            log.debug("Environment variables are set for QLF. Now trying HTTP post.")
            #- All set. Now try to HTTP post
            try:
                import requests
                response=requests.get(qlf_url)
                #- Check if the api has json
                api=response.json()
                #- proceed with post
                job={"name":"QL","status":0,"dictionary":qadict} #- QLF should disintegrate dictionary
                response=requests.post(api['job'],json=job,auth=(qlf_user,qlf_passwd))
            except Exception:
                #- was `exc_info=true` (undefined name -> NameError inside the
                #- handler) and a bare `except:`; both fixed, behavior kept
                #- best-effort.
                log.error("Skipping HTTP post... Exception",exc_info=True)
    else:
        log.warning("Skipping QLF. QLF_API_URL must be set as environment variable")
class Get_RMS(MonitoringAlg):
    """QA algorithm reporting CCD readout-noise metrics (RMS per second of
    exposure) over the whole CCD, per overscan row, and optionally per amp.
    """
    def __init__(self,name,config,logger=None):
        """Wire up result/status keys and severity ranges, then initialize
        the MonitoringAlg base with the lvmspec Image input type."""
        if name is None or name.strip() == "":
            name="RMS"
        from lvmspec.image import Image as im
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "NOISE_AMP"
        status=kwargs['statKey'] if 'statKey' in kwargs else "NOISE_STAT"
        kwargs["SAMI_RESULTKEY"]=key
        kwargs["SAMI_QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "NOISE_WARN_RANGE" in parms and "NOISE_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["NOISE_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["NOISE_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        MonitoringAlg.__init__(self,name,im,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input image, unpack keyword options, delegate to run_qa."""
        if len(args) == 0 :
            raise qlexceptions.ParameterException("Missing input parameter")
        if not self.is_compatible(type(args[0])):
            raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
        input_image=args[0]
        if "paname" not in kwargs:
            paname=None
        else:
            paname=kwargs["paname"]
        if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
        else: refmetrics=None
        amps=False
        if "amps" in kwargs:
            amps=kwargs["amps"]
        if "param" in kwargs: param=kwargs["param"]
        else: param=None
        if "qlf" in kwargs:
            qlf=kwargs["qlf"]
        else: qlf=False
        if "qafile" in kwargs: qafile = kwargs["qafile"]
        else: qafile = None
        if "qafig" in kwargs: qafig=kwargs["qafig"]
        else: qafig = None
        return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
    def run_qa(self,image,paname=None,amps=False,qafile=None, qafig=None,param=None,qlf=False, refmetrics=None):
        """Compute noise metrics for *image*, optionally write/plot/post them.

        Returns a dict with header info, PARAMS and a METRICS sub-dict.
        """
        retval={}
        retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
        retval["PANAME"] = paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["CAMERA"] = image.meta["CAMERA"]
        retval["PROGRAM"] = image.meta["PROGRAM"]
        retval["FLAVOR"] = image.meta["FLAVOR"]
        retval["NIGHT"] = image.meta["NIGHT"]
        kwargs=self.config['kwargs']
        # return rms values in rms/sqrt(exptime)
        rmsccd=qalib.getrms(image.pix/np.sqrt(image.meta["EXPTIME"])) #- should we add dark current and/or readnoise to this as well?
        if param is None:
            log.debug("Param is None. Using default param instead")
            param = {
                "NOISE_NORMAL_RANGE":[-1.0, 1.0],
                "NOISE_WARN_RANGE":[-2.0, 2.0]
                }
        retval["PARAMS"] = param
        if "REFERENCE" in kwargs:
            retval['PARAMS']['NOISE_AMP_REF']=kwargs["REFERENCE"]
        # NOTE(review): expnum is never populated before being reported as
        # EXPNUM_WARN — confirm whether this is a placeholder.
        expnum=[]
        rms_row=[]
        rms_amps=[]
        rms_over_amps=[]
        overscan_values=[]
        #- get amp/overcan boundary in pixels
        from lvmspec.preproc import _parse_sec_keyword
        for kk in ['1','2','3','4']:
            thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
            thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
            # Per-row noise over the overscan region of this amp.
            for i in range(image.pix[thisoverscanboundary].shape[0]):
                rmsrow = qalib.getrms(image.pix[thisoverscanboundary][i]/np.sqrt(image.meta["EXPTIME"]))
                rms_row.append(rmsrow)
            rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
            rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
            rms_amps.append(rms_thisamp)
            rms_over_amps.append(rms_thisover_thisamp)
        # NOTE(review): non-amps NOISE uses the max per-amp overscan rms, while
        # the amps branch below replaces it with a std over all overscan
        # pixels — confirm the asymmetry is intended.
        rmsover=np.max(rms_over_amps)
        rmsdiff_err='NORMAL'
        if amps:
            # Recompute per-amp values, this time also collecting the raw
            # overscan pixel values for a global std.
            rms_amps=[]
            rms_over_amps=[]
            overscan_values=[]
            #- get amp/overcan boundary in pixels
            from lvmspec.preproc import _parse_sec_keyword
            for kk in ['1','2','3','4']:
                thisampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
                thisoverscanboundary=_parse_sec_keyword(image.meta["BIASSEC"+kk])
                rms_thisover_thisamp=qalib.getrms(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
                thisoverscan_values=np.ravel(image.pix[thisoverscanboundary]/np.sqrt(image.meta["EXPTIME"]))
                rms_thisamp=qalib.getrms(image.pix[thisampboundary]/np.sqrt(image.meta["EXPTIME"]))
                rms_amps.append(rms_thisamp)
                rms_over_amps.append(rms_thisover_thisamp)
                overscan_values+=thisoverscan_values.tolist()
            rmsover=np.std(overscan_values)
            # retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
            retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_AMP":np.array(rms_amps),"NOISE_AMP":np.array(rms_over_amps),"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
        else:
            # retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"EXPNUM_WARN":expnum}
            retval["METRICS"]={"RMS":rmsccd,"NOISE":rmsover,"RMS_ROW":rms_row,"NOISE_STAT":rmsdiff_err,"EXPNUM_WARN":expnum}
        if qlf:
            qlf_post(retval)
        if qafile is not None:
            outfile = qa.write_qa_ql(qafile,retval)
            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            from lvmspec.qa.qa_plots_ql import plot_RMS
            plot_RMS(retval,qafig)
            log.debug("Output QA fig {}".format(qafig))
        return retval
    def get_default_config(self):
        """No algorithm-specific defaults; configuration comes from `config`."""
        return {}
class Count_Pixels(MonitoringAlg):
    """QA algorithm counting pixels above low/high sigma thresholds, over the
    whole CCD and optionally per amplifier.
    """
    def __init__(self,name,config,logger=None):
        """Wire up result/status keys and severity ranges, then initialize
        the MonitoringAlg base with the lvmspec Image input type."""
        if name is None or name.strip() == "":
            name="COUNTPIX"
        from lvmspec.image import Image as im
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "NPIX_AMP"
        status=kwargs['statKey'] if 'statKey' in kwargs else "NPIX_STAT"
        kwargs["SAMI_RESULTKEY"]=key
        kwargs["SAMI_QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "NPIX_WARN_RANGE" in parms and "NPIX_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["NPIX_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["NPIX_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        MonitoringAlg.__init__(self,name,im,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input image, unpack keyword options, delegate to run_qa."""
        if len(args) == 0 :
            raise qlexceptions.ParameterException("Missing input parameter")
        if not self.is_compatible(type(args[0])):
            raise qlexceptions.ParameterException("Incompatible input. Was expecting {} got {}".format(type(self.__inpType__),type(args[0])))
        input_image=args[0]
        if "paname" not in kwargs:
            paname=None
        else:
            paname=kwargs["paname"]
        if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
        else: refmetrics=None
        amps=False
        if "amps" in kwargs:
            amps=kwargs["amps"]
        if "param" in kwargs: param=kwargs["param"]
        else: param=None
        if "qlf" in kwargs:
            qlf=kwargs["qlf"]
        else: qlf=False
        if "qafile" in kwargs: qafile = kwargs["qafile"]
        else: qafile = None
        if "qafig" in kwargs: qafig=kwargs["qafig"]
        else: qafig = None
        return self.run_qa(input_image,paname=paname,amps=amps,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
    def run_qa(self,image,paname=None,amps=False,qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
        """Count pixels above CUTLO/CUTHI sigma thresholds; optionally per amp.

        Returns a dict with header info, PARAMS and a METRICS sub-dict.
        """
        retval={}
        retval["PANAME"] = paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
        retval["CAMERA"] = image.meta["CAMERA"]
        retval["PROGRAM"] = image.meta["PROGRAM"]
        retval["FLAVOR"] = image.meta["FLAVOR"]
        retval["NIGHT"] = image.meta["NIGHT"]
        kwargs=self.config['kwargs']
        if param is None:
            log.debug("Param is None. Using default param instead")
            param = {
                "CUTLO":3,   # low threshold for number of counts in sigmas
                "CUTHI":10,
                "NPIX_NORMAL_RANGE":[200.0, 500.0],
                "NPIX_WARN_RANGE":[50.0, 650.0]
                }
        retval["PARAMS"] = param
        if "REFERENCE" in kwargs:
            retval['PARAMS']['NPIX_AMP_REF']=kwargs["REFERENCE"]
        #- get the counts over entire CCD in counts per second
        # NOTE(review): the CCD-level counts use raw image.pix while the
        # per-amp counts below divide by EXPTIME — confirm intended.
        npixlo=qalib.countpix(image.pix,nsig=param['CUTLO']) #- above 3 sigma in counts
        npixhi=qalib.countpix(image.pix,nsig=param['CUTHI']) #- above 10 sigma in counts
        npix_err='NORMAL'
        #- get the counts for each amp
        if amps:
            npixlo_amps=[]
            npixhi_amps=[]
            #- get amp boundary in pixels
            from lvmspec.preproc import _parse_sec_keyword
            for kk in ['1','2','3','4']:
                ampboundary=_parse_sec_keyword(image.meta["CCDSEC"+kk])
                npixlo_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTLO'])
                npixlo_amps.append(npixlo_thisamp)
                npixhi_thisamp=qalib.countpix(image.pix[ampboundary]/image.meta["EXPTIME"],nsig=param['CUTHI'])
                npixhi_amps.append(npixhi_thisamp)
            # retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps}
            retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_AMP": npixlo_amps,"NPIX_HIGH_AMP": npixhi_amps,"NPIX_STAT":npix_err}
        else:
            # retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi}
            retval["METRICS"]={"NPIX_LOW":npixlo,"NPIX_HIGH":npixhi,"NPIX_STAT":npix_err}
        if qlf:
            qlf_post(retval)
        if qafile is not None:
            outfile = qa.write_qa_ql(qafile,retval)
            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            from lvmspec.qa.qa_plots_ql import plot_countpix
            plot_countpix(retval,qafig)
            log.debug("Output QA fig {}".format(qafig))
        return retval
    def get_default_config(self):
        """No algorithm-specific defaults; configuration comes from `config`."""
        return {}
class Integrate_Spec(MonitoringAlg):
    """QA algorithm integrating each fiber spectrum and comparing the implied
    magnitudes with the imaging magnitudes stored in the fibermap.
    """
    def __init__(self,name,config,logger=None):
        """Wire up result/status keys and severity ranges, then initialize
        the MonitoringAlg base with the lvmspec Frame input type."""
        if name is None or name.strip() == "":
            name="INTEG"
        from lvmspec.frame import Frame as fr
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "INTEG_AVG"
        status=kwargs['statKey'] if 'statKey' in kwargs else "MAGDIFF_STAT"
        kwargs["SAMI_RESULTKEY"]=key
        kwargs["SAMI_QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "MAGDIFF_WARN_RANGE" in parms and "MAGDIFF_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["MAGDIFF_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["MAGDIFF_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        MonitoringAlg.__init__(self,name,fr,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input frame, unpack keyword options, delegate to run_qa."""
        if len(args) == 0 :
            raise qlexceptions.ParameterException("Missing input parameter")
        if not self.is_compatible(type(args[0])):
            raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
        fibermap=kwargs['FiberMap']
        input_frame=args[0]
        if "paname" not in kwargs:
            paname=None
        else:
            paname=kwargs["paname"]
        if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
        else: refmetrics=None
        amps=False
        if "amps" in kwargs:
            amps=kwargs["amps"]
        if "param" in kwargs: param=kwargs["param"]
        else: param=None
        dict_countbins=None
        if "dict_countbins" in kwargs:
            dict_countbins=kwargs["dict_countbins"]
        if "qlf" in kwargs:
            qlf=kwargs["qlf"]
        else: qlf=False
        if "qafile" in kwargs: qafile = kwargs["qafile"]
        else: qafile = None
        if "qafig" in kwargs: qafig=kwargs["qafig"]
        else: qafig = None
        return self.run_qa(fibermap,input_frame,paname=paname,amps=amps, dict_countbins=dict_countbins, qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
    def run_qa(self,fibermap,frame,paname=None,amps=False,dict_countbins=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
        """Integrate all fiber spectra, average per target type, and compare
        spectro-derived magnitudes with imaging magnitudes (MAGDIFF metrics).

        Returns a dict with header info, PARAMS and a METRICS sub-dict.
        """
        retval={}
        retval["PANAME" ] = paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
        retval["CAMERA"] = frame.meta["CAMERA"]
        retval["PROGRAM"] = frame.meta["PROGRAM"]
        retval["FLAVOR"] = frame.meta["FLAVOR"]
        retval["NIGHT"] = frame.meta["NIGHT"]
        kwargs=self.config['kwargs']
        ra = fibermap["RA_TARGET"]
        dec = fibermap["DEC_TARGET"]
        #- get the integrals for all fibers
        flux=frame.flux
        wave=frame.wave
        integrals=np.zeros(flux.shape[0])
        for ii in range(len(integrals)):
            integrals[ii]=qalib.integrate_spec(wave,flux[ii])
        #- average integrals over fibers of each object type and get imaging magnitudes
        # NOTE(review): if no STD fibers exist, `starfibers` et al. stay
        # unbound and the METRICS construction below raises — preexisting
        # behavior, left unchanged.
        integ_avg_tgt=[]
        mag_avg_tgt=[]
        for T in ["ELG","QSO","LRG","STD"]:
            fibers=np.where(frame.fibermap['OBJTYPE']==T)[0]
            if len(fibers) < 1:
                log.warning("no {} fibers found.".format(T))
            magnitudes=frame.fibermap['MAG'][fibers]
            mag_avg=np.mean(magnitudes)
            mag_avg_tgt.append(mag_avg)
            integ=integrals[fibers]
            integ_avg=np.mean(integ)
            integ_avg_tgt.append(integ_avg)
            if T == "STD":
                starfibers=fibers
                int_stars=integ
                int_average=integ_avg
        # simple, temporary magdiff calculation (to be corrected...)
        magdiff_avg=[]
        for i in range(len(mag_avg_tgt)):
            mag_fib=-2.5*np.log(integ_avg_tgt[i]/frame.meta["EXPTIME"])+30.
            #- Bug fixes: `!= np.nan` was always True (NaN never compares
            #- equal) and the else-branch used the undefined name `nan`.
            if not np.isnan(mag_avg_tgt[i]):
                magdiff=mag_fib-mag_avg_tgt[i]
            else:
                magdiff=np.nan
            magdiff_avg.append(magdiff)
        if param is None:
            log.debug("Param is None. Using default param instead")
            param = {
                "MAGDIFF_NORMAL_RANGE":[-0.5, 0.5],
                "MAGDIFF_WARN_RANGE":[-1.0, 1.0]
                }
        retval["PARAMS"] = param
        if "REFERENCE" in kwargs:
            retval['PARAMS']['MAGDIFF_TGT_REF']=kwargs["REFERENCE"]
        magdiff_avg_amp = [0.0]
        magdiff_err='NORMAL'
        #- get the counts for each amp
        if amps:
            #- get the fiducial boundary
            leftmax = dict_countbins["LEFT_MAX_FIBER"]
            rightmin = dict_countbins["RIGHT_MIN_FIBER"]
            bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
            topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
            fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
            int_avg_amps=np.zeros(4)
            for amp in range(4):
                wave=frame.wave[fidboundary[amp][1]]
                #- standard-star fibers falling inside this amp's fiber range
                select_thisamp=starfibers[(starfibers >= fidboundary[amp][0].start) & (starfibers < fidboundary[amp][0].stop)]
                stdflux_thisamp=frame.flux[select_thisamp,fidboundary[amp][1]]
                if len(stdflux_thisamp)==0:
                    continue
                else:
                    integ_thisamp=np.zeros(stdflux_thisamp.shape[0])
                    for ii in range(stdflux_thisamp.shape[0]):
                        integ_thisamp[ii]=qalib.integrate_spec(wave,stdflux_thisamp[ii])
                    int_avg_amps[amp]=np.mean(integ_thisamp)
            retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars, "INTEG_AVG":int_average,"INTEG_AVG_AMP":int_avg_amps, "STD_FIBERID": starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_AVG_AMP":magdiff_avg_amp,"MAGDIFF_STAT":magdiff_err}
        else:
            retval["METRICS"]={"RA":ra,"DEC":dec, "INTEG":int_stars,"INTEG_AVG":int_average,"STD_FIBERID":starfibers.tolist(),"MAGDIFF_TGT":magdiff_avg,"MAGDIFF_STAT":magdiff_err}
        if qlf:
            qlf_post(retval)
        if qafile is not None:
            outfile = qa.write_qa_ql(qafile,retval)
            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            from lvmspec.qa.qa_plots_ql import plot_integral
            plot_integral(retval,qafig)
            log.debug("Output QA fig {}".format(qafig))
        return retval
    def get_default_config(self):
        """No algorithm-specific defaults; configuration comes from `config`."""
        return {}
class Sky_Continuum(MonitoringAlg):
    """QA algorithm measuring the sky-continuum level in two camera-dependent
    wavelength windows, per sky fiber and optionally per amplifier.
    """
    def __init__(self,name,config,logger=None):
        """Wire up result/status keys and severity ranges, then initialize
        the MonitoringAlg base with the lvmspec Frame input type."""
        if name is None or name.strip() == "":
            name="SKYCONT"
        from lvmspec.frame import Frame as fr
        kwargs=config['kwargs']
        parms=kwargs['param']
        key=kwargs['refKey'] if 'refKey' in kwargs else "SKYCONT"
        status=kwargs['statKey'] if 'statKey' in kwargs else "SKYCONT_STAT"
        kwargs["SAMI_RESULTKEY"]=key
        kwargs["SAMI_QASTATUSKEY"]=status
        if "ReferenceMetrics" in kwargs:
            r=kwargs["ReferenceMetrics"]
            if key in r:
                kwargs["REFERENCE"]=r[key]
        if "SKYCONT_WARN_RANGE" in parms and "SKYCONT_NORMAL_RANGE" in parms:
            kwargs["RANGES"]=[(np.asarray(parms["SKYCONT_WARN_RANGE"]),QASeverity.WARNING),
                              (np.asarray(parms["SKYCONT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
        MonitoringAlg.__init__(self,name,fr,config,logger)
    def run(self,*args,**kwargs):
        """Validate the input frame, resolve per-camera wavelength windows,
        unpack keyword options and delegate to run_qa."""
        if len(args) == 0 :
            raise qlexceptions.ParameterException("Missing input parameter")
        if not self.is_compatible(type(args[0])):
            raise qlexceptions.ParameterException("Incompatible input. Was expecting {}, got {}".format(type(self.__inpType__),type(args[0])))
        fibermap=kwargs['FiberMap']
        input_frame=args[0]
        camera=input_frame.meta["CAMERA"]
        wrange1=None
        wrange2=None
        if "wrange1" in kwargs:
            wrange1=kwargs["wrange1"]
        if "wrange2" in kwargs:
            wrange2=kwargs["wrange2"]
        # Per-arm default continuum windows (Angstrom) keyed on the first
        # character of the camera name (b/r/z).
        if wrange1==None:
            if camera[0]=="b": wrange1= "4000,4500"
            if camera[0]=="r": wrange1= "5950,6200"
            if camera[0]=="z": wrange1= "8120,8270"
        if wrange2==None:
            if camera[0]=="b": wrange2= "5250,5550"
            if camera[0]=="r": wrange2= "6990,7230"
            if camera[0]=="z": wrange2= "9110,9280"
        paname=None
        if "paname" in kwargs:
            paname=kwargs["paname"]
        if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
        else: refmetrics=None
        amps=False
        if "amps" in kwargs:
            amps=kwargs["amps"]
        if "param" in kwargs: param=kwargs["param"]
        else: param=None
        dict_countbins=None
        if "dict_countbins" in kwargs:
            dict_countbins=kwargs["dict_countbins"]
        if "qlf" in kwargs:
            qlf=kwargs["qlf"]
        else: qlf=False
        if "qafile" in kwargs: qafile = kwargs["qafile"]
        else: qafile = None
        if "qafig" in kwargs: qafig=kwargs["qafig"]
        else: qafig=None
        return self.run_qa(fibermap,input_frame,wrange1=wrange1,wrange2=wrange2,paname=paname,amps=amps, dict_countbins=dict_countbins,qafile=qafile,qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
    def run_qa(self,fibermap,frame,wrange1=None,wrange2=None,
               paname=None,amps=False,dict_countbins=None,
               qafile=None,qafig=None, param=None, qlf=False,
               refmetrics=None):
        """Compute sky-continuum metrics; optionally write/plot/post them.

        Returns a dict with header info, PARAMS and a METRICS sub-dict.
        """
        #- qa dictionary
        retval={}
        retval["PANAME" ]= paname
        retval["QATIME"] = datetime.datetime.now().isoformat()
        retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
        retval["CAMERA"] = frame.meta["CAMERA"]
        retval["PROGRAM"] = frame.meta["PROGRAM"]
        retval["FLAVOR"] = frame.meta["FLAVOR"]
        retval["NIGHT"] = frame.meta["NIGHT"]
        kwargs=self.config['kwargs']
        ra = fibermap["RA_TARGET"]
        dec = fibermap["DEC_TARGET"]
        if param is None:
            log.debug("Param is None. Using default param instead")
            from lvmspec.io import read_params
            desi_params = read_params()
            param = {}
            for key in ['B_CONT','R_CONT', 'Z_CONT', 'SKYCONT_WARN_RANGE', 'SKYCONT_ALARM_RANGE']:
                param[key] = desi_params['qa']['skysub']['PARAMS'][key]
        retval["PARAMS"] = param
        if "REFERENCE" in kwargs:
            retval['PARAMS']['SKYCONT_REF']=kwargs["REFERENCE"]
        skyfiber, contfiberlow, contfiberhigh, meancontfiber, skycont = qalib.sky_continuum(
            frame, wrange1, wrange2)
        skycont_err = 'NORMAL'
        if amps:
            # Split the sky fibers at the left/right amp boundary and average
            # the low/high window continua on each side.
            leftmax = dict_countbins["LEFT_MAX_FIBER"]
            rightmin = dict_countbins["RIGHT_MIN_FIBER"]
            bottommax = dict_countbins["BOTTOM_MAX_WAVE_INDEX"]
            topmin = dict_countbins["TOP_MIN_WAVE_INDEX"]
            fidboundary = qalib.slice_fidboundary(frame,leftmax,rightmin,bottommax,topmin)
            k1=np.where(skyfiber < fidboundary[0][0].stop)[0]
            maxsky_index=max(k1)
            contamp1=np.mean(contfiberlow[:maxsky_index])
            contamp3=np.mean(contfiberhigh[:maxsky_index])
            if fidboundary[1][0].start >=fidboundary[0][0].stop:
                k2=np.where(skyfiber > fidboundary[1][0].start)[0]
                minsky_index=min(k2)
                contamp2=np.mean(contfiberlow[minsky_index:])
                contamp4=np.mean(contfiberhigh[minsky_index:])
            else:
                contamp2=0
                contamp4=0
            skycont_amps=np.array((contamp1,contamp2,contamp3,contamp4)) #- in four amps regions
            # retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps}
            retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_AMP":skycont_amps, "SKYCONT_STAT":skycont_err}
        else:
            # retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber}
            retval["METRICS"]={"RA":ra,"DEC":dec, "SKYFIBERID": skyfiber.tolist(), "SKYCONT":skycont, "SKYCONT_FIBER":meancontfiber, "SKYCONT_STAT":skycont_err}
        if qlf:
            qlf_post(retval)
        if qafile is not None:
            outfile = qa.write_qa_ql(qafile,retval)
            log.debug("Output QA data is in {}".format(outfile))
        if qafig is not None:
            from lvmspec.qa.qa_plots_ql import plot_sky_continuum
            plot_sky_continuum(retval,qafig)
            log.debug("Output QA fig {}".format(qafig))
        return retval
    def get_default_config(self):
        """No algorithm-specific defaults; configuration comes from `config`."""
        return {}
class Sky_Peaks(MonitoringAlg):
def __init__(self,name,config,logger=None):
if name is None or name.strip() == "":
name="SKYPEAK"
from lvmspec.frame import Frame as fr
kwargs=config['kwargs']
parms=kwargs['param']
key=kwargs['refKey'] if 'refKey' in kwargs else "PEAKCOUNT_MED_SKY"
status=kwargs['statKey'] if 'statKey' in kwargs else "PEAKCOUNT_STAT"
kwargs["SAMI_RESULTKEY"]=key
kwargs["SAMI_QASTATUSKEY"]=status
if "ReferenceMetrics" in kwargs:
r=kwargs["ReferenceMetrics"]
if key in r:
kwargs["REFERENCE"]=r[key]
if "PEAKCOUNT_WARN_RANGE" in parms and "PEAKCOUNT_NORMAL_RANGE" in parms:
kwargs["RANGES"]=[(np.asarray(parms["PEAKCOUNT_WARN_RANGE"]),QASeverity.WARNING),
(np.asarray(parms["PEAKCOUNT_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
MonitoringAlg.__init__(self,name,fr,config,logger)
def run(self,*args,**kwargs):
if len(args) == 0 :
raise qlexceptions.ParameterException("Missing input parameter")
if not self.is_compatible(type(args[0])):
raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image, got {}".format(type(args[0])))
fibermap=kwargs['FiberMap']
input_frame=args[0]
if "paname" not in kwargs:
paname=None
else:
paname=kwargs["paname"]
if "ReferenceMetrics" in kwargs: refmetrics=kwargs["ReferenceMetrics"]
else: refmetrics=None
amps=False
if "amps" in kwargs:
amps=kwargs["amps"]
if "param" in kwargs: param=kwargs["param"]
else: param=None
psf = None
if "PSFFile" in kwargs:
psf=kwargs["PSFFile"]
if "qlf" in kwargs:
qlf=kwargs["qlf"]
else: qlf=False
if "qafile" in kwargs: qafile = kwargs["qafile"]
else: qafile = None
if "qafig" in kwargs:
qafig=kwargs["qafig"]
else: qafig = None
return self.run_qa(fibermap,input_frame,paname=paname,amps=amps,psf=psf, qafile=qafile, qafig=qafig, param=param, qlf=qlf, refmetrics=refmetrics)
def run_qa(self,fibermap,frame,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
    """Compute sky-peak count QA metrics for a frame.

    Args:
        fibermap: fibermap table; its RA_TARGET/DEC_TARGET columns are copied
            into the output METRICS dict.
        frame: frame object whose ``meta`` header supplies EXPID, CAMERA,
            PROGRAM, FLAVOR and NIGHT.
        paname: name of the pipeline algorithm this QA is attached to.
        amps: if True, ``sky_peaks`` computes per-amplifier quantities.
        psf: accepted for interface symmetry with sibling QA methods; unused here.
        qafile: optional path to write the QA results to.
        qafig: optional path to write the QA figure to.
        param: QA parameter dict; when None, the default 'skypeaks' PARAMS from
            lvmspec.io.read_params() are used.
        qlf: if True, post the results to QLF via ``qlf_post``.
        refmetrics: accepted for interface symmetry with sibling QA methods; unused here.

    Returns:
        dict with header keywords, PARAMS, and a METRICS sub-dict of
        peak-count statistics.
    """
    from lvmspec.qa.qalib import sky_peaks
    retval={}
    retval["PANAME"] = paname
    retval["QATIME"] = datetime.datetime.now().isoformat()
    retval["EXPID"] = '{0:08d}'.format(frame.meta["EXPID"])
    retval["CAMERA"] = camera = frame.meta["CAMERA"]
    retval["PROGRAM"] = frame.meta["PROGRAM"]
    retval["FLAVOR"] = frame.meta["FLAVOR"]
    retval["NIGHT"] = frame.meta["NIGHT"]
    kwargs=self.config['kwargs']
    ra = fibermap["RA_TARGET"]
    dec = fibermap["DEC_TARGET"]
    # Parameters
    if param is None:
        log.info("Param is None. Using default param instead")
        from lvmspec.io import read_params
        desi_params = read_params()
        param = desi_params['qa']['skypeaks']['PARAMS']
    # Run
    nspec_counts, sky_counts = sky_peaks(param, frame, amps=amps)
    rms_nspec = qalib.getrms(nspec_counts)
    rms_skyspec = qalib.getrms(sky_counts)
    # NOTE(review): sumcount_med_sky is never filled, so PEAKCOUNT_MED_SKY is
    # always an empty list in the output -- confirm whether this is intended.
    sumcount_med_sky=[]
    retval["PARAMS"] = param
    # NOTE(review): retval['PARAMS'] is the same dict object as `param`, so the
    # caller's param dict is mutated here when a REFERENCE is configured.
    if "REFERENCE" in kwargs:
        retval['PARAMS']['PEAKCOUNT_REF']=kwargs["REFERENCE"]
    # retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec}
    # Status flag for the peak-count metric (always NORMAL in this method).
    sumcount_err='NORMAL'
    retval["METRICS"]={"RA":ra,"DEC":dec, "PEAKCOUNT":nspec_counts,"PEAKCOUNT_RMS":rms_nspec,"PEAKCOUNT_MED_SKY":sumcount_med_sky,"PEAKCOUNT_RMS_SKY":rms_skyspec,"PEAKCOUNT_STAT":sumcount_err}
    if qlf:
        qlf_post(retval)
    if qafile is not None:
        outfile = qa.write_qa_ql(qafile,retval)
        log.debug("Output QA data is in {}".format(outfile))
    if qafig is not None:
        from lvmspec.qa.qa_plots_ql import plot_sky_peaks
        plot_sky_peaks(retval,qafig)
        log.debug("Output QA fig {}".format(qafig))
    return retval
def get_default_config(self):
    """Return the default configuration for this QA algorithm (none required)."""
    return dict()
class Calc_XWSigma(MonitoringAlg):
def __init__(self,name,config,logger=None):
    """Set up the XWSIGMA monitoring algorithm.

    Args:
        name: algorithm name; falls back to "XWSIGMA" when None or blank.
        config: configuration dict; config['kwargs'] must contain 'param' and
            may contain 'refKey', 'statKey' and 'ReferenceMetrics'.
        logger: optional logger forwarded to MonitoringAlg.
    """
    if name is None or name.strip() == "":
        name="XWSIGMA"
    from lvmspec.image import Image as im
    kwargs=config['kwargs']
    parms=kwargs['param']
    # Result/status keys consumed by the QA framework; default to the
    # sky-fiber W-sigma median.
    key=kwargs['refKey'] if 'refKey' in kwargs else "WSIGMA_MED_SKY"
    status=kwargs['statKey'] if 'statKey' in kwargs else "XWSIGMA_STAT"
    kwargs["SAMI_RESULTKEY"]=key
    kwargs["SAMI_QASTATUSKEY"]=status
    # Pull the reference value for `key` out of any supplied reference metrics.
    if "ReferenceMetrics" in kwargs:
        r=kwargs["ReferenceMetrics"]
        if key in r:
            kwargs["REFERENCE"]=r[key]
    # Severity thresholds for the status check, widest (most severe) first.
    if "XWSIGMA_WARN_RANGE" in parms and "XWSIGMA_NORMAL_RANGE" in parms:
        kwargs["RANGES"]=[(np.asarray(parms["XWSIGMA_WARN_RANGE"]),QASeverity.WARNING),
                          (np.asarray(parms["XWSIGMA_NORMAL_RANGE"]),QASeverity.NORMAL)]# sorted by most severe to least severe
    MonitoringAlg.__init__(self,name,im,config,logger)
def run(self,*args,**kwargs):
    """Validate the input image, unpack keyword options and delegate to run_qa."""
    if len(args) == 0 :
        raise qlexceptions.ParameterException("Missing input parameter")
    if not self.is_compatible(type(args[0])):
        raise qlexceptions.ParameterException("Incompatible parameter type. Was expecting lvmspec.image.Image got {}".format(type(args[0])))
    # Required inputs: a FiberMap keyword (KeyError if absent, as before) and
    # the image positional argument.
    fibermap = kwargs['FiberMap']
    input_image = args[0]
    # Optional keyword arguments with their historical defaults.
    paname = kwargs.get("paname")
    refmetrics = kwargs.get("ReferenceMetrics")
    amps = kwargs.get("amps", False)
    param = kwargs.get("param")
    psf = kwargs.get("PSFFile")
    qlf = kwargs.get("qlf", False)
    qafile = kwargs.get("qafile")
    qafig = kwargs.get("qafig")
    return self.run_qa(fibermap, input_image, paname=paname, amps=amps, psf=psf,
                       qafile=qafile, qafig=qafig, param=param, qlf=qlf,
                       refmetrics=refmetrics)
def run_qa(self,fibermap,image,paname=None,amps=False,psf=None, qafile=None,qafig=None, param=None, qlf=False, refmetrics=None):
from scipy.optimize import curve_fit
retval={}
retval["PANAME"] = paname
retval["QATIME"] = datetime.datetime.now().isoformat()
retval["EXPID"] = '{0:08d}'.format(image.meta["EXPID"])
retval["CAMERA"] = camera = image.meta["CAMERA"]
retval["PROGRAM"] = image.meta["PROGRAM"]
retval["FLAVOR"] = image.meta["FLAVOR"]
retval["NIGHT"] = image.meta["NIGHT"]
kwargs=self.config['kwargs']
ra = fibermap["RA_TARGET"]
dec = fibermap["DEC_TARGET"]
if param is None:
log.debug("Param is None. Using default param instead")
if image.meta["FLAVOR"] == 'arc':
param = {
"B_PEAKS":[4047.7, 4359.6, 5087.2],
"R_PEAKS":[6144.8, 6508.3, 6600.8, 6718.9, 6931.4, 7034.4,],
"Z_PEAKS":[8379.9, 8497.7, 8656.8, 8783.0],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
else:
param = {
"B_PEAKS":[3914.4, 5199.3, 5578.9],
"R_PEAKS":[6301.9, 6365.4, 7318.2, 7342.8, 7371.3],
"Z_PEAKS":[8401.5, 8432.4, 8467.5, 9479.4, 9505.6, 9521.8],
"XWSIGMA_NORMAL_RANGE":[-2.0, 2.0],
"XWSIGMA_WARN_RANGE":[-4.0, 4.0]
}
dw=2.
dp=3
b_peaks=param['B_PEAKS']
r_peaks=param['R_PEAKS']
z_peaks=param['Z_PEAKS']
if fibermap["OBJTYPE"][0] == 'ARC':
import lvmspec.psf
psf=lvmspec.psf.PSF(psf)
xsigma=[]
wsigma=[]
xsigma_sky=[]
wsigma_sky=[]
xsigma_amp1=[]
wsigma_amp1=[]
xsigma_amp2=[]
wsigma_amp2=[]
xsigma_amp3=[]
wsigma_amp3=[]
xsigma_amp4=[]
wsigma_amp4=[]
if fibermap['FIBER'].shape[0] >= 500:
fibers = 500
else:
fibers = fibermap['FIBER'].shape[0]
for i in range(fibers):
if camera[0]=="b":
peak_wave=np.array([b_peaks[0]-dw,b_peaks[0]+dw,b_peaks[1]-dw,b_peaks[1]+dw,b_peaks[2]-dw,b_peaks[2]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsig=np.array([xsigma1,xsigma2,xsigma3])
wsig=np.array([wsigma1,wsigma2,wsigma3])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="r":
peak_wave=np.array([r_peaks[0]-dw,r_peaks[0]+dw,r_peaks[1]-dw,r_peaks[1]+dw,r_peaks[2]-dw,r_peaks[2]+dw,r_peaks[3]-dw,r_peaks[3]+dw,r_peaks[4]-dw,r_peaks[4]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpix_peak5=np.arange(int(round(xpix[8]))-dp,int(round(xpix[9]))+dp+1,1)
ypix_peak5=np.arange(int(round(ypix[8])),int(round(ypix[9])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xpopt5,xpcov5=curve_fit(qalib.gauss,np.arange(len(xpix_peak5)),image.pix[int(np.mean(ypix_peak5)),xpix_peak5])
wpopt5,wpcov5=curve_fit(qalib.gauss,np.arange(len(ypix_peak5)),image.pix[ypix_peak5,int(np.mean(xpix_peak5))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsigma5=np.abs(xpopt5[2])
wsigma5=np.abs(wpopt5[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4,xsigma5])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4,wsigma5])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if camera[0]=="z":
peak_wave=np.array([z_peaks[0]-dw,z_peaks[0]+dw,z_peaks[1]-dw,z_peaks[1]+dw,z_peaks[2]-dw,z_peaks[2]+dw,z_peaks[3]-dw,z_peaks[3]+dw])
xpix=psf.x(ispec=i,wavelength=peak_wave)
ypix=psf.y(ispec=i,wavelength=peak_wave)
xpix_peak1=np.arange(int(round(xpix[0]))-dp,int(round(xpix[1]))+dp+1,1)
ypix_peak1=np.arange(int(round(ypix[0])),int(round(ypix[1])),1)
xpix_peak2=np.arange(int(round(xpix[2]))-dp,int(round(xpix[3]))+dp+1,1)
ypix_peak2=np.arange(int(round(ypix[2])),int(round(ypix[3])),1)
xpix_peak3=np.arange(int(round(xpix[4]))-dp,int(round(xpix[5]))+dp+1,1)
ypix_peak3=np.arange(int(round(ypix[4])),int(round(ypix[5])),1)
xpix_peak4=np.arange(int(round(xpix[6]))-dp,int(round(xpix[7]))+dp+1,1)
ypix_peak4=np.arange(int(round(ypix[6])),int(round(ypix[7])),1)
xpopt1,xpcov1=curve_fit(qalib.gauss,np.arange(len(xpix_peak1)),image.pix[int(np.mean(ypix_peak1)),xpix_peak1])
wpopt1,wpcov1=curve_fit(qalib.gauss,np.arange(len(ypix_peak1)),image.pix[ypix_peak1,int(np.mean(xpix_peak1))])
xpopt2,xpcov2=curve_fit(qalib.gauss,np.arange(len(xpix_peak2)),image.pix[int(np.mean(ypix_peak2)),xpix_peak2])
wpopt2,wpcov2=curve_fit(qalib.gauss,np.arange(len(ypix_peak2)),image.pix[ypix_peak2,int(np.mean(xpix_peak2))])
xpopt3,xpcov3=curve_fit(qalib.gauss,np.arange(len(xpix_peak3)),image.pix[int(np.mean(ypix_peak3)),xpix_peak3])
wpopt3,wpcov3=curve_fit(qalib.gauss,np.arange(len(ypix_peak3)),image.pix[ypix_peak3,int(np.mean(xpix_peak3))])
xpopt4,xpcov4=curve_fit(qalib.gauss,np.arange(len(xpix_peak4)),image.pix[int(np.mean(ypix_peak4)),xpix_peak4])
wpopt4,wpcov4=curve_fit(qalib.gauss,np.arange(len(ypix_peak4)),image.pix[ypix_peak4,int(np.mean(xpix_peak4))])
xsigma1=np.abs(xpopt1[2])
wsigma1=np.abs(wpopt1[2])
xsigma2=np.abs(xpopt2[2])
wsigma2=np.abs(wpopt2[2])
xsigma3=np.abs(xpopt3[2])
wsigma3=np.abs(wpopt3[2])
xsigma4=np.abs(xpopt4[2])
wsigma4=np.abs(wpopt4[2])
xsig=np.array([xsigma1,xsigma2,xsigma3,xsigma4])
wsig=np.array([wsigma1,wsigma2,wsigma3,wsigma4])
xsigma_avg=np.mean(xsig)
wsigma_avg=np.mean(wsig)
xsigma.append(xsigma_avg)
wsigma.append(wsigma_avg)
if fibermap['OBJTYPE'][i]=='SKY':
xsigma_sky=xsigma
wsigma_sky=wsigma
if amps:
if fibermap['FIBER'][i]<240:
if camera[0]=="b":
xsig_amp1=np.array([xsigma1])
xsig_amp3=np.array([xsigma2,xsigma3])
wsig_amp1=np.array([wsigma1])
wsig_amp3=np.array([wsigma2,wsigma3])
if camera[0]=="r":
xsig_amp1=np.array([xsigma1,xsigma2])
xsig_amp3=np.array([xsigma3,xsigma4,xsigma5])
wsig_amp1=np.array([wsigma1,wsigma2])
wsig_amp3=np.array([wsigma3,wsigma4,wsigma5])
if camera[0]=="z":
xsig_amp1=np.array([xsigma1,xsigma2,xsigma3])
xsig_amp3=np.array([xsigma4])
wsig_amp1=np.array([wsigma1,wsigma2,wsigma3])
wsig_amp3=np.array([wsigma4])
xsigma_amp1.append(xsig_amp1)
wsigma_amp1.append(wsig_amp1)
xsigma_amp3.append(xsig_amp3)
wsigma_amp3.append(wsig_amp3)
if fibermap['FIBER'][i]>260:
if camera[0]=="b":
xsig_amp2=np.array([xsigma1])
xsig_amp4=np.array([xsigma2,xsigma3])
wsig_amp2=np.array([wsigma1])
wsig_amp4=np.array([wsigma2,wsigma3])
if camera[0]=="r":
xsig_amp2=np.array([xsigma1,xsigma2])
xsig_amp4=np.array([xsigma3,xsigma4,xsigma5])
wsig_amp2=np.array([wsigma1,wsigma2])
wsig_amp4=np.array([wsigma3,wsigma4,wsigma5])
if camera[0]=="z":
xsig_amp2=np.array([xsigma1,xsigma2,xsigma3])
xsig_amp4=np.array([xsigma4])
wsig_amp2=np.array([wsigma1,wsigma2,wsigma3])
wsig_amp4=np.array([wsigma4])
xsigma_amp2.append(xsig_amp2)
wsigma_amp2.append(wsig_amp2)
xsigma_amp4.append(xsig_amp4)
wsigma_amp4.append(wsig_amp4)
if fibermap['FIBER'].shape[0]<260:
xsigma_amp2=np.zeros(len(xsigma))
xsigma_amp4=np.zeros(len(xsigma))
wsigma_amp2=np.zeros(len(wsigma))
wsigma_amp4=np.zeros(len(wsigma))
xsigma=np.array(xsigma)
wsigma=np.array(wsigma)
xsigma_med=np.median(xsigma)
wsigma_med=np.median(wsigma)
xsigma_med_sky=np.median(xsigma_sky)
wsigma_med_sky=np.median(wsigma_sky)
xwsigma=np.array([xsigma_med_sky,wsigma_med_sky])
xamp1_med=np.median(xsigma_amp1)
xamp2_med=np.median(xsigma_amp2)
xamp3_med=np.median(xsigma_amp3)
xamp4_med=np.median(xsigma_amp4)
wamp1_med=np.median(wsigma_amp1)
wamp2_med=np.median(wsigma_amp2)
wamp3_med=np.median(wsigma_amp3)
wamp4_med=np.median(wsigma_amp4)
xsigma_amp=np.array([xamp1_med,xamp2_med,xamp3_med,xamp4_med])
wsigma_amp= | np.array([wamp1_med,wamp2_med,wamp3_med,wamp4_med]) | numpy.array |
#!/usr/bin/env python
# coding: utf-8
# ## 卷积神经网络(Convolutional Neural Network, CNN)
#
# ## 项目:实现一个狗品种识别算法App
#
# 在这个notebook文件中,有些模板代码已经提供给你,但你还需要实现更多的功能来完成这个项目。除非有明确要求,你无须修改任何已给出的代码。以**'(练习)'**开始的标题表示接下来的代码部分中有你需要实现的功能。这些部分都配有详细的指导,需要实现的部分也会在注释中以'TODO'标出。请仔细阅读所有的提示。
#
# 除了实现代码外,你还**需要**回答一些与项目及代码相关的问题。每个需要回答的问题都会以 **'问题 X'** 标记。请仔细阅读每个问题,并且在问题后的 **'回答'** 部分写出完整的答案。我们将根据 你对问题的回答 和 撰写代码实现的功能 来对你提交的项目进行评分。
#
# >**提示:**Code 和 Markdown 区域可通过 **Shift + Enter** 快捷键运行。此外,Markdown可以通过双击进入编辑模式。
#
#
# 项目中显示为_选做_的部分可以帮助你的项目脱颖而出,而不是仅仅达到通过的最低要求。如果你决定追求更高的挑战,请在此 notebook 中完成_选做_部分的代码。
#
# ---
#
# ### 让我们开始吧
# 在这个notebook中,你将迈出第一步,来开发可以作为移动端或 Web应用程序一部分的算法。在这个项目的最后,你的程序将能够把用户提供的任何一个图像作为输入。如果可以从图像中检测到一只狗,它会输出对狗品种的预测。如果图像中是一个人脸,它会预测一个与其最相似的狗的种类。下面这张图展示了完成项目后可能的输出结果。(……实际上我们希望每个学生的输出结果不相同!)
#
# 
#
# 在现实世界中,你需要拼凑一系列的模型来完成不同的任务;举个例子,用来预测狗种类的算法会与预测人类的算法不同。在做项目的过程中,你可能会遇到不少失败的预测,因为并不存在完美的算法和模型。你最终提交的不完美的解决方案也一定会给你带来一个有趣的学习经验!
#
# ### 项目内容
#
# 我们将这个notebook分为不同的步骤,你可以使用下面的链接来浏览此notebook。
#
# * [Step 0](#step0): 导入数据集
# * [Step 1](#step1): 检测人脸
# * [Step 2](#step2): 检测狗狗
# * [Step 3](#step3): 从头创建一个CNN来分类狗品种
# * [Step 4](#step4): 使用一个CNN来区分狗的品种(使用迁移学习)
# * [Step 5](#step5): 建立一个CNN来分类狗的品种(使用迁移学习)
# * [Step 6](#step6): 完成你的算法
# * [Step 7](#step7): 测试你的算法
#
# 在该项目中包含了如下的问题:
#
# * [问题 1](#question1)
# * [问题 2](#question2)
# * [问题 3](#question3)
# * [问题 4](#question4)
# * [问题 5](#question5)
# * [问题 6](#question6)
# * [问题 7](#question7)
# * [问题 8](#question8)
# * [问题 9](#question9)
# * [问题 10](#question10)
# * [问题 11](#question11)
#
#
# ---
# <a id='step0'></a>
# ## 步骤 0: 导入数据集
#
# ### 导入狗数据集
# 在下方的代码单元(cell)中,我们导入了一个狗图像的数据集。我们使用 scikit-learn 库中的 `load_files` 函数来获取一些变量:
# - `train_files`, `valid_files`, `test_files` - 包含图像的文件路径的numpy数组
# - `train_targets`, `valid_targets`, `test_targets` - 包含独热编码分类标签的numpy数组
# - `dog_names` - 由字符串构成的与标签相对应的狗的种类
# In[2]:
from sklearn.datasets import load_files
from keras.utils import np_utils
import numpy as np
from glob import glob
# define function to load train, test, and validation datasets
def load_dataset(path, num_classes=133):
    """Load an image-classification dataset laid out as one sub-folder per class.

    Args:
        path: root directory; each sub-directory holds the images of one class.
        num_classes: total number of classes used for the one-hot encoding
            (default 133, the number of dog breeds in this project).

    Returns:
        (files, targets): a numpy array of image file paths and the matching
        one-hot encoded target matrix of shape (n_samples, num_classes).
    """
    data = load_files(path)
    dog_files = np.array(data['filenames'])
    dog_targets = np_utils.to_categorical(np.array(data['target']), num_classes)
    return dog_files, dog_targets
# load train, test, and validation datasets
train_files, train_targets = load_dataset('/data/dog_images/train')
valid_files, valid_targets = load_dataset('/data/dog_images/valid')
test_files, test_targets = load_dataset('/data/dog_images/test')

# load list of dog names (one per class sub-directory)
# NOTE(review): the slice [20:-1] looks tuned for a different path prefix --
# '/data/dog_images/train/' is 23 characters long, so the resulting names keep
# a stray 'in/' prefix; verify against the intended directory layout.
dog_names = [item[20:-1] for item in sorted(glob("/data/dog_images/train/*/"))]

# print statistics about the dataset
print('There are %d total dog categories.' % len(dog_names))
print('There are %s total dog images.\n' % len(np.hstack([train_files, valid_files, test_files])))
print('There are %d training dog images.' % len(train_files))
print('There are %d validation dog images.' % len(valid_files))
print('There are %d test dog images.'% len(test_files))
# ### 导入人脸数据集
#
# 在下方的代码单元中,我们导入人脸图像数据集,文件所在路径存储在名为 `human_files` 的 numpy 数组。
# In[3]:
import random
random.seed(8675309)

# Load the file names of the human face dataset and shuffle them.
human_files = np.array(glob("/data/lfw/*/*"))
random.shuffle(human_files)

# Print the size of the dataset.
print('There are %d total human images.' % len(human_files))
# ---
# <a id='step1'></a>
# ## 步骤1:检测人脸
#
# 我们将使用 OpenCV 中的 [Haar feature-based cascade classifiers](http://docs.opencv.org/trunk/d7/d8b/tutorial_py_face_detection.html) 来检测图像中的人脸。OpenCV 提供了很多预训练的人脸检测模型,它们以XML文件保存在 [github](https://github.com/opencv/opencv/tree/master/data/haarcascades)。我们已经下载了其中一个检测模型,并且把它存储在 `haarcascades` 的目录中。
#
# 在如下代码单元中,我们将演示如何使用这个检测模型在样本图像中找到人脸。
# In[11]:
import cv2
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')

# Load a pre-trained face detection model (OpenCV Haar cascade).
face_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')

# Load a color image (OpenCV channel order is BGR).
img = cv2.imread(human_files[3])
# Convert the BGR image to grayscale, as the cascade expects.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Find faces in the image.
faces = face_cascade.detectMultiScale(gray)

# Print the number of faces detected in the image.
print('Number of faces detected:', len(faces))

# Get a bounding box (x, y, width, height) for each detected face.
for (x,y,w,h) in faces:
    # Draw the bounding box on the image.
    cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)

# Convert the BGR image to RGB for plotting with matplotlib.
cv_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

# Display the image with the bounding boxes.
plt.imshow(cv_rgb)
plt.show()
# 在使用任何一个检测模型之前,将图像转换为灰度图是常用过程。`detectMultiScale` 函数使用储存在 `face_cascade` 中的的数据,对输入的灰度图像进行分类。
#
# 在上方的代码中,`faces` 以 numpy 数组的形式,保存了识别到的面部信息。它其中每一行表示一个被检测到的脸,该数据包括如下四个信息:前两个元素 `x`、`y` 代表识别框左上角的 x 和 y 坐标(参照上图,注意 y 坐标的方向和我们默认的方向不同);后两个元素代表识别框在 x 和 y 轴两个方向延伸的长度 `w` 和 `d`。
#
# ### 写一个人脸识别器
#
# 我们可以将这个程序封装为一个函数。该函数的输入为人脸图像的**路径**,当图像中包含人脸时,该函数返回 `True`,反之返回 `False`。该函数定义如下所示。
# In[12]:
# Returns True if at least one face is detected in the image at img_path.
def face_detector(img_path):
    """Report whether OpenCV's Haar cascade finds a human face in the image."""
    bgr_image = cv2.imread(img_path)
    grayscale = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    detections = face_cascade.detectMultiScale(grayscale)
    return len(detections) > 0
# ### **【练习】** 评估人脸检测模型
#
# ---
#
# <a id='question1'></a>
# ### __问题 1:__
#
# 在下方的代码块中,使用 `face_detector` 函数,计算:
#
# - `human_files` 的前100张图像中,能够检测到**人脸**的图像占比多少?
# - `dog_files` 的前100张图像中,能够检测到**人脸**的图像占比多少?
#
# 理想情况下,人图像中检测到人脸的概率应当为100%,而狗图像中检测到人脸的概率应该为0%。你会发现我们的算法并非完美,但结果仍然是可以接受的。我们从每个数据集中提取前100个图像的文件路径,并将它们存储在`human_files_short`和`dog_files_short`中。
# In[13]:
human_files_short = human_files[:100]
dog_files_short = train_files[:100]
## Do not modify the code above

## TODO: test the performance of face_detector on the images in
## human_files_short and dog_files_short

# Fraction of files in which a face is detected.
detector = lambda files: np.mean(list(map(face_detector, files)))

# In[14]:

# BUG FIX: the original format string used '%2.f' (precision 0) for the dog
# fraction, which printed e.g. ' 0' instead of '0.11'; '%.2f' shows two decimals.
print("face detector:\n human %.2f\ndog %.2f"% (detector(human_files_short), detector(dog_files_short)))
# ---
#
# <a id='question2'></a>
#
# ### __问题 2:__
#
# 就算法而言,该算法成功与否的关键在于,用户能否提供含有清晰面部特征的人脸图像。
# 那么你认为,这样的要求在实际使用中对用户合理吗?如果你觉得不合理,你能否想到一个方法,即使图像中并没有清晰的面部特征,也能够检测到人脸?
#
# __回答:__
#
# 不太合理,因为用户拍摄的照片的质量受环境光线,以及用户拍摄工具的制约,不同用户的照片差别很大。在没有清晰轮廓的情况下,可以使用图片增强功能邓功能。
# ---
#
# <a id='Selection1'></a>
# ### 选做:
#
# 我们建议在你的算法中使用opencv的人脸检测模型去检测人类图像,不过你可以自由地探索其他的方法,尤其是尝试使用深度学习来解决它:)。请用下方的代码单元来设计和测试你的面部监测算法。如果你决定完成这个_选做_任务,你需要报告算法在每一个数据集上的表现。
# In[6]:
## (选做) TODO: 报告另一个面部检测算法在LFW数据集上的表现
### 你可以随意使用所需的代码单元数
# ---
# <a id='step2'></a>
#
# ## 步骤 2: 检测狗狗
#
# 在这个部分中,我们使用预训练的 [ResNet-50](http://ethereon.github.io/netscope/#/gist/db945b393d40bfa26006) 模型去检测图像中的狗。下方的第一行代码就是下载了 ResNet-50 模型的网络结构参数,以及基于 [ImageNet](http://www.image-net.org/) 数据集的预训练权重。
#
# ImageNet 这目前一个非常流行的数据集,常被用来测试图像分类等计算机视觉任务相关的算法。它包含超过一千万个 URL,每一个都链接到 [1000 categories](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a) 中所对应的一个物体的图像。任给输入一个图像,该 ResNet-50 模型会返回一个对图像中物体的预测结果。
# In[15]:
from keras.applications.resnet50 import ResNet50

# Define the ResNet-50 model, pre-trained on ImageNet.
ResNet50_model = ResNet50(weights='imagenet')
# ### 数据预处理
#
# - 在使用 TensorFlow 作为后端的时候,在 Keras 中,CNN 的输入是一个4维数组(也被称作4维张量),它的各维度尺寸为 `(nb_samples, rows, columns, channels)`。其中 `nb_samples` 表示图像(或者样本)的总数,`rows`, `columns`, 和 `channels` 分别表示图像的行数、列数和通道数。
#
#
# - 下方的 `path_to_tensor` 函数实现如下将彩色图像的字符串型的文件路径作为输入,返回一个4维张量,作为 Keras CNN 输入。因为我们的输入图像是彩色图像,因此它们具有三个通道( `channels` 为 `3`)。
# 1. 该函数首先读取一张图像,然后将其缩放为 224×224 的图像。
# 2. 随后,该图像被调整为具有4个维度的张量。
# 3. 对于任一输入图像,最后返回的张量的维度是:`(1, 224, 224, 3)`。
#
#
# - `paths_to_tensor` 函数将图像路径的字符串组成的 numpy 数组作为输入,并返回一个4维张量,各维度尺寸为 `(nb_samples, 224, 224, 3)`。 在这里,`nb_samples`是提供的图像路径的数据中的样本数量或图像数量。你也可以将 `nb_samples` 理解为数据集中3维张量的个数(每个3维张量表示一个不同的图像。
# In[6]:
from keras.preprocessing import image
from tqdm import tqdm
def path_to_tensor(img_path):
    """Load the image at img_path as a 4-D tensor of shape (1, 224, 224, 3)."""
    # Load an RGB image as a PIL.Image.Image object, resized to 224x224.
    img = image.load_img(img_path, target_size=(224, 224))
    # Convert the PIL.Image.Image to a 3-D tensor of shape (224, 224, 3).
    x = image.img_to_array(img)
    # Convert the 3-D tensor to a 4-D tensor of shape (1, 224, 224, 3) and return it.
    return np.expand_dims(x, axis=0)
def paths_to_tensor(img_paths):
    """Stack per-image 4-D tensors into one (n_samples, 224, 224, 3) tensor."""
    tensors = [path_to_tensor(path) for path in tqdm(img_paths)]
    return np.vstack(tensors)
# ### 基于 ResNet-50 架构进行预测
#
# 对于通过上述步骤得到的四维张量,在把它们输入到 ResNet-50 网络、或 Keras 中其他类似的预训练模型之前,还需要进行一些额外的处理:
# 1. 首先,这些图像的通道顺序为 RGB,我们需要重排他们的通道顺序为 BGR。
# 2. 其次,预训练模型的输入都进行了额外的归一化过程。因此我们在这里也要对这些张量进行归一化,即对所有图像所有像素都减去像素均值 `[103.939, 116.779, 123.68]`(以 RGB 模式表示,根据所有的 ImageNet 图像算出)。
#
# 导入的 `preprocess_input` 函数实现了这些功能。如果你对此很感兴趣,可以在 [这里](https://github.com/fchollet/keras/blob/master/keras/applications/imagenet_utils.py) 查看 `preprocess_input`的代码。
#
#
# 在实现了图像处理的部分之后,我们就可以使用模型来进行预测。这一步通过 `predict` 方法来实现,它返回一个向量,向量的第 i 个元素表示该图像属于第 i 个 ImageNet 类别的概率。这通过如下的 `ResNet50_predict_labels` 函数实现。
#
# 通过对预测出的向量取用 argmax 函数(找到有最大概率值的下标序号),我们可以得到一个整数,即模型预测到的物体的类别。进而根据这个 [清单](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a),我们能够知道这具体是哪个品种的狗狗。
#
# In[18]:
from keras.applications.resnet50 import preprocess_input, decode_predictions
def ResNet50_predict_labels(img_path):
    """Return the ImageNet class index ResNet-50 predicts for the given image path."""
    # Build the preprocessed input tensor and run the prediction.
    model_input = preprocess_input(path_to_tensor(img_path))
    prediction_vector = ResNet50_model.predict(model_input)
    return np.argmax(prediction_vector)
# ### 完成狗检测模型
#
#
# 在研究该 [清单](https://gist.github.com/yrevar/942d3a0ac09ec9e5eb3a) 的时候,你会注意到,狗类别对应的序号为151-268。因此,在检查预训练模型判断图像是否包含狗的时候,我们只需要检查如上的 `ResNet50_predict_labels` 函数是否返回一个介于151和268之间(包含区间端点)的值。
#
# 我们通过这些想法来完成下方的 `dog_detector` 函数,如果从图像中检测到狗就返回 `True`,否则返回 `False`。
# In[19]:
def dog_detector(img_path):
    """Return True if ResNet-50 classifies the image at img_path as a dog.

    ImageNet class indices 151-268 (inclusive) are all dog breeds.
    """
    prediction = ResNet50_predict_labels(img_path)
    # Idiomatic chained comparison instead of bitwise '&' on boolean results;
    # same outcome for a scalar prediction, clearer and short-circuiting.
    return 151 <= prediction <= 268
# ### 【作业】评估狗狗检测模型
#
# ---
#
# <a id='question3'></a>
# ### __问题 3:__
#
# 在下方的代码块中,使用 `dog_detector` 函数,计算:
#
# - `human_files_short`中图像检测到狗狗的百分比?
# - `dog_files_short`中图像检测到狗狗的百分比?
# In[20]:
### TODO: test dog_detector's performance on human_files_short and dog_files_short
# human_cnt = 0
# dog_cnt = 0
# for h, d in zip(human_files_short, dog_files_short):
#     if dog_detector(h):
#         human_cnt += 1
#     if dog_detector(d):
#         dog_cnt += 1
# print("dog_detector:", human_cnt / 100, dog_cnt / 100)

# Fraction of files in which the given detector `dt` fires.
detector_dog = lambda dt, files: np.mean(list(map(dt, files)))
print("dog detector:", detector_dog(dog_detector, human_files_short), detector_dog(dog_detector, dog_files_short))
# ---
#
# <a id='step3'></a>
#
# ## 步骤 3: 从头开始创建一个CNN来分类狗品种
#
#
# 现在我们已经实现了一个函数,能够在图像中识别人类及狗狗。但我们需要更进一步的方法,来对狗的类别进行识别。在这一步中,你需要实现一个卷积神经网络来对狗的品种进行分类。你需要__从头实现__你的卷积神经网络(在这一阶段,你还不能使用迁移学习),并且你需要达到超过1%的测试集准确率。在本项目的步骤五种,你还有机会使用迁移学习来实现一个准确率大大提高的模型。
#
# 在添加卷积层的时候,注意不要加上太多的(可训练的)层。更多的参数意味着更长的训练时间,也就是说你更可能需要一个 GPU 来加速训练过程。万幸的是,Keras 提供了能够轻松预测每次迭代(epoch)花费时间所需的函数。你可以据此推断你算法所需的训练时间。
#
# 值得注意的是,对狗的图像进行分类是一项极具挑战性的任务。因为即便是一个正常人,也很难区分布列塔尼犬和威尔士史宾格犬。
#
#
# 布列塔尼犬(Brittany) | 威尔士史宾格犬(Welsh Springer Spaniel)
# - | -
# <img src="images/Brittany_02625.jpg" width="100"> | <img src="images/Welsh_springer_spaniel_08203.jpg" width="200">
#
# 不难发现其他的狗品种会有很小的类间差别(比如卷毛寻回犬和美国水猎犬)。
#
#
# 卷毛寻回犬(Curly-Coated Retriever) | 美国水猎犬(American Water Spaniel)
# - | -
# <img src="images/Curly-coated_retriever_03896.jpg" width="200"> | <img src="images/American_water_spaniel_00648.jpg" width="200">
#
# 同样,拉布拉多犬(labradors)有黄色、棕色和黑色这三种。那么你设计的基于视觉的算法将不得不克服这种较高的类间差别,以达到能够将这些不同颜色的同类狗分到同一个品种中。
#
# 黄色拉布拉多犬(Yellow Labrador) | 棕色拉布拉多犬(Chocolate Labrador) | 黑色拉布拉多犬(Black Labrador)
# - | -
# <img src="images/Labrador_retriever_06457.jpg" width="150"> | <img src="images/Labrador_retriever_06455.jpg" width="240"> | <img src="images/Labrador_retriever_06449.jpg" width="220">
#
# 我们也提到了随机分类将得到一个非常低的结果:不考虑品种略有失衡的影响,随机猜测到正确品种的概率是1/133,相对应的准确率是低于1%的。
#
# 请记住,在深度学习领域,实践远远高于理论。大量尝试不同的框架吧,相信你的直觉!当然,玩得开心!
#
#
# ### 数据预处理
#
#
# 通过对每张图像的像素值除以255,我们对图像实现了归一化处理。
# In[10]:
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Keras data preprocessing: rescale pixel values from [0, 255] to [0, 1].
train_tensors = paths_to_tensor(train_files).astype('float32')/255
valid_tensors = paths_to_tensor(valid_files).astype('float32')/255
test_tensors = paths_to_tensor(test_files).astype('float32')/255
# ### 【练习】模型架构
#
#
# 创建一个卷积神经网络来对狗品种进行分类。在你代码块的最后,执行 `model.summary()` 来输出你模型的总结信息。
#
# 我们已经帮你导入了一些所需的 Python 库,如有需要你可以自行导入。如果你在过程中遇到了困难,如下是给你的一点小提示——该模型能够在5个 epoch 内取得超过1%的测试准确率,并且能在CPU上很快地训练。
#
# 
# ---
#
# <a id='question4'></a>
#
# ### __问题 4:__
#
# 在下方的代码块中尝试使用 Keras 搭建卷积网络的架构,并回答相关的问题。
#
# 1. 你可以尝试自己搭建一个卷积网络的模型,那么你需要回答你搭建卷积网络的具体步骤(用了哪些层)以及为什么这样搭建。
# 2. 你也可以根据上图提示的步骤搭建卷积网络,那么请说明为何如上的架构能够在该问题上取得很好的表现。
#
# __回答:__
# 1. 使用了GlobalAveragePooling2D 可以减少计算量,降低过拟合。
# 2. 使用激活层,使得网络结构可以很多拟合非线性问题。
# 3. Dropout 为了防止过拟合,随机跳过百分比的神经元参数,也就是不更新这部分神经元参数。
# 4. 滤波器的数目随着网络结构增加数量也增加,主要是为了逐步抽象出更多差别特征,而浅层滤波器一般学到基础特征例如颜色,形状。使用少量滤波器即可学到。
# In[11]:
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf

# BUG FIX: ConfigProto.device_count keys are case-sensitive ('CPU'/'GPU');
# the original lowercase 'gpu' key was ignored by TensorFlow, so it did not
# actually disable GPU use. 'GPU': 0 forces CPU-only execution as intended.
KTF.set_session(tf.Session(config=tf.ConfigProto(device_count={'GPU':0})))
# In[48]:
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,Activation
from keras.layers import Dropout, Flatten, Dense, BatchNormalization,Dropout
from keras.models import Sequential

model = Sequential()

### TODO: define your network architecture
# Four Conv -> MaxPool -> BatchNorm -> ReLU -> Dropout stages with increasing
# filter counts (32, 32, 64, 128), then global average pooling and a small
# fully-connected classifier head.
model.add(Conv2D(32, kernel_size=(2, 2), input_shape=(224, 224, 3), padding='valid'))
model.add(MaxPooling2D((2,2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Conv2D(32, kernel_size=(3, 3), padding='valid'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size=(3, 3), padding='valid'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Conv2D(128, kernel_size=(3, 3), padding='valid'))
model.add(MaxPooling2D(2,2))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
# Global average pooling instead of Flatten keeps the parameter count small
# and reduces overfitting.
model.add(GlobalAveragePooling2D())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
# 133 output units: one per dog breed, with softmax probabilities.
model.add(Dense(133, activation='softmax'))
model.summary()
# In[5]:
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling2D,Activation
from keras.layers import Dropout, Flatten, Dense, BatchNormalization,Dropout
from keras.models import Sequential

# Deeper from-scratch architecture (replaces the model defined above): paired
# Conv layers per stage, BatchNorm + ReLU after every Conv, Dropout(0.2)
# between stages, then global average pooling and a dense classifier head.
model = Sequential()
model.add(Conv2D(filters=16, kernel_size=[3, 3], input_shape=[224, 224, 3]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Conv2D(filters=32, kernel_size=[3, 3]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Conv2D(filters=64, kernel_size=[3, 3]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=64, kernel_size=[3, 3]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=[3, 3]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=128, kernel_size=[3, 3]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Conv2D(filters=128, kernel_size=[3, 3]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(filters=128, kernel_size=[3, 3]))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
# 133 output units: one per dog breed, with softmax probabilities.
model.add(Dense(133, activation='softmax'))
model.summary()
# 在没有使用数据增强的情况下,测试准确率达到14%
# Test accuracy: 14.5933%
# In[28]:
## Compile the model
from keras.optimizers import Adam, RMSprop

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# ---
# ## 【练习】训练模型
#
#
# ---
#
# <a id='question5'></a>
#
# ### __问题 5:__
#
# 在下方代码单元训练模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
#
# 可选题:你也可以对训练集进行 [数据增强](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html),来优化模型的表现。
#
#
# In[16]:
from keras.callbacks import ModelCheckpoint, EarlyStopping

### TODO: set the number of training epochs
epochs = 50

### Do not modify the code below
# Stop early when the validation loss stalls; checkpoint the best weights.
earlyStopping = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
                               verbose=1, save_best_only=True)
model.fit(train_tensors, train_targets,
          validation_data=(valid_tensors, valid_targets),
          epochs=epochs, batch_size=20, callbacks=[checkpointer, earlyStopping], verbose=1)
# In[29]:
from keras.preprocessing.image import ImageDataGenerator

epochs = 100
batch_size = 32

# Augment the training images with random rotations, shifts, shears, zooms
# and horizontal flips to reduce overfitting.
train_datagen = ImageDataGenerator(
    rotation_range=30,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
train_generator = train_datagen.flow(train_tensors, train_targets, batch_size=batch_size)

# No augmentation for the validation data.
val_datagen = ImageDataGenerator()
validation_generator = val_datagen.flow(valid_tensors, valid_targets, batch_size=batch_size)

# Checkpoint the weights with the lowest validation loss; stop early when the
# validation loss has not improved by min_delta within `patience` epochs.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.from_scratch.hdf5',
                               verbose=1, save_best_only=True)
earlystopping = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=20, verbose=1)
model.fit_generator(
    train_generator,
    steps_per_epoch=len(train_files) // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=len(valid_files) // batch_size,
    callbacks=[checkpointer, earlystopping])
# 添加图片增加以后测试准确率
#
# In[ ]:
## Load the model with the best validation loss
model.load_weights('saved_models/weights.best.from_scratch.hdf5')
# ### 测试模型
#
# 在狗图像的测试数据集上试用你的模型。确保测试准确率大于1%。
# In[18]:
# Get the predicted dog-breed index for every image in the test set.
dog_breed_predictions = [np.argmax(model.predict(np.expand_dims(tensor, axis=0))) for tensor in test_tensors]

# Report the test accuracy.
test_accuracy = 100*np.sum(np.array(dog_breed_predictions)==np.argmax(test_targets, axis=1))/len(dog_breed_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ---
# <a id='step4'></a>
# ## 步骤 4: 使用一个CNN来区分狗的品种
#
#
# 使用 迁移学习(Transfer Learning)的方法,能帮助我们在不损失准确率的情况下大大减少训练时间。在以下步骤中,你可以尝试使用迁移学习来训练你自己的CNN。
#
# ### 得到从图像中提取的特征向量(Bottleneck Features)
# In[53]:
# Pre-computed VGG-16 bottleneck features for the train/valid/test splits.
bottleneck_features = np.load('/data/bottleneck_features/DogVGG16Data.npz')
train_VGG16 = bottleneck_features['train']
valid_VGG16 = bottleneck_features['valid']
test_VGG16 = bottleneck_features['test']
# ### 模型架构
#
# 该模型使用预训练的 VGG-16 模型作为固定的图像特征提取器,其中 VGG-16 最后一层卷积层的输出被直接输入到我们的模型。我们只需要添加一个全局平均池化层以及一个全连接层,其中全连接层使用 softmax 激活函数,对每一个狗的种类都包含一个节点。
# In[54]:
# Transfer-learning head: global average pooling over the VGG-16 feature maps
# followed by a softmax layer with one unit per dog breed.
VGG16_model = Sequential()
VGG16_model.add(GlobalAveragePooling2D(input_shape=train_VGG16.shape[1:]))
VGG16_model.add(Dense(133, activation='softmax'))
VGG16_model.summary()
# In[55]:
## Compile the model
VGG16_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# In[56]:
## Train the model, checkpointing the weights with the best validation loss.
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.VGG16.hdf5',
                               verbose=1, save_best_only=True)
VGG16_model.fit(train_VGG16, train_targets,
                validation_data=(valid_VGG16, valid_targets),
                epochs=20, batch_size=20, callbacks=[checkpointer], verbose=1)
# In[57]:
## Load the model with the best validation loss
VGG16_model.load_weights('saved_models/weights.best.VGG16.hdf5')
# ### 测试模型
# 现在,我们可以测试此CNN在狗图像测试数据集中识别品种的效果如何。我们在下方打印出测试准确率。
# In[58]:
# Get the predicted dog-breed index for every image in the test set.
VGG16_predictions = [np.argmax(VGG16_model.predict(np.expand_dims(feature, axis=0))) for feature in test_VGG16]

# Report the test accuracy.
test_accuracy = 100*np.sum(np.array(VGG16_predictions)==np.argmax(test_targets, axis=1))/len(VGG16_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ### 使用模型预测狗的品种
# In[59]:
from extract_bottleneck_features import *
def VGG16_predict_breed(img_path):
    """Predict the dog breed of an image with the VGG16 transfer-learning model.

    :param img_path: path to an image file
    :return: predicted breed name (an entry of the dog_names list)
    """
    # Extract the VGG16 bottleneck features for this image
    bottleneck_feature = extract_VGG16(path_to_tensor(img_path))
    # Get the prediction vector (one probability per breed)
    predicted_vector = VGG16_model.predict(bottleneck_feature)
    # Return the breed with the highest predicted probability
    return dog_names[np.argmax(predicted_vector)]
# ---
# <a id='step5'></a>
# ## 步骤 5: 建立一个CNN来分类狗的品种(使用迁移学习)
#
# 现在你将使用迁移学习来建立一个CNN,从而可以从图像中识别狗的品种。你的 CNN 在测试集上的准确率必须至少达到60%。
#
# 在步骤4中,我们使用了迁移学习来创建一个使用基于 VGG-16 提取的特征向量来搭建一个 CNN。在本部分内容中,你必须使用另一个预训练模型来搭建一个 CNN。为了让这个任务更易实现,我们已经预先对目前 keras 中可用的几种网络进行了预训练:
#
# - [VGG-19](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogVGG19Data.npz) bottleneck features
# - [ResNet-50](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogResnet50Data.npz) bottleneck features
# - [Inception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogInceptionV3Data.npz) bottleneck features
# - [Xception](https://s3-us-west-1.amazonaws.com/udacity-aind/dog-project/DogXceptionData.npz) bottleneck features
#
# 这些文件被命名为为:
#
# Dog{network}Data.npz
#
# 其中 `{network}` 可以是 `VGG19`、`Resnet50`、`InceptionV3` 或 `Xception` 中的一个。选择上方网络架构中的一个,他们已经保存在目录 `/data/bottleneck_features/` 中。
#
#
# ### 【练习】获取模型的特征向量
#
# 在下方代码块中,通过运行下方代码提取训练、测试与验证集相对应的bottleneck特征。
#
# bottleneck_features = np.load('/data/bottleneck_features/Dog{network}Data.npz')
# train_{network} = bottleneck_features['train']
# valid_{network} = bottleneck_features['valid']
# test_{network} = bottleneck_features['test']
# In[60]:
### TODO: obtain bottleneck features from another pre-trained CNN (ResNet50)
bottleneck_features = np.load('/data/bottleneck_features/DogResnet50Data.npz')
train_ResNet50 = bottleneck_features['train']
valid_ResNet50 = bottleneck_features['valid']
test_ResNet50 = bottleneck_features['test']
# In[61]:
# Sanity check: print the first training feature map
print(train_ResNet50[0])
# ### 【练习】模型架构
#
# 建立一个CNN来分类狗品种。在你的代码单元块的最后,通过运行如下代码输出网络的结构:
#
# <your model's name>.summary()
#
# ---
#
# <a id='question6'></a>
#
# ### __问题 6:__
#
#
# 在下方的代码块中尝试使用 Keras 搭建最终的网络架构,并回答你实现最终 CNN 架构的步骤与每一步的作用,并描述你在迁移学习过程中,使用该网络架构的原因。
#
#
# __回答:__
#
# 1. ResNet-50具有较高的分类准确率,通过加载相应的特征向量,可以很快的训练处高准确率的模型。
# 2. GlobalAveragePooling2D 全局池化可以有效的降低计算量,使得模型更简单,避免过拟合。
# 3. 早期自己搭建的模型,网络深度不够,模型优化的过程需要大量的时间,如采用网格优化的方法。
# 4. 而是用迁移学习,对于数据集相似的情况下,使用已经得到验证的模型会节省大量时间,很容易得到准确率较高的模型。
# In[62]:
### TODO: define the architecture
# ResNet50 bottleneck features -> global average pooling -> softmax over 133 breeds
ResNet_model = Sequential()
ResNet_model.add(GlobalAveragePooling2D(input_shape=train_ResNet50.shape[1:]))
ResNet_model.add(Dense(133, activation='softmax'))
ResNet_model.summary()
# In[63]:
### TODO: compile the model
ResNet_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# ---
#
# ### 【练习】训练模型
#
# <a id='question7'></a>
#
# ### __问题 7:__
#
# 在下方代码单元中训练你的模型。使用模型检查点(model checkpointing)来储存具有最低验证集 loss 的模型。
#
# 当然,你也可以对训练集进行 [数据增强](https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html) 以优化模型的表现,不过这不是必须的步骤。
#
# In[64]:
### TODO: train the model
from keras.callbacks import EarlyStopping
# stop when the validation loss has not improved for 5 epochs
earlyStop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
# checkpoint the weights with the best validation loss
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.ResNet50.hdf5',
                               verbose=1, save_best_only=True)
ResNet_model.fit(train_ResNet50, train_targets,
                 validation_data=(valid_ResNet50, valid_targets),
                 epochs=20, batch_size=20, callbacks=[checkpointer, earlyStop], verbose=1)
# In[65]:
### TODO: load the weights with the best validation loss
ResNet_model.load_weights('saved_models/weights.best.ResNet50.hdf5')
# In[7]:
# Load the pre-computed Xception bottleneck features for the train/valid/test splits
bottleneck_features = np.load('/data/bottleneck_features/DogXceptionData.npz')
train_Xception = bottleneck_features['train']
valid_Xception = bottleneck_features['valid']
test_Xception = bottleneck_features['test']
# In[8]:
### TODO: define the architecture
# Xception bottleneck features -> global average pooling -> softmax over 133 breeds
Xception_model = Sequential()
Xception_model.add(GlobalAveragePooling2D(input_shape=train_Xception.shape[1:]))
Xception_model.add(Dense(133, activation='softmax'))
Xception_model.summary()
### TODO: compile the model
Xception_model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# In[85]:
### TODO: train the model
from keras.callbacks import EarlyStopping
# NOTE(review): earlyStop is created but NOT passed to fit() below -- confirm
# whether early stopping was intended for the Xception run.
earlyStop = EarlyStopping(monitor='val_loss', patience=5, verbose=0, mode='auto')
checkpointer = ModelCheckpoint(filepath='saved_models/weights.best.Xception.hdf5',
                               verbose=1, save_best_only=True)
Xception_model.fit(train_Xception, train_targets,
                   validation_data=(valid_Xception, valid_targets),
                   epochs=20, batch_size=20, callbacks=[checkpointer, ], verbose=1)
# In[9]:
# Load the best weights found during training
Xception_model.load_weights('saved_models/weights.best.Xception.hdf5')
# Get the index of the predicted dog breed for every image in the test set
Xception_predictions = [np.argmax(Xception_model.predict(np.expand_dims(feature, axis=0))) for feature in test_Xception]
# Report test accuracy
test_accuracy = 100*np.sum(np.array(Xception_predictions)==np.argmax(test_targets, axis=1))/len(Xception_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ---
#
# ### 【练习】测试模型
#
# <a id='question8'></a>
#
# ### __问题 8:__
#
# 在狗图像的测试数据集上试用你的模型。确保测试准确率大于60%。
# In[66]:
### TODO: compute the classification accuracy on the test set
# Get the index of the predicted dog breed for every image in the test set
ResNet50_predictions = [np.argmax(ResNet_model.predict(np.expand_dims(feature, axis=0))) for feature in test_ResNet50]
# Report test accuracy
test_accuracy = 100*np.sum(np.array(ResNet50_predictions)==np.argmax(test_targets, axis=1))/len(ResNet50_predictions)
print('Test accuracy: %.4f%%' % test_accuracy)
# ---
#
# ### 【练习】使用模型测试狗的品种
#
#
# 实现一个函数,它的输入为图像路径,功能为预测对应图像的类别,输出为你模型预测出的狗类别(`Affenpinscher`, `Afghan_hound` 等)。
#
# 与步骤5中的模拟函数类似,你的函数应当包含如下三个步骤:
#
# 1. 根据选定的模型载入图像特征(bottleneck features)
# 2. 将图像特征输输入到你的模型中,并返回预测向量。注意,在该向量上使用 argmax 函数可以返回狗种类的序号。
# 3. 使用在步骤0中定义的 `dog_names` 数组来返回对应的狗种类名称。
#
# 提取图像特征过程中使用到的函数可以在 `extract_bottleneck_features.py` 中找到。同时,他们应已在之前的代码块中被导入。根据你选定的 CNN 网络,你可以使用 `extract_{network}` 函数来获得对应的图像特征,其中 `{network}` 代表 `VGG19`, `Resnet50`, `InceptionV3`, 或 `Xception` 中的一个。
#
# ---
#
# <a id='question9'></a>
#
# ### __问题 9:__
# In[45]:
### TODO: 写一个函数,该函数将图像的路径作为输入
from extract_bottleneck_features import *
def Resnet_predict_breed(img_path):
    """Predict the dog breed of an image with the ResNet50 transfer-learning model.

    :param img_path: path to an image file
    :return: predicted breed name (an entry of the dog_names list)
    """
    # Extract the ResNet50 bottleneck features for this image
    bottleneck_feature = extract_Resnet50(path_to_tensor(img_path))
    # Get the prediction vector (one probability per breed)
    predicted_vector = ResNet_model.predict(bottleneck_feature)
    # Return the breed with the highest predicted probability
    # (fixed: the original return line was garbled/corrupted)
    return dog_names[np.argmax(predicted_vector)]
""" Run experiments on synthetic data """
#Author: <NAME> (<EMAIL>)
#Date: 22 February 2021
import numpy as np
import time
import pickle
import os
import copy
import argparse
import visualization_syntdata
from models import GFA_DiagonalNoiseModel, GFA_OriginalModel
from utils import GFAtools
def generate_missdata(X_train, infoMiss):
    """
    Insert missing values (NaNs) into selected groups of the training data.

    Three removal schemes are supported per group: 'random' (entries removed
    independently with probability perc/100), 'rows' (perc% of the rows
    removed entirely) and 'nonrand' (entries beyond perc standard deviations
    from zero removed).

    Parameters
    ----------
    X_train : list
        List of arrays containing the data matrix of each group.
    infoMiss : dict
        'ds' (1-based group indices), 'type' (removal scheme per group) and
        'perc' (amount of missingness per group).

    Returns
    -------
    X_train : list
        The input list with NaNs inserted in the selected groups.
    missing_Xtrue : list
        For each selected group, an array holding the removed true values
        (zeros everywhere else).
    """
    missing_Xtrue = [[] for _ in range(len(infoMiss['ds']))]
    for i in range(len(infoMiss['ds'])):
        g = infoMiss['ds'][i] - 1  # convert 1-based group id to list index
        scheme = infoMiss['type'][i]
        perc = infoMiss['perc'][i]
        if 'random' in scheme:
            # flag each entry for removal with probability perc/100
            flags = np.random.choice([0, 1],
                                     size=(X_train[g].shape[0], X_train[g].shape[1]),
                                     p=[1 - perc / 100, perc / 100])
            missing_Xtrue[i] = np.where(flags == 1, X_train[g], 0)
            X_train[g][flags == 1] = np.nan
        elif 'rows' in scheme:
            # remove whole rows chosen at random
            n_samples = X_train[g].shape[0]
            missing_Xtrue[i] = np.zeros((n_samples, X_train[g].shape[1]))
            n_remove = int(perc / 100 * n_samples)
            order = np.arange(n_samples)
            np.random.shuffle(order)
            removed = order[0:n_remove]
            missing_Xtrue[i][removed, :] = X_train[g][removed, :]
            X_train[g][removed, :] = np.nan
        elif 'nonrand' in scheme:
            # remove entries whose magnitude exceeds perc standard deviations
            threshold = perc * np.std(X_train[g])
            flags = np.zeros((X_train[g].shape[0], X_train[g].shape[1]))
            flags[X_train[g] > threshold] = 1
            flags[X_train[g] < -threshold] = 1
            missing_Xtrue[i] = np.where(flags == 1, X_train[g], 0)
            X_train[g][flags == 1] = np.nan
    return X_train, missing_Xtrue
def get_data_2g(args, infoMiss=None):
"""
Generate synthetic data with 2 groups.
Parameters
----------
args : local namespace
Arguments selected to run the model.
infoMiss : dict | None, optional.
Parameters to generate data with missing values.
Returns
-------
data : dict
Training and test data as well as model parameters used
to generate the data.
"""
Ntrain = 400; Ntest = 100
N = Ntrain + Ntest # total number of samples
M = args.num_groups #number of groups
d = np.array([50, 30]) #number of dimensios in each group
true_K = 4 # true latent factors
# Specify Z manually
Z = np.zeros((N, true_K))
for i in range(0, N):
Z[i,0] = np.sin((i+1)/(N/20))
Z[i,1] = np.cos((i+1)/(N/20))
Z[i,2] = 2 * ((i+1)/N-0.5)
Z[:,3] = np.random.normal(0, 1, N)
# Specify noise precisions manually
tau = [[] for _ in range(d.size)]
tau[0] = 5 * np.ones((1,d[0]))[0]
tau[1] = 10 * np.ones((1,d[1]))[0]
# Specify alphas manually
alpha = np.zeros((M, true_K))
alpha[0,:] = np.array([1,1,1e6,1])
alpha[1,:] = np.array([1,1,1,1e6])
#W and X
W = [[] for _ in range(d.size)]
X_train = [[] for _ in range(d.size)]
X_test = [[] for _ in range(d.size)]
for i in range(0, d.size):
W[i] = np.zeros((d[i], true_K))
for t in range(0, true_K):
#generate W from p(W|alpha)
W[i][:,t] = np.random.normal(0, 1/np.sqrt(alpha[i,t]), d[i])
X = np.zeros((N, d[i]))
for j in range(0, d[i]):
#generate X from the generative model
X[:,j] = np.dot(Z,W[i][j,:].T) + \
np.random.normal(0, 1/ | np.sqrt(tau[i][j]) | numpy.sqrt |
#!/usr/bin/env python
# Copyright 2021
# author: <NAME> <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import ttest_ind
import netCDF4 as nc
import pickle
import os
from PIL import Image as PIL_Image
import sys
import shutil
import glob
import datetime
import time
import calendar
from numpy import genfromtxt
from scipy.optimize import curve_fit
from scipy.cluster.vq import kmeans,vq
from scipy.interpolate import interpn, interp1d
from math import e as e_constant
import math
import matplotlib.dates as mdates
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm
from matplotlib.collections import LineCollection
from matplotlib.ticker import (MultipleLocator, NullFormatter, ScalarFormatter)
from matplotlib.colors import ListedColormap, BoundaryNorm
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import matplotlib
import warnings
warnings.filterwarnings("ignore")
plt.style.use('classic')
# font size
# font_size = 14
# matplotlib.rc('font', **{'family': 'serif', 'serif': ['Arial'], 'size': font_size})
# matplotlib.rc('font', weight='bold')
p_progress_writing = False
SMALL_SIZE = 8
MEDIUM_SIZE = 10
BIGGER_SIZE = 12
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
time_format = '%d-%m-%Y_%H:%M'
time_format_khan = '%Y%m%d.0%H'
time_format_mod = '%Y-%m-%d_%H:%M:%S'
time_format_twolines = '%H:%M\n%d-%m-%Y'
time_format_twolines_noYear_noMin_intMonth = '%H\n%d-%m'
time_format_twolines_noYear = '%H:%M\n%d-%b'
time_format_twolines_noYear_noMin = '%H\n%d-%b'
time_format_date = '%Y-%m-%d'
time_format_time = '%H:%M:%S'
time_format_parsivel = '%Y%m%d%H%M'
time_format_parsivel_seconds = '%Y%m%d%H%M%S'
time_str_formats = [
time_format,
time_format_mod,
time_format_twolines,
time_format_twolines_noYear,
time_format_date,
time_format_time,
time_format_parsivel
]
default_cm = cm.jet
cm_vir = cm.viridis
listed_cm_colors_list = ['silver', 'red', 'green', 'yellow', 'blue', 'black']
listed_cm = ListedColormap(listed_cm_colors_list, 'indexed')
colorbar_tick_labels_list_cloud_phase = ['Clear', 'Water', 'SLW', 'Mixed', 'Ice', 'Unknown']
listed_cm_colors_list_cloud_phase = ['white', 'red', 'green', 'yellow', 'blue', 'purple']
listed_cm_cloud_phase = ListedColormap(listed_cm_colors_list_cloud_phase, 'indexed')
avogadros_ = 6.022140857E+23 # molecules/mol
gas_const = 83144.598 # cm3 mbar k-1 mol-1
gas_const_2 = 8.3144621 # J mol-1 K-1
gas_const_water = 461 # J kg-1 K-1
gas_const_dry = 287 # J kg-1 K-1
boltzmann_ = gas_const / avogadros_ # cm3 mbar / k molecules
gravity_ = 9.80665 # m/s
poisson_ = 2/7 # for dry air (k)
latent_heat_v = 2.501E+6 # J/kg
latent_heat_f = 3.337E+5 # J/kg
latent_heat_s = 2.834E+6 # J/kg
heat_capacity__Cp = 1005.7 # J kg-1 K-1 dry air
heat_capacity__Cv = 719 # J kg-1 K-1 water vapor
Rs_da = 287.05 # Specific gas const for dry air, J kg^{-1} K^{-1}
Rs_v = 461.51 # Specific gas const for water vapour, J kg^{-1} K^{-1}
Cp_da = 1004.6 # Specific heat at constant pressure for dry air
Cv_da = 719. # Specific heat at constant volume for dry air
Cp_v = 1870. # Specific heat at constant pressure for water vapour
Cv_v = 1410. # Specific heat at constant volume for water vapour
Cp_lw = 4218 # Specific heat at constant pressure for liquid water
Epsilon = 0.622 # Epsilon=Rs_da/Rs_v; The ratio of the gas constants
degCtoK = 273.15 # Temperature offset between K and C (deg C)
rho_w = 1000. # Liquid Water density kg m^{-3}
grav = 9.80665 # Gravity, m s^{-2}
Lv = 2.5e6 # Latent Heat of vaporisation
boltzmann = 5.67e-8 # Stefan-Boltzmann constant
mv = 18.0153e-3 # Mean molar mass of water vapor(kg/mol)
m_a = 28.9644e-3 # Mean molar mass of air(kg/mol)
Rstar_a = 8.31432 # Universal gas constant for air (N m /(mol K))
path_output = '/g/data/k10/la6753/'
# Misc
class Object_create(object):
    """Empty class used as a generic mutable attribute container (like types.SimpleNamespace)."""
    pass
def list_files_recursive(path_, filter_str=None):
    """
    Recursively list all files under path_.

    :param path_: root directory to walk
    :param filter_str: if given, only include files whose NAME contains this substring
    :return: list of full file paths
    """
    file_list = []
    # r=root, d=directories, f=files; single walk with an optional name filter
    # (the original duplicated the whole walk loop for each branch)
    for r, d, f in os.walk(path_):
        for file in f:
            if filter_str is None or filter_str in file:
                file_list.append(os.path.join(r, file))
    return file_list
def list_files(path_, filter_str='*'):
    """Return the sorted list of paths matching the glob pattern path_ + filter_str."""
    pattern_ = path_ + filter_str
    return sorted(glob.glob(pattern_))
def coincidence(arr_1, arr_2):
    """Return the elements of arr_1 and arr_2 at positions where BOTH are valid (non-NaN)."""
    # a NaN in either input makes the elementwise product NaN, flagging the row invalid
    both_valid = ~np.isnan(arr_1 * arr_2)
    return arr_1[both_valid], arr_2[both_valid]
def array_2d_fill_gaps_by_interpolation_linear(array_):
    """
    Fill NaN gaps in a 2D array by linear interpolation.

    Each NaN cell is interpolated twice -- along its row (X direction) and
    along its column (Y direction) -- and filled with the average of the two
    directional estimates. Valid cells are returned unchanged.

    NOTE(review): gaps at the array edges rely on numpy.interp's end-value
    behavior (constant extrapolation of the nearest valid value) -- confirm
    this is acceptable for the intended data.

    :param array_: 2D numpy array with NaNs at the gap positions
    :return: new 2D numpy array with all gaps filled
    """
    rows_ = array_.shape[0]
    cols_ = array_.shape[1]
    # per-direction interpolation results; cells that are not gaps stay 0
    output_array_X = np.zeros((rows_, cols_), dtype=float)
    output_array_Y = np.zeros((rows_, cols_), dtype=float)
    row_sum = np.sum(array_, axis=1)  # NaN iff the row contains at least one gap
    col_index = np.arange(array_.shape[1])
    col_sum = np.sum(array_, axis=0)  # NaN iff the column contains at least one gap
    row_index = np.arange(array_.shape[0])
    for r_ in range(array_.shape[0]):
        if row_sum[r_] != row_sum[r_]:  # NaN != NaN -> row has gaps
            # get X direction interpolation (coincidence() drops the NaN samples)
            coin_out = coincidence(col_index, array_[r_, :])
            output_array_X[r_, :][np.isnan(array_[r_, :])] = np.interp(
                col_index[np.isnan(array_[r_, :])], coin_out[0], coin_out[1])
    for c_ in range(array_.shape[1]):
        if col_sum[c_] != col_sum[c_]:  # column has gaps
            # get Y direction interpolation
            coin_out = coincidence(row_index, array_[:, c_])
            output_array_Y[:, c_][np.isnan(array_[:, c_])] = np.interp(
                row_index[np.isnan(array_[:, c_])], coin_out[0], coin_out[1])
    output_array = np.array(array_)
    output_array[np.isnan(array_)] = 0  # zero the gaps, then add the averaged directional fill
    return output_array + ((output_array_X + output_array_Y)/2)
def array_2d_fill_gaps_by_interpolation_cubic(array_):
    """
    Fill NaN gaps in a 2D array by cubic interpolation.

    Each NaN cell is interpolated twice -- along its row (X direction) and
    along its column (Y direction) with scipy interp1d(kind='cubic') -- and
    filled with the average of the two directional estimates. Valid cells
    are returned unchanged.

    NOTE(review): interp1d raises ValueError when a row/column has fewer
    than 4 valid points, or when a gap lies outside the valid range (no
    extrapolation by default) -- confirm inputs always satisfy this.

    :param array_: 2D numpy array with NaNs at the gap positions
    :return: new 2D numpy array with all gaps filled
    """
    rows_ = array_.shape[0]
    cols_ = array_.shape[1]
    # per-direction interpolation results; cells that are not gaps stay 0
    output_array_X = np.zeros((rows_, cols_), dtype=float)
    output_array_Y = np.zeros((rows_, cols_), dtype=float)
    row_sum = np.sum(array_, axis=1)  # NaN iff the row contains at least one gap
    col_index = np.arange(array_.shape[1])
    col_sum = np.sum(array_, axis=0)  # NaN iff the column contains at least one gap
    row_index = np.arange(array_.shape[0])
    for r_ in range(array_.shape[0]):
        if row_sum[r_] != row_sum[r_]:  # NaN != NaN -> row has gaps
            # get X direction interpolation (coincidence() drops the NaN samples)
            coin_out = coincidence(col_index, array_[r_, :])
            interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
            output_array_X[r_, :][np.isnan(array_[r_, :])] = interp_function(col_index[np.isnan(array_[r_, :])])
    for c_ in range(array_.shape[1]):
        if col_sum[c_] != col_sum[c_]:  # column has gaps
            # get Y direction interpolation
            coin_out = coincidence(row_index, array_[:, c_])
            interp_function = interp1d(coin_out[0], coin_out[1], kind='cubic')
            output_array_Y[:, c_][np.isnan(array_[:, c_])] = interp_function(row_index[np.isnan(array_[:, c_])])
    output_array = np.array(array_)
    output_array[np.isnan(array_)] = 0  # zero the gaps, then add the averaged directional fill
    return output_array + ((output_array_X + output_array_Y)/2)
def combine_2_time_series(time_1_reference, data_1, time_2, data_2,
                          forced_time_step=None, forced_start_time=None, forced_stop_time=None,
                          cumulative_var_1=False, cumulative_var_2=False):
    """
    takes two data sets with respective time series, and outputs the coincident stamps from both data sets

    It does this by using mean_discrete() for both sets with the same start stamp and averaging time; the
    averaging time is the forced_time_step (or, if None, the median time step of time_1_reference).

    :param time_1_reference: 1D array, same units as time_2, this series will define the returned time step reference
    :param data_1: can be 1D or 2D array, first dimension must be same as time_1
    :param time_2: 1D array, same units as time_1
    :param data_2: can be 1D or 2D array, first dimension must be same as time_2
    :param forced_time_step: if None, the median of the differential of the time_1_reference will be used
    :param forced_start_time: if not None, the returned series will start at this time stamp
    :param forced_stop_time: if not None, the returned series will stop at this time stamp
    :param cumulative_var_1: True if you want the variable to be accumulated instead of averaged, only for 1D data
    :param cumulative_var_2: True if you want the variable to be accumulated instead of averaged, only for 1D data
    :return: Index_averaged_1: 1D array, smallest coincident time, without time stamp gaps
    :return: Values_mean_1: same shape as data_1, aligned to Index_averaged_1 times
    :return: Values_mean_2: same shape as data_2, aligned to Index_averaged_1 times
    """
    # define forced_time_step (median step of the reference series by default)
    if forced_time_step is None:
        forced_time_step = np.median(np.diff(time_1_reference))
    # find the overlapping time period of the two series (unless forced)
    if forced_start_time is None:
        first_time_stamp = max(np.nanmin(time_1_reference), np.nanmin(time_2))
    else:
        first_time_stamp = forced_start_time
    if forced_stop_time is None:
        last_time_stamp = min(np.nanmax(time_1_reference), np.nanmax(time_2))
    else:
        last_time_stamp = forced_stop_time
    # do the averaging (both series share the same grid definition)
    print('starting averaging of data 1')
    if cumulative_var_1:
        Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp,
                                                        cumulative_parameter_indx=0)
    else:
        Index_averaged_1, Values_mean_1 = mean_discrete(time_1_reference, data_1, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp)
    print('starting averaging of data 2')
    if cumulative_var_2:
        Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp,
                                                        cumulative_parameter_indx=0)
    else:
        Index_averaged_2, Values_mean_2 = mean_discrete(time_2, data_2, forced_time_step,
                                                        first_time_stamp, last_index=last_time_stamp)
    # check that averaged indexes are the same
    if np.nansum(np.abs(Index_averaged_1 - Index_averaged_2)) != 0:
        print('error during averaging of series, times do no match ????')
        return None, None, None
    # return the combined, trimmed data
    return Index_averaged_1, Values_mean_1, Values_mean_2
def split_str_chunks(s, n):
    """Produce `n`-character chunks from `s` (last chunk may be shorter)."""
    return [s[start:start + n] for start in range(0, len(s), n)]
def coincidence_multi(array_list):
    """
    Keep only the rows where ALL arrays have valid (non-NaN) values.

    :param array_list: list of 1D numpy arrays of equal length
    :return: list of 1D numpy arrays, one per input, each containing only
             the rows where every input array is non-NaN
    """
    # A row is valid only if the product across all arrays is not NaN
    # (NaN propagates through multiplication).
    check_ = array_list[0]
    for param_ in array_list[1:]:
        check_ = check_ * param_
    valid_rows = check_ == check_  # True where not NaN
    # Removed: a dead `new_arr_list` block from the original that also
    # repeatedly mutated the mask array without affecting the result.
    return [np.array(param_[valid_rows]) for param_ in array_list]
def coincidence_zero(arr_1, arr_2):
    """Return the elements of arr_1 and arr_2 at positions where their product is non-zero."""
    # rows where either value is 0 are dropped (NaN rows are kept, since NaN != 0)
    product_ = arr_1 * arr_2
    keep_ = product_ != 0
    return arr_1[keep_], arr_2[keep_]
def discriminate(X_, Y_, Z_, value_disc_list, discrmnt_invert_bin=False):
    """
    NaN-mask X_ and Y_ according to where Z_ falls relative to the interval
    [value_disc_list[0], value_disc_list[1]].

    By default, samples with value_disc_list[0] <= Z_ <= value_disc_list[1]
    are kept; with discrmnt_invert_bin=True, samples with Z_ <= value_disc_list[0]
    or Z_ >= value_disc_list[1] are kept instead.

    :return: (X_masked, Y_masked) with NaN at the rejected positions
    """
    mask_ = np.ones(Z_.shape[0])
    if discrmnt_invert_bin:
        # keep values outside the interval
        mask_[Z_ > value_disc_list[0]] = np.nan
        mask_[Z_ >= value_disc_list[1]] = 1
    else:
        # keep values inside the interval
        mask_[Z_ < value_disc_list[0]] = np.nan
        mask_[Z_ > value_disc_list[1]] = np.nan
    return X_ * mask_, Y_ * mask_
def add_ratio_to_values(header_, values_, nominator_index, denominator_index, ratio_name, normalization_value=1.):
    """
    Append a ratio column (normalization_value * nominator / denominator) to a
    data matrix, and its name to the header.

    :return: (header_new, values_new) with one extra column/name
    """
    ratio_column = normalization_value * values_[:, nominator_index] / values_[:, denominator_index]
    header_new = np.append(header_, ratio_name)
    values_new = np.column_stack((values_, ratio_column))
    return header_new, values_new
def bin_data(x_val_org, y_val_org, start_bin_edge=0, bin_size=1, min_bin_population=1):
    """
    Group y values into fixed-width bins of x.

    :param x_val_org: 1D array of x values (the bin coordinate)
    :param y_val_org: 1D array of y values, same length as x_val_org
    :param start_bin_edge: left edge of the first bin
    :param bin_size: width of each bin, in units of x
    :param min_bin_population: bins with fewer valid y values than this are returned empty
    :return: (x_binned, y_binned) where x_binned holds the left bin edges
             (as an int array when bin_size >= 1) and y_binned is a list of
             lists of the y values falling in each bin
    """
    # get coincidences only (drop rows where either x or y is NaN)
    x_val, y_val = coincidence(x_val_org, y_val_org)
    # combine x and y in matrix
    M_ = np.column_stack((x_val, y_val))
    # checking if always ascending to increase efficiency (sort only if needed)
    always_ascending = 1
    for x in range(x_val.shape[0]-1):
        if x_val[x] == x_val[x] and x_val[x+1] == x_val[x+1]:
            if x_val[x+1] < x_val[x]:
                always_ascending = 0
    if always_ascending == 0:
        M_sorted = M_[M_[:, 0].argsort()]  # sort by first column
        M_ = M_sorted
    # convert data to list of bins; `last_row` lets each bin's scan resume
    # where the previous bin stopped (single pass over the sorted data)
    y_binned = []
    x_binned = []
    last_row = 0
    last_row_temp = last_row
    while start_bin_edge <= np.nanmax(x_val):
        y_val_list = []
        for row_ in range(last_row, M_.shape[0]):
            if start_bin_edge <= M_[row_, 0] < start_bin_edge + bin_size:
                if M_[row_, 1] == M_[row_, 1]:  # keep only non-NaN y
                    y_val_list.append(M_[row_, 1])
                    last_row_temp = row_
            if M_[row_, 0] >= start_bin_edge + bin_size:
                last_row_temp = row_
                break
        x_binned.append(start_bin_edge)
        # under-populated bins are returned empty rather than dropped
        if len(y_val_list) >= min_bin_population:
            y_binned.append(y_val_list)
        else:
            y_binned.append([])
        start_bin_edge += bin_size
        last_row = last_row_temp
    # return integer bin edges when the bin size allows it
    if bin_size >= 1:
        x_binned_int = np.array(x_binned, dtype=int)
    else:
        x_binned_int = x_binned
    return x_binned_int, y_binned
def shiftedColorMap(cmap, midpoint=0.5, name='shiftedcmap'):
    """
    Create (and register with pyplot) a colormap whose center color is moved
    to `midpoint`.

    Useful with diverging colormaps when the data range is not symmetric
    about the center value (e.g. vmin=-5, vmax=10 -> midpoint=5/15).

    :param cmap: source matplotlib colormap
    :param midpoint: position in [0, 1] of the output map that receives the
        source map's middle color
    :param name: name under which the new colormap is registered
    :return: the new LinearSegmentedColormap

    NOTE(review): plt.register_cmap is deprecated/removed in recent
    matplotlib (use matplotlib.colormaps.register) -- confirm the pinned
    matplotlib version supports it.
    """
    cdict = {
        'red': [],
        'green': [],
        'blue': [],
        'alpha': []
    }
    # regular index to compute the colors
    reg_index = np.linspace(0, 1, 257)
    # shifted index to match the data (denser sampling on one side of the midpoint)
    shift_index = np.hstack([
        np.linspace(0.0, midpoint, 128, endpoint=False),
        np.linspace(midpoint, 1.0, 129, endpoint=True)
    ])
    for ri, si in zip(reg_index, shift_index):
        r, g, b, a = cmap(ri)
        cdict['red'].append((si, r, r))
        cdict['green'].append((si, g, g))
        cdict['blue'].append((si, b, b))
        cdict['alpha'].append((si, a, a))
    newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
    plt.register_cmap(cmap=newcmap)
    return newcmap
def student_t_test(arr_1, arr_2):
    """Two-sample Student t-test with NaNs omitted; returns (statistic, pvalue)."""
    result_ = ttest_ind(arr_1, arr_2, nan_policy='omit')
    return result_
def k_means_clusters(array_, cluster_number, forced_centers=None):
    """
    Cluster the rows of array_ using k-means (scipy.cluster.vq).

    :param array_: (n_observations, n_features) array; scipy's kmeans expects
        it to be whitened/scaled by the caller for meaningful distances
    :param cluster_number: number of clusters to fit (ignored when
        forced_centers is given)
    :param forced_centers: optional precomputed centroids; if given, rows are
        only assigned to these centers (no fitting is performed)
    :return: (centers, data_id) -- the centroids and the per-row cluster index
    """
    if forced_centers is None:
        centers_, x = kmeans(array_, cluster_number)
        data_id, x = vq(array_, centers_)
        return centers_, data_id
    else:
        data_id, x = vq(array_, forced_centers)
        return forced_centers, data_id
def grid_(x, y, z, resX=100, resY=100):
    """
    Interpolate scattered 3-column data (x, y, z) onto a regular grid for plotting.

    :param x: 1D sequence of x coordinates
    :param y: 1D sequence of y coordinates
    :param z: 1D sequence of values at (x, y)
    :param resX: number of grid columns (default 100)
    :param resY: number of grid rows (default 100)
    :return: X, Y, Z 2D arrays suitable for contourf/pcolormesh
    """
    # matplotlib.mlab.griddata was removed from matplotlib (3.1+), so use the
    # scipy equivalent (linear interpolation; NaN outside the convex hull).
    from scipy.interpolate import griddata as scipy_griddata
    xi = np.linspace(min(x), max(x), resX)
    yi = np.linspace(min(y), max(y), resY)
    X, Y = np.meshgrid(xi, yi)
    Z = scipy_griddata((np.asarray(x), np.asarray(y)), np.asarray(z), (X, Y), method='linear')
    return X, Y, Z
def find_max_index_2d_array(array_):
    """Return the (row, col) position of the maximum value in a 2D array."""
    flat_position = np.argmax(array_)
    return np.unravel_index(flat_position, array_.shape)
def find_min_index_2d_array(array_):
    """Return the (row, col) position of the minimum value in a 2D array."""
    flat_position = np.argmin(array_)
    return np.unravel_index(flat_position, array_.shape)
def find_max_index_1d_array(array_):
    """Return the index of the maximum value in a 1D array."""
    return np.argmax(array_)
def find_min_index_1d_array(array_):
    """Return the index of the minimum value in a 1D array."""
    return np.argmin(array_)
def time_series_interpolate_discrete(Index_, Values_, index_step, first_index,
                                     position_=0., last_index=None):
    """
    Interpolate Values_ onto a regular index grid starting at first_index with
    step index_step.

    :param Index_: n by 1 numpy array with the (possibly irregular) coordinate of each value
    :param Values_: n by m numpy array, values to be interpolated
    :param index_step: step of the output grid, in same units as Index_
    :param first_index: first discrete index of the output grid
    :param position_: where the stamp is located within a step; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
    :param last_index: in case you want to force the returned series to some fixed period/length
    :return: Index_interp, Values_interp

    NOTE(review): np.interp only accepts 1-D value arrays, so the 2-D branch
    below appears untested; Values_mean is also allocated but never used.
    """
    # checking if always ascending to increase efficiency (sort only if needed)
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x] == Index_[x] and Index_[x+1] == Index_[x+1]:
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        MM_ = np.column_stack((Index_, Values_))
        MM_sorted = MM_[MM_[:, 0].argsort()]  # sort by first column
        Index_ = MM_sorted[:, 0]
        Values_ = MM_sorted[:, 1:]
    # error checking!
    if Index_.shape[0] != Values_.shape[0]:
        print('error during shape check! Index_.shape[0] != Values_.shape[0]')
        return None, None
    if Index_[-1] < first_index:
        print('error during shape check! Index_[-1] < first_index')
        return None, None
    # initialize output matrices
    if last_index is None:
        final_index = np.nanmax(Index_)
    else:
        final_index = last_index
    total_averaged_rows = int((final_index-first_index) / index_step) + 1
    # NOTE(review): Values_mean is dead code -- allocated but never read below.
    if len(Values_.shape) == 1:
        Values_mean = np.zeros(total_averaged_rows)
        Values_mean[:] = np.nan
    else:
        Values_mean = np.zeros((total_averaged_rows, Values_.shape[1]))
        Values_mean[:, :] = np.nan
    Index_interp = np.zeros(total_averaged_rows)
    for r_ in range(total_averaged_rows):
        Index_interp[r_] = first_index + (r_ * index_step)
    # shift the query grid by position_ before interpolating, then shift back
    Index_interp -= (position_ * index_step)
    Values_interp = np.interp(Index_interp, Index_, Values_)
    Index_interp = Index_interp + (position_ * index_step)
    return Index_interp, Values_interp
def array_2D_sort_ascending_by_column(array_, column_=0):
    """Return a copy of array_ with the rows sorted ascending by the given column."""
    row_order = np.argsort(array_[:, column_])
    return array_[row_order]
def get_ax_range(ax):
    """Return the axis limits of a matplotlib axes as (x_min, x_max, y_min, y_max)."""
    x_min, x_max, y_min, y_max = ax.axis()
    return x_min, x_max, y_min, y_max
def get_array_perimeter_only(array_):
    """Return the border cells of a 2D array, clockwise from the top-left corner."""
    top_edge = array_[0, :-1]
    right_edge = array_[:-1, -1]
    bottom_edge = array_[-1, ::-1]
    left_edge = array_[-2:0:-1, 0]
    return np.concatenate([top_edge, right_edge, bottom_edge, left_edge])
# WRF
def wrf_var_search(wrf_nc_file, description_str):
    """
    Print the name and description of every variable in a WRF file whose
    'description' attribute contains description_str (case-insensitive).

    :param wrf_nc_file: open netCDF4 Dataset (or any object with a .variables mapping)
    :param description_str: substring to look for in the variables' descriptions
    :return: None (matches are printed to stdout)
    """
    description_str_lower = description_str.lower()
    for var_ in sorted(wrf_nc_file.variables):
        # variables without a usable 'description' attribute are skipped
        # explicitly instead of via a bare `except:` (which also hid real errors)
        description_ = getattr(wrf_nc_file.variables[var_], 'description', None)
        if isinstance(description_, str) and description_str_lower in description_.lower():
            print(var_, '|', description_)
def create_virtual_sonde_from_wrf(sonde_dict, filelist_wrf_output,
                                  wrf_filename_time_format = 'wrfout_d03_%Y-%m-%d_%H_%M_%S'):
    """
    Build a "virtual sonde" by sampling WRF output along a real radiosonde's trajectory.

    For each point of the real sonde (height/pressure/time/lat/lon), the nearest
    WRF grid cell, output file (in time) and model level (in pressure) are found
    and the model state there is extracted. Consecutive sonde points that map to
    the same WRF cell/file/level are only sampled once.

    :param sonde_dict: dict with keys 'hght' (m ASL), 'pres' (hPa),
        'time' (seconds since epoch), 'lati' and 'long' (degrees)
    :param filelist_wrf_output: list of wrfout file paths covering the sonde period
    :param wrf_filename_time_format: strptime format used to parse the file names
    :return: dict with sonde-style keys ('hght', 'pres', 'temp', 'dwpt', 'sknt',
        'drct', 'relh', 'time', 'lati', 'long') holding the WRF-sampled profile
    """
    # create time array from the file names (path stripped first)
    filelist_wrf_output_noPath = []
    for filename_ in filelist_wrf_output:
        filelist_wrf_output_noPath.append(filename_.split('/')[-1])
    wrf_time_file_list = np.array(time_str_to_seconds(filelist_wrf_output_noPath, wrf_filename_time_format))
    # create lat and lon arrays (mass grid plus U/V staggered grids)
    wrf_domain_file = nc.Dataset(filelist_wrf_output[0])
    wrf_lat = wrf_domain_file.variables['XLAT'][0, :, :].filled(np.nan)
    wrf_lon = wrf_domain_file.variables['XLONG'][0, :, :].filled(np.nan)
    wrf_lat_U = wrf_domain_file.variables['XLAT_U'][0, :, :].filled(np.nan)
    wrf_lon_U = wrf_domain_file.variables['XLONG_U'][0, :, :].filled(np.nan)
    wrf_lat_V = wrf_domain_file.variables['XLAT_V'][0, :, :].filled(np.nan)
    wrf_lon_V = wrf_domain_file.variables['XLONG_V'][0, :, :].filled(np.nan)
    wrf_domain_file.close()
    # load sonde's profile
    sonde_hght = sonde_dict['hght']  # m ASL
    sonde_pres = sonde_dict['pres']  # hPa
    sonde_time = sonde_dict['time']  # seconds since epoc
    sonde_lati = sonde_dict['lati']  # degrees
    sonde_long = sonde_dict['long']  # degrees
    # create output lists of virtual sonde
    list_p__ = []
    list_hgh = []
    list_th_ = []
    list_th0 = []
    list_qv_ = []
    list_U__ = []
    list_V__ = []
    list_tim = []
    list_lat = []
    list_lon = []
    wrf_point_abs_address_old = 0
    # loop thru real sonde's points
    for t_ in range(sonde_hght.shape[0]):
        p_progress_bar(t_, sonde_hght.shape[0])
        point_hght = sonde_hght[t_]
        point_pres = sonde_pres[t_]
        point_time = sonde_time[t_]
        point_lati = sonde_lati[t_]
        point_long = sonde_long[t_]
        # find closest cell via lat, lon (separately for mass, U and V grids)
        index_tuple = find_index_from_lat_lon_2D_arrays(wrf_lat,wrf_lon, point_lati,point_long)
        index_tuple_U = find_index_from_lat_lon_2D_arrays(wrf_lat_U,wrf_lon_U, point_lati,point_long)
        index_tuple_V = find_index_from_lat_lon_2D_arrays(wrf_lat_V,wrf_lon_V, point_lati,point_long)
        # find closest file via time
        file_index = time_to_row_sec(wrf_time_file_list, point_time)
        # open wrf file
        wrf_domain_file = nc.Dataset(filelist_wrf_output[file_index])
        # get pressure array from wrf (base PB + perturbation P, Pa -> hPa)
        wrf_press = (wrf_domain_file.variables['PB'][0, :, index_tuple[0], index_tuple[1]].data +
                     wrf_domain_file.variables['P'][0, :, index_tuple[0], index_tuple[1]].data) / 100  # hPa
        # find closest model layer via pressure
        layer_index = find_min_index_1d_array(np.abs(wrf_press - point_pres))
        # define point absolute address and check if it is a new point
        # (skip re-sampling when consecutive sonde points map to the same WRF point)
        wrf_point_abs_address_new = (index_tuple[0], index_tuple[1], file_index, layer_index)
        if wrf_point_abs_address_new != wrf_point_abs_address_old:
            wrf_point_abs_address_old = wrf_point_abs_address_new
            # get wrf data
            index_tuple_full = (0, layer_index, index_tuple[0], index_tuple[1])
            index_tuple_full_U = (0, layer_index, index_tuple_U[0], index_tuple_U[1])
            index_tuple_full_V = (0, layer_index, index_tuple_V[0], index_tuple_V[1])
            # save to arrays
            list_p__.append(float(wrf_press[layer_index]))
            list_hgh.append(float(point_hght))
            list_th_.append(float(wrf_domain_file.variables['T'][index_tuple_full]))
            list_th0.append(float(wrf_domain_file.variables['T00'][0]))
            list_qv_.append(float(wrf_domain_file.variables['QVAPOR'][index_tuple_full]))
            list_U__.append(float(wrf_domain_file.variables['U'][index_tuple_full_U]))
            list_V__.append(float(wrf_domain_file.variables['V'][index_tuple_full_V]))
            list_tim.append(float(wrf_time_file_list[file_index]))
            list_lat.append(float(wrf_lat[index_tuple[0], index_tuple[1]]))
            list_lon.append(float(wrf_lon[index_tuple[0], index_tuple[1]]))
        wrf_domain_file.close()
    # convert lists to arrays
    array_p__ = np.array(list_p__)
    array_hgh = np.array(list_hgh)
    array_th_ = np.array(list_th_)
    array_th0 = np.array(list_th0)
    array_qv_ = np.array(list_qv_)
    array_U__ = np.array(list_U__)
    array_V__ = np.array(list_V__)
    array_tim = np.array(list_tim)
    array_lat = np.array(list_lat)
    array_lon = np.array(list_lon)
    # calculate derivative variables (temperature, dew point, RH, wind speed/direction)
    wrf_temp_K = calculate_temperature_from_potential_temperature(array_th_ + array_th0, array_p__)
    wrf_temp_C = kelvin_to_celsius(wrf_temp_K)
    wrf_e = MixR2VaporPress(array_qv_, array_p__*100)
    wrf_td_C = DewPoint(wrf_e)
    # cap the dew point at the air temperature
    wrf_td_C[wrf_td_C > wrf_temp_C] = wrf_temp_C[wrf_td_C > wrf_temp_C]
    wrf_RH = calculate_RH_from_QV_T_P(array_qv_, wrf_temp_K, array_p__*100)
    wrf_WD, wrf_WS = cart_to_polar(array_V__, array_U__)
    # convert mathematical wind direction to meteorological convention (0-360)
    wrf_WD_met = wrf_WD + 180
    wrf_WD_met[wrf_WD_met >= 360] = wrf_WD_met[wrf_WD_met >= 360] - 360
    wrf_WS_knots = ws_ms_to_knots(wrf_WS)
    # create virtual sonde dict
    wrf_sonde_dict = {}
    wrf_sonde_dict['hght'] = array_hgh
    wrf_sonde_dict['pres'] = array_p__
    wrf_sonde_dict['temp'] = wrf_temp_C
    wrf_sonde_dict['dwpt'] = wrf_td_C
    wrf_sonde_dict['sknt'] = wrf_WS_knots
    wrf_sonde_dict['drct'] = wrf_WD_met
    wrf_sonde_dict['relh'] = wrf_RH
    wrf_sonde_dict['time'] = array_tim
    wrf_sonde_dict['lati'] = array_lat
    wrf_sonde_dict['long'] = array_lon
    return wrf_sonde_dict
def wrf_get_temp_K(wrf_nc):
    """Return the 3D air-temperature field [K] from a WRF output file.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    :return: temperature array [K] for the first time step
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    # total pressure = base state (PB) + perturbation (P), Pa -> hPa
    press_hPa = (wrf_ds.variables['PB'][0, :, :, :].data +
                 wrf_ds.variables['P'][0, :, :, :].data) / 100  # hPa
    # total potential temperature = perturbation theta (T) + base theta (T00)
    theta_K = (wrf_ds.variables['T'][0, :, :, :].data +
               wrf_ds.variables['T00'][0].data)  # K
    temp_K = calculate_temperature_from_potential_temperature(theta_K, press_hPa)
    if opened_here:
        wrf_ds.close()
    return temp_K
def wrf_get_press_hPa(wrf_nc):
    """Return the 3D pressure field [hPa] from a WRF output file.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    :return: pressure array [hPa] for the first time step
    """
    opened_here = type(wrf_nc) is str
    if opened_here:
        wrf_ds = nc.Dataset(wrf_nc)
    else:
        wrf_ds = wrf_nc
    # total pressure = base state (PB) + perturbation (P); Pa -> hPa
    press_hPa = (wrf_ds.variables['PB'][0, :, :, :].data +
                 wrf_ds.variables['P'][0, :, :, :].data) / 100  # hPa
    if opened_here:
        wrf_ds.close()
    return press_hPa
def wrf_get_height_m(wrf_nc):
    """Return the 3D geopotential height field [m] from a WRF output file.

    Height = (PH + PHB) / g. The top staggered level is dropped (``:-1``)
    so the vertical dimension matches the mass grid.
    NOTE(review): this truncates rather than averages the staggered levels
    onto the mass grid — confirm that is the intended de-staggering.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    :return: height array [m] for the first time step
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    height_m = (wrf_ds.variables['PH'][0, :-1, :, :].data +
                wrf_ds.variables['PHB'][0, :-1, :, :].data) / gravity_
    if opened_here:
        wrf_ds.close()
    return height_m
def wrf_get_terrain_height_m(wrf_nc):
    """Return the 2D terrain height [m]: (PH + PHB) / g at the lowest level.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    :return: 2D terrain height array [m] for the first time step
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    terrain_m = (wrf_ds.variables['PH'][0, 0, :, :].data +
                 wrf_ds.variables['PHB'][0, 0, :, :].data) / gravity_
    if opened_here:
        wrf_ds.close()
    return terrain_m
def wrf_get_water_vapor_mixing_ratio(wrf_nc):
    """Return the 3D water-vapor mixing ratio (QVAPOR) for the first time step.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    qvapor = wrf_ds.variables['QVAPOR'][0, :, :, :].data
    if opened_here:
        wrf_ds.close()
    return qvapor
def wrf_get_cloud_water_mixing_ratio(wrf_nc):
    """Return the 3D cloud-water mixing ratio (QCLOUD) for the first time step.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    qcloud = wrf_ds.variables['QCLOUD'][0, :, :, :].data
    if opened_here:
        wrf_ds.close()
    return qcloud
def wrf_get_ice_mixing_ratio(wrf_nc):
    """Return the 3D ice mixing ratio (QICE) for the first time step.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    qice = wrf_ds.variables['QICE'][0, :, :, :].data
    if opened_here:
        wrf_ds.close()
    return qice
def wrf_get_lat_lon(wrf_nc):
    """Return the 2D latitude/longitude grids, with masked cells filled as NaN.

    :param wrf_nc: an open netCDF dataset object, or a path string
                   (opened and closed internally when a string is given)
    :return: (lat_2d, lon_2d) arrays for the first time step
    """
    opened_here = type(wrf_nc) is str
    wrf_ds = nc.Dataset(wrf_nc) if opened_here else wrf_nc
    lat_2d = wrf_ds.variables['XLAT'][0, :, :].filled(np.nan)
    lon_2d = wrf_ds.variables['XLONG'][0, :, :].filled(np.nan)
    if opened_here:
        wrf_ds.close()
    return lat_2d, lon_2d
def wrf_rename_files_fix_time_format(filename_original_list, original_character=':', replacement_character='_'):
    """Rename files, replacing a character (default ':') that is awkward in
    filenames with a replacement character (default '_').

    :param filename_original_list: list of full-path filenames to rename
    :param original_character: character to replace
    :param replacement_character: character to substitute in its place
    """
    total_files = len(filename_original_list)
    for file_index, old_name in enumerate(filename_original_list):
        p_progress_bar(file_index, total_files)
        os.rename(old_name, old_name.replace(original_character, replacement_character))
# meteorology
def calculate_saturation_vapor_pressure_wexler(T_array_K):
    """Saturation vapor pressure over liquid water, Wexler formulation.

    :param T_array_K: temperature [K], scalar or numpy array
    :return: saturation vapor pressure [mb (hPa)]
    """
    # coefficients for the powers T**-2 .. T**4 of the fit polynomial
    power_coeffs = ((-2, -0.29912729E+4),
                    (-1, -0.60170128E+4),
                    (0, 0.1887643854E+2),
                    (1, -0.28354721E-1),
                    (2, 0.17838301E-4),
                    (3, -0.84150417E-9),
                    (4, 0.44412543E-12))
    exponent = sum(coeff * (T_array_K ** power) for power, coeff in power_coeffs)
    # logarithmic term of the fit
    exponent = exponent + 0.2858487E+1 * np.log(T_array_K)
    e_s_pa = np.exp(exponent)
    return e_s_pa * 0.01  # Pa -> mb (hPa)
def calculate_saturation_mixing_ratio(P_array_mb, T_array_K):
    """Saturation mixing ratio [g/kg] from pressure [mb] and temperature [K].

    Uses the Wexler saturation vapor pressure; 621.97 scales the dry/vapor
    gas-constant ratio into g/kg.
    """
    e_s = calculate_saturation_vapor_pressure_wexler(T_array_K)
    return 621.97 * e_s / (P_array_mb - e_s)
def calculate_potential_temperature(T_array_K, P_array_hPa):
    """Potential temperature [K] referenced to 1000 hPa.

    :param T_array_K: temperature [K]
    :param P_array_hPa: pressure [hPa]
    """
    return T_array_K * (1000 / P_array_hPa) ** poisson_
def calculate_equivalent_potential_temperature(T_array_K, P_array_hPa, R_array_kg_over_kg):
    """Equivalent potential temperature [K].

    :param T_array_K: temperature [K]
    :param P_array_hPa: pressure [hPa]
    :param R_array_kg_over_kg: water-vapor mixing ratio [kg/kg]
    """
    reference_p_hPa = 1000
    # temperature after hypothetically releasing all latent heat of the vapor
    T_equiv = T_array_K + (latent_heat_v * R_array_kg_over_kg / heat_capacity__Cp)
    return T_equiv * (reference_p_hPa / P_array_hPa) ** poisson_
def calculate_temperature_from_potential_temperature(theta_array_K, P_array_hPa):
    """Invert potential temperature (1000 hPa reference) back to temperature [K].

    :param theta_array_K: potential temperature [K]
    :param P_array_hPa: pressure [hPa]
    """
    return theta_array_K * (P_array_hPa / 1000) ** poisson_
def calculate_mountain_height_from_sonde(sonde_dict):
    """
    Calculate the squared non-dimensional mountain height (H_hat^2) profile
    from a radiosonde dictionary, using a fixed 1000 m mountain height.

    The Brunt-Vaisala term is computed with the dry formula where RH < 90 %
    and with the saturated (moist) formula where RH >= 90 %.

    :param sonde_dict: dict with keys 'SKNT' (wind speed, knots), 'DRCT'
        (wind direction, degrees), 'TEMP' (Celsius), 'RELH' (%), 'PRES'
        (hPa), 'HGHT' (m), 'MIXR' (g/kg), 'THTA' (K)
    :return: array of H_hat^2 values, one per sounding level
    """
    # Set initial conditions
    height = 1000 # metres
    # define arrays
    WS_array = ws_knots_to_ms(sonde_dict['SKNT'])
    U_array, V_array = polar_to_cart(sonde_dict['DRCT'], WS_array)
    T_array = celsius_to_kelvin(sonde_dict['TEMP'])
    RH_array = sonde_dict['RELH']
    P_array = sonde_dict['PRES']
    Z_array = sonde_dict['HGHT']
    Q_array = sonde_dict['MIXR']/1000  # g/kg -> kg/kg (not used below)
    TH_array = sonde_dict['THTA']
    # calculated arrays
    q_s = calculate_saturation_mixing_ratio(P_array, T_array)
    e_ = gas_const_dry / gas_const_water
    # gradients (per-level finite differences along the profile)
    d_ln_TH = np.gradient(np.log(TH_array))
    d_z = np.gradient(Z_array)
    d_q_s = np.gradient(q_s)
    # Dry Brunt - Vaisala (zeroed where near-saturated; moist term used there)
    N_dry = gravity_ * d_ln_TH / d_z
    N_dry[RH_array >= 90] = 0
    # Moist Brunt - Vaisala (saturated formula)
    term_1_1 = 1 + ( latent_heat_v * q_s / (gas_const_dry * T_array) )
    term_1_2 = 1 + ( e_ * (latent_heat_v**2) * q_s / (heat_capacity__Cp * gas_const_dry * (T_array**2) ) )
    term_2_1 = d_ln_TH / d_z
    term_2_2 = latent_heat_v / (heat_capacity__Cp * T_array)
    term_2_3 = d_q_s / d_z
    term_3 = d_q_s / d_z # should be d_q_w but sonde data has no cloud water data
    N_moist = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
    N_moist[RH_array < 90] = 0
    # define output array
    # NOTE(review): N_dry/N_moist above already have units of N^2
    # (g * dlnTheta/dz), so squaring here yields N^4; the ERA5/WRF versions
    # below use N_2 = g * dlnTheta/dz directly — confirm which is intended.
    N_2 = (N_dry + N_moist)**2
    H_hat_2 = N_2 * (height**2) / (U_array**2)
    return H_hat_2
def calculate_mountain_height_from_era5(era5_pressures_filename, era5_surface_filename, point_lat, point_lon,
                                        return_arrays=False, u_wind_mode='u', range_line_degrees=None,
                                        time_start_str_YYYYmmDDHHMM='',time_stop_str_YYYYmmDDHHMM='',
                                        reference_height=1000, return_debug_arrays=False):
    """
    Calculate the squared non-dimensional mountain height (H_hat^2) time
    series at one grid point from a pair of ERA5 netCDF files.

    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line must not be none
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degrees (decimals) from north, clockwise, of the mountain range line.

    :param era5_pressures_filename: ERA5 pressure-levels netCDF file
    :param era5_surface_filename: ERA5 surface netCDF file (must share the
        time and lat/lon grids of the pressure file)
    :param point_lat: latitude of the point of interest
    :param point_lon: longitude of the point of interest
    :param return_arrays: if True, return (time, values) arrays instead of a dict
    :param reference_height: mean height of the mountain range [m]
    :param return_debug_arrays: with return_arrays, also return the last
        N^2 and u^2 values (final loop iteration only)
    :return: dict {time: H_hat^2} or arrays, or None on file mismatch
    """
    # load files
    era5_sur = nc.Dataset(era5_surface_filename, 'r')
    era5_pre = nc.Dataset(era5_pressures_filename, 'r')
    # check if times are the same for both files
    dif_sum = np.sum(np.abs(era5_pre.variables['time'][:] - era5_sur.variables['time'][:]))
    if dif_sum > 0:
        print('Error, times in selected files are not the same')
        return
    # check if lat lon are the same for both files
    dif_sum = np.sum(np.abs(era5_pre.variables['latitude'][:] - era5_sur.variables['latitude'][:]))
    dif_sum = dif_sum + np.sum(np.abs(era5_pre.variables['longitude'][:] - era5_sur.variables['longitude'][:]))
    if dif_sum > 0:
        print('Error, latitude or longitude in selected files are not the same')
        return
    # find lat lon index
    lat_index, lon_index = find_index_from_lat_lon(era5_sur.variables['latitude'][:],
                                                   era5_sur.variables['longitude'][:], [point_lat], [point_lon])
    lat_index = lat_index[0]
    lon_index = lon_index[0]
    # copy arrays
    time_array = time_era5_to_seconds(np.array(era5_sur.variables['time'][:]))
    # NOTE(review): with the defaults below, r_2 = -1 slices off the final
    # time step of every array — confirm this is intended.
    r_1 = 0
    r_2 = -1
    if time_start_str_YYYYmmDDHHMM != '':
        r_1 = time_to_row_str(time_array, time_start_str_YYYYmmDDHHMM)
    if time_stop_str_YYYYmmDDHHMM != '':
        r_2 = time_to_row_str(time_array, time_stop_str_YYYYmmDDHHMM)
    time_array = time_array[r_1:r_2]
    sp_array = np.array(era5_sur.variables['sp'][r_1:r_2, lat_index, lon_index]) / 100 # hPa
    P_array = np.array(era5_pre.variables['level'][:]) # hPa
    # wind component used in the denominator of H_hat^2
    if range_line_degrees is not None:
        # component of the wind normal to the mountain-range line
        WD_, WS_ = cart_to_polar(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]).flatten(),
                                 np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]).flatten())
        WD_delta = WD_ - range_line_degrees
        range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
        U_array = range_normal_component.reshape((sp_array.shape[0], P_array.shape[0]))
    else:
        if u_wind_mode == 'u':
            U_array = np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index])
        else:
            # full wind speed
            U_array = np.sqrt(np.array(era5_pre.variables['v'][r_1:r_2,:,lat_index,lon_index]) ** 2 +
                              np.array(era5_pre.variables['u'][r_1:r_2,:,lat_index,lon_index]) ** 2)
    T_array = np.array(era5_pre.variables['t'][r_1:r_2, :, lat_index, lon_index])
    Q_L_array = np.array(era5_pre.variables['crwc'][r_1:r_2, :, lat_index, lon_index])
    RH_array = np.array(era5_pre.variables['r'][r_1:r_2, :, lat_index, lon_index])
    Z_array = np.array(era5_pre.variables['z'][r_1:r_2, :, lat_index, lon_index]) / gravity_
    # calculate arrays
    TH_array = np.zeros((time_array.shape[0], P_array.shape[0]), dtype=float)
    for t_ in range(time_array.shape[0]):
        TH_array[t_,:] = calculate_potential_temperature(T_array[t_,:], P_array[:])
    # calculated arrays
    q_s = calculate_saturation_mixing_ratio(P_array, T_array)
    e_ = gas_const_dry / gas_const_water
    # create output dict
    H_hat_2 = {}
    # loop tru time stamps
    for t_ in range(time_array.shape[0]):
        p_progress_bar(t_,time_array.shape[0])
        # find surface pressure at this time stamp
        surface_p = sp_array[t_]
        # find pressure at 1000 meters
        pressure_1000m = np.interp(reference_height, Z_array[t_, :], P_array)
        pressure_1000m_index = np.argmin(np.abs(P_array - pressure_1000m))
        # find extrapolations (interpolate in log-pressure to the surface,
        # and in height to the reference level)
        ql_0 = np.interp(np.log(surface_p), np.log(P_array), Q_L_array[t_, :])
        z__0 = np.interp(np.log(surface_p), np.log(P_array), Z_array[t_, :])
        th_0 = np.interp(np.log(surface_p), np.log(P_array), TH_array[t_, :])
        qs_0 = np.interp(np.log(surface_p), np.log(P_array), q_s[t_, :])
        t__1000 = np.interp(reference_height, Z_array[t_, :], T_array[t_, :])
        u__1000 = np.interp(reference_height, Z_array[t_, :], U_array[t_, :])
        ql_1000 = np.interp(reference_height, Z_array[t_, :], Q_L_array[t_, :])
        z__1000 = reference_height
        th_1000 = np.interp(reference_height, Z_array[t_, :], TH_array[t_, :])
        qs_1000 = np.interp(reference_height, Z_array[t_, :], q_s[t_, :])
        # gradients (bulk differences between reference level and surface)
        d_ln_TH = np.log(th_1000) - np.log(th_0)
        d_z = z__1000 - z__0
        d_q_s = qs_1000 - qs_0
        d_q_w = (d_q_s) + (ql_1000 - ql_0)
        # Brunt - Vaisala
        if np.max(RH_array[t_, pressure_1000m_index:])>= 90:
            # Moist (saturated) formula
            term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
            term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
                             (heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
            term_2_1 = d_ln_TH / d_z
            term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
            term_2_3 = d_q_s / d_z
            term_3 = d_q_w / d_z
            N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
        else:
            # Dry
            N_2 = gravity_ * d_ln_TH / d_z
        # populate each time stamp
        H_hat_2[time_array[t_]] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
    era5_sur.close()
    era5_pre.close()
    if return_arrays:
        H_hat_2_time = sorted(H_hat_2.keys())
        H_hat_2_time = np.array(H_hat_2_time)
        H_hat_2_vals = np.zeros(H_hat_2_time.shape[0], dtype=float)
        for r_ in range(H_hat_2_time.shape[0]):
            H_hat_2_vals[r_] = H_hat_2[H_hat_2_time[r_]]
        if return_debug_arrays:
            # N_2 and u__1000 here hold values from the FINAL loop iteration only
            return H_hat_2_time, H_hat_2_vals, N_2, u__1000 ** 2
        else:
            return H_hat_2_time, H_hat_2_vals
    else:
        return H_hat_2
def calculate_mountain_height_from_WRF(filename_SP, filename_PR,
                                       filename_UU, filename_VV,
                                       filename_TH, filename_QR,
                                       filename_QV, filename_PH,
                                       return_arrays=False, u_wind_mode='u', range_line_degrees=None,
                                       reference_height=1000):
    """
    Calculates the squared non-dimensional mountain height (H_hat^2) from
    WRF point-output text files.

    u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line must not be none
    if range_line_degrees is not None, u_wind_mode will automatically be set to normal_to_range
    range_line_degrees: degrees (decimals) from north, clockwise, of the mountain range line.
    :param filename_SP: fullpath filename of surface pressure file
    :param filename_PR: fullpath filename of pressure file
    :param filename_UU: fullpath filename of u wind file
    :param filename_VV: fullpath filename of v wind file
    :param filename_TH: fullpath filename of potential temperature file
    :param filename_QR: fullpath filename of rain water mixing ratio file
    :param filename_QV: fullpath filename of Water vapor mixing ratio file
    :param filename_PH: fullpath filename of geopotential height file
    :param return_arrays: if true, will return also brunt vaisalla and wind component squared
    :param u_wind_mode: can be u, wind_speed, normal_to_range. If normal_to_range, then range_line must not be none
    :param range_line_degrees: if not None, u_wind_mode will automatically be set to normal_to_range
    :param reference_height: mean height of mountain range
    :return:
    H_hat_2
    """
    # load arrays from text (rows = time stamps, columns = levels)
    SP_array = genfromtxt(filename_SP, dtype=float, skip_header=1)[:,9] / 100 # hPa
    PR_array = genfromtxt(filename_PR, dtype=float, skip_header=1)[:,1:] / 100 # hPa
    UU_array = genfromtxt(filename_UU, dtype=float, skip_header=1)[:,1:]
    VV_array = genfromtxt(filename_VV, dtype=float, skip_header=1)[:,1:]
    TH_array = genfromtxt(filename_TH, dtype=float, skip_header=1)[:,1:]
    QR_array = genfromtxt(filename_QR, dtype=float, skip_header=1)[:,1:]
    QV_array = genfromtxt(filename_QV, dtype=float, skip_header=1)[:,1:]
    Z_array = genfromtxt(filename_PH, dtype=float, skip_header=1)[:,1:] # already in meters
    # calculate arrays
    if range_line_degrees is not None:
        # component of the wind normal to the mountain-range line
        WD_, WS_ = cart_to_polar(UU_array.flatten(), VV_array.flatten())
        WD_delta = WD_ - range_line_degrees
        range_normal_component = WS_ * np.sin(np.deg2rad(WD_delta))
        U_array = range_normal_component.reshape((UU_array.shape[0], UU_array.shape[1]))
    else:
        if u_wind_mode == 'u':
            U_array = UU_array
        else:
            U_array = np.sqrt(UU_array ** 2 + VV_array ** 2)
    T_array = calculate_temperature_from_potential_temperature(TH_array, PR_array)
    RH_array = calculate_RH_from_QV_T_P(QV_array, T_array, PR_array*100)
    q_s = calculate_saturation_mixing_ratio(PR_array, T_array)
    e_ = gas_const_dry / gas_const_water
    # create output array
    H_hat_2 = np.zeros(PR_array.shape[0], dtype=float)
    # loop tru time stamps
    for r_ in range(PR_array.shape[0]):
        p_progress_bar(r_, PR_array.shape[0])
        # find surface pressure at this time stamp
        surface_p = SP_array[r_]
        # find pressure at 1000 meters
        pressure_1000m = np.interp(reference_height, Z_array[r_, :], PR_array[r_, :])
        pressure_1000m_index = np.argmin(np.abs(PR_array[r_, :] - pressure_1000m))
        # find extrapolations (interpolate in log-pressure to the surface,
        # and in height to the reference level)
        ql_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), QR_array[r_, :])
        z__0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), Z_array[r_, :])
        th_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), TH_array[r_, :])
        qs_0 = np.interp(np.log(surface_p), np.log(PR_array[r_, :]), q_s[r_, :])
        t__1000 = np.interp(reference_height, Z_array[r_, :], T_array[r_, :])
        u__1000 = np.interp(reference_height, Z_array[r_, :], U_array[r_, :])
        ql_1000 = np.interp(reference_height, Z_array[r_, :], QR_array[r_, :])
        z__1000 = reference_height
        th_1000 = np.interp(reference_height, Z_array[r_, :], TH_array[r_, :])
        qs_1000 = np.interp(reference_height, Z_array[r_, :], q_s[r_, :])
        # gradients (bulk differences between reference level and surface)
        d_ln_TH = np.log(th_1000) - np.log(th_0)
        d_z = z__1000 - z__0
        d_q_s = qs_1000 - qs_0
        d_q_w = (d_q_s) + (ql_1000 - ql_0)
        # Brunt - Vaisala
        if np.max(RH_array[r_, pressure_1000m_index:])>= 90:
            # Moist (saturated) formula
            term_1_1 = 1 + ( latent_heat_v * qs_1000 / (gas_const_dry * t__1000) )
            term_1_2 = 1 + ( e_ * (latent_heat_v**2) * qs_1000 /
                             (heat_capacity__Cp * gas_const_dry * (t__1000**2) ) )
            term_2_1 = d_ln_TH / d_z
            term_2_2 = latent_heat_v / (heat_capacity__Cp * t__1000)
            term_2_3 = d_q_s / d_z
            term_3 = d_q_w / d_z
            N_2 = gravity_ * ( (term_1_1 / term_1_2) * (term_2_1 + ( term_2_2 * term_2_3) ) - term_3 )
        else:
            # Dry
            N_2 = gravity_ * d_ln_TH / d_z
        # populate each time stamp
        H_hat_2[r_] = N_2 * (reference_height ** 2) / (u__1000 ** 2)
    if return_arrays:
        # N_2 and u__1000 here hold values from the FINAL loop iteration only
        return H_hat_2, N_2, u__1000 ** 2
    else:
        return H_hat_2
def calculate_dewpoint_from_T_RH(T_, RH_):
    """Dew point from air temperature and relative humidity.

    Magnus formula with Bolton's constants.
    :param T_: ambient temperature [Celsius]
    :param RH_: relative humidity [%]
    :return: Td_ dew point temperature [celsius]
    """
    magnus_b = 17.67
    magnus_c = 243.5
    gamma_ = np.log(RH_ / 100) + ((magnus_b * T_) / (magnus_c + T_))
    return (magnus_c * gamma_) / (magnus_b - gamma_)
def calculate_RH_from_QV_T_P(arr_qvapor, arr_temp_K, arr_press_Pa):
    """Relative humidity [%] from mixing ratio, temperature and pressure.

    :param arr_qvapor: water-vapor mixing ratio [kg/kg]
    :param arr_temp_K: temperature [K]
    :param arr_press_Pa: pressure [Pa]
    :return: RH [%] as a numpy array
    """
    # saturation vapor pressure [hPa] (Clausius-Clapeyron style fit)
    e_sat_hPa = 6.11 * e_constant ** ((2500000 / 461) * ((1 / 273) - (1 / arr_temp_K)))
    # actual vapor pressure [hPa]
    e_hPa = arr_qvapor * (arr_press_Pa / 100) / (arr_qvapor + 0.622)
    return np.array(100 * e_hPa / e_sat_hPa)
def calculate_profile_input_for_cluster_analysis_from_ERA5(p_profile, t_profile, td_profile, q_profile,
                                                           u_profile, v_profile, h_profile, surface_p):
    """
    takes data from ERA5 for only one time stamp for all pressure levels from 250 to 1000 hPa
    :param p_profile: in hPa
    :param t_profile: in Celsius
    :param td_profile: in Celsius
    :param q_profile: in kg/kg
    :param u_profile: in m/s
    :param v_profile: in m/s
    :param h_profile: in m
    :param surface_p: in hPa
    :return: surface_p, qv_, qu_, tw_, sh_, tt_
    """
    # trim profiles from surface to top
    # find which levels should be included (levels with p <= surface_p,
    # assuming p_profile is ordered top-down)
    levels_total = 0
    for i_ in range(p_profile.shape[0]):
        if p_profile[i_] > surface_p:
            break
        levels_total += 1
    ####################################### find extrapolations
    # interpolate every profile in log-pressure to the surface pressure
    surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
    surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
    surface_q = np.interp(np.log(surface_p), np.log(p_profile), q_profile)
    surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
    surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
    surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
    # create temp arrays (retained levels plus one surface-extrapolated level)
    T_array = np.zeros(levels_total + 1, dtype=float)
    Td_array = np.zeros(levels_total + 1, dtype=float)
    Q_array = np.zeros(levels_total + 1, dtype=float)
    U_array = np.zeros(levels_total + 1, dtype=float)
    V_array = np.zeros(levels_total + 1, dtype=float)
    H_array = np.zeros(levels_total + 1, dtype=float)
    P_array = np.zeros(levels_total + 1, dtype=float)
    T_array[:levels_total] = t_profile[:levels_total]
    Td_array[:levels_total] = td_profile[:levels_total]
    Q_array[:levels_total] = q_profile[:levels_total]
    U_array[:levels_total] = u_profile[:levels_total]
    V_array[:levels_total] = v_profile[:levels_total]
    H_array[:levels_total] = h_profile[:levels_total]
    P_array[:levels_total] = p_profile[:levels_total]
    T_array[-1] = surface_t
    Td_array[-1] = surface_td
    Q_array[-1] = surface_q
    U_array[-1] = surface_u
    V_array[-1] = surface_v
    H_array[-1] = surface_h
    P_array[-1] = surface_p
    ######################################
    r_850 = np.argmin(np.abs(P_array - 850))
    r_500 = np.argmin(np.abs(P_array - 500))
    dp_ = np.abs(np.gradient(P_array))
    # T850 - 2*T500 + Td850 (Total Totals-style stability index)
    tt_ = (T_array[r_850] - (2 * T_array[r_500]) + Td_array[r_850])
    # vertical (pressure-weighted) integrals of moisture flux and moisture
    qu_ = np.sum(Q_array * U_array * dp_) / gravity_
    qv_ = np.sum(Q_array * V_array * dp_) / gravity_
    tw_ = np.sum(Q_array * dp_) / gravity_
    # bulk wind shear magnitude between the 850 and 500 hPa levels
    del_u = U_array[r_850] - U_array[r_500]
    del_v = V_array[r_850] - V_array[r_500]
    del_z = H_array[r_850] - H_array[r_500]
    sh_ = ((del_u / del_z) ** 2 + (del_v / del_z) ** 2) ** 0.5
    return surface_p, qv_, qu_, tw_, sh_, tt_
def barometric_equation(presb_pa, tempb_k, deltah_m, Gamma=-0.0065):
    """Pressure at a height offset from a base level (barometric formula).

    INPUTS:
    presb_pa (pa): The base pressure
    tempb_k (K): The base temperature
    deltah_m (m): The height differential between the base height and the
    desired height
    Gamma [=-0.0065]: The atmospheric lapse rate
    OUTPUTS
    pres (pa): Pressure at the requested level
    REFERENCE:
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    temperature_ratio = tempb_k / (tempb_k + Gamma * deltah_m)
    exponent = grav * m_a / (Rstar_a * Gamma)
    return presb_pa * temperature_ratio ** exponent
def barometric_equation_inv(heightb_m, tempb_k, presb_pa,
                            prest_pa, Gamma=-0.0065):
    """Altitude at which pressure drops from presb_pa to prest_pa.

    Inverse of barometric_equation().
    INPUTS:
    heightb_m (m): The base altitude
    tempb_k (K): The base temperature
    presb_pa (pa): The base pressure
    prest_pa (pa): The pressure at the desired height
    Gamma [=-0.0065]: The atmospheric lapse rate
    OUTPUTS
    heightt_m
    REFERENCE:
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    pressure_ratio = presb_pa / prest_pa
    kappa = Rstar_a * Gamma / (grav * m_a)
    return heightb_m + tempb_k * (pressure_ratio ** kappa - 1) / Gamma
def Theta(tempk, pres, pref=100000.):
    """Potential Temperature
    INPUTS:
    tempk (K)
    pres (Pa)
    pref: Reference pressure (default 100000 Pa)
    OUTPUTS: Theta (K)
    Source: Wikipedia
    Prints a warning if a pressure value below 2000 Pa input, to ensure
    that the units were input correctly.
    """
    try:
        lowest_pres = min(pres)   # iterable input
    except TypeError:
        lowest_pres = pres        # scalar input
    if lowest_pres < 2000:
        print("WARNING: P<2000 Pa; did you input a value in hPa?")
    return tempk * (pref / pres) ** (Rs_da / Cp_da)
def TempK(theta, pres, pref=100000.):
    """Air temperature [K] from potential temperature — inverts Theta()."""
    try:
        lowest_pres = min(pres)   # iterable input
    except TypeError:
        lowest_pres = pres        # scalar input
    if lowest_pres < 2000:
        print("WARNING: P<2000 Pa; did you input a value in hPa?")
    return theta * (pres / pref) ** (Rs_da / Cp_da)
def ThetaE(tempk, pres, e):
    """Calculate Equivalent Potential Temperature
    for lowest model level (or surface)
    INPUTS:
    tempk: Temperature [K]
    pres: Pressure [Pa]
    e: Water vapour partial pressure [Pa]
    OUTPUTS:
    theta_e: equivalent potential temperature
    References:
    Eq. (9.40) from Holton (2004)
    Eq. (22) from Bolton (1980)
    <NAME> and <NAME> (2013), 'Land-Ocean Warming
    Contrast over a Wide Range of Climates: Convective Quasi-Equilibrium
    Theory and Idealized Simulations', J. Climate """
    tempc = tempk - degCtoK
    # retained for its low-pressure warning side effect (value unused below)
    theta = Theta(tempk, pres)
    # relative humidity, needed by the LCL-temperature formula
    es = VaporPressure(tempc)
    RH = 100. * e / es
    # water vapour mixing ratio
    qv = MixRatio(e, pres)
    # temperature at the Lifting Condensation Level (Bolton 1980)
    T_lcl = ((tempk - 55) * 2840 / (2840 - (np.log(RH / 100) * (tempk - 55)))) + 55
    # dry potential temperature evaluated with the dry partial pressure
    theta_l = tempk * \
        (100000. / (pres - e)) ** (Rs_da / Cp_da) * (tempk / T_lcl) ** (0.28 * qv)
    # latent-heat correction gives theta_e
    return theta_l * np.exp((Lv * qv) / (Cp_da * T_lcl))
def ThetaE_Bolton(tempk, pres, e, pref=100000.):
    """Theta_E following Bolton (1980)
    INPUTS:
    tempk: Temperature [K]
    pres: Pressure [Pa]
    e: Water vapour partial pressure [Pa]
    See http://en.wikipedia.org/wiki/Equivalent_potential_temperature
    """
    # preliminaries
    qv = MixRatio(e, pres)
    Td = DewPoint(e) + degCtoK
    kappa_d = Rs_da / Cp_da
    # temperature [K] at the Lifting Condensation Level
    TL = 56 + ((Td - 56.) ** -1 + (np.log(tempk / Td) / 800.)) ** (-1)
    # dry potential temperature evaluated at the LCL
    thetaL = tempk * (pref / (pres - e)) ** kappa_d * (tempk / TL) ** (0.28 * qv)
    # combine into Theta_E
    return thetaL * np.exp((3036. / TL - 0.78) * qv * (1 + 0.448 * qv))
def ThetaV(tempk, pres, e):
    """Virtual Potential Temperature
    INPUTS
    tempk (K)
    pres (Pa)
    e: Water vapour pressure (Pa) (Optional)
    OUTPUTS
    theta_v : Virtual potential temperature
    """
    mixing = MixRatio(e, pres)
    return Theta(tempk, pres) * (1 + mixing / Epsilon) / (1 + mixing)
def GammaW(tempk, pres):
    """Moist adiabatic lapse rate (deg C/Pa) based on the environmental
    temperature and pressure.
    INPUTS:
    tempk (K)
    pres (Pa)
    RETURNS:
    GammaW: The moist adiabatic lapse rate (Deg C/Pa)
    REFERENCE:
    http://glossary.ametsoc.org/wiki/Moist-adiabatic_lapse_rate
    (Note that I multiply by 1/(grav*rho) to give MALR in deg/Pa)
    """
    tempc = tempk - degCtoK
    es = VaporPressure(tempc)
    ws = MixRatio(es, pres)
    tempv = VirtualTemp(tempk, pres, es)
    latent = Latentc(tempc)
    Rho = pres / (Rs_da * tempv)
    # Algebraically identical to the classical (A/B)/(Cp_da*Rho) form with
    # A = 1 + L*ws/(Rs_da*T) and B = 1 + eps*L^2*ws/(Cp_da*Rs_da*T^2),
    # written here a little more clearly:
    numerator = -1. * (1.0 + latent * ws / (Rs_da * tempk))
    denominator = Rho * (Cp_da + Epsilon * latent * latent * ws / (Rs_da * tempk * tempk))
    return numerator / denominator
def DensHumid(tempk, pres, e):
    """Density of moist air from the partial pressures of dry air and vapour.
    INPUTS:
    tempk: Temperature (K)
    pres: static pressure (Pa)
    e: water vapour partial pressure (Pa)
    OUTPUTS:
    rho_air (kg/m^3)
    SOURCE: http://en.wikipedia.org/wiki/Density_of_air
    """
    dry_partial_pres = pres - e
    rho_dry = dry_partial_pres / (Rs_da * tempk)
    rho_vapour = e / (Rs_v * tempk)
    return rho_dry + rho_vapour
def Density(tempk, pres, mixr):
    """Density of moist air via the virtual temperature.
    INPUTS:
    tempk: Temperature (K)
    pres: static pressure (Pa)
    mixr: mixing ratio (kg/kg)
    OUTPUTS:
    rho_air (kg/m^3)
    """
    return pres / (Rs_da * VirtualTempFromMixR(tempk, mixr))
def VirtualTemp(tempk, pres, e):
    """Virtual Temperature
    INPUTS:
    tempk: Temperature (K)
    e: vapour pressure (Pa)
    pres: static pressure (Pa)
    OUTPUTS:
    tempv: Virtual temperature (K)
    SOURCE: hmmmm (Wikipedia)."""
    vapour_fraction = e / pres
    return tempk / (1 - vapour_fraction * (1 - Epsilon))
def VirtualTempFromMixR(tempk, mixr):
    """Virtual temperature from mixing ratio (first-order approximation).
    INPUTS:
    tempk: Temperature (K)
    mixr: Mixing Ratio (kg/kg)
    OUTPUTS:
    tempv: Virtual temperature (K)
    """
    return tempk * (1.0 + 0.6 * mixr)
def Latentc(tempc):
    """Latent heat of condensation (vapourisation) as a cubic fit in Celsius.
    INPUTS:
    tempc (C)
    OUTPUTS:
    L_w (J/kg)
    SOURCE:
    http://en.wikipedia.org/wiki/Latent_heat#Latent_heat_for_condensation_of_water
    """
    kJ_per_kg = 2500.8 - 2.36 * tempc + 0.0016 * tempc ** 2 - 0.00006 * tempc ** 3
    return 1000 * kJ_per_kg
def VaporPressure(tempc, phase="liquid"):
    """Water vapor pressure over liquid water or ice.
    INPUTS:
    tempc: (C) OR dwpt (C), if SATURATION vapour pressure is desired.
    phase: ['liquid'],'ice'. If 'liquid', do simple dew point. If 'ice',
    return saturation vapour pressure as follows:
    Tc>=0: es = es_liquid
    Tc <0: es = es_ice
    RETURNS: e_sat (Pa)
    SOURCE: http://cires.colorado.edu/~voemel/vp.html (#2:
    CIMO guide (WMO 2008), modified to return values in Pa)
    This formulation is chosen because of its appealing simplicity,
    but it performs very well with respect to the reference forms
    at temperatures above -40 C.
    """
    over_liquid = 6.112 * np.exp(17.67 * tempc / (tempc + 243.12)) * 100.
    over_ice = 6.112 * np.exp(22.46 * tempc / (tempc + 272.62)) * 100.
    if phase == "liquid":
        return over_liquid
    if phase == "ice":
        # ice formula below freezing, liquid formula at/above 0 C
        return np.where(tempc < 0, over_ice, over_liquid)
    raise NotImplementedError
def SatVap(dwpt, phase="liquid"):
    """Deprecated alias: print a warning and delegate to VaporPressure()."""
    print("WARNING: This function is deprecated, please use VaporPressure()" +
          " instead, with dwpt as argument")
    return VaporPressure(dwpt, phase)
def MixRatio(e, p):
    """Mixing ratio of water vapour
    INPUTS
    e (Pa) Water vapor pressure
    p (Pa) Ambient pressure
    RETURNS
    qv (kg kg^-1) Water vapor mixing ratio`
    """
    dry_pressure = p - e
    return Epsilon * e / dry_pressure
def MixR2VaporPress(qv, p):
    """Return Vapor Pressure given Mixing Ratio and Pressure
    INPUTS
    qv (kg kg^-1) Water vapor mixing ratio`
    p (Pa) Ambient pressure
    RETURNS
    e (Pa) Water vapor pressure
    """
    return p * qv / (Epsilon + qv)
def DewPoint(e):
    """Dew point temperature from water vapour pressure.

    Inverts Bolton's (1980, MWR, p1047) saturation vapour pressure formula.
    INPUTS:
    e (Pa) Water Vapor Pressure
    OUTPUTS:
    Td (C)
    """
    ln_ratio = np.log(e / 611.2)
    # the degCtoK offsets cancel algebraically but are kept for exactness
    Td_K = ((17.67 - ln_ratio) * degCtoK + 243.5 * ln_ratio) / (17.67 - ln_ratio)
    return Td_K - degCtoK
def WetBulb(tempc, RH):
    """Wet-bulb temperature from relative humidity and air temperature,
    empirical fit from Stull (2011).
    INPUTS:
    tempc (C)
    RH (%)
    OUTPUTS:
    tempwb (C)
    """
    term_a = tempc * np.arctan(0.151977 * (RH + 8.313659) ** 0.5)
    term_b = np.arctan(tempc + RH) - np.arctan(RH - 1.676331)
    term_c = 0.00391838 * RH ** 1.5 * np.arctan(0.023101 * RH)
    return term_a + term_b + term_c - 4.686035
# unit conversions
def convert_unit_and_save_data_ppb_ugm3(filename_, station_name):
    """Convert selected pollutant columns of a station CSV from ppb to
    ug/m3 (mg/m3 for CO), rewrite the header with the station name and new
    units, and save alongside the input as '<name>_unit_converted.csv'.

    :param filename_: full Windows-style ('\\'-separated) path of the input CSV
    :param station_name: prefix added to each data-column header
    """
    # conversion factors, per pollutant column index:
    # https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
    # http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
    parameters_unit_scaling = {'11' : 1.96, # O3
                               '10' : 1.25, # NO
                               '9' : 1.88, # NO2
                               '16' : 2.62, # SO2
                               '8' : 1.15} # CO
    new_unit_name = '[$\mu$g/m$^3$]'
    # LaTeX-formatted display names for selected columns
    parameter_name_mod = {'9' : 'NO$_2$',
                          '11' : 'O$_3$',
                          '12' : 'PM$_1$$_0$',
                          '13' : 'PM$_2$$_.$$_5$',
                          '7' : 'CO$_2$',
                          '16' : 'SO$_2$',
                          }
    # station_name = 'QF_01'
    data_array = open_csv_file(filename_)
    current_header = data_array[0,:]
    new_header = np.array(current_header)
    v_current = np.array(data_array[1:,:],dtype=float)
    v_new = np.array(v_current)
    # scale the ppb columns into ug/m3
    for keys_ in parameters_unit_scaling.keys():
        v_new[:, int(keys_)] = v_current[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
    # add station name suffix (columns 5..21 are data columns; original
    # headers are assumed to be '<name>_<unit>' strings)
    for i_ in range(5,22):
        if str(i_) in parameter_name_mod.keys():
            parameter_name = parameter_name_mod[str(i_)]
        else:
            parameter_name = current_header[i_].split('_')[0]
        if str(i_) in parameters_unit_scaling.keys():
            parameter_unit = new_unit_name
        else:
            parameter_unit = current_header[i_].split('_')[1]
        new_header[i_] = station_name + '_' + parameter_name + '_' + parameter_unit
    data_array[1:,:] = v_new
    data_array[0,:] = new_header
    # write next to the input file with a '_unit_converted' suffix
    filename_new = filename_.split('\\')[-1].split('.')[0] + '_unit_converted.csv'
    current_filename_without_path = filename_.split('\\')[-1]
    current_filename_path = filename_[:-len(current_filename_without_path)]
    numpy_save_txt(current_filename_path + filename_new, data_array)
    print('done!')
def save_data_with_unit_conversion_ppb_ugm3(file_list_path):
    """Batch-convert 12 monthly station CSVs (sorted by name) in a folder
    from ppb to ug/m3 for selected columns, replace the header row with a
    fixed column layout, and save each as '<name>_ugm3.csv'.

    :param file_list_path: folder (Windows-style path) containing the CSVs;
        assumes at least 12 files, one per month, in sorted order
    """
    file_list = sorted(glob.glob(str(file_list_path + '\\' + '*.csv')))
    # conversion factors, per pollutant column index:
    # https://uk-air.defra.gov.uk/assets/documents/reports/cat06/0502160851_Conversion_Factors_Between_ppb_and.pdf
    # http://www2.dmu.dk/AtmosphericEnvironment/Expost/database/docs/PPM_conversion.pdf
    parameters_unit_scaling = {'12' : 1.96, # O3
                               '13' : 1.25, # NO
                               '14' : 1.88, # NO2
                               '15' : 2.62, # SO2
                               '16' : 1.15} # CO
    # fixed header layout written to every output file
    parameters_new_names = ['YYYY', # 0
                            'MM', # 1
                            'DD', # 2
                            'HH', # 3
                            'mm', # 4
                            'Day of the week', # 5
                            'WD degrees', # 6
                            'WS m/s', # 7
                            'Temp Celsius', # 8
                            'RH %', # 9
                            'SR W/m2', # 10
                            'ATP mbar', # 11
                            'O3 ug/m3', # 12
                            'NO ug/m3', # 13
                            'NO2 ug/m3', # 14
                            'SO2 ug/m3', # 15
                            'CO mg/m3', # 16
                            'CO2 ppm', # 17
                            'PM10 ug/m3', # 18
                            'PM2.5 ug/m3', # 19
                            'THC ppm', # 20
                            'Rain mm', # 21
                            'Ox ppb', # 22
                            'NOx ppb'] # 23
    for month_ in range(1,13):
        print(month_)
        filename_old = file_list[month_ -1]
        data_array = open_csv_file(file_list[month_ -1])
        v_ppb = np.array(data_array[1:,:],dtype=float)
        v_ug_m3 = np.array(v_ppb)
        # scale the ppb columns into ug/m3
        for keys_ in parameters_unit_scaling.keys():
            v_ug_m3[:, int(keys_)] = v_ppb[:, int(keys_)] * parameters_unit_scaling[str(keys_)]
        data_array[0, :] = parameters_new_names
        data_array[1:,:] = v_ug_m3
        filename_new = filename_old.split('\\')[-1].split('.')[0] + '_ugm3.csv'
        numpy_save_txt(file_list_path + '\\' + filename_new, data_array)
    print('done!')
def RH_to_abs_conc(arr_RH, arr_T):
    """Absolute water concentration from relative humidity and temperature.

    Saturation pressure comes from a quartic fit in (1 - 373.15/T).
    :param arr_RH: relative humidity [%]
    :param arr_T: temperature [K]
    :return: arr_RH * Po_H2O / (100 * k_B * T), with Po_H2O in mbar
    """
    a_ = 1 - (373.15 / arr_T)
    # quartic-fit coefficients for the saturation-pressure exponent
    fit_coeffs = (13.3185, -1.97, -.6445, -.1299)
    exponent = sum(coeff * a_ ** (i_ + 1) for i_, coeff in enumerate(fit_coeffs))
    Po_H2O = 1013.25 * e_constant ** exponent  # mbar
    return (arr_RH * Po_H2O) / (100 * boltzmann_ * arr_T)
def Mixing_Ratio_to_molecules_per_cm3(arr_MR, ATP_mbar, Temp_C):
    """Convert a mixing ratio to number concentration [molecules / cm3].

    :param arr_MR: mixing ratio
    :param ATP_mbar: ambient pressure [mbar]
    :param Temp_C: temperature [Celsius]
    """
    temp_K = Temp_C + 273.15  # kelvin
    return arr_MR * (ATP_mbar / (boltzmann_ * temp_K))  # molecules / cm3
def molecules_per_cm3_to_Mixing_Ratio(arr_Molec_per_cm3, ATP_mbar, Temp_C):
    """Convert a number concentration [molecules / cm3] back to mixing ratio.

    Inverse of Mixing_Ratio_to_molecules_per_cm3().
    :param arr_Molec_per_cm3: number concentration [molecules / cm3]
    :param ATP_mbar: ambient pressure [mbar]
    :param Temp_C: temperature [Celsius]
    """
    temp_K = Temp_C + 273.15  # kelvin
    return (arr_Molec_per_cm3 * boltzmann_ * temp_K) / ATP_mbar
def ws_knots_to_ms(arr_):
    """Converts wind speed from knots to meters per second."""
    return arr_ * .514444
def ws_ms_to_knots(arr_):
    """Converts wind speed from meters per second to knots."""
    return arr_ / .514444
def kelvin_to_celsius(arr_temp_k):
    """Converts temperature from kelvin to celsius."""
    return arr_temp_k - 273.15
def celsius_to_kelvin(arr_temp_c):
    """Converts temperature from celsius to kelvin."""
    return arr_temp_c + 273.15
# geo reference
def find_index_from_lat_lon(series_lat, series_lon, point_lat_list, point_lon_list):
    """
    Finds the index of the closest value in series_lat / series_lon for the given point(s).
    NaN and inf entries are masked out before the nearest-neighbour search.
    :param series_lat: 1D array of latitudes
    :param series_lon: 1D array of longitudes
    :param point_lat_list: scalar latitude, or tuple/list of latitudes
    :param point_lon_list: scalar longitude, or tuple/list of longitudes
    :return: tuple (lat index or list of indexes, lon index or list of indexes)
    """
    lat_series = series_lat
    lon_series = series_lon
    # only pay the masking cost when invalid values are present (a NaN makes sum != sum)
    if np.sum(lat_series) != np.sum(lat_series) or np.sum(lon_series) != np.sum(lon_series):
        lat_series = np.ma.masked_where(np.isnan(lat_series), lat_series)
        lat_series = np.ma.masked_where(np.isinf(lat_series), lat_series)
        lon_series = np.ma.masked_where(np.isnan(lon_series), lon_series)
        lon_series = np.ma.masked_where(np.isinf(lon_series), lon_series)
    if type(point_lat_list) in (tuple, list):
        lat_indexes = [np.argmin(np.abs(lat_series - lat_point)) for lat_point in point_lat_list]
        lon_indexes = [np.argmin(np.abs(lon_series - lon_point)) for lon_point in point_lon_list]
        return lat_indexes, lon_indexes
    return np.argmin(np.abs(lat_series - point_lat_list)), np.argmin(np.abs(lon_series - point_lon_list))
def find_index_from_lat_lon_2D_arrays(lat_arr, lon_arr, point_lat, point_lon):
    """
    Returns the (row, column) index of the grid cell closest (in degrees) to the given point.
    :param lat_arr: 2D array of latitudes
    :param lon_arr: 2D array of longitudes
    :param point_lat: latitude of the point
    :param point_lon: longitude of the point
    :return: index of the minimum of the euclidean degree-distance array
    """
    distance_deg = ((lat_arr - point_lat) ** 2 + (lon_arr - point_lon) ** 2) ** 0.5
    return find_min_index_2d_array(distance_deg)
def find_index_from_lat_lon_1D_arrays(lat_arr, lon_arr, point_lat, point_lon):
    """
    Returns the index of the (lat, lon) pair closest (in degrees) to the given point.
    :param lat_arr: 1D array of latitudes
    :param lon_arr: 1D array of longitudes (same length as lat_arr)
    :param point_lat: latitude of the point
    :param point_lon: longitude of the point
    :return: index of the minimum of the euclidean degree-distance array
    """
    distance_deg = ((lat_arr - point_lat) ** 2 + (lon_arr - point_lon) ** 2) ** 0.5
    return find_min_index_1d_array(distance_deg)
def distance_array_lat_lon_2D_arrays_degrees(lat_arr, lon_arr, point_lat, point_lon):
    """
    Returns an array with the euclidean distance in degrees from each grid cell to the point.
    :param lat_arr: 2D array of latitudes
    :param lon_arr: 2D array of longitudes
    :param point_lat: latitude of the point
    :param point_lon: longitude of the point
    :return: 2D array of distances in degrees
    """
    return ((lat_arr - point_lat) ** 2 + (lon_arr - point_lon) ** 2) ** 0.5
def meter_per_degrees(lat_point):
    """
    Returns the approximate length in meters of one degree of latitude and of longitude
    at the given latitude (cosine series approximation).
    :param lat_point: latitude in degrees (scalar or array)
    :return: tuple (meters per degree latitude, meters per degree longitude)
    """
    lat_abs_rad = np.deg2rad(np.abs(lat_point))
    m_per_deg_lat = 111132.954 - 559.822 * np.cos(2 * lat_abs_rad) + 1.175 * np.cos(4 * lat_abs_rad)
    m_per_deg_lon = 111132.954 * np.cos(lat_abs_rad)
    return np.abs(m_per_deg_lat), np.abs(m_per_deg_lon)
def degrees_per_meter(lat_point):
    """
    Returns how many degrees of latitude and of longitude correspond to one meter
    at the given latitude.
    :param lat_point: latitude in degrees
    :return: tuple (degrees latitude per meter, degrees longitude per meter)
    """
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(lat_point)
    return 1 / m_per_deg_lat, 1 / m_per_deg_lon
def distance_array_lat_lon_2D_arrays_degress_to_meters(lat_arr, lon_arr, point_lat, point_lon):
    """
    Returns an array with the distance in meters from each grid cell to the given point,
    using a flat-earth approximation scaled at the grid's mean latitude.
    :param lat_arr: 2D array of latitudes
    :param lon_arr: 2D array of longitudes
    :param point_lat: latitude of the point
    :param point_lon: longitude of the point
    :return: 2D array of distances in meters
    """
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(np.nanmean(lat_arr))
    delta_lat_m = (lat_arr - point_lat) * m_per_deg_lat
    delta_lon_m = (lon_arr - point_lon) * m_per_deg_lon
    return ((delta_lat_m ** 2) + (delta_lon_m ** 2)) ** 0.5
def distance_between_to_points_in_meters(point_1_latlon, point_2_latlon):
    """
    Returns the distance in meters between two (lat, lon) points using a flat-earth
    approximation scaled at the mid latitude of the two points.
    :param point_1_latlon: tuple/list (lat, lon) of the first point
    :param point_2_latlon: tuple/list (lat, lon) of the second point
    :return: distance in meters
    """
    mid_lat = (point_1_latlon[0] + point_2_latlon[0]) / 2
    m_per_deg_lat, m_per_deg_lon = meter_per_degrees(mid_lat)
    delta_lat_m = (point_1_latlon[0] - point_2_latlon[0]) * m_per_deg_lat
    delta_lon_m = (point_1_latlon[1] - point_2_latlon[1]) * m_per_deg_lon
    return ((delta_lat_m ** 2) + (delta_lon_m ** 2)) ** 0.5
# Data Loading
def numpy_load_txt(filename_, delimiter_=",", format_=float, skip_head=0):
    """Thin wrapper around numpy's genfromtxt with csv-friendly defaults (comma, float)."""
    return genfromtxt(filename_, dtype=format_, delimiter=delimiter_, skip_header=skip_head)
def open_csv_file(filename_, delimiter=',', skip_head=0, dtype='<U32'):
    """Reads a delimited text file into a numpy array (strings of up to 32 chars by default)."""
    return np.array(genfromtxt(filename_, dtype=dtype, delimiter=delimiter, skip_header=skip_head))
def load_time_columns(filename_):
    """
    Loads a csv file whose first five columns are year, month, day, hour, minute and returns
    a (header, values) pair where the first four value columns are replaced by:
    matplotlib date number, month, weekday, and fractional hour of day.
    Data cells that cannot be parsed as float become NaN.
    :param filename_: string with full path to the csv file
    :return: tuple (1D header string array, 2D float values array)
    """
    ## user defined variables
    day_column_number = 2
    month_column_number = 1
    year_column_number = 0
    hour_column_number = 3
    minute_column_number = 4
    time_header = 'Time' #defining time header
    data_array = open_csv_file(filename_)
    # define arrays (data columns start at column 5, after the five time columns)
    values_str = data_array[1:,5:]
    values_ = np.zeros((values_str.shape[0],values_str.shape[1]),dtype=float)
    for r_ in range(values_.shape[0]):
        for c_ in range(values_.shape[1]):
            try:
                values_[r_,c_] = float(values_str[r_,c_])
            except:
                # non-numeric cells (empty, text) become NaN
                values_[r_,c_] = np.nan
    header_ = data_array[0 ,1:]
    # defining time arrays (one entry per data row; header row is excluded)
    time_days = np.zeros(data_array.shape[0] - 1, dtype=float)
    time_month = np.zeros(data_array.shape[0] - 1, dtype=int)
    time_weekday = np.zeros(data_array.shape[0] - 1, dtype=int)
    time_hour = np.zeros(data_array.shape[0] - 1)
    for r_ in range(data_array.shape[0] - 1):
        time_days[r_] = mdates.date2num(datetime.datetime(
            int(float(data_array[r_+1,year_column_number])),
            int(float(data_array[r_+1,month_column_number])),
            int(float(data_array[r_+1,day_column_number])),
            int(float(data_array[r_+1,hour_column_number])),
            int(float(data_array[r_+1,minute_column_number]))))
        time_month[r_] = int(float(data_array[r_+1,month_column_number]))
        time_weekday[r_] = datetime.datetime.weekday(mdates.num2date(time_days[r_]))
        time_hour[r_] = float(data_array[r_+1,hour_column_number]) + (float(data_array[r_+1,minute_column_number]) / 60)
    # compile names (first four output columns replace the raw time columns)
    header_[0] = time_header
    header_[1] = 'Month'
    header_[2] = 'Day of week'
    header_[3] = 'Hour of day'
    # compile values
    values_ = np.column_stack((time_days, time_month, time_weekday, time_hour, values_))
    return header_, values_
def load_object(filename):
    """Deserializes and returns the object pickled in filename (see save_object)."""
    with open(filename, 'rb') as in_file:
        return pickle.load(in_file)
def read_one_line_from_text_file(filename_, line_number):
    """
    Returns the text of the given (zero-based) line from a text file.
    :param filename_: string with full path to the text file
    :param line_number: int with the zero-based index of the wanted line
    :return: string with the line's text (including trailing newline),
             or None if the file has fewer lines (the original raised NameError here)
    """
    line_str = None  # fix: was unbound when line_number was past the end of the file
    with open(filename_) as file_:  # with-block guarantees the file is closed even on error
        for i, line in enumerate(file_):
            if i == line_number:
                line_str = line
                break
    return line_str
# data saving/output
def save_time_variable_as_csv(output_filename, var_name, time_in_secs, var_values, time_format_output='%Y%m%d%H%M%S'):
    """
    Writes a two column csv (formatted time stamp, variable value) to output_filename.
    :param output_filename: string with full path of the output csv
    :param var_name: string with the variable's column name
    :param time_in_secs: 1D array of epoch seconds
    :param var_values: 1D array of the variable's values (same length as time_in_secs)
    :param time_format_output: strftime format for the time column (also used as its header)
    :return: None
    """
    out_file = open(output_filename, 'w')
    # header row: the time format string doubles as the time column's name
    out_file.write(time_format_output + ',' + var_name + '\n')
    total_rows = time_in_secs.shape[0]
    for r_ in range(total_rows):
        p_progress_bar(r_, total_rows)
        out_file.write(time_seconds_to_str(time_in_secs[r_], time_format_output))
        out_file.write(',' + str(var_values[r_]) + '\n')
    out_file.close()
def numpy_save_txt(filename_, array_, delimiter_=",", format_='%s'):
    """Thin wrapper around np.savetxt with comma-delimited '%s' defaults."""
    np.savetxt(filename_, array_, fmt=format_, delimiter=delimiter_)
def save_array_to_disk(header_with_units, time_in_seconds, values_in_floats, filename):
    """
    Saves a time series to a csv file with YYYY,MM,DD,HH,mm time columns followed by the data.
    :param header_with_units: single string (1D values) or list of strings (2D values)
    :param time_in_seconds: 1D array of epoch seconds
    :param values_in_floats: 1D or 2D array with the data values
    :param filename: string with full path of the output csv
    :return: None
    """
    if len(values_in_floats.shape) == 1:
        header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm', header_with_units]
    else:
        header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
        for parameter_ in header_with_units:
            header_to_print.append(parameter_)
    # create values block
    T_ = time_seconds_to_5C_array(time_in_seconds)
    P_ = np.column_stack((T_, values_in_floats))
    # change type to str
    P_str = np.array(P_, dtype='<U32')
    # join header with values (np.vstack: np.row_stack was removed in NumPy 2.0)
    P_final = np.vstack((header_to_print, P_str))
    # save to hard drive
    numpy_save_txt(filename, P_final)
    print('final data saved to: ' + filename)
def save_HVF(header_, values_, filename):
    """
    Saves a header + values block to a csv file, expanding the first column
    (a matplotlib date number) into YYYY,MM,DD,HH,mm columns.
    Rows with NaN time are written with empty time fields.
    :param header_: sequence of column names, one per column of values_
    :param values_: 2D array whose column 0 is a matplotlib date number; columns 2+ are data
    :param filename: string with full path of the output csv
    :return: None
    """
    # check if all shapes match
    if len(header_) != values_.shape[1]:
        print('shape of header is not compatible with shape of values')
        return
    time_in_seconds = mdates.num2epoch(values_[:, 0])
    header_with_units = header_[2:]
    values_in_floats = values_[:, 2:]
    header_to_print = ['YYYY', 'MM', 'DD', 'HH', 'mm']
    for parameter_ in header_with_units:
        header_to_print.append(parameter_)
    # create values block
    T_ = np.zeros((time_in_seconds.shape[0], 5), dtype='<U32')
    for r_ in range(time_in_seconds.shape[0]):
        if time_in_seconds[r_] == time_in_seconds[r_]:  # NaN check: leave invalid times blank
            T_[r_] = time.strftime("%Y,%m,%d,%H,%M", time.gmtime(time_in_seconds[r_])).split(',')
    P_ = np.column_stack((T_, values_in_floats))
    # change type to str
    P_str = np.array(P_, dtype='<U32')
    # join header with values (np.vstack: np.row_stack was removed in NumPy 2.0)
    P_final = np.vstack((header_to_print, P_str))
    # save to hard drive
    numpy_save_txt(filename, P_final)
    print('final data saved to: ' + filename)
def save_simple_array_to_disk(header_list, values_array, filename_):
    """
    Saves a header row plus a 2D values array to a csv file.
    :param header_list: list of column name strings
    :param values_array: 2D array of values (converted to strings of up to 32 chars)
    :param filename_: string with full path of the output csv
    :return: None
    """
    # change type to str
    values_str = np.array(values_array, dtype='<U32')
    # join header with values (np.vstack: np.row_stack was removed in NumPy 2.0)
    array_final = np.vstack((header_list, values_str))
    # save to hard drive
    numpy_save_txt(filename_, array_final)
    print('final data saved to: ' + filename_)
def save_array_as_is(array_, filename_):
    """Saves array_ to filename_ as comma separated values, one row per line."""
    np.savetxt(filename_, array_, fmt='%s', delimiter=",")
def save_object(obj, filename):
    """Serializes obj to filename with pickle, overwriting any existing file."""
    with open(filename, 'wb') as out_file:
        pickle.dump(obj, out_file, pickle.HIGHEST_PROTOCOL)
# png data handeling
def store_array_to_png(array_, filename_out):
    """
    This function saves an array to a png file while keeping as much accuracy as possible with the lowest memory ussage
    Each float is split into significand digits, a base-10 exponent, a sign flag and a NaN
    flag, packed into the three uint8 channels of the png. Decode with read_png_to_array.
    NOTE: mutates the input array in place (NaN entries are overwritten with 0).
    :param array_: numpy array
    :param filename_out: string with full path
    :return: none
    """
    # shape
    rows_ = array_.shape[0]
    columns_ = array_.shape[1]
    # nan layer (flag value 100 marks cells that were NaN)
    array_nan = np.zeros((rows_, columns_), dtype='uint8')
    array_nan[array_ != array_] = 100
    # replace nans
    array_[array_ != array_] = 0
    # convert to all positive
    array_positive = np.abs(array_)
    # sign layer (flag value 100 marks non-negative cells)
    array_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_sign[array_ >= 0] = 100
    # zeros array (1 where the magnitude is non-zero)
    array_zeros = np.zeros((rows_, columns_), dtype='uint8')
    array_zeros[array_positive != 0] = 1
    # sub 1 array (1 where 0 < magnitude < 1; these need an extra x10 to get two digits)
    array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
    array_sub1[array_positive<1] = 1
    array_sub1 = array_sub1 * array_zeros
    # power array (truncated base-10 exponent; forced to 0 where the value is 0)
    exp_ = np.array(np.log10(array_positive), dtype=int)
    exp_[array_zeros==0] = 0
    # integral array (significand: magnitude with the exponent divided out)
    array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
    # array_layer_1 (red: first two significand digits, + sign flag)
    array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
    # array_layer_2 (green: next two significand digits, + nan flag)
    array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
                              - np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
                             dtype='uint8')
    array_layer_2 = array_layer_2 + array_nan
    # power sign layer (flag value 100 marks non-negative exponents)
    exp_ = exp_ - array_sub1
    array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_power_sign[exp_ >= 0] = 100
    # array_layer_3 (blue: absolute exponent, + exponent-sign flag)
    array_layer_3 = np.abs(exp_) + array_power_sign
    # initialize out array
    out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
    # dump into out array
    out_array[:, :, 0] = array_layer_1
    out_array[:, :, 1] = array_layer_2
    out_array[:, :, 2] = array_layer_3
    img_arr = PIL_Image.fromarray(out_array)
    img_arr.save(filename_out)
def read_png_to_array(filename_):
    """
    This functions converts pngs files created by "store_array_to_png" back to numpy arrays
    :param filename_: string with full path name to png file created by store_array_to_png
    :return: numpy array
    """
    # read image into array
    img_arr = np.array(PIL_Image.open(filename_))
    # shape
    rows_ = img_arr.shape[0]
    columns_ = img_arr.shape[1]
    # nan array (green channel >= 100 flags cells that were NaN before encoding)
    nan_array = np.zeros((rows_, columns_), dtype='uint8')
    nan_array[img_arr[:,:,1] >= 100] = 1
    # power array (blue channel: |exponent| in the low two digits, +100 when exponent >= 0)
    power_array_magnitude = ((img_arr[:,:,2]/100) - np.array(img_arr[:,:,2]/100, dtype='uint8') ) * 100
    sign_array = np.zeros((rows_, columns_)) - 1
    sign_array[img_arr[:,:,2] >= 100] = 1
    power_array = power_array_magnitude * sign_array
    # sign array (red channel >= 100 flags non-negative values)
    sign_array = np.array(img_arr[:,:,0]/100, dtype=int)
    sign_array[sign_array == 0] = -1
    # unit array (first significand digit, from the red channel)
    unit_array = np.array(img_arr[:,:,0]/10, dtype='uint8') - (np.array(img_arr[:,:,0]/100, dtype='uint8') * 10)
    # decimal array (remaining significand digits from the red and green channels)
    decimal_array_1 = (img_arr[:,:,0]/10) - np.array(img_arr[:,:,0]/10, dtype='uint8')
    decimal_array_2 = ((img_arr[:,:,1]/100) - np.array(img_arr[:,:,1]/100, dtype='uint8') ) / 10
    # compute out array (recombine sign * significand * 10^exponent)
    out_array = (sign_array * (unit_array + decimal_array_1 + decimal_array_2)) * 10 ** power_array
    # flag nans
    out_array[nan_array==1]=np.nan
    return out_array
# sattelite data load
def load_OMI_NO2_monthly_data(filename_):
    """
    Loads an OMI NO2 monthly mean grid in ESRI ASCII format (.grd) and returns the data
    with matching 2D latitude and longitude arrays. Values are multiplied by 1e13
    (to molecules/cm2 per the product's scaling) and the nodata value becomes NaN.
    The first and last rows are dropped from all returned arrays.
    :param filename_: string with full path to the .grd file
    :return: tuple (data 2D array, lat 2D array, lon 2D array)
    """
    # # [molec./cm-2]
    # filename_ = 'C:\\_input\\no2_201601.grd'
    # arr_NO2, lat_arr_NO2, lon_arr_NO2 = load_OMI_NO2_monthly_data(filename_)
    # [440: -820, 1650: 1960]
    data_array = genfromtxt(filename_, dtype=float, skip_header=7)
    # the 7-line header holds the grid geometry
    file_object = open(filename_,mode='r')
    ncols = int(file_object.readline().split()[-1])
    nrows = int(file_object.readline().split()[-1])
    xllcorner = float(file_object.readline().split()[-1])
    yllcorner = float(file_object.readline().split()[-1])
    cellsize = float(file_object.readline().split()[-1])
    nodata_value = float(file_object.readline().split()[-1])
    # version = file_object.readline().split()[-1]
    file_object.close()
    lat_arr = np.zeros((nrows, ncols), dtype=float)
    lon_arr = np.zeros((nrows, ncols), dtype=float)
    # NOTE(review): linspace with nrows/ncols points over a span of nrows/ncols cells
    # gives edge (not center) coordinates - confirm against the product documentation
    lat_series = np.linspace(yllcorner + (cellsize * nrows), yllcorner, nrows)
    lon_series = np.linspace(xllcorner, xllcorner + (cellsize * ncols), ncols)
    for r_ in range(nrows):
        lon_arr[r_, :] = lon_series
    for c_ in range(ncols):
        lat_arr[:, c_] = lat_series
    data_array[data_array==nodata_value] = np.nan
    data_array = data_array * 1e13
    return data_array[1:-1,:], lat_arr[1:-1,:], lon_arr[1:-1,:]
def load_OMI_HCHO_monthly_data(filename_):
    """
    Loads an OMI HCHO monthly mean grid (fixed 720x1440 quarter-degree global grid)
    and returns the data with matching 2D latitude and longitude arrays.
    Values are multiplied by 1e15 (to molecules/cm2 per the product's scaling).
    The first and last rows are dropped from all returned arrays.
    :param filename_: string with full path to the .dat file
    :return: tuple (data 2D array, lat 2D array, lon 2D array)
    """
    # # [molec./cm-2]
    # filename_ = 'C:\\_input\\OMIH2CO_Grid_720x1440_201601.dat'
    # arr_HCHO, lat_arr_HCHO, lon_arr_HCHO = load_OMI_HCHO_monthly_data(filename_)
    # [220: -410, 825: 980]
    data_array = genfromtxt(filename_, dtype=float, skip_header=7)
    # grid geometry is hard coded for this product (global 0.25 degree grid)
    ncols = 1440
    nrows = 720
    xllcorner = -180
    yllcorner = -90
    cellsize = 0.25
    lat_arr = np.zeros((nrows, ncols), dtype=float)
    lon_arr = np.zeros((nrows, ncols), dtype=float)
    lat_series = np.linspace(yllcorner + (cellsize * nrows), yllcorner, nrows)
    lon_series = np.linspace(xllcorner, xllcorner + (cellsize * ncols), ncols)
    for r_ in range(nrows):
        lon_arr[r_, :] = lon_series
    for c_ in range(ncols):
        lat_arr[:, c_] = lat_series
    data_array = data_array * 1e15
    return data_array[1:-1,:], lat_arr[1:-1,:], lon_arr[1:-1,:]
def download_HIM8_AUS_ch3_500m(YYYYmmddHHMM_str):
    """
    Downloads the 500 m channel-3 BRF Himawari-8 crop over Australia for the given time stamp
    from the NCI thredds server.
    :param YYYYmmddHHMM_str: string with the time stamp (YYYYmmddHHMM)
    :return: cropped 'channel_0003_brf' array for the time stamp
    """
    year_ = YYYYmmddHHMM_str[:4]
    month_ = YYYYmmddHHMM_str[4:6]
    day_ = YYYYmmddHHMM_str[6:8]
    hour_minute = YYYYmmddHHMM_str[8:12]
    url_ = ('http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/'
            + year_ + '/' + month_ + '/' + day_ + '/' + hour_minute + '/'
            + YYYYmmddHHMM_str + '00-P1S-ABOM_BRF_B03-PRJ_GEOS141_500-HIMAWARI8-AHI.nc')
    f_ = nc.Dataset(url_)
    # crop window (rows 13194:19491, columns 4442:14076) covering Australia
    row_start, row_stop = 13194, 19491
    col_start, col_stop = 4442, 14076
    return f_.variables['channel_0003_brf'][0, row_start:row_stop, col_start:col_stop]
def download_HIM8_AUS_2000m(YYYYmmddHHMM_str, channel_number_str, print_=True):
    """
    Downloads the 2000 m Himawari-8 crop over Australia for the given time stamp and channel
    from the NCI thredds server.
    :param YYYYmmddHHMM_str: string with the time stamp (YYYYmmddHHMM)
    :param channel_number_str: string with the two-digit channel number ('01'-'16')
    :param print_: if True a progress message is printed
    :return: cropped channel array for the time stamp
    """
    url_ = ('http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/'
            + YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8]
            + '/' + YYYYmmddHHMM_str[8:12] + '/' + YYYYmmddHHMM_str + '00'
            + '-P1S-ABOM_OBS_B' + channel_number_str + '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc')
    if print_: print('downloading HIM_8', YYYYmmddHHMM_str, channel_number_str)
    f_ = nc.Dataset(url_)
    # the data variable's name contains 'channel'; find it
    variable_name = ''
    for var_key in f_.variables.keys():
        if 'channel' in var_key:
            variable_name = var_key
            break
    # crop window (rows 3298:4873, columns 1110:3519) covering Australia
    return f_.variables[variable_name][0, 3298:4873, 1110:3519]
def download_HIM8_2000m(YYYYmmddHHMM_str, channel_number_str):
    """
    Downloads the full-disk 2000 m Himawari-8 image for the given time stamp and channel
    from the NCI thredds server.
    :param YYYYmmddHHMM_str: string with the time stamp (YYYYmmddHHMM)
    :param channel_number_str: string with the two-digit channel number ('01'-'16')
    :return: full-disk channel array for the time stamp
    """
    url_ = ('http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/'
            + YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8]
            + '/' + YYYYmmddHHMM_str[8:12] + '/' + YYYYmmddHHMM_str + '00'
            + '-P1S-ABOM_OBS_B' + channel_number_str + '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc')
    f_ = nc.Dataset(url_)
    # the data variable's name contains 'channel'; find it
    variable_name = ''
    for var_key in f_.variables.keys():
        if 'channel' in var_key:
            variable_name = var_key
            break
    print('downloading variable:', variable_name)
    return f_.variables[variable_name][0, :, :]
def download_HIM8_AUS_truecolor_2000m(YYYYmmddHHMM_str):
    """
    Builds an RGB (true-color) uint8 image from Himawari-8 channels 3, 2, 1 over Australia.
    :param YYYYmmddHHMM_str: string with the time stamp (YYYYmmddHHMM)
    :return: 3D uint8 array (rows, columns, RGB)
    """
    blue_ = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '01')
    green_ = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '02')
    red_ = download_HIM8_AUS_2000m(YYYYmmddHHMM_str, '03')
    img_ = np.zeros((blue_.shape[0], blue_.shape[1], 3), dtype='uint8')
    # scale by 170 into uint8 range (presumably channel values stay below ~1.5 - TODO confirm)
    img_[:, :, 0] = red_ * 170
    img_[:, :, 1] = green_ * 170
    img_[:, :, 2] = blue_ * 170
    return img_
def download_HIM8_truecolor_2000m(YYYYmmddHHMM_str):
    """
    Builds a full-disk RGB (true-color) uint8 image from Himawari-8 channels 3, 2, 1.
    :param YYYYmmddHHMM_str: string with the time stamp (YYYYmmddHHMM)
    :return: 3D uint8 array (rows, columns, RGB)
    """
    blue_ = download_HIM8_2000m(YYYYmmddHHMM_str, '01')
    green_ = download_HIM8_2000m(YYYYmmddHHMM_str, '02')
    red_ = download_HIM8_2000m(YYYYmmddHHMM_str, '03')
    img_ = np.zeros((blue_.shape[0], blue_.shape[1], 3), dtype='uint8')
    # scale by 170 into uint8 range (presumably channel values stay below ~1.5 - TODO confirm)
    img_[:, :, 0] = red_ * 170
    img_[:, :, 1] = green_ * 170
    img_[:, :, 2] = blue_ * 170
    return img_
def download_lat_lon_arrays_HIM8_500():
    """
    Downloads the 500 m Himawari-8 latitude and longitude grids from the NCI ancillary file.
    Values above 360 degrees (presumably off-disk fill values - TODO confirm) are set to NaN.
    :return: tuple (lat 2D array, lon 2D array)
    """
    url_ = ('http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/'
            '20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_500-HIMAWARI8-AHI.nc')
    lat_ = download_big_nc_array_in_parts(url_, 'lat')
    lon_ = download_big_nc_array_in_parts(url_, 'lon')
    lat_[lat_ > 360] = np.nan
    lon_[lon_ > 360] = np.nan
    return lat_, lon_
def download_lat_lon_arrays_HIM8_2000():
    """
    Downloads the 2000 m Himawari-8 latitude and longitude grids from the NCI ancillary file.
    Values above 360 degrees (presumably off-disk fill values - TODO confirm) are set to NaN.
    :return: tuple (lat 2D array, lon 2D array)
    """
    url_ = ('http://dapds00.nci.org.au/thredds/dodsC/rr5/satellite/obs/himawari8/FLDK/ancillary/'
            '20150127000000-P1S-ABOM_GEOM_SENSOR-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc')
    lat_ = download_big_nc_array_in_parts(url_, 'lat')
    lon_ = download_big_nc_array_in_parts(url_, 'lon')
    lat_[lat_ > 360] = np.nan
    lon_[lon_ > 360] = np.nan
    return lat_, lon_
def download_big_nc_array_in_parts(url_, variable_name, parts_=4):
    """
    Downloads a (possibly large) netCDF variable by splitting the transfer into parts_ chunks.
    1D variables are split along axis 0; 2D-4D variables along axis 1 (same convention as the
    original per-dimension branches, which this version deduplicates with computed slices).
    :param url_: string with the opendap url (or path) of the netCDF file
    :param variable_name: string with the name of the variable to download
    :param parts_: int with the number of chunks to split the download into
    :return: float numpy array with the variable's data, or None on error
    """
    f_ = nc.Dataset(url_)
    var_shape = f_.variables[variable_name].shape
    print('downloading variable', variable_name, 'with shape:', var_shape)
    if len(var_shape) == 0:
        print('ERROR! variable is not an array')
        return None
    if len(var_shape) == 1 and var_shape[0] == 1:
        print('ERROR! variable is a scalar')
        return None
    if len(var_shape) > 4:
        print('ERROR! variable has more than 4 dimensions, not implemented for this many dimentions')
        return None
    # split along axis 0 for 1D variables, axis 1 for 2D-4D (original behavior)
    split_axis = 0 if len(var_shape) == 1 else 1
    rows_per_part = int(var_shape[split_axis] / parts_)
    if rows_per_part == 0:
        print('ERROR! variable size is too small to be divided, should be downloaded directly')
        return None
    output_array = np.zeros(var_shape)
    # full slices for the axes before the split axis; trailing axes are implied
    index_prefix = (slice(None),) * split_axis
    for part_ in range(parts_ - 1):
        part_slice = index_prefix + (slice(int(part_ * rows_per_part), int((part_ + 1) * rows_per_part)),)
        output_array[part_slice] = f_.variables[variable_name][part_slice]
    # the last part picks up any remainder rows
    last_slice = index_prefix + (slice(int((parts_ - 1) * rows_per_part), None),)
    output_array[last_slice] = f_.variables[variable_name][last_slice]
    return output_array
def get_himawari8_2000m_NCI(YYYYmmddHHMM_str, channel_number, output_format='png',
                            output_path='/g/k10/la6753/data/', row_start=0, row_stop=5500, col_start=0,
                            col_stop=5500):
    """
    gets array from himawari-8 netcdf files and extracts only the indicated channel at the indicated time. saves to output_path
    :param YYYYmmddHHMM_str: string with the time in four digits for year, two digits for months...
    :param channel_number: int or float with the number of the channel ('01'-'16')
    :param output_format: string with either 'png' or 'numpy'. If png the array will be saved used store_array_to_png, otherwise numpy.save will be used
    :param output_path: string with the path, or full filename to be used to save the file
    :param row_start: int with the row number to start the crop
    :param row_stop: int with the row number to stop the crop
    :param col_start: int with the coloumn number to start the crop
    :param col_stop: int with the coloumn number to stop the crop
    :return: None
    """
    channel_number_str = str(int(channel_number)).zfill(2)
    # build the NCI filesystem path of the file for this time stamp and channel
    filename_ = '/g/data/rr5/satellite/obs/himawari8/FLDK/' + \
                YYYYmmddHHMM_str[:4] + '/' + YYYYmmddHHMM_str[4:6] + '/' + YYYYmmddHHMM_str[6:8] + \
                '/' + YYYYmmddHHMM_str[8:12] + \
                '/' + YYYYmmddHHMM_str + '00' + \
                '-P1S-ABOM_OBS_' \
                'B' + channel_number_str + \
                '-PRJ_GEOS141_2000-HIMAWARI8-AHI.nc'
    if os.path.exists(filename_):
        f_ = nc.Dataset(filename_)
        # the data variable's name contains 'channel'; find it
        variable_name = ''
        for var_key in f_.variables.keys():
            if len(var_key.split('channel')) > 1:
                variable_name = var_key
                break
        array_ = f_.variables[variable_name][0, row_start:row_stop, col_start:col_stop]
        # output_path ending in a separator is treated as a folder; otherwise as a full filename
        if output_path[-1] == '/' or output_path[-1] == '\\':
            if output_format == 'png':
                output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.png'
            else:
                output_filename = output_path + 'him_2000m_ch' + channel_number_str + '_' + YYYYmmddHHMM_str + '.npy'
        else:
            output_filename = output_path
        if output_format == 'png':
            store_array_to_png(array_, output_filename)
        else:
            np.save(output_filename, array_)
    else:
        print('File not available for time stamp:', YYYYmmddHHMM_str)
# ERA5
def create_virtual_sondes_from_ERA5(time_stamp_sec, lat_lon_tuple, era5_file_levels_ncFile, era5_file_surface_ncFile,
                                    max_time_delta_sec=21600, show_prints=True):
    """
    Builds a virtual sounding profile from ERA5 pressure-level and surface files at the grid
    point closest to lat_lon_tuple and the available time closest to time_stamp_sec.
    The profile is extended down to the surface by interpolating in log-pressure.
    :param time_stamp_sec: epoch seconds of the desired sonde time
    :param lat_lon_tuple: tuple (lat, lon) of the desired location
    :param era5_file_levels_ncFile: open netCDF Dataset, or path string, of the ERA5 pressure-levels file
    :param era5_file_surface_ncFile: open netCDF Dataset, or path string, of the ERA5 surface file
    :param max_time_delta_sec: maximum allowed gap in seconds between requested and available time
    :param show_prints: if True progress messages are printed
    :return: tuple (P [hPa], H [m], T [C], Td [C], U, V) arrays ending at the surface,
             or None when no ERA5 time is close enough
    """
    close_level_file=False
    close_surface_file=False
    # accept either open Datasets or paths; only close files opened here
    if type(era5_file_levels_ncFile) == str:
        era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
        close_level_file = True
    else:
        era5_file_levels = era5_file_levels_ncFile
    if type(era5_file_surface_ncFile) == str:
        era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
        close_surface_file = True
    else:
        era5_file_surface = era5_file_surface_ncFile
    time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
    time_era5_surface_sec = time_era5_to_seconds(era5_file_surface.variables['time'][:])
    r_era5_levels_1 = time_to_row_sec(time_era5_levels_sec, time_stamp_sec)
    r_era5_surface_1 = time_to_row_sec(time_era5_surface_sec, time_stamp_sec)
    if np.abs(time_era5_levels_sec[r_era5_levels_1] - time_stamp_sec) > max_time_delta_sec:
        if show_prints: print('error time gap is too large', )
        return None
    # find row and column for the lat lon
    lat_index, lon_index = find_index_from_lat_lon(era5_file_levels.variables['latitude'][:].data,
                                                   era5_file_levels.variables['longitude'][:].data,
                                                   lat_lon_tuple[0], lat_lon_tuple[1])
    if show_prints: print('creating input arrays')
    t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_era5_levels_1, :, lat_index, lon_index].data)
    if show_prints: print('created t_array')
    td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_era5_levels_1, :, lat_index, lon_index].data)
    if show_prints: print('created Td_array')
    h_profile = era5_file_levels.variables['z'][r_era5_levels_1, :, lat_index, lon_index].data / gravity_
    if show_prints: print('created z_array')
    u_profile = era5_file_levels.variables['u'][r_era5_levels_1, :, lat_index, lon_index].data
    if show_prints: print('created u_array')
    v_profile = era5_file_levels.variables['v'][r_era5_levels_1, :, lat_index, lon_index].data
    if show_prints: print('created v_array')
    p_profile = era5_file_levels.variables['level'][:].data # hPa
    if show_prints: print('created p_array')
    surface_p = era5_file_surface.variables['sp'][r_era5_surface_1, lat_index, lon_index] / 100 # / 100 to convert Pa to hPa
    if show_prints: print('created sp_array')
    # trim profiles from surface to top
    # find which levels should be included
    # assumes the level axis is ordered top-of-atmosphere first, surface last - TODO confirm
    levels_total = 0
    for i_ in range(p_profile.shape[0]):
        if p_profile[i_] > surface_p:
            break
        levels_total += 1
    ####################################### find extrapolations
    surface_t = np.interp(np.log(surface_p), np.log(p_profile), t_profile)
    surface_td = np.interp(np.log(surface_p), np.log(p_profile), td_profile)
    surface_u = np.interp(np.log(surface_p), np.log(p_profile), u_profile)
    surface_v = np.interp(np.log(surface_p), np.log(p_profile), v_profile)
    surface_h = np.interp(np.log(surface_p), np.log(p_profile), h_profile)
    # create temp arrays (last element holds the interpolated surface values)
    T_array = np.zeros(levels_total + 1, dtype=float)
    Td_array = np.zeros(levels_total + 1, dtype=float)
    # NOTE(review): Q_array is allocated but never filled or returned - candidate for removal
    Q_array = np.zeros(levels_total + 1, dtype=float)
    U_array = np.zeros(levels_total + 1, dtype=float)
    V_array = np.zeros(levels_total + 1, dtype=float)
    H_array = np.zeros(levels_total + 1, dtype=float)
    P_array = np.zeros(levels_total + 1, dtype=float)
    T_array[:levels_total] = t_profile[:levels_total]
    Td_array[:levels_total] = td_profile[:levels_total]
    U_array[:levels_total] = u_profile[:levels_total]
    V_array[:levels_total] = v_profile[:levels_total]
    H_array[:levels_total] = h_profile[:levels_total]
    P_array[:levels_total] = p_profile[:levels_total]
    T_array[-1] = surface_t
    Td_array[-1] = surface_td
    U_array[-1] = surface_u
    V_array[-1] = surface_v
    H_array[-1] = surface_h
    P_array[-1] = surface_p
    if close_level_file:
        era5_file_levels.close()
    if close_surface_file:
        era5_file_surface.close()
    return P_array, H_array, T_array, Td_array, U_array, V_array
def era5_get_surface_interpolated_vars(era5_file_levels_ncFile, era5_file_surface_ncFile, show_prints=True,
                                       time_start_str_YYYYmmDDHHMM=None, time_stop_str_YYYYmmDDHHMM=None):
    """
    Interpolates the ERA5 pressure-level variables (t, td, u, v, z/g, q) to the surface
    pressure at every grid point and time step, using linear interpolation in log-pressure.
    Only pressure levels from index 10 onward are used (the first 10 levels are skipped -
    presumably too high to matter for the surface; confirm against the file's level list).
    :param era5_file_levels_ncFile: open netCDF Dataset, or path string, of the ERA5 pressure-levels file
    :param era5_file_surface_ncFile: open netCDF Dataset, or path string, of the ERA5 surface file
    :param show_prints: if True progress messages are printed
    :param time_start_str_YYYYmmDDHHMM: optional time string to trim the start of the series
    :param time_stop_str_YYYYmmDDHHMM: optional time string to trim the end of the series
    :return: tuple (t, td, u, v, h, q 3D arrays [time, lat, lon], time array in epoch seconds)
    """
    close_level_file=False
    close_surface_file=False
    # accept either open Datasets or paths; only close files opened here
    if type(era5_file_levels_ncFile) == str:
        era5_file_levels = nc.Dataset(era5_file_levels_ncFile)
        close_level_file = True
    else:
        era5_file_levels = era5_file_levels_ncFile
    if type(era5_file_surface_ncFile) == str:
        era5_file_surface = nc.Dataset(era5_file_surface_ncFile)
        close_surface_file = True
    else:
        era5_file_surface = era5_file_surface_ncFile
    time_era5_levels_sec = time_era5_to_seconds(era5_file_levels.variables['time'][:])
    # trim time
    r_1 = 0
    r_2 = -1
    if time_start_str_YYYYmmDDHHMM is not None:
        r_1 = time_to_row_str(time_era5_levels_sec, time_start_str_YYYYmmDDHHMM)
    if time_stop_str_YYYYmmDDHHMM is not None:
        r_2 = time_to_row_str(time_era5_levels_sec, time_stop_str_YYYYmmDDHHMM)
    time_era5_sec = time_era5_levels_sec[r_1:r_2]
    if show_prints: print('creating input arrays')
    t_profile = kelvin_to_celsius(era5_file_levels.variables['t'][r_1:r_2, 10:, :, :].data)
    if show_prints: print('created t_array')
    td_profile = calculate_dewpoint_from_T_RH(t_profile, era5_file_levels.variables['r'][r_1:r_2, 10:, :, :].data)
    if show_prints: print('created Td_array')
    h_profile = era5_file_levels.variables['z'][r_1:r_2, 10:, :, :].data / gravity_
    if show_prints: print('created z_array')
    u_profile = era5_file_levels.variables['u'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created u_array')
    v_profile = era5_file_levels.variables['v'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created v_array')
    p_profile = era5_file_levels.variables['level'][10:].data # hPa
    if show_prints: print('created p_array')
    surface_p = era5_file_surface.variables['sp'][r_1:r_2, :, :] / 100 # / 100 to convert Pa to hPa
    if show_prints: print('created sp_array')
    q_profile = era5_file_levels.variables['q'][r_1:r_2, 10:, :, :].data
    if show_prints: print('created q_array')
    ####################################### find extrapolations
    surface_t = np.zeros((surface_p.shape), dtype=float)
    surface_td = np.zeros((surface_p.shape), dtype=float)
    surface_u = np.zeros((surface_p.shape), dtype=float)
    surface_v = np.zeros((surface_p.shape), dtype=float)
    surface_h = np.zeros((surface_p.shape), dtype=float)
    surface_q = np.zeros((surface_p.shape), dtype=float)
    if show_prints: print('starting interpolation of every point in time')
    # point-by-point log-pressure interpolation of each variable to the local surface pressure
    for r_ in range(time_era5_sec.shape[0]):
        p_progress_bar(r_,time_era5_sec.shape[0])
        for lat_ in range(surface_p.shape[1]):
            for lon_ in range(surface_p.shape[2]):
                surface_t [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), t_profile [r_,:,lat_,lon_])
                surface_td[r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), td_profile[r_,:,lat_,lon_])
                surface_u [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), u_profile [r_,:,lat_,lon_])
                surface_v [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), v_profile [r_,:,lat_,lon_])
                surface_h [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), h_profile [r_,:,lat_,lon_])
                surface_q [r_,lat_,lon_] = np.interp(np.log(surface_p[r_,lat_,lon_]),
                                                     np.log(p_profile), q_profile [r_,:,lat_,lon_])
    if close_level_file:
        era5_file_levels.close()
    if close_surface_file:
        era5_file_surface.close()
    return surface_t, surface_td, surface_u, surface_v, surface_h, surface_q, time_era5_sec
# HYSPLIT
def hysplit_load_freq_endpoints(filename_, number_of_hours):
    """
    Parses a HYSPLIT endpoints text file into a float array, one row per endpoint.
    A trajectory block starts right after each ' 1 PRESSURE' marker line;
    number_of_hours + 1 rows are read per trajectory and the trajectory counter
    is appended to each row before the numeric columns are extracted.
    :param filename_: string with path to the HYSPLIT endpoints file
    :param number_of_hours: int, number of hourly endpoints per trajectory (rows read = hours + 1)
    :return: 2D float array with 12 columns (row fields from index 2 onward)
    """
    with open(filename_, 'r') as file_obj:  # with-block closes the file even if parsing fails
        line_list = file_obj.readlines()
    file_traj_list = []
    traj_number = -1
    for line_inx, line_str in enumerate(line_list):
        if line_str == ' 1 PRESSURE\n':
            traj_number += 1
            for r_ in range(number_of_hours + 1):
                new_line_list = line_list[line_inx + r_ + 1].split()
                new_line_list.append(traj_number)
                file_traj_list.append(new_line_list)
    arr_ = np.zeros((len(file_traj_list), 12), dtype=float)
    for r_ in range(len(file_traj_list)):
        for c_ in range(12):
            arr_[r_, c_] = file_traj_list[r_][c_ + 2]
    return arr_
def hysplit_load_freq_endpoints_all(file_list):
    """
    Parses several HYSPLIT endpoints files (24-hour trajectories, i.e. 25 rows per
    ' 1 PRESSURE' block) and concatenates all endpoint rows into one float array.
    :param file_list: list of strings with paths to HYSPLIT endpoints files
    :return: 2D float array with 11 columns (row fields from index 2 onward)
    """
    file_traj_list = []
    for filename_ in file_list:
        with open(filename_, 'r') as file_obj:  # with-block closes each file even on error
            line_list = file_obj.readlines()
        for line_inx, line_str in enumerate(line_list):
            if line_str == ' 1 PRESSURE\n':
                for r_ in range(25):
                    file_traj_list.append(line_list[line_inx + r_ + 1].split())
    arr_ = np.zeros((len(file_traj_list), 11), dtype=float)
    for r_ in range(len(file_traj_list)):
        for c_ in range(11):
            arr_[r_, c_] = file_traj_list[r_][c_ + 2]
    return arr_
def calculate_mean_time(file_list, lat_tuple, lon_tuple):
    """
    For each HYSPLIT endpoints file, counts how many trajectories pass through the
    lat/lon box and records the age (column 6) of the first endpoint inside the box.
    Assumes 24-hour trajectories and that columns 7 and 8 are latitude and longitude -
    TODO confirm against the endpoints file format.
    :param file_list: list of paths to HYSPLIT endpoints files (typically one per month)
    :param lat_tuple: tuple (min lat, max lat) of the box
    :param lon_tuple: tuple (min lon, max lon) of the box
    :return: tuple (hit probability [%] list, mean hit age array, hit count list,
             total trajectory count list, hit age standard deviation array)
    """
    # file_list_irn = sorted(glob.glob(str('E:\\hysplit_IRN\\' + '*.txt')))
    # file_list_uae = sorted(glob.glob(str('E:\\hysplit_UAE\\' + '*.txt')))
    # lat_tuple = tuple((24.889974, 26.201930))
    # lon_tuple = tuple((50.727086, 51.729315))
    hit_counter_list = []
    total_counter_list = []
    # month_list_list = []
    month_mean_time = []
    month_std_time = []
    month_probability_list = []
    for filename_ in file_list:
        arr_ = hysplit_load_freq_endpoints(filename_, 24)
        hit_counter = 0
        hit_age = []
        total_number_of_trajs = int(np.max(arr_[:,-1]))
        # NOTE(review): O(trajs * rows) scan; the break stops at each trajectory's first hit
        for traj_ in range(total_number_of_trajs + 1):
            for r_ in range(arr_.shape[0]):
                if arr_[r_,-1] == traj_:
                    if lat_tuple[0] < arr_[r_, 7] < lat_tuple[1] and lon_tuple[0] < arr_[r_, 8] < lon_tuple[1]:
                        hit_counter += 1
                        hit_age.append(arr_[r_, 6])
                        break
        hit_counter_list.append(hit_counter)
        total_counter_list.append(total_number_of_trajs)
        month_probability_list.append(100*hit_counter/total_number_of_trajs)
        # month_list_list.append(hit_age)
        month_mean_time.append(np.mean(hit_age))
        month_std_time.append(np.std(hit_age))
    return month_probability_list, np.array(month_mean_time), hit_counter_list, total_counter_list, np.array(month_std_time)
# BOM
def Lidar_compile_and_convert_txt_to_dict(main_folder_path):
    """Walk a LIDAR data tree, convert every txt file and stack results into a dict.

    Only first-level sub-folders whose name starts with '2' (year folders) are
    scanned.  Returns a dict with 'time', 'range' and one 2D array
    (time x range) per channel key.
    """
    # build the complete list of data files
    filename_list = []
    for sub_folder in next(os.walk(main_folder_path))[1]:
        if sub_folder[0] == '2':
            sub_path = main_folder_path + sub_folder + '\\'
            for sub_sub_folder in next(os.walk(sub_path))[1]:
                deep_path = sub_path + sub_sub_folder + '\\'
                filename_list.extend(sorted(glob.glob(str(deep_path + '*.*'))))
    total_files = len(filename_list)
    print(' number of files to compile:', str(total_files))
    # the first file defines the vertical (range) dimension
    first_output = Lidar_convert_txt_to_array(filename_list[0])
    range_shape = first_output[1].shape[0]
    # data columns 1..6 map onto these channel keys, in order
    channel_keys = ('ch0_pr2', 'ch0_mrg', 'ch1_pr2', 'ch1_mrg', 'ch2_pr2', 'ch2_mrg')
    output_dict = {'time': np.zeros(total_files),
                   'range': first_output[1][:, 0]}
    for key_ in channel_keys:
        output_dict[key_] = np.zeros((total_files, range_shape), dtype=float)
    print('arrays initialized')
    # populate the arrays, one file per time row
    for i_, filename_ in enumerate(filename_list):
        p_progress(i_, total_files)
        time_stamp, data_arr = Lidar_convert_txt_to_array(filename_)
        output_dict['time'][i_] = time_stamp
        for col_, key_ in enumerate(channel_keys):
            output_dict[key_][i_, :] = data_arr[:, col_ + 1]
    return output_dict
def Lidar_convert_txt_to_array(filename_):
    """Read one LIDAR txt file.

    The time stamp is parsed from the filename (characters [-25:-6], formatted
    '%Y-%m-%d_%H-%M-%S').  Returns (epoch seconds, 7-column float array) where
    duplicated altitude columns of the raw file are dropped.
    """
    file_time_str = filename_[-25:-6]
    time_stamp_seconds = time_str_to_seconds(file_time_str, '%Y-%m-%d_%H-%M-%S')
    data_array_raw = genfromtxt(filename_, dtype=float, delimiter='\t', skip_header=133)
    # keep a single altitude column: map the raw columns onto the 7 output columns
    source_columns = (0, 1, 2, 4, 5, 7, 8)
    data_array_out = np.zeros((data_array_raw.shape[0], 7), dtype=float)
    for out_col, raw_col in enumerate(source_columns):
        data_array_out[:, out_col] = data_array_raw[:, raw_col]
    return time_stamp_seconds, data_array_out
def compile_AWAP_precip_datafiles(file_list):
    """Stack a list of AWAP precipitation grids into a 3D (time, lat, lon) array.

    :param file_list: AWAP ASCII grid filenames
    :return: (data array, epoch-seconds time array, lat series, lon series)
    """
    print('loading file: ', file_list[0])
    first_arr, first_time_sec = load_AWAP_data(file_list[0])
    rows_, columns_ = first_arr.shape
    # fixed AWAP 0.05-degree grid, latitude decreasing
    series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
    series_lon = np.arange(112, 156.29, 0.05)
    output_array_time = np.zeros(len(file_list), dtype=float)
    output_array = np.zeros((len(file_list), rows_, columns_), dtype=float)
    output_array[0, :, :] = first_arr
    output_array_time[0] = first_time_sec
    # remaining files fill rows 1..N-1
    for t_, filename_ in enumerate(file_list[1:], start=1):
        print('loading file: ', filename_)
        arr_t, time_sec_t = load_AWAP_data(filename_)
        output_array[t_, :, :] = arr_t
        output_array_time[t_] = time_sec_t
    return output_array, output_array_time, series_lat, series_lon
def load_AWAP_data(filename_):
    """Load one AWAP ASCII grid file.

    The first 8 characters of the file's base name encode the start date as
    YYYYMMDD.  Returns (precip array, start date in epoch seconds).
    """
    start_date_sec = time_str_to_seconds(filename_.split('\\')[-1][:8], '%Y%m%d')
    arr_precip = np.genfromtxt(filename_, float, skip_header=6, skip_footer=18)
    return arr_precip, start_date_sec
def get_means_from_filelist(file_list, lat_lon_ar):
    """Extract a box-mean and seven point precipitation series from AWAP files.

    For every file: column 0 is the start time (epoch seconds), column 1 the
    mean over a fixed box of the AWAP grid, columns 2..8 the grid values at
    the seven lat/lon points given in lat_lon_ar.

    FIX: the output array used to be hard-coded to 277 rows, which raised
    IndexError for longer file lists and zero-padded shorter ones; it is now
    sized from len(file_list).

    :param file_list: AWAP ASCII grid filenames
    :param lat_lon_ar: array of shape (7, 2) with (lon, lat) per point
    :return: float array of shape (len(file_list), 9)
    """
    # box domain indexes (rows/columns of the AWAP grid)
    index_c = [716, 740]
    index_r = [508, 536]
    # fixed AWAP 0.05-degree grid, latitude decreasing
    series_lat = np.arange(-44.5, -9.95, 0.05)[::-1]
    series_lon = np.arange(112,156.3,0.05)
    lat_index_list, lon_index_list = find_index_from_lat_lon(series_lat, series_lon,
                                                             lat_lon_ar[:, 1], lat_lon_ar[:, 0])
    # one row per file: [time, box mean, point 1..7]
    precip_array = np.zeros((len(file_list), 9), dtype=float)
    for r_, filename_ in enumerate(file_list):
        print('loading file: ', filename_)
        arr_precip, start_date_sec = load_AWAP_data(filename_)
        precip_array[r_, 0] = start_date_sec
        precip_array[r_, 1] = np.mean(arr_precip[index_r[0]:index_r[1]+1, index_c[0]:index_c[1]+1])
        for i_ in range(2, 9):
            precip_array[r_, i_] = arr_precip[lat_index_list[i_-2], lon_index_list[i_-2]]
    save_array_to_disk(['box mean precip [mm]','1 precip [mm]','2 precip [mm]','3 precip [mm]',
                        '4 precip [mm]','5 precip [mm]','6 precip [mm]','7 precip [mm]'],
                       precip_array[:,0], precip_array[:,1:], 'C:\\_output\\test_fimi_2.csv')
    # save_HVF(['box','1','2','3','4','5','6','7'], precip_array, 'C:\\_output\\test_fimi_1.csv')
    print("done")
    return precip_array
def compile_BASTA_days_and_save_figure(directory_where_nc_file_are):
    """For every day covered by the BASTA .nc files in a folder, merge that day's
    files along the time axis and save a raw-reflectivity time-height figure."""
    # compile BASTA data per day and save plot (per day)
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
    path_input = directory_where_nc_file_are
    # file prefix is built from the last three folder names (e.g. 'BASTA_L0_12m5_')
    file_label = path_input.split('/')[-4] + '_' + path_input.split('/')[-3] + '_' + path_input.split('/')[-2] + '_'
    file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
    # filenames carry the date as YYYYMMDD in characters [-18:-10]
    first_day_str = file_list_all[0][-18:-10]
    last_day_str = file_list_all[-1][-18:-10]
    first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
    last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
    total_number_of_days = last_day_int - first_day_int
    print('The data in the folder encompasses', total_number_of_days, 'days')
    days_list_int = np.arange(first_day_int, last_day_int + 1)
    days_list_str = time_seconds_to_str(time_days_to_seconds(days_list_int),'%Y%m%d')
    for day_str in days_list_str:
        print('-|' * 20)
        file_list_day = sorted(glob.glob(str(path_input + file_label + day_str + '*.nc')))
        print('Compiling day', day_str, len(file_list_day), 'files found for this day.')
        if len(file_list_day) > 0:
            # the day's first file initializes the compiled arrays
            filename_ = file_list_day[0]
            print('loading file:', filename_)
            netcdf_file_object = nc.Dataset(filename_, 'r')
            # variable_names = sorted(netcdf_file_object.variables.keys())
            time_raw = netcdf_file_object.variables['time'][:].copy()
            # 'time' is stored relative to the file's units string; shift to epoch seconds
            file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                        time_format_basta)
            compiled_time_days = time_seconds_to_days(np.array(time_raw, dtype=int) + file_first_time_stamp)
            compiled_raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
            compiled_range_array = netcdf_file_object.variables['range'][:].copy()
            netcdf_file_object.close()
            if len(file_list_day) > 1:
                # append the rest of the day's files along the time dimension
                for filename_ in file_list_day[1:]:
                    print('loading file:', filename_)
                    netcdf_file_object = nc.Dataset(filename_, 'r')
                    time_raw = netcdf_file_object.variables['time'][:].copy()
                    file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                                time_format_basta)
                    time_days = time_seconds_to_days(np.array(time_raw, dtype = int) + file_first_time_stamp)
                    compiled_time_days = np.append(compiled_time_days, time_days)
                    raw_reflectivity_array = netcdf_file_object.variables['raw_reflectivity'][:].copy()
                    compiled_raw_reflectivity_array = np.vstack((compiled_raw_reflectivity_array,
                                                                 raw_reflectivity_array))
                    netcdf_file_object.close()
            # plot the merged day (range converted to km) and save it as png
            figure_output_name = path_input + file_label + day_str + '.png'
            print('saving figure to:', figure_output_name)
            p_arr_vectorized_2(compiled_raw_reflectivity_array, compiled_time_days, compiled_range_array/1000,
                               cmap_=default_cm, figsize_=(12, 8), vmin_=80, vmax_=140,
                               cbar_label='Raw Reflectivity dB', x_header='UTC',y_header='Range AGL [km]',
                               figure_filename=figure_output_name,
                               time_format_ = '%H')
def compile_BASTA_into_one_file(directory_where_nc_file_are):
    """Concatenate every BASTA .nc file in a folder into one netcdf file.

    The first file is copied to a new output file (named after the full time
    span) and each later file's time-dependent variables are appended to it.
    Time stamps are re-expressed relative to the first file's time units.
    """
    # compile BASTA data into one netcdf file
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # directory_where_nc_file_are = '/home/luis/Data/BASTA/L0/12m5/'
    path_input = directory_where_nc_file_are
    file_list_all = sorted(glob.glob(str(path_input + '/*.nc')))
    # first_day_str = file_list_all[0][-18:-10]
    # last_day_str = file_list_all[-1][-18:-10]
    # first_day_int = time_seconds_to_days(time_str_to_seconds(first_day_str,'%Y%m%d'))
    # last_day_int = time_seconds_to_days(time_str_to_seconds(last_day_str,'%Y%m%d'))
    # days_list_int = np.arange(first_day_int, last_day_int + 1)
    # create copy of first file
    # read the last time stamp of the last file so the output name spans the whole period
    netcdf_file_object = nc.Dataset(file_list_all[-1], 'r')
    last_second_raw = netcdf_file_object.variables['time'][:][-1]
    file_first_time_stamp = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                time_format_basta)
    netcdf_file_object.close()
    last_second_epoc = last_second_raw + file_first_time_stamp
    last_time_str = time_seconds_to_str(last_second_epoc, '%Y%m%d_%H%M%S')
    output_filename = file_list_all[0][:-3] + '_' + last_time_str + '.nc'
    shutil.copyfile(file_list_all[0], output_filename)
    print('Created output file with name:', output_filename)
    # open output file for appending data
    netcdf_output_file_object = nc.Dataset(output_filename, 'a')
    file_first_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_output_file_object.variables['time'].units,
                                                             time_format_basta)
    variable_names = sorted(netcdf_output_file_object.variables.keys())
    # create references to variables in output file
    variable_objects_dict = {}
    for var_name in variable_names:
        variable_objects_dict[var_name] = netcdf_output_file_object.variables[var_name]
    for filename_ in file_list_all[1:]:
        print('-' * 5)
        print('loading file:', filename_)
        # open file
        netcdf_file_object = nc.Dataset(filename_, 'r')
        # create file's time series
        file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units,
                                                           time_format_basta)
        time_raw = netcdf_file_object.variables['time'][:].copy()
        time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
        # rows are appended past the current end of the unlimited time dimension
        row_start = variable_objects_dict['time'].shape[0]
        row_end = time_raw.shape[0] + row_start
        # append time array (made relative to the output file's own time units)
        variable_objects_dict['time'][row_start:row_end] = time_seconds_epoc - file_first_time_stamp_seconds_epoc
        # append raw_reflectivity array
        variable_objects_dict['raw_reflectivity'][row_start:row_end] = \
            netcdf_file_object.variables['raw_reflectivity'][:].copy()
        # append raw_velocity array
        variable_objects_dict['raw_velocity'][row_start:row_end] = \
            netcdf_file_object.variables['raw_velocity'][:].copy()
        # append all other variables that only time dependent
        for var_name in variable_names:
            if var_name != 'time' and var_name != 'range' and \
                    var_name != 'raw_reflectivity' and var_name != 'raw_velocity':
                if len(netcdf_file_object.variables[var_name].shape) == 1:
                    variable_objects_dict[var_name][row_start:row_end] = \
                        netcdf_file_object.variables[var_name][:].copy()
        netcdf_file_object.close()
    netcdf_output_file_object.close()
    print('done')
def load_BASTA_data_from_netcdf_to_arrays(filename_):
    """Open a compiled BASTA netcdf file and return its main fields.

    Returns (raw_reflectivity, raw_velocity, range, time_seconds_epoc,
    time_days_epoc).  NOTE: raw_reflectivity and raw_velocity are returned as
    netCDF variable objects (not copied), so the file handle is deliberately
    left open; data is read lazily when the caller slices them.
    """
    # load BASTA data from netcdf to arrays
    # path_input = '/home/luis/Data/BASTA/L0/'
    # filename_ = path_input + 'BASTA_L0_12m5_20180606_071716_20180806_025422.nc'
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    # open file
    netcdf_file_object = nc.Dataset(filename_, 'r')
    # load time as seconds and days; 'time' is stored relative to the units string
    file_time_stamp_seconds_epoc = time_str_to_seconds(netcdf_file_object.variables['time'].units, time_format_basta)
    time_raw = netcdf_file_object.variables['time'][:].copy()
    time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
    time_days_epoc = time_seconds_to_days(time_seconds_epoc)
    # append range array
    array_range = netcdf_file_object.variables['range'][:].copy()
    # append raw_reflectivity array (kept as a lazy netCDF variable, not copied)
    array_raw_reflectivity = netcdf_file_object.variables['raw_reflectivity']#[:].copy()
    # append raw_velocity array (kept as a lazy netCDF variable, not copied)
    array_raw_velocity = netcdf_file_object.variables['raw_velocity']#[:].copy()
    # close file
    # netcdf_file_object.close()
    return array_raw_reflectivity, array_raw_velocity, array_range, time_seconds_epoc, time_days_epoc
def BASTA_load_period_to_dict(start_time_YMDHM, stop_time_YMDHM, folder_path,
                              variable_names=('time', 'range', 'raw_reflectivity', 'raw_velocity')):
    """Load the requested BASTA variables for a time window into a dict.

    Files in folder_path whose coverage overlaps [start, stop] are read and
    concatenated; variables with a time dimension are then sliced to the
    event window.  Times in the result are epoch seconds.

    :param start_time_YMDHM: event start as 'YYYYmmddHHMM'
    :param stop_time_YMDHM: event stop as 'YYYYmmddHHMM'
    :param folder_path: folder containing the BASTA .nc files
    :param variable_names: variables to load ('time' is always included)
    :return: dict of variable name -> array (empty dict if no file matches)
    """
    time_format_basta = 'seconds since %Y-%m-%d %H:%M:%S'
    out_dict = {}
    temp_dict = {}
    variables_with_time_dimension = []
    # 'time' is always needed (for slicing); prepend it if missing
    if not 'time' in variable_names:
        variable_names_temp_list = ['time']
        for variable_name in variable_names:
            variable_names_temp_list.append(variable_name)
        variable_names = variable_names_temp_list
    # data_folder
    data_folder = folder_path
    # get all data files filenames
    file_list = sorted(glob.glob(str(data_folder + '\\*.nc')))
    file_times_tuple_list = []
    file_times_tuple_list_str = []
    for i_, filename_ in enumerate(file_list):
        # each file covers from its own filename stamp to the next file's stamp
        file_time_str_start = filename_.split('_')[-2] + filename_.split('_')[-1].split('.')[0]
        file_time_sec_start = time_str_to_seconds(file_time_str_start, '%Y%m%d%H%M%S')
        if i_ < len(file_list) -1:
            file_time_str_stop = file_list[i_+1].split('_')[-2] + file_list[i_+1].split('_')[-1].split('.')[0]
            file_time_sec_stop = time_str_to_seconds(file_time_str_stop, '%Y%m%d%H%M%S')
        else:
            # last file: assume 24 hours of coverage
            file_time_sec_stop = file_time_sec_start + (24*60*60)
        file_times_tuple_list.append(tuple((file_time_sec_start, file_time_sec_stop)))
        file_times_tuple_list_str.append(tuple((file_time_str_start, time_seconds_to_str(file_time_sec_stop,
                                                                                         '%Y%m%d%H%M%S'))))
    # select only files inside time range
    event_start_sec = time_str_to_seconds(start_time_YMDHM, '%Y%m%d%H%M')
    event_stop_sec = time_str_to_seconds(stop_time_YMDHM, '%Y%m%d%H%M')
    selected_file_list = []
    for file_index in range(len(file_list)):
        # keep files that start or stop inside the event, or that span it entirely
        if event_start_sec <= file_times_tuple_list[file_index][0] <= event_stop_sec:
            selected_file_list.append(file_list[file_index])
        elif event_start_sec <= file_times_tuple_list[file_index][1] <= event_stop_sec:
            selected_file_list.append(file_list[file_index])
        elif file_times_tuple_list[file_index][0] <= event_start_sec <= file_times_tuple_list[file_index][1]:
            selected_file_list.append(file_list[file_index])
        elif file_times_tuple_list[file_index][0] <= event_stop_sec <= file_times_tuple_list[file_index][1]:
            selected_file_list.append(file_list[file_index])
    print('found files:')
    p_(selected_file_list)
    # load data
    if len(selected_file_list) == 0:
        print('No files inside time range!')
        return out_dict
    else:
        cnt = 0
        for filename_ in selected_file_list:
            if cnt == 0:
                # the first selected file initializes the temp_dict entries
                nc_file = nc.Dataset(filename_, 'r')
                print('reading file:',filename_)
                for variable_name in variable_names:
                    if 'time' in nc_file.variables[variable_name].dimensions:
                        variables_with_time_dimension.append(variable_name)
                    if variable_name == 'time':
                        # time is stored relative to the units string; shift to epoch seconds
                        file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
                                                                           time_format_basta)
                        time_raw = nc_file.variables['time'][:].copy()
                        time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
                        temp_dict[variable_name] = time_seconds_epoc
                    else:
                        temp_dict[variable_name] = nc_file.variables[variable_name][:].filled(np.nan)
                nc_file.close()
                cnt += 1
            else:
                # later files are appended: hstack for 1D variables, vstack for 2D
                nc_file = nc.Dataset(filename_, 'r')
                print('reading file:', filename_)
                for variable_name in variable_names:
                    if 'time' in nc_file.variables[variable_name].dimensions:
                        variables_with_time_dimension.append(variable_name)
                    if len(nc_file.variables[variable_name].shape) == 1:
                        if variable_name == 'time':
                            file_time_stamp_seconds_epoc = time_str_to_seconds(nc_file.variables['time'].units,
                                                                               time_format_basta)
                            time_raw = nc_file.variables['time'][:].copy()
                            time_seconds_epoc = np.array(time_raw, dtype=int) + file_time_stamp_seconds_epoc
                            temp_dict[variable_name] = np.hstack((temp_dict[variable_name], time_seconds_epoc))
                        else:
                            temp_dict[variable_name] = np.hstack((temp_dict[variable_name],
                                                                  nc_file.variables[variable_name][:].filled(np.nan)))
                    else:
                        temp_dict[variable_name] = np.vstack((temp_dict[variable_name],
                                                              nc_file.variables[variable_name][:].filled(np.nan)))
                nc_file.close()
        # find row for start and end of event
        start_row = np.argmin(np.abs(temp_dict['time'] - event_start_sec))
        end_row = np.argmin(np.abs(temp_dict['time'] - event_stop_sec))
        for variable_name in variable_names:
            if variable_name in variables_with_time_dimension:
                out_dict[variable_name] = temp_dict[variable_name][start_row:end_row]
            else:
                out_dict[variable_name] = temp_dict[variable_name]
        return out_dict
def MRR_CFAD(range_array, Ze_array, bins_=(12, np.arange(-10, 40, 2)), normalize_height_wise = True, x_header='dBZe',
             y_header='Height [km]', custom_y_range_tuple=None, custom_x_range_tuple=None, figure_filename=None,
             cbar_label='', cmap_=default_cm, figsize_ = (10,6), title_str = '', contourF_=True, cbar_format='%.2f',
             vmin_=None,vmax_=None, grid_=True, fig_ax=None, show_cbar=True, level_threshold_perc=10,
             invert_y=False, levels=None,custom_ticks_x=None, custom_ticks_y=None, cbar_ax=None):
    """Build and plot a CFAD (Contoured Frequency by Altitude Diagram) of MRR reflectivity.

    FIX: np.histogram2d was called with `normed=False`; the `normed` keyword was
    deprecated and removed in NumPy 1.24, so it now raises TypeError. Replaced
    with the equivalent `density=False`.

    :param range_array: 1D range axis [m] or 2D array matching Ze_array
    :param Ze_array: 2D reflectivity array (time x range); NaNs are ignored
    :param bins_: (height bins, dBZe bin edges) passed to np.histogram2d
    :param normalize_height_wise: if True, each height row is normalized by its
        own count; rows with fewer than level_threshold_perc % of the busiest
        row's count are masked with NaN
    :return: (figure/axes from p_arr_vectorized_3, transposed histogram,
        dBZe bin centers minus last, height bin centers minus last)
    """
    # broadcast a 1D range axis to the 2D shape of Ze_array (one row per time step)
    if len(range_array.shape) == 1:
        temp_array = np.zeros((Ze_array.shape))
        for r_ in range(Ze_array.shape[0]):
            temp_array[r_,:] = range_array
        range_array = temp_array
    # assumes bins_[0] < 1 means "fraction of the number of range gates" — TODO confirm intent
    if type(bins_[0]) == int:
        if bins_[0] < 1:
            bins_ = (int(range_array.shape[1] * bins_[0]), bins_[1])
    # histogram of (height [km], dBZe) pairs, NaN reflectivities excluded;
    # density=False is the modern equivalent of the removed normed=False
    hist_out = np.histogram2d(range_array.flatten()[~np.isnan(Ze_array.flatten())] / 1000,
                              Ze_array.flatten()[~np.isnan(Ze_array.flatten())],
                              density=False, bins=bins_)
    hist_array, hist_r, hist_c = hist_out
    # convert bin edges to bin centers
    hist_r = (hist_r[:-1] + hist_r[1:]) * 0.5
    hist_c = (hist_c[:-1] + hist_c[1:]) * 0.5
    # 2D coordinate arrays for the plotting helper
    hist_r_2d = np.zeros((hist_array.shape), dtype=float)
    hist_c_2d = np.zeros((hist_array.shape), dtype=float)
    for r_ in range(hist_array.shape[0]):
        for c_ in range(hist_array.shape[1]):
            hist_r_2d[r_, c_] = hist_r[r_]
            hist_c_2d[r_, c_] = hist_c[c_]
    # normalize height wise
    if normalize_height_wise:
        heights_counts = np.sum(hist_array, axis=1)
        maximum_count_at_some_height = np.max(heights_counts)
        cbar_label_final = 'Height normalized frequency'
        for r_ in range(hist_array.shape[0]):
            # mask sparsely-populated heights instead of normalizing noise
            if heights_counts[r_] < maximum_count_at_some_height * (level_threshold_perc/100):
                hist_array[r_, :] = np.nan
            else:
                hist_array[r_, :] = hist_array[r_, :] / heights_counts[r_]
    else:
        cbar_label_final = 'Normalized frequency'
    if cbar_label == '': cbar_label = cbar_label_final
    fig_ax = p_arr_vectorized_3(hist_array, hist_c_2d, hist_r_2d, contourF_=contourF_, grid_=grid_,
                                custom_y_range_tuple=custom_y_range_tuple, custom_x_range_tuple=custom_x_range_tuple,
                                x_header=x_header, y_header=y_header, cmap_=cmap_, figsize_=figsize_, cbar_ax=cbar_ax,
                                cbar_label=cbar_label, title_str=title_str, vmin_=vmin_, vmax_=vmax_,levels=levels,
                                figure_filename=figure_filename, fig_ax=fig_ax,show_cbar=show_cbar, invert_y=invert_y,
                                custom_ticks_x=custom_ticks_x, custom_ticks_y=custom_ticks_y,cbar_format=cbar_format)
    return fig_ax, hist_array.T, hist_c[:-1], hist_r[:-1]
# parsivel
def create_DSD_plot(DSD_arr, time_parsivel_seconds, size_arr, events_period_str, figfilename='',
                    output_data=False, x_range=(0, 7.5), y_range=(-1, 3.1), figsize_=(5, 5)):
    """Plot the mean log10 drop size distribution N(D) for one event period.

    :param DSD_arr: particle spectrum array (time x speed x size)
    :param time_parsivel_seconds: epoch-seconds time axis of DSD_arr
    :param size_arr: 2D diameter array [mm]; row 0 is the size axis
    :param events_period_str: 'start_stop' period string (time_to_row_str format)
    :param figfilename: if non-empty, the figure is saved there and closed
    :param output_data: if True, return (size axis, mean log10 N(D))
    """
    size_series = size_arr[0, :]
    period_parts = events_period_str.split('_')
    event_row_start = time_to_row_str(time_parsivel_seconds, period_parts[0])
    event_row_stop_ = time_to_row_str(time_parsivel_seconds, period_parts[1])
    # normalize counts by diameter and collapse the speed axis -> N(D) per minute
    concentration_per_D = DSD_arr / size_arr
    concentration_by_D = np.sum(concentration_per_D, axis=1)
    concentration_no_zero = concentration_by_D * 1
    concentration_no_zero[concentration_no_zero == 0] = np.nan
    concentration_log = np.log10(concentration_no_zero)
    # count how many valid (non-NaN) minutes each diameter bin has in the event
    valid_counts = np.array(concentration_log[event_row_start:event_row_stop_])
    valid_counts[~np.isnan(valid_counts)] = 1
    valid_counts_by_D = np.nansum(valid_counts, axis=0)
    mean_log_by_D = np.nanmean(np.array(
        concentration_log[event_row_start:event_row_stop_]), axis=0)
    # mask diameter bins with fewer than 10 valid minutes
    mean_log_by_D[valid_counts_by_D < 10] = np.nan
    fig, ax = plt.subplots(figsize=figsize_)
    ax.set_title('Mean value of drop concentrations in each diameter bin')
    ax.set_xlabel('D [mm]')
    ax.set_ylabel('log10 N(D) [m-3 mm-1]')
    ax.plot(size_series, mean_log_by_D, '-or', label='Event 1')
    ax.set_xlim(x_range)
    ax.set_ylim(y_range)
    ax.grid()
    if figfilename != '':
        fig.savefig(figfilename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    if output_data:
        return size_series, mean_log_by_D
def parsivel_nc_format_V2(input_filename, output_filename):
    """
    Transform the not so good nc V1 version produced by save_parsivel_arrays_to_netcdf to V2

    V2 renames variables to snake_case, stores time as int64 epoch seconds,
    reduces the 2D size/speed grids to 1D coordinate variables, and adds
    derived variables (psd, N_total, rain_mask).

    :param input_filename: output from save_parsivel_arrays_to_netcdf
    :param output_filename: a path and filename
    :return:
    """
    # create file
    netcdf_output_file_object = nc.Dataset(output_filename, 'w')
    print('created new file')
    netcdf_first_file_object = nc.Dataset(input_filename)
    # create attributes
    netcdf_output_file_object.setncattr('author', '<NAME> (<EMAIL>')
    netcdf_output_file_object.setncattr('version', 'V2')
    netcdf_output_file_object.setncattr('created', time_seconds_to_str(time.time(), '%Y-%m-%d_%H:%M UTC'))
    print('added attributes')
    # create list for dimensions and variables
    dimension_names_list = sorted(netcdf_first_file_object.dimensions)
    variable_names_list = sorted(netcdf_first_file_object.variables)
    # create dimensions
    for dim_name in dimension_names_list:
        if dim_name == 'time':
            # size=0 creates an unlimited (appendable) time dimension
            netcdf_output_file_object.createDimension('time', size=0)
            print('time', 'dimension created')
        else:
            netcdf_output_file_object.createDimension(dim_name,
                                                      size=netcdf_first_file_object.dimensions[dim_name].size)
            print(dim_name, 'dimension created')
    # create variables
    # time
    var_name = 'time'
    netcdf_output_file_object.createVariable(var_name, 'int64', (var_name,), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units',
                                                            'seconds since ' + time_seconds_to_str(0, time_format_mod))
    # V1 stores time stamps as strings; convert them to epoch seconds
    time_parsivel_seconds = time_str_to_seconds(np.array(netcdf_first_file_object.variables[var_name][:], dtype=str),
                                                time_format_parsivel)
    netcdf_output_file_object.variables[var_name][:] = np.array(time_parsivel_seconds, dtype='int64')
    print('created time variable')
    # time_YmdHM
    var_name = 'YYYYmmddHHMM'
    netcdf_output_file_object.createVariable(var_name, 'str', ('time',), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'YYYYmmddHHMM in string type')
    netcdf_output_file_object.variables[var_name][:] = np.array(netcdf_first_file_object.variables['time'][:],
                                                                dtype=str)
    print('created time_YmdHM variable')
    # particle_fall_speed
    var_name = 'particles_spectrum'
    if var_name in variable_names_list:
        netcdf_output_file_object.createVariable(var_name,
                                                 netcdf_first_file_object.variables[var_name].dtype,
                                                 netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
        netcdf_output_file_object.variables[var_name].setncattr('units', 'particle counts per bin per minute')
        netcdf_output_file_object.variables[var_name].setncattr('description',
                                                                'for each time stamp, the array varies with respect'
                                                                ' to fall speed on the y axis (rows) starting from the top'
                                                                ' and varies with respect to size on the x axis (columns) '
                                                                'starting from the left')
        netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
        print('created particles_spectrum variable')
    # particle_fall_speed
    var_name = 'particle_fall_speed'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables[var_name].dtype,
                                             ('particle_fall_speed',), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'm/s')
    # V1 stores speed as a 2D grid; keep only the first column as the 1D coordinate
    netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:, 0].copy()
    print('created particle_fall_speed variable')
    # particle_size
    var_name = 'particle_size'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables[var_name].dtype,
                                             ('particle_size',), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'mm')
    # V1 stores size as a 2D grid; keep only the first row as the 1D coordinate
    netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][0, :].copy()
    print('created particle_size variable')
    # precipitation_intensity
    var_name = 'precipitation_intensity'
    netcdf_output_file_object.createVariable(var_name,
                                             'float',
                                             netcdf_first_file_object.variables[
                                                 'Intensity of precipitation (mm|h)'].dimensions, zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'mm/h')
    netcdf_output_file_object.variables[var_name][:] = np.array(
        netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
    print('created precipitation_intensity variable')
    # Weather_code_SYNOP_WaWa
    var_name = 'weather_code_SYNOP_WaWa'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dtype,
                                             netcdf_first_file_object.variables['Weather code SYNOP WaWa'].dimensions,
                                             zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
    netcdf_output_file_object.variables[var_name][:] = \
        netcdf_first_file_object.variables['Weather code SYNOP WaWa'][:].copy()
    # Weather_code_SYNOP_WaWa
    var_name = 'weather_code_METAR_SPECI'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables['Weather code METAR|SPECI'].dtype,
                                             netcdf_first_file_object.variables['Weather code METAR|SPECI'].dimensions,
                                             zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
    netcdf_output_file_object.variables[var_name][:] = \
        netcdf_first_file_object.variables['Weather code METAR|SPECI'][:].copy()
    print('created weather_code_METAR_SPECI variable')
    # Weather_code_NWS
    var_name = 'weather_code_NWS'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables['Weather code NWS'].dtype,
                                             netcdf_first_file_object.variables['Weather code NWS'].dimensions,
                                             zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'n/a')
    NWS_description = '''precip_type_dict = {
    'C': 'No Precip',
    'Kein Niederschlag': 'No Precip',
    'A': 'Hail',
    'L': 'Drizzle',
    'L+': 'heavy Drizzle',
    'L-': 'light Drizzle',
    'R': 'Rain',
    'R+': 'heavy Rain',
    'R-': 'light Rain',
    'RL': 'Drizzle and Rain',
    'RL+': 'heavy Drizzle and Rain',
    'RL-': 'light Drizzle and Rain',
    'RLS': 'Rain, Drizzle and Snow',
    'RLS+': 'heavy Rain, Drizzle and Snow',
    'RLS-': 'light Rain, Drizzle and Snow',
    'S': 'Snow',
    'S+': 'heavy Snow',
    'S-': 'light Snow',
    'SG': 'Snow Grains',
    'SP': 'Freezing Rain'
    }'''
    netcdf_output_file_object.variables[var_name].setncattr('description', NWS_description)
    netcdf_output_file_object.variables[var_name][:] = \
        netcdf_first_file_object.variables['Weather code NWS'][:].copy()
    print('created weather_code_NWS variable')
    # Radar_reflectivity (dBz)
    var_name = 'radar_reflectivity'
    netcdf_output_file_object.createVariable(var_name,
                                             'float',
                                             netcdf_first_file_object.variables['Radar reflectivity (dBz)'].dimensions,
                                             zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'dBz')
    netcdf_output_file_object.variables[var_name][:] = np.array(
        netcdf_first_file_object.variables['Radar reflectivity (dBz)'][:], dtype=float)
    print('created radar_reflectivity variable')
    # particle_count
    var_name = 'particle_count'
    netcdf_output_file_object.createVariable(var_name,
                                             'int64',
                                             netcdf_first_file_object.variables[
                                                 'Number of detected particles'].dimensions, zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', 'counts')
    netcdf_output_file_object.variables[var_name].setncattr('description', 'Number of detected particles per minute')
    netcdf_output_file_object.variables[var_name][:] = np.array(
        netcdf_first_file_object.variables['Number of detected particles'][:], dtype='int64')
    print('created particle_count variable')
    # particle_concentration_spectrum
    var_name = 'particle_concentration_spectrum'
    var_name_old = 'particle_concentration_spectrum_m-3'
    if var_name_old in variable_names_list:
        netcdf_output_file_object.createVariable(var_name,
                                                 netcdf_first_file_object.variables[var_name_old].dtype,
                                                 netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
        netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
        netcdf_output_file_object.variables[var_name].setncattr('description', 'particles per meter cube per class')
        netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
        print('created particle_concentration_spectrum variable')
    # N_total
    var_name = 'N_total'
    var_name_old = 'particle_concentration_total_m-3'
    netcdf_output_file_object.createVariable(var_name,
                                             netcdf_first_file_object.variables[var_name_old].dtype,
                                             netcdf_first_file_object.variables[var_name_old].dimensions, zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
    netcdf_output_file_object.variables[var_name].setncattr('description', 'total particles per meter cube')
    netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name_old][:].copy()
    print('created N_total variable')
    # psd
    var_name = 'psd'
    var_name_old = 'particle_concentration_spectrum_m-3'
    netcdf_output_file_object.createVariable(var_name,
                                             'float',
                                             ('time', 'particle_size',), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', '1/m3')
    netcdf_output_file_object.variables[var_name].setncattr('description', 'particle size distribution, same as '
                                                            'particle_concentration_spectrum but all speeds'
                                                            'bins are summed, only varies with time and size')
    # collapse the fall-speed axis so psd varies only with time and size
    netcdf_output_file_object.variables[var_name][:] = np.sum(netcdf_first_file_object.variables[var_name_old][:],
                                                              axis=1)
    print('created psd variable')
    # rain mask
    rain_only_list = ['R', 'R+', 'R-']
    RR_ = np.array(netcdf_first_file_object.variables['Intensity of precipitation (mm|h)'][:], dtype=float)
    NWS_ = netcdf_first_file_object.variables['Weather code NWS'][:].copy()
    # 0 marks rain minutes (positive intensity AND a rain-only NWS code), 1 otherwise
    rain_mask = np.zeros(RR_.shape[0], dtype=int) + 1
    for r_ in range(RR_.shape[0]):
        if RR_[r_] > 0 and NWS_[r_] in rain_only_list:
            rain_mask[r_] = 0
    var_name = 'rain_mask'
    netcdf_output_file_object.createVariable(var_name,
                                             'int',
                                             ('time',), zlib=True)
    netcdf_output_file_object.variables[var_name].setncattr('units', '0 if rain, 1 if not rain')
    netcdf_output_file_object.variables[var_name].setncattr('description', 'using the NWS code, only used R, R+ and R-')
    netcdf_output_file_object.variables[var_name][:] = rain_mask
    print('rain_mask')
    # close all files
    netcdf_output_file_object.close()
    netcdf_first_file_object.close()
def parsivel_sampling_volume(particle_size_2d, particle_fall_speed_2d):
    """Return the Parsivel sampling volume [m3] per size/speed bin.

    The effective laser sampling area shrinks with particle diameter
    (particle_size_2d in mm); the sampled column height is the fall speed
    [m/s] times the 60 s sampling interval.
    """
    sampling_time_seconds = 60
    effective_area_m2 = 0.18 * (0.03 - (particle_size_2d / 1000) / 2)
    column_height_m = particle_fall_speed_2d * sampling_time_seconds
    return effective_area_m2 * column_height_m
def load_parsivel_txt_to_array(filename_, delimiter_=';'):
    """Parse a raw Parsivel disdrometer ASCII export into numpy arrays.

    Each data line holds 16 metadata/summary columns, optionally followed by a
    flattened 32x32 particle spectrum (counts per speed/size bin). Lines
    without a spectrum get a zero 32x32 spectrum.

    :param filename_: full path of the delimiter-separated Parsivel text file
    :param delimiter_: field separator used in the file (default ';')
    :return: (data_array, spectrum_array, t_list, size_array, speed_array, header_)
             data_array: 2D string array of the non-spectrum columns, one row per record
             spectrum_array: (records, 32, 32) float counts, or None when the
                             header shows no spectrum column was recorded
             t_list: 'date time' strings, one per record
             size_array / speed_array: 32x32 grids of bin-center size [mm] and speed [m/s]
             header_: list of column names from the first line
    """
    # filename_ = 'C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt'
    # center values of the 32 Parsivel particle-size [mm] and fall-speed [m/s] bins
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    # broadcast the two scales into full 32x32 grids (speed varies by row, size by column)
    speed_array = np.zeros((32,32), dtype=float)
    size_array = np.zeros((32, 32), dtype=float)
    for i in range(32):
        speed_array[:,i] = speed_scale
        size_array[i, :] = size_scale
    # read parsivel file
    spectrum_array_list = []
    data_array_list = []
    with open(filename_) as file_object:
        header_ = file_object.readline().split(delimiter_)
        # first data line handled separately from the loop below
        line_str = file_object.readline()
        line_split = np.array(line_str.split(delimiter_))
        if len(line_split) == 17:
            # 16 data columns plus an empty trailing spectrum field -> zero spectrum
            line_split[16] = '0'
            data_array_list.append(line_split[:-1])
            spectrum_array_list.append(np.zeros((32,32)))
        elif len(line_split) > 17:
            # 16 data columns followed by the flattened 32x32 spectrum
            line_split[16] = '0'
            data_array_list.append(line_split[:16])
            line_split[line_split == ''] = '0'  # empty spectrum cells mean zero counts
            spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
        elif len(line_split) == 16:
            # data columns only (no spectrum recorded)
            data_array_list.append(line_split[:-1])
            spectrum_array_list.append(np.zeros((32,32)))
        for line in file_object:
            line_split = np.array(line.split(delimiter_))
            if len(line_split) == 17:
                line_split[16] = '0'
                data_array_list.append(line_split[:-1])
                spectrum_array_list.append(np.zeros((32, 32)))
            elif len(line_split) > 17:
                line_split[16] = '0'
                data_array_list.append(line_split[:16])
                line_split[line_split == ''] = '0'
                spectrum_array_list.append(np.array(line_split[16:-1]).reshape((32, 32)))
            elif len(line_split) == 16:
                # skip repeated header lines embedded mid-file
                if line_split[0] != 'Date':
                    data_array_list.append(line_split[:-1])
                    spectrum_array_list.append(np.zeros((32, 32)))
    data_array = np.stack(data_array_list)
    spectrum_array = np.stack(spectrum_array_list).astype(float)
    # build 'date time' strings from the first two columns
    t_list = []
    for t_ in range(data_array.shape[0]):
        t_list.append(data_array[t_][0] + ' ' + data_array[t_][1])
    if len(header_) == 16:
        # no spectra was set to record
        return data_array, None, t_list, size_array, speed_array, header_
    else:
        return data_array, spectrum_array, t_list, size_array, speed_array, header_
def save_parsivel_arrays_to_netcdf(raw_spectra_filename, nedcdf_output_filename,
                                   delimiter_=';', raw_time_format='%d.%m.%Y %H:%M:%S'):
    """Convert a raw Parsivel text export into a compiled netCDF4 file.

    Reads the text file via load_parsivel_txt_to_array, then writes time,
    size/speed grids, the raw spectrum, per-m3 concentrations and all other
    text columns as string variables.

    :param raw_spectra_filename: path of the Parsivel ASCII file
    :param nedcdf_output_filename: path of the netCDF file to create
    :param delimiter_: field separator of the text file
    :param raw_time_format: strptime format of the text file's time stamps
    :return: None (writes the netCDF file to disk)
    """
    # save_parsivel_arrays_to_netcdf('C:\\_input\\parsivel_2018-07-26-00_2018-08-02-00_1.txt', 'C:\\_input\\parsivel_compiled_3.nc')
    print('reading txt to array')
    data_array, spectrum_array, t_list, size_array, speed_array, header_ = \
        load_parsivel_txt_to_array(raw_spectra_filename, delimiter_=delimiter_)
    print('arrays created')
    file_attributes_tuple_list = [('Compiled by', '<NAME> @: ' + str(datetime.datetime.now())),
                                  ('Data source', 'Parsivel Disdrometer'),
                                  ('time format', 'YYYYMMDDHHmm in uint64 data type, each ' +
                                                  'time stamp is the acumulated precip for one minute')]
    # time from str to int (YYYYMMDDHHmm, stored as text then written as u8)
    time_array = np.zeros(data_array.shape[0], dtype='<U12')
    # for t_ in range(data_array.shape[0]):
    #     time_array[t_] = int(t_list[t_][6:10] +  # YYYY
    #                          t_list[t_][3:5] +  # MM
    #                          t_list[t_][:2] +  # DD
    #                          t_list[t_][12:14] +  # HH
    #                          t_list[t_][15:17])  # mm
    for t_ in range(data_array.shape[0]):
        time_array[t_] = int(time_seconds_to_str(time_str_to_seconds(t_list[t_],raw_time_format),
                                                 time_format_parsivel))
    pollutant_attributes_tuple_list = [('units', 'particles per minute')]
    # create output file
    file_object_nc4 = nc.Dataset(nedcdf_output_filename,'w')#,format='NETCDF4_CLASSIC')
    print('output file started')
    # create dimensions
    file_object_nc4.createDimension('particle_fall_speed', speed_array.shape[0])
    file_object_nc4.createDimension('particle_size', size_array.shape[1])
    file_object_nc4.createDimension('time', time_array.shape[0])
    # create dimension variables (size and speed are full 2D grids)
    file_object_nc4.createVariable('particle_fall_speed', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
    file_object_nc4.createVariable('particle_size', 'f4', ('particle_fall_speed','particle_size',), zlib=True)
    file_object_nc4.createVariable('time', 'u8', ('time',), zlib=True)
    # populate dimension variables
    file_object_nc4.variables['time'][:] = time_array[:]
    file_object_nc4.variables['particle_fall_speed'][:] = speed_array[:]
    file_object_nc4.variables['particle_size'][:] = size_array[:]
    # create particles_spectrum array
    if spectrum_array is not None:
        file_object_nc4.createVariable('particles_spectrum', 'u2',
                                       ('time', 'particle_fall_speed', 'particle_size',), zlib=True)
        # populate
        file_object_nc4.variables['particles_spectrum'][:] = spectrum_array[:]
    # create particle_concentration_spectrum_m-3
    # get sampling volume
    # NOTE(review): the lines below are outside the None-guard above, so a file
    # recorded without spectra (spectrum_array is None) would fail here and at
    # the 'particles_spectrum' attribute loop further down -- confirm intended
    sampling_volume_2d = parsivel_sampling_volume(size_array, speed_array)
    particle_concentration_spectrum = spectrum_array / sampling_volume_2d
    # create variable
    file_object_nc4.createVariable('particle_concentration_spectrum_m-3', 'float32',
                                   ('time', 'particle_fall_speed', 'particle_size',), zlib=True)
    # populate
    file_object_nc4.variables['particle_concentration_spectrum_m-3'][:] = particle_concentration_spectrum[:]
    # create particle_concentration_total_m-3 (sum over size and speed bins)
    particle_concentration_total = np.nansum(np.nansum(particle_concentration_spectrum, axis=-1), axis=-1)
    # create variable
    file_object_nc4.createVariable('particle_concentration_total_m-3', 'float32',
                                   ('time', ), zlib=True)
    # populate
    file_object_nc4.variables['particle_concentration_total_m-3'][:] = particle_concentration_total[:]
    for attribute_ in pollutant_attributes_tuple_list:
        setattr(file_object_nc4.variables['particles_spectrum'], attribute_[0], attribute_[1])
    # create other data variables (one string variable per remaining text column)
    for i_, head_ in enumerate(header_[:-1]):
        var_name = head_.replace('/','|')  # '/' is not safe in netCDF variable names
        print('storing var name: ' , var_name)
        temp_ref = file_object_nc4.createVariable(var_name, str, ('time',), zlib=True)
        temp_ref[:] = data_array[:, i_]
    for attribute_ in file_attributes_tuple_list:
        setattr(file_object_nc4, attribute_[0], attribute_[1])
    file_object_nc4.close()
    print('Done!')
def load_parsivel_from_nc(netcdf_filename):
    """Load every variable of a compiled Parsivel netCDF file into memory.

    :param netcdf_filename: path of the netCDF file
    :return: (values_dict, variable_names) where values_dict maps each
             variable name to a copy of its full data array
    """
    dataset = nc.Dataset(netcdf_filename, 'r')
    variable_names = dataset.variables.keys()
    values_dict = {name_: dataset.variables[name_][:].copy() for name_ in variable_names}
    dataset.close()
    return values_dict, variable_names
def parsivel_plot_spectrum_counts(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
                                  contourF=False, bins_=(0,2,5,10,20,50,100,200), fig_size=(5,5)):
    """Plot a 32x32 Parsivel spectrum as color-binned particle counts.

    :param arr_: 32x32 array of particle counts (speed rows, size columns)
    :param title_: plot title
    :param x_range_tuple: particle size axis limits [mm]
    :param y_range_tuple: particle speed axis limits [m/s]
    :param save_filename: if None, show interactively; else save figure and close
    :param contourF: if True use filled contours instead of pcolormesh
    :param bins_: count thresholds; each cell is colored by the highest threshold it exceeds
    :param fig_size: figure size in inches
    :return: (fig, ax)
    """
    cmap_parsivel = ListedColormap(['white', 'yellow', 'orange', 'lime', 'darkgreen',
                                    'aqua', 'purple', 'navy', 'red'], 'indexed')
    # center values of the 32 Parsivel size [mm] and fall-speed [m/s] bins
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    speed_array = np.zeros((32,32), dtype=float)
    size_array = np.zeros((32, 32), dtype=float)
    for i in range(32):
        speed_array[:,i] = speed_scale
        size_array[i, :] = size_scale
    # map raw counts to color indices: cells above successive thresholds get higher indices
    spectrum_array_color = np.zeros((arr_.shape[0], arr_.shape[1]), dtype=float)
    bin_labels = []
    i_ = 0  # keeps the last bin index for the '>' label and colorbar ticks below
    for i_, bin_ in enumerate(bins_):
        spectrum_array_color[arr_ > bin_] = i_ + 1
        bin_labels.append(str(bin_))
    bin_labels[i_] = '>' + bin_labels[i_]  # topmost bin is open-ended
    fig, ax = plt.subplots(figsize=fig_size)
    if contourF:
        quad1 = ax.contourf(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
                            vmin=0, vmax=8)
    else:
        quad1 = ax.pcolormesh(size_array, speed_array, spectrum_array_color, cmap=cmap_parsivel,
                              vmin=0, vmax=8)
    ax.set_ylim(y_range_tuple)
    ax.set_xlim(x_range_tuple)
    ax.set_xlabel('particle size [mm]')
    ax.set_ylabel('particle speed [m/s]')
    ax.set_title(title_)
    cbar_label = 'Particles per bin'
    cb2 = fig.colorbar(quad1)#, ticks=[0,1,2,3,4,5,6,7])
    # place one tick at the center of each color band
    ticks_ = np.linspace(0.5, i_+0.5, len(bins_))
    cb2.set_ticks(ticks_)
    cb2.set_ticklabels(bin_labels)
    cb2.ax.set_ylabel(cbar_label)
    if save_filename is None:
        plt.show()
    else:
        fig.savefig(save_filename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    return fig, ax
def parsivel_plot_spectrum_DSD(arr_, title_='', x_range_tuple=(0, 6), y_range_tuple=(0, 10), save_filename=None,
                               contourF=False, fig_size=(5,5), cmap_=default_cm, cbar_label='DSD [m-3]',
                               nozeros_=True, vmin_=None, vmax_=None,):
    """Plot a 32x32 Parsivel drop-size-distribution spectrum with a continuous colormap.

    :param arr_: 32x32 array of concentrations (speed rows, size columns)
    :param title_: plot title
    :param x_range_tuple: particle size axis limits [mm]
    :param y_range_tuple: particle speed axis limits [m/s]
    :param save_filename: if None, show interactively; else save figure and close
    :param contourF: if True use filled contours instead of pcolormesh
    :param fig_size: figure size in inches
    :param cmap_: matplotlib colormap
    :param cbar_label: colorbar axis label
    :param nozeros_: if True, zeros are masked (plotted blank) via NaN
    :param vmin_: lower color limit (pcolormesh only)
    :param vmax_: upper color limit (pcolormesh only)
    :return: (fig, ax)
    """
    # center values of the 32 Parsivel size [mm] and fall-speed [m/s] bins
    size_scale = [0.062,0.187,0.312,0.437,0.562,0.687,0.812,0.937,1.062,1.187,1.375,1.625,1.875,
                  2.125,2.375,2.75,3.25,3.75,4.25,4.75,5.5,6.5,7.5,8.5,9.5,11,13,15,17,19,21.5,24.5]
    speed_scale = [0.05,0.15,0.25,0.35,0.45,0.55,0.65,0.75,0.85,0.95,1.1,1.3,1.5,1.7,1.9,2.2,2.6,3,3.4,
                   3.8,4.4,5.2,6,6.8,7.6,8.8,10.4,12,13.6,15.2,17.6,20.8]
    speed_array = np.zeros((32,32), dtype=float)
    size_array = np.zeros((32, 32), dtype=float)
    for i in range(32):
        speed_array[:,i] = speed_scale
        size_array[i, :] = size_scale
    if nozeros_:
        # work on a copy so the caller's array keeps its zeros
        arr_ = np.array(arr_)
        arr_[arr_ == 0] = np.nan
    fig, ax = plt.subplots(figsize=fig_size)
    if contourF:
        quad1 = ax.contourf(size_array, speed_array, arr_, cmap=cmap_)
    else:
        quad1 = ax.pcolormesh(size_array, speed_array, arr_, cmap=cmap_, vmin=vmin_, vmax=vmax_)
    ax.set_ylim(y_range_tuple)
    ax.set_xlim(x_range_tuple)
    ax.set_xlabel('particle size [mm]')
    ax.set_ylabel('particle speed [m/s]')
    ax.set_title(title_)
    cb2 = fig.colorbar(quad1)
    cb2.ax.set_ylabel(cbar_label)
    if save_filename is None:
        plt.show()
    else:
        fig.savefig(save_filename, transparent=True, bbox_inches='tight')
        plt.close(fig)
    return fig, ax
def calculate_cumulative_precipitation_parsivel(parsivel_precipitation_mm_per_hour, parsivel_time_sec, time_period_str):
    """Accumulated precipitation [mm] over a 'start_stop' period.

    Rates are per-minute samples in mm/h, hence the division by 60.
    """
    start_stamp, stop_stamp = time_period_str.split('_')[0], time_period_str.split('_')[1]
    row_start = time_to_row_str(parsivel_time_sec, start_stamp)
    row_stop = time_to_row_str(parsivel_time_sec, stop_stamp)
    return np.nansum(parsivel_precipitation_mm_per_hour[row_start:row_stop]) / 60
def calculate_D_m(N_D, D_series):
D_grad = np.gradient(D_series)
D_m = np.nansum((N_D * (D_series**4) * D_grad)) / np.nansum((N_D * (D_series ** 3) * D_grad))
return D_m
def calculate_LWC(N_D, D_series):
    """Liquid water content from a drop size distribution.

    :param N_D: number concentrations per diameter bin
    :param D_series: diameters of the bins (same length as N_D)
    :return: LWC, computed as (pi * rho_w / 6) * sum(N_D * D^3 * dD)
    """
    dD = np.gradient(D_series)
    rho_water = 1E6  # g/m3
    third_moment = np.nansum(N_D * (D_series ** 3) * dD)
    return (np.pi * rho_water / 6) * third_moment
# Holographic microscope
def convert_raw_to_array(filename_):
    """Read a raw 16-bit little-endian holographic microscope frame.

    The file stores pixels as byte pairs (low byte first); pixels are
    combined to one value and reshaped to the fixed sensor geometry.

    :param filename_: path of the raw frame file (2592 x 1944 x 2 bytes)
    :return: (1944, 2592) integer array of pixel intensities (0..65535)
    """
    print('converting file: ' + filename_.split('/')[-1])
    A = np.fromfile(filename_, dtype='uint8')
    evenEl = A[1::2]  # high bytes
    oddEl = A[0::2]   # low bytes
    # bug fix: widen before combining -- 256 * uint8 promotes to a small
    # integer type and overflows for high bytes > 127
    B = 256 * evenEl.astype(np.int32) + oddEl
    width = 2592
    height = 1944
    I = B.reshape(height, width)
    return I
def create_video_from_filelist(file_list, output_filename, cmap_):
    """Convert a list of raw holographic frames into a video animation.

    :param file_list: raw frame filenames; characters [-21:-4] of each name
                      are used as the per-frame timestamp label
    :param output_filename: path of the video file to create
    :param cmap_: matplotlib colormap passed to the animation helper
    :return: None
    """
    width = 2592
    height = 1944
    # NOTE(review): frames are stored as uint8 but convert_raw_to_array builds
    # 16-bit pixel values, so intensities are truncated here -- confirm intended
    array_3d = np.zeros((len(file_list), height, width), dtype='uint8')
    time_list = []
    for t_, filename_ in enumerate(file_list):
        array_3d[t_,:,:] = convert_raw_to_array(filename_)
        time_list.append(filename_[-21:-4])
    create_video_animation_from_3D_array(array_3d, output_filename, colormap_= cmap_, title_list=time_list,
                                         axes_off=True, show_colorbar=False, interval_=500)
def convert_array_to_png_array(array_):
    """Pack a 2D float array into a 3-channel uint8 array (lossy sign/mantissa/exponent encoding).

    Channel layout produced below:
      channel 0: leading digits of the normalized value (+100 when the value is >= 0)
      channel 1: next two decimal digits of the normalized value (+100 where the input was NaN)
      channel 2: |decimal exponent| (+100 when the adjusted exponent is >= 0)

    NOTE(review): the input array is modified in place (NaNs overwritten with 0)
    -- confirm callers do not reuse it afterwards.

    :param array_: 2D numeric array, may contain NaNs
    :return: (rows, columns, 3) uint8 array
    """
    # shape
    rows_ = array_.shape[0]
    columns_ = array_.shape[1]
    # nan layer: 100 marks cells that were NaN (x != x is the NaN test)
    array_nan = np.zeros((rows_, columns_), dtype='uint8')
    array_nan[array_ != array_] = 100
    # replace nans
    array_[array_ != array_] = 0
    # convert to all positive
    array_positive = np.abs(array_)
    # sign layer: 100 marks non-negative values
    array_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_sign[array_ >= 0] = 100
    # zeros array: 1 where the magnitude is non-zero
    array_zeros = np.zeros((rows_, columns_), dtype='uint8')
    array_zeros[array_positive != 0] = 1
    # sub 1 array: 1 where 0 < |value| < 1 (needs an extra x10 below)
    array_sub1 = np.zeros((rows_, columns_), dtype='uint8')
    array_sub1[array_positive<1] = 1
    array_sub1 = array_sub1 * array_zeros
    # power array: integer part of log10(|value|), forced to 0 for zeros
    exp_ = np.array(np.log10(array_positive), dtype=int)
    exp_[array_zeros==0] = 0
    # integral array: |value| scaled into its mantissa by the exponent
    array_integral = array_positive / 10 ** np.array(exp_, dtype=float)
    # array_layer_1: leading digits (sub-1 values get an extra x10), plus sign marker
    array_layer_1 = np.array(((array_sub1 * 9) + 1) * array_integral * 10, dtype='uint8') + array_sign
    # array_layer_2: fractional remainder of layer 1, two decimal digits
    array_layer_2 = np.array(((array_integral * ((array_sub1 * 9) + 1) * 10)
                              - np.array(array_integral * ((array_sub1 * 9) + 1) * 10, dtype='uint8')) * 100,
                             dtype='uint8')
    array_layer_2 = array_layer_2 + array_nan
    # power sign layer: compensate the extra x10 for sub-1 values, then mark non-negative exponents
    exp_ = exp_ - array_sub1
    array_power_sign = np.zeros((rows_, columns_), dtype='uint8')
    array_power_sign[exp_ >= 0] = 100
    # array_layer_3: absolute exponent plus its sign marker
    array_layer_3 = np.abs(exp_) + array_power_sign
    # initialize out array
    out_array = np.zeros((rows_, columns_, 3), dtype='uint8')
    # dump into out array
    out_array[:, :, 0] = array_layer_1
    out_array[:, :, 1] = array_layer_2
    out_array[:, :, 2] = array_layer_3
    return out_array
# netcdf file handling
def netCDF_crop_timewise(input_filename, time_stamp_start_str_YYYYmmDDHHMM, time_stamp_stop_str_YYYYmmDDHHMM,
                         output_filename=None, vars_to_keep=None, time_dimension_name='time'):
    """
    Creates a copy of an input netCDF4 file with only a subset of the data
    :param input_filename: netCDF4 file with path
    :param time_stamp_start_str_YYYYmmDDHHMM: start time stamp, string in YYYYmmDDHHMM format
    :param time_stamp_stop_str_YYYYmmDDHHMM: stop time stamp, string in YYYYmmDDHHMM format
    :param output_filename: filename with path and .nc extension. If none, output file will be in same folder as input
    :param vars_to_keep: list of variable names in str to be kept in output copy. If none, all variables will be copied
    :param time_dimension_name: name of time dimension
    :return: 0 if good, filename if error
    """
    error_file = 0
    try:
        # read only the time axis first, to translate the stamps into row indexes
        nc_input_file = nc.Dataset(input_filename)
        time_array = nc_input_file.variables[time_dimension_name][:].copy()
        nc_input_file.close()
        r_1 = time_to_row_str(time_array, time_stamp_start_str_YYYYmmDDHHMM)
        r_2 = time_to_row_str(time_array, time_stamp_stop_str_YYYYmmDDHHMM)
        # load only rows r_1:r_2 of the time-dependent variables, then rewrite
        dict_ = load_netcdf_to_dictionary(input_filename, var_list=vars_to_keep,
                                          time_tuple_start_stop_row=(r_1,r_2), time_dimension_name=time_dimension_name)
        if output_filename is None:
            output_filename = input_filename[:-3] + '_trimmed_' + str(r_1) + '_' + str(r_2) + '.nc'
        save_dictionary_to_netcdf(dict_, output_filename)
    except BaseException as error_msg:
        print(error_msg)
        error_file = input_filename
    return error_file
def add_variable_to_netcdf_file(nc_filename, variables_dict):
    """
    Opens and adds a variable(s) to the file. Will not add new dimensions.
    :param nc_filename: str including path
    :param variables_dict:
        must be a dictionary with keys as variables. inside each variables key should have a dictionary
        inside with variable names as keys
        Each var most have a data key equal to a numpy array (can be masked) and a attribute key
        Each var most have a dimensions key equal to a tuple, in the same order as the array's dimensions
        Each var most have a attributes key equal to a list of tuples with name and description text
    :return: None
    """
    # check if dict_ has the right format before touching the file
    vars_list = variables_dict.keys()
    for var_ in vars_list:
        if 'dimensions' not in variables_dict[var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its dimensions')
            return
        if 'attributes' not in variables_dict[var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its attributes')
            return
    # open file in append mode
    file_obj = nc.Dataset(nc_filename,'a')
    print('file opened, do not close this thread or file might be corrupted')
    try:
        # check that variable shapes agree with destination file
        for var_ in vars_list:
            dim_list = list(variables_dict[var_]['dimensions'])
            var_shape = variables_dict[var_]['data'].shape
            for i_, dim_ in enumerate(dim_list):
                if dim_ in sorted(file_obj.dimensions):
                    if var_shape[i_] == file_obj.dimensions[dim_].size:
                        pass
                    else:
                        print('Variable', var_, 'has dimension', dim_,
                              'of different size compared to destination file\nfile closed')
                        file_obj.close()
                        return
                else:
                    print('Variable', var_, 'has dimension', dim_,
                          'which does not exist in destination file\nfile closed')
                    file_obj.close()
                    return
            # create variables
            print('creating', var_, 'variable')
            file_obj.createVariable(var_,
                                    variables_dict[var_]['data'].dtype,
                                    variables_dict[var_]['dimensions'], zlib=True)
            # populate variables
            file_obj.variables[var_][:] = variables_dict[var_]['data']
            for var_attr in variables_dict[var_]['attributes']:
                # fill value can only be set at variable creation time; skip it
                if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
                    pass
                else:
                    setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
            print('created', var_, 'variable')
    except BaseException as error_msg:
        file_obj.close()
        print('error, file closed\n', error_msg)
        # bug fix: abort here -- the original fell through and closed the
        # already-closed file a second time while printing 'All good'
        return
    print('All good, closing file')
    file_obj.close()
    print('Done!')
def save_dictionary_to_netcdf(dict_, output_filename):
    """
    Saves a dictionary with the right format to a netcdf file. First dim will be set to unlimited.
    :param dict_: must have a dimensions key, a variables key, and a attributes key.
    dimensions key should have a list of the names of the dimensions
    variables key should have a dictionary inside with variable names as keys
    attributes key should have a list of tuples inside, with the name of the attribute and description in each tuple
    Each var most have a data key equal to a numpy array (can be masked) and a attribute key
    Each var most have a dimensions key equal to a tuple, in the same order as the array's dimensions
    all attributes are tuples with name and description text
    :param output_filename: should include full path and extension
    :return: None
    """
    # check if dict_ has the right format
    for required_key in ('variables', 'dimensions', 'attributes'):
        if required_key not in dict_.keys():
            print('dictionary has the wrong format, missing ' + required_key + ' key')
            return
    # create dimension and variables lists
    vars_list = dict_['variables'].keys()
    dims_list = dict_['dimensions']
    for dim_ in dims_list:
        if dim_ not in vars_list:
            print('dictionary has the wrong format, ' + dim_ + 'dimension is missing from variables')
            # bug fix: abort like the sibling checks do -- continuing would
            # raise a KeyError when creating this dimension below
            return
    for var_ in vars_list:
        if 'dimensions' not in dict_['variables'][var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its dimensions')
            return
        if 'attributes' not in dict_['variables'][var_].keys():
            print('dictionary has the wrong format, ' + var_ + 'variable is missing its attributes')
            return
    # create output file
    file_obj = nc.Dataset(output_filename,'w')#,format='NETCDF4_CLASSIC')
    print('output file started')
    # populate file's attributes
    for attribute_ in dict_['attributes']:
        setattr(file_obj, attribute_[0], attribute_[1])
    # create dimensions; the first listed dimension becomes unlimited (size=0)
    for i_, dim_ in enumerate(dims_list):
        if i_ == 0:
            file_obj.createDimension(dim_, size=0)
        else:
            # size taken from the axis of the dimension variable that matches its own name
            shape_index = np.argwhere(np.array(dict_['variables'][dim_]['dimensions']) == dim_)[0][0]
            file_obj.createDimension(dim_, dict_['variables'][dim_]['data'].shape[shape_index])
    print('dimensions created')
    # create and populate variables
    for var_ in vars_list:
        print('creating', var_, 'variable')
        file_obj.createVariable(var_,
                                dict_['variables'][var_]['data'].dtype,
                                dict_['variables'][var_]['dimensions'], zlib=True)
        file_obj.variables[var_][:] = dict_['variables'][var_]['data']
        for var_attr in dict_['variables'][var_]['attributes']:
            if isinstance(var_attr, str):
                # attributes supplied as a single (name, value) tuple instead of a list of tuples
                setattr(file_obj.variables[var_], dict_['variables'][var_]['attributes'][0],
                        dict_['variables'][var_]['attributes'][1])
                break
            else:
                # fill value can only be set at variable creation time; skip it
                if var_attr[0] == '_FillValue' or var_attr[0] == 'fill_value':
                    pass
                else:
                    setattr(file_obj.variables[var_], var_attr[0], var_attr[1])
        print('created', var_, 'variable')
    print('storing data to disk and closing file')
    file_obj.close()
    print('Done!')
def load_netcdf_to_dictionary(filename_, var_list=None, time_tuple_start_stop_row=None, time_dimension_name='time'):
    """
    creates a dictionary from a netcdf file, with the following format
    :param filename_: filename with path of a netCDF4 file
    :param var_list: list of variables to be loaded, if none, all variables will be loaded
    :param time_tuple_start_stop_row: tuple with two time rows, time dimension will be trimmed r_1:r_2
    :param time_dimension_name: name of time dimension
    :return: dict_: have a dimensions key, a variables key, and a attributes key.
    Each var have a data key equal to a numpy array (can be masked) and a attribute key
    Each var have a dimensions key equal to a tuple, in the same order as the array's dimensions
    all attributes are tuples with name and description text
    """
    # create output dict
    out_dict = {}
    # open file
    file_obj = nc.Dataset(filename_, 'r')  # ,format='NETCDF4_CLASSIC')
    print('output file started')
    # get file's attr
    file_att_list_tuple = []
    for attr_ in file_obj.ncattrs():
        file_att_list_tuple.append((attr_, file_obj.getncattr(attr_)))
    out_dict['attributes'] = file_att_list_tuple
    # get dimensions
    out_dict['dimensions'] = sorted(file_obj.dimensions)
    # get variables
    if var_list is None:
        var_list = sorted(file_obj.variables)
    out_dict['variables'] = {}
    # create variables
    for var_ in var_list:
        out_dict['variables'][var_] = {}
        # trim the time axis only for variables that actually have that dimension
        if time_tuple_start_stop_row is not None:
            if time_dimension_name in file_obj.variables[var_].dimensions:
                out_dict['variables'][var_]['data'] = file_obj.variables[var_][time_tuple_start_stop_row[0]:
                                                                               time_tuple_start_stop_row[1]]
            else:
                out_dict['variables'][var_]['data'] = file_obj.variables[var_][:]
        else:
            out_dict['variables'][var_]['data'] = file_obj.variables[var_][:]
        # NOTE(review): this assignment is redundant -- it is overwritten with
        # the (name, value) tuple list a few lines below
        out_dict['variables'][var_]['attributes'] = file_obj.variables[var_].ncattrs()
        var_att_list_tuple = []
        for attr_ in file_obj.variables[var_].ncattrs():
            var_att_list_tuple.append((attr_, file_obj.variables[var_].getncattr(attr_)))
        out_dict['variables'][var_]['attributes'] = var_att_list_tuple
        out_dict['variables'][var_]['dimensions'] = file_obj.variables[var_].dimensions
        print('read variable', var_)
    file_obj.close()
    print('Done!')
    return out_dict
def merge_multiple_netCDF_by_time_dimension(directory_where_nc_file_are_in_chronological_order, output_path='',
                                            output_filename=None, time_variable_name='time', time_dimension_name=None,
                                            vars_to_keep=None, nonTimeVars_check_list=None,
                                            key_search_str='', seek_in_subfolders=False, force_file_list=None):
    """Concatenate several netCDF files along their time dimension into one file.

    The first file becomes the template (dimensions, variables, attributes);
    every later file's time-dependent variables are appended after it.

    :param directory_where_nc_file_are_in_chronological_order: folder with the input .nc files
    :param output_path: folder for the merged file (default: same as first input)
    :param output_filename: merged filename; if None, '<first file>_merged.nc'
    :param time_variable_name: name of the time variable
    :param time_dimension_name: name of the time dimension; defaults to time_variable_name
    :param vars_to_keep: subset of variable names to copy; if None, all
    :param nonTimeVars_check_list: variables expected identical across files; mismatches are reported
    :param key_search_str: substring filter for input filenames
    :param seek_in_subfolders: if True, search the folder recursively
    :param force_file_list: explicit list of files, overrides the directory search
    :return: None (writes the merged file to disk)
    """
    # build the sorted list of input files
    if force_file_list is not None:
        file_list_all = sorted(force_file_list)
    else:
        if seek_in_subfolders:
            if key_search_str == '':
                file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order))
            else:
                file_list_all = sorted(list_files_recursive(directory_where_nc_file_are_in_chronological_order,
                                                            filter_str=key_search_str))
        else:
            file_list_all = sorted(glob.glob(str(directory_where_nc_file_are_in_chronological_order
                                                 + '*' + key_search_str + '*.nc')))
    print('Files to be merged (in this order):')
    parameter_list = ''
    for i, parameter_ in enumerate(file_list_all):
        parameter_list = str(parameter_list) + str(i) + " ---> " + str(parameter_) + '\n'
    print(parameter_list)
    # create copy of first file
    if output_filename is None:
        if output_path == '':
            output_filename = file_list_all[0][:-3] + '_merged.nc'
        else:
            output_filename = output_path + file_list_all[0].split('\\')[-1][:-3] + '_merged.nc'
    # define time variable and dimension
    if time_dimension_name is None:
        time_dimension_name = time_variable_name
    # check if time dimension is unlimited
    netcdf_first_file_object = nc.Dataset(file_list_all[0], 'r')
    if netcdf_first_file_object.dimensions[time_dimension_name].size == 0 and vars_to_keep is None:
        # all good, just make copy of file with output_filename name
        # NOTE(review): in this branch variable_names_list is never defined, yet
        # it is read below (vars_list assignment) -- likely a NameError; confirm
        netcdf_first_file_object.close()
        shutil.copyfile(file_list_all[0], output_filename)
        print('first file in merger list has unlimited time dimension, copy created with name:', output_filename)
    else:
        # not so good, create new file and copy everything from first, make time dimension unlimited...
        netcdf_output_file_object = nc.Dataset(output_filename, 'w')
        print('first file in merger list does not have unlimited time dimension, new file created with name:',
              output_filename)
        # copy main attributes
        attr_list = netcdf_first_file_object.ncattrs()
        for attr_ in attr_list:
            netcdf_output_file_object.setncattr(attr_, netcdf_first_file_object.getncattr(attr_))
        print('main attributes copied')
        # create list for dimensions and variables
        dimension_names_list = sorted(netcdf_first_file_object.dimensions)
        if vars_to_keep is None:
            variable_names_list = sorted(netcdf_first_file_object.variables)
        else:
            variable_names_list = vars_to_keep
        # create dimensions (time becomes unlimited, size=0)
        for dim_name in dimension_names_list:
            if dim_name == time_dimension_name:
                netcdf_output_file_object.createDimension(time_dimension_name, size=0)
                print(time_variable_name, 'dimension created')
            else:
                netcdf_output_file_object.createDimension(dim_name,
                                                          size=netcdf_first_file_object.dimensions[dim_name].size)
                print(dim_name, 'dimension created')
        # create variables
        for var_name in variable_names_list:
            # create
            netcdf_output_file_object.createVariable(var_name,
                                                     netcdf_first_file_object.variables[var_name].dtype,
                                                     netcdf_first_file_object.variables[var_name].dimensions, zlib=True)
            print(var_name, 'variable created')
            # copy the attributes
            attr_list = netcdf_first_file_object.variables[var_name].ncattrs()
            for attr_ in attr_list:
                netcdf_output_file_object.variables[var_name].setncattr(attr_,
                                                                        netcdf_first_file_object.variables[
                                                                            var_name].getncattr(attr_))
            print('variable attributes copied')
            # copy the data to the new file
            netcdf_output_file_object.variables[var_name][:] = netcdf_first_file_object.variables[var_name][:].copy()
            print('variable data copied')
            print('-=' * 20)
        # close all files
        netcdf_output_file_object.close()
        netcdf_first_file_object.close()
    print('starting to copy other files into merged file')
    vars_list = variable_names_list
    for filename_ in file_list_all[1:]:
        # open output file for appending data
        netcdf_output_file_object = nc.Dataset(output_filename, 'a')
        print('-' * 5)
        print('loading file:', filename_)
        # open hourly file
        netcdf_file_object = nc.Dataset(filename_, 'r')
        # get time array
        time_hourly = np.array(netcdf_file_object.variables[time_variable_name][:], dtype=float)
        row_start = netcdf_output_file_object.variables[time_variable_name].shape[0]
        row_end = time_hourly.shape[0] + row_start
        # append time array
        netcdf_output_file_object.variables[time_variable_name][row_start:row_end] = time_hourly
        # append all other variables that only time dependent
        for var_name in vars_list:
            if var_name != time_variable_name:
                if time_dimension_name in netcdf_output_file_object.variables[var_name].dimensions:
                    netcdf_output_file_object.variables[var_name][row_start:row_end] = \
                        netcdf_file_object.variables[var_name][:].copy()
        # check non time dependent variables for consistency
        vars_list_sub = sorted(netcdf_file_object.variables)
        # NOTE(review): netcdf_first_file_object was closed above; reading its
        # .variables here should raise on a closed Dataset -- confirm
        if vars_list_sub != sorted(netcdf_first_file_object.variables):
            print('Alert! Variables in first file are different than other files')
            print('first file variables:')
            p_(sorted(netcdf_first_file_object.variables))
            print(filename_, 'file variables:')
            p_(vars_list_sub)
        if nonTimeVars_check_list is not None:
            for var_name in nonTimeVars_check_list:
                if np.nansum(np.abs(netcdf_file_object.variables[var_name][:].copy() -
                                    netcdf_output_file_object.variables[var_name][:].copy())) != 0:
                    print('Alert!', var_name, 'from file:', filename_, 'does not match the first file')
                    # copy the attributes
                    # NOTE(review): this stores a whole data array as an attribute
                    # whose name embeds the filename -- confirm intended
                    netcdf_output_file_object.variables[var_name].setncattr(
                        'values from file ' + filename_, netcdf_file_object.variables[var_name][:].copy()
                    )
        netcdf_file_object.close()
        netcdf_output_file_object.close()
    print('done')
def load_netcdf_file_variable(filename_, variable_name_list=None):
    """Load attributes, data, variable attributes and dimensions of a netCDF file.

    NOTE(review): this relies on the private '_attributes' mapping (scipy.io-style
    netcdf objects); netCDF4 Dataset/Variable do not expose it -- confirm which
    reader 'nc' is here.

    :param filename_: path of the netCDF file
    :param variable_name_list: variables to load; if None, all variables
    :return: (file_attributes_dict, file_var_values_dict, file_var_attrib_dict, file_dim_dict)
    """
    netcdf_file_object = nc.Dataset(filename_, 'r')
    file_attributes_dict = {}
    file_var_values_dict = {}
    file_var_attrib_dict = {}
    file_dim_dict = {}
    if variable_name_list is None: variable_name_list = list(netcdf_file_object.variables)
    for atr_ in netcdf_file_object._attributes:
        file_attributes_dict[atr_] = netcdf_file_object._attributes[atr_]
    for dim_ in netcdf_file_object.dimensions:
        file_dim_dict[dim_] = netcdf_file_object.dimensions[dim_]
    for var_ in variable_name_list:
        file_var_values_dict[var_] = netcdf_file_object.variables[var_][:].copy()
        # NOTE(review): each iteration overwrites the same key, so only the last
        # attribute of each variable is kept -- confirm intended
        for atr_ in netcdf_file_object.variables[var_]._attributes:
            file_var_attrib_dict[var_] = netcdf_file_object.variables[var_]._attributes[atr_]
    netcdf_file_object.close()
    return file_attributes_dict, file_var_values_dict, file_var_attrib_dict, file_dim_dict
def save_array_list_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
    """Write each array of array_list as its own netCDF variable.

    Each variable gets private dimensions named '<var-index>_<axis-index>',
    so arrays of any (and differing) shapes can be stored side by side.

    :param array_list: list of numpy arrays
    :param name_list: variable name for each array
    :param units_list: 'units' attribute value for each array
    :param attributes_list: list of (name, value) tuples set as file attributes
    :param out_filename: path of the netCDF file to create
    :return: None
    """
    file_object = nc.Dataset(out_filename, 'w')
    # file_object.history = 'Created for a test'
    for variable_ in range(len(array_list)):
        dim_list_name = []
        for dim_ in range(len(array_list[variable_].shape)):
            dim_name = str(variable_) + '_' + str(dim_)
            dim_list_name.append(dim_name)
            file_object.createDimension(dim_name, array_list[variable_].shape[dim_])
        dtype_ = str(array_list[variable_].dtype)[0]
        file_object.createVariable( name_list[variable_], dtype_, tuple(dim_list_name) )
        setattr(file_object.variables[name_list[variable_]], 'units',units_list[variable_])
        # bug fix: write the data INTO the variable with [:] -- the original
        # plain assignment replaced the .variables mapping entry and stored nothing
        file_object.variables[name_list[variable_]][:] = array_list[variable_][:]
    for atri_ in attributes_list:
        setattr(file_object, atri_[0], atri_[1])
    file_object.close()
def save_time_series_as_netcdf(array_list, name_list, units_list, attributes_list, out_filename):
    """Write several equally-long 1D arrays to a netCDF file sharing one 'time' dimension.

    :param array_list: list of 1D numpy arrays, all with the same length
    :param name_list: variable name for each array
    :param units_list: 'units' attribute value for each array
    :param attributes_list: list of (name, value) tuples set as file attributes
    :param out_filename: path of the netCDF file to create
    :return: None
    """
    nc_out = nc.Dataset(out_filename, 'w')
    # single shared time dimension, sized by the first array
    nc_out.createDimension('time', array_list[0].shape[0])
    for name_, data_, units_ in zip(name_list, array_list, units_list):
        type_code = str(data_.dtype)[0]
        if type_code == '<':
            # little-endian marker means a string dtype here; store as char
            type_code = 'S1'
        nc_out.createVariable(name_, type_code, ('time',))
        setattr(nc_out.variables[name_], 'units', units_)
        nc_out.variables[name_][:] = data_[:]
    for attr_name, attr_value in attributes_list:
        setattr(nc_out, attr_name, attr_value)
    nc_out.close()
def save_emissions_to_new_netcdf(out_filename, emissions_array, pollutant_name, time_array, lat_array, lon_array,
                                 file_attributes_tuple_list, pollutant_attributes_tuple_list):
    """Create a netCDF file holding one (time, lat, lon) emissions field.

    :param out_filename: path of the netCDF file to create
    :param emissions_array: 3D array shaped (time, lat, lon)
    :param pollutant_name: name of the emissions variable
    :param time_array, lat_array, lon_array: 1D coordinate arrays
    :param file_attributes_tuple_list: (name, value) tuples set on the file
    :param pollutant_attributes_tuple_list: (name, value) tuples set on the variable
    :return: None
    """
    nc_out = nc.Dataset(out_filename, 'w')
    coord_data = {'time': time_array, 'lat': lat_array, 'lon': lon_array}
    # dimensions first, then coordinate variables with their values
    for dim_name in ('lat', 'lon', 'time'):
        nc_out.createDimension(dim_name, coord_data[dim_name].shape[0])
    for dim_name in ('time', 'lat', 'lon'):
        nc_out.createVariable(dim_name, str(coord_data[dim_name].dtype)[0], (dim_name,))
        nc_out.variables[dim_name][:] = coord_data[dim_name][:]
    # the emissions field itself
    nc_out.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
    nc_out.variables[pollutant_name][:] = emissions_array[:]
    for attr_name, attr_value in file_attributes_tuple_list:
        setattr(nc_out, attr_name, attr_value)
    for attr_name, attr_value in pollutant_attributes_tuple_list:
        setattr(nc_out.variables[pollutant_name], attr_name, attr_value)
    nc_out.close()
def save_emissions_to_existing_netcdf(out_filename, emissions_array, pollutant_name, attributes_tuple_list):
    """Append one (time, lat, lon) pollutant variable to an existing netCDF file.

    :param out_filename: path of the netCDF file (opened in append mode)
    :param emissions_array: 3D array shaped (time, lat, lon)
    :param pollutant_name: name of the new variable
    :param attributes_tuple_list: (name, value) tuples set on the variable
    :return: None
    """
    nc_file = nc.Dataset(out_filename, 'a')
    pollutant_var = nc_file.createVariable(pollutant_name, str(emissions_array.dtype)[0], ('time', 'lat', 'lon',))
    pollutant_var[:] = emissions_array[:]
    setattr(pollutant_var, 'pollutant name', pollutant_name)
    for attr_name, attr_value in attributes_tuple_list:
        setattr(pollutant_var, attr_name, attr_value)
    nc_file.close()
def WRF_emission_file_modify(filename_, variable_name, cell_index_west_east, cell_index_south_north, new_value):
    """Overwrite one grid cell of a WRF emissions variable in place.

    Prints the previous cell value before replacing it. Only the first
    time/level slice ([0, 0, :, :]) is touched.

    :param filename_: path of the WRF emissions netCDF file (opened in append mode)
    :param variable_name: name of the 4D variable to modify
    :param cell_index_west_east: column index of the cell
    :param cell_index_south_north: row index of the cell
    :param new_value: value written into the cell
    :return: None
    """
    nc_file = nc.Dataset(filename_, 'a')
    field_2d = nc_file.variables[variable_name][0, 0, :, :].copy()
    print(field_2d[cell_index_south_north, cell_index_west_east])
    field_2d[cell_index_south_north, cell_index_west_east] = new_value
    nc_file.variables[variable_name][0, 0, :, :] = field_2d[:, :]
    nc_file.close()
def find_wrf_3d_cell_from_latlon_to_south_north_west_east(lat_, lon_, wrf_output_filename,
                                                          wrf_lat_variablename='XLAT', wrf_lon_variablename='XLONG',
                                                          flatten_=False):
    """
    Find the WRF grid cell nearest to a given lat/lon point.
    Distance is the euclidean norm in degree space (not great-circle), which is
    adequate for picking the closest cell on a regional grid.
    :param lat_: target latitude
    :param lon_: target longitude
    :param wrf_output_filename: WRF output file holding the coordinate grids
    :param wrf_lat_variablename: name of the 2-D latitude variable (default 'XLAT')
    :param wrf_lon_variablename: name of the 2-D longitude variable (default 'XLONG')
    :param flatten_: if True return the flat index, else the (south_north, west_east) tuple
    """
    wrf_nc = nc.Dataset(wrf_output_filename, 'r')
    lat_grid = wrf_nc.variables[wrf_lat_variablename][:, :].copy()
    lon_grid = wrf_nc.variables[wrf_lon_variablename][:, :].copy()
    wrf_nc.close()
    distance_grid = ((np.abs(lat_grid - lat_) ** 2) + (np.abs(lon_grid - lon_) ** 2)) ** 0.5
    flat_index = np.argmin(distance_grid)
    if flatten_:
        return flat_index
    return np.unravel_index(flat_index, distance_grid.shape)
# specialized tools
def vectorize_array(array_):
    """
    Flatten a 2-D array into an (rows*cols) by 3 table of (row, column, value) triplets.
    Bug fix: the original wrote every column of a given row into output row r_,
    so only the first array_.shape[0] rows were ever filled (each keeping just the
    last column's value) and the remaining rows stayed zero. Each (row, column)
    pair now gets its own output row.
    :param array_: 2-D numpy array
    :return: float array of shape (rows*cols, 3), columns = [row_index, col_index, value]
    """
    output_array = np.zeros((array_.shape[0] * array_.shape[1], 3), dtype=float)
    for r_ in range(array_.shape[0]):
        for c_ in range(array_.shape[1]):
            out_r = (r_ * array_.shape[1]) + c_
            output_array[out_r, 0] = r_
            output_array[out_r, 1] = c_
            output_array[out_r, 2] = array_[r_, c_]
    return output_array
def exceedance_rolling(arr_time_seconds, arr_values, standard_, rolling_period, return_rolling_arrays=False):
    """
    Count rolling-mean exceedances of a standard and list the days they occurred.
    Assumes minute-resolution data in the same units as standard_; data is first
    averaged to hourly means (>= 45 minutes required per hour), then a rolling
    mean of rolling_period hours is compared against standard_.
    :return: total exceedance count and unique exceedance dates; optionally also
             the hourly time stamps and the rolling-mean series.
    """
    ## assumes data is in minutes and in same units as standard
    time_secs_1h, hourly_means = mean_discrete(arr_time_seconds, arr_values, 3600,
                                               arr_time_seconds[0], min_data=45)
    rolling_means = row_average_rolling(hourly_means, rolling_period)
    exceeded_flags = np.zeros(rolling_means.shape[0])
    exceeded_flags[rolling_means > standard_] = 1
    total_number_of_exceedances = np.sum(exceeded_flags)
    # build a date-string row for each hourly stamp (NaN stamps left blank)
    date_strs = np.zeros((time_secs_1h.shape[0], 5), dtype='<U32')
    for r_ in range(time_secs_1h.shape[0]):
        if time_secs_1h[r_] == time_secs_1h[r_]:  # NaN-safe check
            date_strs[r_] = time.strftime("%Y_%m_%d", time.gmtime(time_secs_1h[r_])).split(',')
    exceedance_dates = [date_strs[r_] for r_, rolling_stamp in enumerate(rolling_means)
                        if rolling_stamp > standard_]
    exc_dates_array_unique = np.unique(np.array(exceedance_dates))
    if return_rolling_arrays:
        return total_number_of_exceedances, exc_dates_array_unique, time_secs_1h, rolling_means
    return total_number_of_exceedances, exc_dates_array_unique
# ozonesonde and radiosonde related
def load_sonde_data(filename_, mode_='PBL'): ##Loads data and finds inversions, creates I_
    """
    Load a sonde CSV file and flag temperature inversions.
    Finds the data header row, parses the numeric profile, builds time arrays,
    smooths T/RH/altitude with a +-7-sample rolling mean, computes vertical
    gradients, and flags inversion layers. mode_='PBL' uses thresholds
    L_T > 7 and L_RH < -20 below 4 (altitude units, presumably km — TODO confirm);
    any other mode uses the TSI thresholds L_T > 20, L_RH < -200 between 4 and 8.
    :param filename_: path to the sonde CSV file (comma delimited, -999999 = missing)
    :param mode_: 'PBL' (default) or anything else for TSI thresholds
    :return: H_ (column headers), V_ (float data), time_days, time_seconds,
             I_ (per-row inversion flag), I_line (height/time/thickness per layer),
             L_T, L_RH (vertical gradients of smoothed T and RH)
    """
    # global V_, M_, H_, ASL_, time_header, I_, I_line
    # global ASL_avr, L_T, L_RH, time_string, time_days, time_seconds, year_, flight_name
    ## user defined variables
    delimiter_ = ','
    error_flag = -999999
    first_data_header = 'Day_[GMT]'
    day_column_number = 0
    month_column_number = 1
    year_column_number = 2
    hour_column_number = 3
    minute_column_number = 4
    second_column_number = 5
    # time_header = 'Local Time' # defining time header
    # main data array
    sample_data = filename_
    # look for data start (header size): first line beginning with first_data_header
    with open(sample_data) as file_read:
        header_size = -1
        r_ = 0
        for line_string in file_read:
            if (len(line_string) >= len(first_data_header) and
                        line_string[:len(first_data_header)] == first_data_header):
                header_size = r_
                break
            r_ += 1
        if header_size == -1:
            print('no data found!')
            sys.exit()
    data_array = np.array(genfromtxt(sample_data,
                                     delimiter=delimiter_,
                                     skip_header=header_size,
                                     dtype='<U32'))
    # defining header and data arrays (first 6 columns are date/time components)
    M_ = data_array[1:, 6:].astype(float)
    H_ = data_array[0, 6:]
    ASL_ = M_[:, -1]  # altitude above sea level, last data column
    # year_ = data_array[1, year_column_number]
    ASL_[ASL_ == error_flag] = np.nan
    # defining time arrays: rebuild 'D-M-Y_H:M:S' strings from the six components
    time_str = data_array[1:, 0].astype('<U32')
    for r_ in range(time_str.shape[0]):
        time_str[r_] = (str(data_array[r_ + 1, day_column_number]) + '-' +
                        str(data_array[r_ + 1, month_column_number]) + '-' +
                        str(data_array[r_ + 1, year_column_number]) + '_' +
                        str(data_array[r_ + 1, hour_column_number]) + ':' +
                        str(data_array[r_ + 1, minute_column_number]) + ':' +
                        str(data_array[r_ + 1, second_column_number]))
    time_days = np.array([mdates.date2num(datetime.datetime.utcfromtimestamp(
                                calendar.timegm(time.strptime(time_string_record, '%d-%m-%Y_%H:%M:%S'))))
                          for time_string_record in time_str])
    time_seconds = time_days_to_seconds(time_days)
    V_ = M_.astype(float)
    V_[V_ == error_flag] = np.nan
    T_avr = np.ones(V_[:, 1].shape)
    RH_avr = np.ones(V_[:, 1].shape)
    ASL_avr = np.ones(V_[:, 1].shape)
    L_T = np.zeros(V_[:, 1].shape)
    L_RH = np.zeros(V_[:, 1].shape)
    I_ = np.zeros(V_[:, 1].shape)
    I_[:] = np.nan
    # rolling average of T RH and ASL (columns 1, 2 and last — TODO confirm column meaning)
    mean_size = 7  # 5
    for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
        T_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 1])
        RH_avr[r_] = np.nanmean(V_[r_ - mean_size: r_ + mean_size, 2])
        ASL_avr[r_] = np.nanmean(ASL_[r_ - mean_size: r_ + mean_size])
    # forward-difference vertical gradients of the smoothed profiles
    for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
        if (ASL_avr[r_ + 1] - ASL_avr[r_]) > 0:
            L_T[r_] = ((T_avr[r_ + 1] - T_avr[r_]) /
                       (ASL_avr[r_ + 1] - ASL_avr[r_]))
            L_RH[r_] = ((RH_avr[r_ + 1] - RH_avr[r_]) /
                        (ASL_avr[r_ + 1] - ASL_avr[r_]))
    # define location of inversion
    # PBL or TSI
    if mode_ == 'PBL':
        for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
            if L_T[r_] > 7 and L_RH[r_] < -20:  # PBL = 7,20 / TSI = 20,200
                I_[r_] = 1
        # get one of I_ only per layer: walk up until T drops temperature_gap below the layer base
        temperature_gap = .4  # kilometres
        I_line = np.zeros((1, 3))  # height, time, intensity
        if np.nansum(I_) > 1:
            r_ = -1
            while r_ < I_.shape[0] - mean_size:
                r_ += 1
                if I_[r_] == 1 and ASL_avr[r_] < 4:
                    layer_temp = T_avr[r_]
                    layer_h = ASL_avr[r_]
                    layer_time = time_seconds[r_]
                    for rr_ in range(r_, I_.shape[0] - mean_size):
                        if T_avr[rr_] < layer_temp - temperature_gap:
                            delta_h = ASL_avr[rr_] - layer_h
                            altitude_ = layer_h
                            stanking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stanking_temp))
                            r_ = rr_
                            break
            if np.max(I_line[:, 0]) != 0:
                I_line = I_line[1:, :]  # drop the initial all-zero seed row
            else:
                I_line[:, :] = np.nan
    else:
        # TSI mode: stronger thresholds, layers restricted to 4-8 altitude units
        for r_ in range(mean_size, V_[:, 1].shape[0] - mean_size):
            if L_T[r_] > 20 and L_RH[r_] < -200:  # PBL = 7,20 / TSI = 20,200
                I_[r_] = 1
        # get one of I_ only per layer
        temperature_gap = .4  # kilometres
        I_line = np.zeros((1, 3))  # height, time, intensity
        if np.nansum(I_) > 1:
            r_ = -1
            while r_ < I_.shape[0] - mean_size:
                r_ += 1
                if I_[r_] == 1 and 4 < ASL_avr[r_] < 8:
                    layer_temp = T_avr[r_]
                    layer_h = ASL_avr[r_]
                    layer_time = time_seconds[r_]
                    for rr_ in range(r_, I_.shape[0] - mean_size):
                        if T_avr[rr_] < layer_temp - temperature_gap:
                            delta_h = ASL_avr[rr_] - layer_h
                            altitude_ = layer_h
                            stanking_temp = np.array([altitude_, layer_time, delta_h])
                            I_line = np.row_stack((I_line, stanking_temp))
                            r_ = rr_
                            break
            if np.max(I_line[:, 0]) != 0:
                I_line = I_line[1:, :]
            else:
                I_line[:, :] = np.nan
    return H_, V_, time_days, time_seconds, I_, I_line, L_T, L_RH
def plot_X1_X2_Y(X1_blue, X2_green, Y):
    """
    Scatter two X series against a shared Y axis on twinned x-axes.
    X1_blue is drawn in blue on the primary axis, X2_green in green on a
    twiny() axis; a vertical line at x=0 is added to both.
    Bug fix: ax1 used Axes.plot with scatter-only keyword arguments
    (s=, edgecolor=), which matplotlib rejects; Axes.scatter is used
    instead, matching the ax2 call.
    """
    fig, ax1 = plt.subplots()
    ax2 = ax1.twiny()
    ax1.scatter(X1_blue, Y, s=5, color='b', edgecolor='none')
    ax1.axvline(0, c='k')
    ax2.scatter(X2_green, Y, s=5, color='g', edgecolor='none')
    ax2.axvline(0, c='k')
    plt.show()
def plot_T_RH_I_(V_, I_line):
    """
    Plot temperature and RH profiles against altitude with inversion heights.
    Temperature (column 1) goes on the primary x-axis in blue, RH (column 2)
    on a twinned x-axis in green; each row of I_line adds a red horizontal
    line at the inversion base height.
    """
    fig, ax1 = plt.subplots()
    ax2 = ax1.twiny()
    altitude_values = V_[:, -1]
    ax1.set_ylabel('ASL')
    ax1.set_xlabel('Temp')
    ax2.set_xlabel('RH')
    ax1.scatter(V_[:, 1], altitude_values, s=5, color='b', edgecolor='none')
    ax1.axvline(0, c='k')
    ax2.scatter(V_[:, 2], altitude_values, s=5, color='g', edgecolor='none')
    ax2.axvline(0, c='k')
    for inversion_row in range(I_line.shape[0]):
        plt.axhline(I_line[inversion_row, 0], c='r')
    plt.show()
def plot_ThetaVirtual_I_(V_, I_line):
    """
    Plot the virtual potential temperature profile with inversion heights.
    Column 5 of V_ (virtual potential temperature, K) is scattered against the
    last column (altitude ASL); each row of I_line adds a red horizontal line.
    """
    fig, ax1 = plt.subplots()
    altitude_values = V_[:, -1]
    ax1.set_ylabel('ASL')
    ax1.set_xlabel('Virtual Potential Temperature [K]')
    ax1.scatter(V_[:, 5], altitude_values, s=5, color='b', edgecolor='none')
    for inversion_row in range(I_line.shape[0]):
        plt.axhline(I_line[inversion_row, 0], c='r')
    plt.show()
def last_lat_lon_alt_ozonesonde(filename_):
    """
    Return values from the final record of an ozonesonde CSV file.
    Skips the 23-line header and returns columns 31, 32, 33 and 0 of the last
    row as strings (presumably lat, lon, alt and time — TODO confirm against
    the file header).
    """
    sonde_table = genfromtxt(filename_, delimiter=',', dtype='<U32', skip_header=23)
    last_row = sonde_table[-1]
    return last_row[31], last_row[32], last_row[33], last_row[0]
def load_khancoban_sondes(filename_):
    """
    Parse one Khancoban sonde text file into a dict of named profiles.
    File layout (0-based line numbers): lines 17-35 hold 'name unit' pairs,
    numeric data rows start at line 39. Returns a dict with 'filename',
    'date' ('20' + filename tail past the 2-char prefix) and, per parameter
    name, a sub-dict with 'data' (float column) and 'units'.
    """
    dict_ = {}
    dict_['filename'] = filename_.split('\\')[-1]
    dict_['date'] = '20' + filename_.split('\\')[-1][2:]
    names_ = []
    units_ = []
    raw_rows = []
    with open(filename_) as file_object:
        for line_number, line in enumerate(file_object):
            parts = line.split()
            if 17 <= line_number <= 35:
                names_.append(parts[0])
                units_.append(parts[1])
            elif line_number >= 39 and len(parts) > 1:
                raw_rows.append(parts)
    profile_array = np.zeros((len(raw_rows), len(raw_rows[0])), dtype=float)
    for r_, row in enumerate(raw_rows):
        profile_array[r_, :] = row
    for c_, name in enumerate(names_):
        dict_[name] = {'data': profile_array[:, c_], 'units': units_[c_]}
    return dict_
def convert_khan_sonde_data_to_skewt_dict(khan_dict, sonde_name):
    """
    Repackage one Khancoban sounding into the dict layout used for skew-T plots.
    Converts T/TD from Kelvin to Celsius and wind speed from m/s to knots, and
    builds absolute sample times (seconds since epoch) from the launch date
    plus the per-sample 'time' offsets.
    """
    # absolute sample times: launch epoch + per-sample offsets
    launch_epoch = time_str_to_seconds(khan_dict[sonde_name]['date'], '%Y%m%d.0%H')
    sample_times = launch_epoch + khan_dict[sonde_name]['time']['data']
    sonde_ = khan_dict[sonde_name]
    keys_ = ('hght', 'pres', 'temp', 'dwpt', 'sknt', 'drct', 'relh', 'time', 'lati', 'long')
    vals_ = (sonde_['Height']['data'],
             sonde_['P']['data'],
             kelvin_to_celsius(sonde_['T']['data']),
             kelvin_to_celsius(sonde_['TD']['data']),
             ws_ms_to_knots(sonde_['FF']['data']),
             sonde_['DD']['data'],
             sonde_['RH']['data'],
             sample_times,
             sonde_['Lat']['data'],
             sonde_['Lon']['data'])
    return dict(zip(keys_, vals_))
# data averaging
def average_all_data_files(filename_, number_of_seconds, WD_index = None, WS_index = None,
                           min_data_number=None, cumulative_parameter_list=None):
    """
    Average a whole time-series file into fixed windows and save the result.
    Optionally treats one wind direction / wind speed column pair specially by
    averaging in cartesian components (avoiding the 360/0 wrap) and converting
    back to polar afterwards. Output is written next to the input as
    '<name>_<minutes>_minute_mean.csv'.
    :param filename_: input file readable by load_time_columns
    :param number_of_seconds: averaging window length in seconds
    :param WD_index, WS_index: column indices of wind direction/speed (optional)
    :param min_data_number: minimum samples per window (default: 75% of window minutes)
    :param cumulative_parameter_list: column indices to be summed instead of averaged
    """
    header_, values_ = load_time_columns(filename_)
    time_sec = time_days_to_seconds(values_[:, 0])
    wind_requested = WD_index is not None and WS_index is not None
    if wind_requested:
        print('wind averaging underway for parameters: ' + header_[WD_index] + ' and ' + header_[WS_index])
        # converting wind parameters to cartesian
        North_, East_ = polar_to_cart(values_[:, WD_index], values_[:, WS_index])
        values_[:, WD_index] = North_
        values_[:, WS_index] = East_
    # averaging (first two columns are time columns, hence the -2 offsets)
    if min_data_number is None:
        min_data_number = int(number_of_seconds / 60 * .75)
    if cumulative_parameter_list is None:
        cumulative_idx = None
    else:
        cumulative_idx = np.array(cumulative_parameter_list) - 2
    Index_mean, Values_mean = mean_discrete(time_sec, values_[:, 2:], number_of_seconds,
                                            time_sec[0], min_data=min_data_number,
                                            cumulative_parameter_indx=cumulative_idx)
    if wind_requested:
        # converting wind parameters back to polar
        WD_, WS_ = cart_to_polar(Values_mean[:, WD_index - 2], Values_mean[:, WS_index - 2])
        Values_mean[:, WD_index - 2] = WD_
        Values_mean[:, WS_index - 2] = WS_
    output_filename = filename_.split('.')[0]
    output_filename += '_' + str(int(number_of_seconds / 60)) + '_minute_mean' + '.csv'
    save_array_to_disk(header_[2:], Index_mean, Values_mean, output_filename)
    print('Done!')
    print('saved at: ' + output_filename)
def median_discrete(Index_, Values_, avr_size, first_index, min_data=1, position_=0.0):
    """
    Median Values_ over discrete windows of the index.
    Collects values whose index falls in [Index_averaged[n], Index_averaged[n] + avr_size)
    and returns their median, 25th/75th percentiles and standard deviation.
    :param Index_: n by 1 numpy array with the position of each sample
    :param Values_: n by m (or n,) numpy array of values to be aggregated
    :param avr_size: window width, in the same units as Index_
    :param first_index: first discrete index of the output series
    :param min_data: minimum samples per window for stats to be computed (default 1)
    :param position_: stamp location within the window; 0 = beginning, .5 = mid, 1 = top (default 0)
    :return: Index_averaged, Values_median, Values_25pr, Values_75pr, Std_
             (windows with fewer than min_data samples stay NaN)
    Bug fixes: the 1-D branch used to fill the 25th/75th percentile outputs with
    the median (copy-paste error; the 2-D branch shows percentiles are intended),
    and the 2-D branch computed np.nanstd(Values_[a:b], c_), passing the column
    number as the *axis* argument. Both now match the 2-D percentile pattern.
    """
    # sort index and values together if the index is not already ascending,
    # so the single forward scan below works (NaN-safe comparison)
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        MM_ = np.column_stack((Index_,Values_))
        MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
        Index_ = MM_sorted[:,0]
        Values_ = MM_sorted[:,1:]
    # error checking! (legacy behavior: returns a 2-tuple of None on bad input)
    if Index_.shape[0] != Values_.shape[0]:
        return None, None
    if Index_[-1] < first_index:
        return None, None
    if min_data < 1:
        return None, None
    # initialize output matrices (NaN-filled)
    final_index = np.nanmax(Index_)
    total_averaged_rows = int((final_index-first_index)/avr_size) + 1
    if len(Values_.shape) == 1:
        Values_median = np.zeros(total_averaged_rows)
        Values_median[:] = np.nan
    else:
        Values_median = np.zeros((total_averaged_rows,Values_.shape[1]))
        Values_median[:,:] = np.nan
    Index_averaged = np.zeros(total_averaged_rows)
    for r_ in range(total_averaged_rows):
        Index_averaged[r_] = first_index + (r_ * avr_size)
    # shift window edges so the output stamp lands at the requested position
    Index_averaged -= (position_ * avr_size)
    Values_25pr = np.array(Values_median)
    Values_75pr = np.array(Values_median)
    Std_ = np.array(Values_median)
    indx_avr_r = -1
    last_raw_r = 0  # resume point of the raw scan (input is sorted)
    r_raw_a = 0
    r_raw_b = 1
    while indx_avr_r <= total_averaged_rows-2:
        indx_avr_r += 1
        indx_a = Index_averaged[indx_avr_r]
        indx_b = Index_averaged[indx_avr_r] + avr_size
        stamp_population = 0
        for r_raw in range(last_raw_r,Index_.shape[0]):
            if indx_a <= Index_[r_raw] < indx_b:
                if stamp_population == 0: r_raw_a = r_raw
                r_raw_b = r_raw + 1
                stamp_population += 1
            if Index_[r_raw] >= indx_b:
                last_raw_r = r_raw
                break
        if stamp_population >= min_data:
            if len(Values_.shape) == 1:
                Values_median[indx_avr_r] = np.nanmedian(Values_[r_raw_a:r_raw_b])
                Values_25pr[indx_avr_r] = np.nanpercentile(Values_[r_raw_a:r_raw_b], 25)
                Values_75pr[indx_avr_r] = np.nanpercentile(Values_[r_raw_a:r_raw_b], 75)
                Std_[indx_avr_r] = np.nanstd(Values_[r_raw_a:r_raw_b])
            else:
                for c_ in range(Values_.shape[1]):
                    Values_median[indx_avr_r,c_] = np.nanmedian(Values_[r_raw_a:r_raw_b,c_])
                    Values_25pr[indx_avr_r,c_] = np.nanpercentile(Values_[r_raw_a:r_raw_b,c_],25)
                    Values_75pr[indx_avr_r,c_] = np.nanpercentile(Values_[r_raw_a:r_raw_b,c_],75)
                    Std_[indx_avr_r,c_] = np.nanstd(Values_[r_raw_a:r_raw_b,c_])
    # undo the position shift so returned stamps are at the requested location
    Index_averaged = Index_averaged + (position_ * avr_size)
    return Index_averaged,Values_median,Values_25pr,Values_75pr, Std_
def mean_discrete(Index_, Values_, avr_size, first_index,
                  min_data=1, position_=0., cumulative_parameter_indx=None, last_index=None, show_progress=True):
    """
    this will average values from Values_ that are between Index_[n:n+avr_size)
    :param Index_: n by 1 numpy array to look for position,
    :param Values_: n by m numpy array, values to be averaged
    :param avr_size: in same units as Index_
    :param first_index: is the first discrete index on new arrays.
    :param min_data: is minimum amount of data for average to be made (optional, default = 1)
    :param position_: will determine where is the stamp located; 0 = beginning, .5 = mid, 1 = top (optional, default = 0)
    :param cumulative_parameter_indx: in case there is any column in Values_ to be summed, not averaged. Most be a list
    :param last_index: in case you want to force the returned series to some fixed period/length
    :param show_progress: if True, prints a progress bar via p_progress_bar during the scan
    :return: Index_averaged, Values_averaged (windows with fewer than min_data samples stay NaN)
    """
    # checking if always ascending to increase efficiency
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:  # NaN-safe: NaN != NaN
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        # sort index and values together so the single forward scan below works
        MM_ = np.column_stack((Index_,Values_))
        MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
        Index_ = MM_sorted[:,0]
        Values_ = MM_sorted[:,1:]
    # error checking!
    if Index_.shape[0] != Values_.shape[0]:
        print('error during shape check! Index_.shape[0] != Values_.shape[0]')
        return None, None
    if Index_[-1] < first_index:
        print('error during shape check! Index_[-1] < first_index')
        return None, None
    if min_data < 1:
        print('error during shape check! min_data < 1')
        return None, None
    # initialize averaged matrices (NaN-filled; rows stay NaN if under-populated)
    if last_index is None:
        final_index = np.nanmax(Index_)
    else:
        final_index = last_index
    total_averaged_rows = int((final_index-first_index)/avr_size) + 1
    if len(Values_.shape) == 1:
        Values_mean = np.zeros(total_averaged_rows)
        Values_mean[:] = np.nan
    else:
        Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
        Values_mean[:,:] = np.nan
    Index_averaged = np.zeros(total_averaged_rows)
    for r_ in range(total_averaged_rows):
        Index_averaged[r_] = first_index + (r_ * avr_size)
    # shift window edges so the output stamp lands at the requested position
    Index_averaged -= (position_ * avr_size)
    indx_avr_r = -1
    last_raw_r = 0  # resume point of the raw scan (input is sorted, never look back)
    r_raw_a = 0
    r_raw_b = 1
    while indx_avr_r <= total_averaged_rows-2:
        if show_progress: p_progress_bar(indx_avr_r, total_averaged_rows-2, extra_text='averaged')
        indx_avr_r += 1
        indx_a = Index_averaged[indx_avr_r]
        indx_b = Index_averaged[indx_avr_r] + avr_size
        stamp_population = 0
        for r_raw in range(last_raw_r,Index_.shape[0]):
            if indx_a <= Index_[r_raw] < indx_b:
                if stamp_population == 0: r_raw_a = r_raw
                r_raw_b = r_raw + 1
                stamp_population += 1
            if Index_[r_raw] >= indx_b:
                last_raw_r = r_raw
                break
        if stamp_population >= min_data:
            if len(Values_.shape) == 1:
                if cumulative_parameter_indx is not None:
                    # 1-D input: any non-None cumulative index means "sum, don't average"
                    Values_mean[indx_avr_r] = np.nansum(Values_[r_raw_a:r_raw_b])
                else:
                    Values_mean[indx_avr_r] = np.nanmean(Values_[r_raw_a:r_raw_b])
            else:
                for c_ in range(Values_.shape[1]):
                    if cumulative_parameter_indx is not None:
                        if c_ in cumulative_parameter_indx:
                            Values_mean[indx_avr_r, c_] = np.nansum(Values_[r_raw_a:r_raw_b, c_])
                        else:
                            Values_mean[indx_avr_r, c_] = np.nanmean(Values_[r_raw_a:r_raw_b, c_])
                    else:
                        Values_mean[indx_avr_r,c_] = np.nanmean(Values_[r_raw_a:r_raw_b,c_])
    # undo the position shift so returned stamps are at the requested location
    Index_averaged = Index_averaged + (position_ * avr_size)
    return Index_averaged,Values_mean
def mean_discrete_std(Index_, Values_, avr_size, first_index, min_data=1, position_=0.):
    """
    Average Values_ over discrete windows of the index, returning mean and std.
    Collects values whose index falls in [Index_averaged[n], Index_averaged[n] + avr_size).
    :param Index_: n by 1 numpy array with the position of each sample
    :param Values_: n by m (or n,) numpy array of values to be averaged
    :param avr_size: window width, in the same units as Index_
    :param first_index: first discrete index of the output series
    :param min_data: minimum samples per window for stats to be computed (default 1)
    :param position_: stamp location within the window; 0 = beginning, .5 = mid, 1 = top (default 0)
    :return: Index_averaged, Values_mean, Std_ (under-populated windows stay NaN)
    Bug fix: the per-column standard deviation used to be computed as
    np.nanstd(Values_[a:b], c_), which passes the column number as the *axis*
    argument (crashing for c_ >= 2 and assigning a whole row otherwise); it is
    now the std of the single column, matching the mean computation.
    """
    # sort index and values together if the index is not already ascending,
    # so the single forward scan below works (NaN-safe comparison)
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        MM_ = np.column_stack((Index_,Values_))
        MM_sorted = MM_[MM_[:,0].argsort()] # sort by first column
        Index_ = MM_sorted[:,0]
        Values_ = MM_sorted[:,1:]
    # error checking! (legacy behavior: returns a 2-tuple of None on bad input)
    if Index_.shape[0] != Values_.shape[0]:
        return None, None
    if Index_[-1] < first_index:
        return None, None
    if min_data < 1:
        return None, None
    # initialize output matrices (NaN-filled)
    final_index = np.nanmax(Index_)
    total_averaged_rows = int((final_index-first_index)/avr_size) + 1
    if len(Values_.shape) == 1:
        Values_mean = np.zeros(total_averaged_rows)
        Values_mean[:] = np.nan
    else:
        Values_mean = np.zeros((total_averaged_rows,Values_.shape[1]))
        Values_mean[:,:] = np.nan
    Index_averaged = np.zeros(total_averaged_rows)
    for r_ in range(total_averaged_rows):
        Index_averaged[r_] = first_index + (r_ * avr_size)
    # shift window edges so the output stamp lands at the requested position
    Index_averaged -= (position_ * avr_size)
    Std_ = np.array(Values_mean)
    indx_avr_r = -1
    last_raw_r = 0  # resume point of the raw scan (input is sorted)
    r_raw_a = 0
    r_raw_b = 1
    while indx_avr_r <= total_averaged_rows-2:
        indx_avr_r += 1
        indx_a = Index_averaged[indx_avr_r]
        indx_b = Index_averaged[indx_avr_r] + avr_size
        stamp_population = 0
        for r_raw in range(last_raw_r,Index_.shape[0]):
            if indx_a <= Index_[r_raw] < indx_b:
                if stamp_population == 0: r_raw_a = r_raw
                r_raw_b = r_raw + 1
                stamp_population += 1
            if Index_[r_raw] >= indx_b:
                last_raw_r = r_raw
                break
        if stamp_population >= min_data:
            if len(Values_.shape) == 1:
                Values_mean[indx_avr_r] = np.nanmean(Values_[r_raw_a:r_raw_b])
                Std_[indx_avr_r] = np.nanstd(Values_[r_raw_a:r_raw_b])
            else:
                for c_ in range(Values_.shape[1]):
                    Values_mean[indx_avr_r,c_] = np.nanmean(Values_[r_raw_a:r_raw_b,c_])
                    Std_[indx_avr_r,c_] = np.nanstd(Values_[r_raw_a:r_raw_b,c_])
    # undo the position shift so returned stamps are at the requested location
    Index_averaged = Index_averaged + (position_ * avr_size)
    return Index_averaged,Values_mean,Std_
def sum_discrete_3D_array(Index_, array_3D, sum_size, first_index, min_data=1, position_=0.):
    """
    Sum slabs of a 3-D array along axis 0 over discrete windows of the index.
    Slabs whose index falls in [Index_summed[n], Index_summed[n] + sum_size) are
    nansum-ed together; windows with fewer than min_data slabs stay NaN.
    :param Index_: n by 1 numpy array with the (always ascending) position of each slab
    :param array_3D: n by rows by cols numpy array; axis 0 must match Index_
    :param sum_size: window width, in the same units as Index_
    :param first_index: first discrete index of the output series
    :param min_data: minimum number of slabs required per window (default 1)
    :param position_: stamp location within the window; 0 = beginning, .5 = mid, 1 = top (default 0)
    :return: Index_summed, Values_sum; (None, None) on invalid input
    """
    # checking if always ascending to increase efficiency
    always_ascending = 1
    for x in range(Index_.shape[0]-1):
        if Index_[x]==Index_[x] and Index_[x+1]==Index_[x+1]:  # NaN-safe comparison
            if Index_[x+1] < Index_[x]:
                always_ascending = 0
    if always_ascending == 0:
        # unlike mean_discrete, this routine does not sort; it bails out instead
        print('Error, index must always be ascending')
        return None, None
    # error checking!
    if Index_.shape[0] != array_3D.shape[0]:
        print('Error, axes 0 of 3D array must be equal to Index size')
        return None, None
    if Index_[-1] < first_index:
        print('Error, first')
        return None, None
    # initialize summed matrices (NaN-filled)
    final_index = np.nanmax(Index_)
    total_summed_rows = int((final_index-first_index)/sum_size) + 1
    Values_sum = np.zeros((total_summed_rows, array_3D.shape[1], array_3D.shape[2]))
    Values_sum[:,:,:] = np.nan
    Index_summed = np.zeros(total_summed_rows)
    for r_ in range(total_summed_rows):
        Index_summed[r_] = first_index + (r_ * sum_size)
    # shift window edges so the output stamp lands at the requested position
    Index_summed -= (position_ * sum_size)
    indx_sum_r = -1
    last_raw_r = 0  # resume point of the raw scan (input is ascending)
    r_raw_a = 0
    r_raw_b = 1
    while indx_sum_r <= total_summed_rows-2:
        indx_sum_r += 1
        indx_a = Index_summed[indx_sum_r]
        indx_b = Index_summed[indx_sum_r] + sum_size
        stamp_population = 0
        for r_raw in range(last_raw_r,Index_.shape[0]):
            if indx_a <= Index_[r_raw] < indx_b:
                if stamp_population == 0: r_raw_a = r_raw
                r_raw_b = r_raw + 1
                stamp_population += 1
            if Index_[r_raw] >= indx_b:
                last_raw_r = r_raw
                break
        if stamp_population >= min_data:
            Values_sum[indx_sum_r,:,:] = np.nansum(array_3D[r_raw_a:r_raw_b,:,:],axis=0)
    # undo the position shift so returned stamps are at the requested location
    Index_summed = Index_summed + (position_ * sum_size)
    return Index_summed,Values_sum
def row_average_rolling(arr_, average_size):
    """
    Rolling nanmean along axis 0 with the stamp at the window start.
    The last (average_size - 1) positions cannot host a full window and are
    left as NaN.
    """
    window = int(average_size)
    out = np.array(arr_) * np.nan
    for start in range(arr_.shape[0] - window + 1):
        out[start] = np.nanmean(arr_[start: start + average_size])
    return out
def row_average_discrete_1D(arr_, average_size):
    """
    Block-average a 1-D array: each output value is the nanmean of the next
    average_size input values; any trailing remainder is dropped.
    """
    n_out = int(arr_.shape[0] / average_size)
    out = np.zeros(n_out) * np.nan
    for k in range(n_out):
        start = int(k * average_size)
        out[k] = np.nanmean(arr_[start: start + average_size], axis=0)
    return out
def row_average_discrete_2D(arr_, average_size):
    """
    Block-average a 2-D array along axis 0: each output row is the nanmean of
    the next average_size input rows; any trailing remainder is dropped.
    """
    n_out = int(arr_.shape[0] / average_size)
    out = np.zeros((n_out, arr_.shape[1])) * np.nan
    for k in range(n_out):
        start = int(k * average_size)
        out[k, :] = np.nanmean(arr_[start: start + average_size], axis=0)
    return out
def row_average_discrete_3D(arr_, average_size):
    """
    Block-average a 3-D array along axis 0: each output slab is the nanmean of
    the next average_size input slabs; any trailing remainder is dropped.
    """
    n_out = int(arr_.shape[0] / average_size)
    out = np.zeros((n_out, arr_.shape[1], arr_.shape[2])) * np.nan
    for k in range(n_out):
        start = int(k * average_size)
        out[k, :, :] = np.nanmean(arr_[start: start + average_size], axis=0)
    return out
def column_average_discrete_2D(arr_, average_size):
    """
    Block-average a 2-D array along axis 1: each output column is the nanmean
    of the next average_size input columns; any trailing remainder is dropped.
    """
    n_out = int(arr_.shape[1] / average_size)
    out = np.zeros((arr_.shape[0], n_out)) * np.nan
    for k in range(n_out):
        start = int(k * average_size)
        out[:, k] = np.nanmean(arr_[:, start: start + average_size], axis=1)
    return out
def column_average_discrete_3D(arr_, average_size):
    """
    Block-average a 3-D array along axis 1: each output slice is the nanmean
    of the next average_size input slices; any trailing remainder is dropped.
    """
    n_out = int(arr_.shape[1] / average_size)
    out = np.zeros((arr_.shape[0], n_out, arr_.shape[2])) * np.nan
    for k in range(n_out):
        start = int(k * average_size)
        out[:, k, :] = np.nanmean(arr_[:, start: start + average_size, :], axis=1)
    return out
def average_all_data_files_monthly(filename_, number_of_seconds, min_data_number = None,
                                   WD_index = None, WS_index = None, cumulative_parameter_list=None):
    """
    Average a time-stamped data file into fixed windows and save the result.
    NOTE(review): despite the name, this body is identical to
    average_all_data_files and performs no calendar-month aggregation — the
    window is always number_of_seconds and the output file is still named
    '<name>_<minutes>_minute_mean.csv'. Confirm whether monthly logic is
    missing or the name is a leftover.
    :param filename_: input file readable by load_time_columns
    :param number_of_seconds: averaging window length in seconds
    :param min_data_number: minimum samples per window (default: 75% of window minutes)
    :param WD_index, WS_index: column indices of wind direction/speed (optional)
    :param cumulative_parameter_list: column indices to be summed instead of averaged
    """
    header_, values_ = load_time_columns(filename_)
    time_sec = time_days_to_seconds(values_[:,0])
    # wind tratment
    if WD_index is not None and WS_index is not None:
        print('wind averaging underway for parameters: ' + header_[WD_index] + ' and ' + header_[WS_index])
        # converting wind parameters to cartesian (avoids the 360/0 wrap during averaging)
        WD_ = values_[:,WD_index]
        WS_ = values_[:,WS_index]
        North_, East_ = polar_to_cart(WD_, WS_)
        values_[:,WD_index] = North_
        values_[:,WS_index] = East_
    # averaging (first two columns are time columns, hence the -2 offsets)
    if min_data_number is None: min_data_number = int(number_of_seconds/60 * .75)
    if cumulative_parameter_list is None:
        Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
                                               time_sec[0], min_data = min_data_number,
                                               cumulative_parameter_indx= None)
    else:
        Index_mean,Values_mean = mean_discrete(time_sec, values_[:,2:], number_of_seconds,
                                               time_sec[0], min_data = min_data_number,
                                               cumulative_parameter_indx=np.array(cumulative_parameter_list) - 2)
    if WD_index is not None and WS_index is not None:
        # converting wind parameters to polar
        North_ = Values_mean[:,WD_index - 2]
        East_ = Values_mean[:,WS_index - 2]
        WD_, WS_ = cart_to_polar(North_, East_)
        Values_mean[:,WD_index - 2] = WD_
        Values_mean[:,WS_index - 2] = WS_
    output_filename = filename_.split('.')[0]
    output_filename += '_' + str(int(number_of_seconds/60)) + '_minute_mean' + '.csv'
    save_array_to_disk(header_[2:], Index_mean, Values_mean, output_filename)
    print('Done!')
    print('saved at: ' + output_filename)
def rolling_window(array_, window_size):
    """
    Return a zero-copy strided view of sliding windows over the last axis.
    Output shape is array_.shape[:-1] + (n - window_size + 1, window_size).
    The view shares memory with array_; do not write to it unless intended.
    """
    out_shape = array_.shape[:-1] + (array_.shape[-1] - window_size + 1, window_size)
    out_strides = array_.strides + (array_.strides[-1],)
    return np.lib.stride_tricks.as_strided(array_, shape=out_shape, strides=out_strides)
import datetime as dt
from io import StringIO
import logging
import numpy as np
import os
import pytest
import warnings
import aacgmv2
class TestConvertArray:
    """Shared helper base for array-conversion tests (holds out/ref state)."""

    def setup(self):
        """Create a clean comparison state before each test."""
        self.out = None
        self.ref = None
        self.rtol = 1.0e-4

    def teardown(self):
        """Drop the comparison state after each test."""
        del self.out, self.ref, self.rtol

    def evaluate_output(self, ind=None):
        """ Function used to evaluate convert_latlon_arr output"""
        if self.out is None:
            return
        if ind is not None:
            # keep only element `ind` of every reference row
            self.ref = [[row[ind]] for row in self.ref]
        np.testing.assert_equal(len(self.out), len(self.ref))
        for got, expected in zip(self.out, self.ref):
            if not isinstance(got, np.ndarray):
                raise TypeError("output value is not a numpy array")
            np.testing.assert_equal(len(got), len(expected))
            np.testing.assert_allclose(got, expected, rtol=self.rtol)
class TestConvertLatLon:
    """Unit tests for aacgmv2.convert_latlon with scalar inputs.
    NOTE(review): nose-style setup/teardown methods are deprecated in recent
    pytest releases — consider renaming to setup_method/teardown_method.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.in_args = [60, 0]  # base lat/lon; tests extend with alt/time/method
        self.out = None
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.in_args, self.rtol, self.dtime, self.ddate
    @pytest.mark.parametrize('alt,method_code,ref',
                             [(300, 'TRACE', [58.2268, 81.1613, 1.0457]),
                              (3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
                                                       1.4694]),
                              (7000.0, "G2A|TRACE|BADIDEA",
                               [69.3187, 85.0845, 2.0973])])
    def test_convert_latlon(self, alt, method_code, ref):
        """Test single value latlon conversion"""
        self.in_args.extend([alt, self.dtime, method_code])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    @pytest.mark.parametrize('lat,ref',
                             [(90.01, [83.927161, 170.1471396, 1.04481923]),
                              (-90.01, [-74.9814852, 17.990332, 1.044819236])])
    def test_convert_latlon_high_lat(self, lat, ref):
        """Test single latlon conversion with latitude just out of bounds"""
        # latitudes within 90 +/- 0.1 deg are expected to be clipped, not rejected
        self.in_args[0] = lat
        self.in_args.extend([300, self.dtime, 'G2A'])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    def test_convert_latlon_datetime_date(self):
        """Test single latlon conversion with date and datetime input"""
        self.in_args.extend([300, self.ddate, 'TRACE'])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        np.testing.assert_allclose(self.out, [58.2268, 81.1613, 1.0457],
                                   rtol=self.rtol)
    def test_convert_latlon_location_failure(self):
        """Test single value latlon conversion with a bad location"""
        self.out = aacgmv2.convert_latlon(0, 0, 0, self.dtime, self.in_args[-1])
        assert np.all(np.isnan(np.array(self.out)))
    def test_convert_latlon_maxalt_failure(self):
        """test convert_latlon failure for an altitude too high for coeffs"""
        self.in_args.extend([2001, self.dtime, ""])
        self.out = aacgmv2.convert_latlon(*self.in_args)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_rep,in_irep,msg',
                             [(None, 3, "must be a datetime object"),
                              (91, 0, "unrealistic latitude"),
                              (-91, 0, "unrealistic latitude"),
                              (None, 4, "unknown method code")])
    def test_convert_latlon_failure(self, in_rep, in_irep, msg):
        """Test ValueError raised for malformed inputs (bad time/lat/method)"""
        self.in_args.extend([300, self.dtime, "G2A"])
        self.in_args[in_irep] = in_rep
        with pytest.raises(ValueError, match=msg):
            aacgmv2.convert_latlon(*self.in_args)
class TestConvertLatLonArr(TestConvertArray):
    """Unit tests for aacgmv2.convert_latlon_arr with array-like inputs.
    Inherits evaluate_output from TestConvertArray, which compares self.out
    against self.ref element-wise with relative tolerance self.rtol.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.lat_in = [60.0, 61.0]
        self.lon_in = [0.0, 0.0]
        self.alt_in = [300.0, 300.0]
        self.method = 'TRACE'
        self.out = None
        # expected [lat, lon, r] outputs for the two input points
        self.ref = [[58.2268, 59.3184], [81.1613, 81.6080], [1.0457, 1.0456]]
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.lat_in, self.lon_in, self.alt_in, self.dtime, self.ddate
        del self.method, self.out, self.ref, self.rtol
    def test_convert_latlon_arr_single_val(self):
        """Test array latlon conversion for a single value"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              self.alt_in[0], self.dtime,
                                              self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_arr_single(self):
        """Test array latlon conversion for array input of shape (1,)"""
        self.out = aacgmv2.convert_latlon_arr(np.array([self.lat_in[0]]),
                                              np.array([self.lon_in[0]]),
                                              np.array([self.alt_in[0]]),
                                              self.dtime, self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_list_single(self):
        """Test array latlon conversion for list input of single values"""
        self.out = aacgmv2.convert_latlon_arr([self.lat_in[0]],
                                              [self.lon_in[0]],
                                              [self.alt_in[0]], self.dtime,
                                              self.method)
        self.evaluate_output(ind=0)
    def test_convert_latlon_arr_list(self):
        """Test array latlon conversion for list input"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.dtime,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr(self):
        """Test array latlon conversion for array input"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              np.array(self.lon_in),
                                              np.array(self.alt_in),
                                              self.dtime, self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_list_mix(self):
        """Test array latlon conversion for mixed types with list"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in[0],
                                              self.alt_in[0], self.dtime,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr_mix(self):
        """Test array latlon conversion for mixed type with an array"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              self.lon_in[0], self.alt_in[0],
                                              self.dtime, self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_arr_mult_and_single_element(self):
        """Test latlon conversion for arrays with multiple and single vals"""
        self.out = aacgmv2.convert_latlon_arr(np.array(self.lat_in),
                                              np.array([self.lon_in[0]]),
                                              np.array(self.alt_in),
                                              self.dtime, self.method)
        self.evaluate_output()
    @pytest.mark.parametrize('method_code,alt,local_ref',
                             [("BADIDEA", 3000.0,
                               [[64.3580], [83.2895], [1.4694]]),
                              ("BADIDEA|TRACE", 7000.0,
                               [[69.3187], [85.0845], [2.0973]])])
    def test_convert_latlon_arr_badidea(self, method_code, alt, local_ref):
        """Test array latlon conversion for BADIDEA"""
        # BADIDEA allows altitudes above the coefficient validity limit
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              [alt], self.dtime, method_code)
        self.ref = local_ref
        self.evaluate_output()
    def test_convert_latlon_arr_location_failure(self):
        """Test array latlon conversion with a bad location"""
        with warnings.catch_warnings():
            # Causes all warnings to be surpressed
            warnings.simplefilter("ignore")
            # Trigger a warning
            self.out = aacgmv2.convert_latlon_arr([0], [0], [0], self.dtime, "")
            # Test the output
            np.testing.assert_equal(len(self.out), len(self.ref))
            assert np.any(~np.isfinite(np.array(self.out)))
    def test_convert_latlon_arr_datetime_date(self):
        """Test array latlon conversion with date and datetime input"""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.ddate,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_clip(self):
        """Test array latlon conversion with latitude clipping"""
        self.lat_in = [90.01, -90.01]
        self.ref = [[83.92352053, -74.98110552], [170.1381271, 17.98164313],
                    [1.04481924, 1.04481924]]
        self.out = aacgmv2.convert_latlon_arr(self.lat_in, self.lon_in,
                                              self.alt_in, self.ddate,
                                              self.method)
        self.evaluate_output()
    def test_convert_latlon_arr_maxalt_failure(self):
        """test convert_latlon_arr failure for altitudes too high for coeffs"""
        self.method = ""
        self.out = aacgmv2.convert_latlon_arr(self.lat_in[0], self.lon_in[0],
                                              [2001], self.dtime, self.method)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_rep,in_irep,msg',
                             [(None, 3, "must be a datetime object"),
                              ([np.full(shape=(3, 2), fill_value=50.0), 0],
                               [0, 1], "unable to process multi-dimensional"),
                              ([50, 60, 70], 0, "arrays are mismatched"),
                              ([[91, 60, -91], 0, 300], [0, 1, 2],
                               "unrealistic latitude"),
                              (None, 4, "unknown method code")])
    def test_convert_latlon_arr_failure(self, in_rep, in_irep, msg):
        """Test ValueError raised for malformed array inputs"""
        in_args = np.array([self.lat_in, self.lon_in, self.alt_in, self.dtime,
                            "G2A"], dtype=object)
        in_args[in_irep] = in_rep
        with pytest.raises(ValueError, match=msg):
            aacgmv2.convert_latlon_arr(*in_args)
class TestGetAACGMCoord:
    """Unit tests for single-value `aacgmv2.get_aacgm_coord` conversions."""
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.in_args = [60, 0]
        self.out = None
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.in_args, self.rtol, self.dtime, self.ddate
    @pytest.mark.parametrize('alt,method_code,ref',
                             [(300, 'TRACE', [58.2268, 81.1613, 0.1888]),
                              (3000.0, "G2A|BADIDEA", [64.3578, 83.2895,
                                                       0.3307]),
                              (7000.0, "G2A|TRACE|BADIDEA",
                               [69.3187, 85.0845, 0.4503])])
    def test_get_aacgm_coord(self, alt, method_code, ref):
        """Test single value AACGMV2 calculation, defaults to TRACE"""
        self.in_args.extend([alt, self.dtime, method_code])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        np.testing.assert_allclose(self.out, ref, rtol=self.rtol)
    def test_get_aacgm_coord_datetime_date(self):
        """Test single AACGMV2 calculation with date and datetime input"""
        self.in_args.extend([300.0, self.ddate, 'TRACE'])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        np.testing.assert_allclose(self.out, [58.2268, 81.1613, 0.1888],
                                   rtol=self.rtol)
    def test_get_aacgm_coord_location_failure(self):
        """Test single value AACGMV2 calculation with a bad location"""
        self.in_args.extend([0.0, self.dtime, 'TRACE'])
        self.in_args[0] = 0.0
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        # BUG FIX: the NaN check was previously computed but never asserted,
        # so this test could not fail regardless of the output
        assert np.all(np.isnan(np.array(self.out)))
    def test_get_aacgm_coord_maxalt_failure(self):
        """test get_aacgm_coord failure for an altitude too high for coeffs"""
        self.in_args.extend([2001, self.dtime, ""])
        self.out = aacgmv2.get_aacgm_coord(*self.in_args)
        assert np.all(np.isnan(np.array(self.out)))
    @pytest.mark.parametrize('in_index,value',
                             [(3, None), (0, 91.0), (0, -91.0)])
    def test_get_aacgm_coord_raise_value_error(self, in_index, value):
        """Test different ways to raise a ValueError"""
        self.in_args.extend([300.0, self.dtime])
        self.in_args[in_index] = value
        with pytest.raises(ValueError):
            self.out = aacgmv2.get_aacgm_coord(*self.in_args)
class TestGetAACGMCoordArr(TestConvertArray):
    """Unit tests for array-input `aacgmv2.get_aacgm_coord_arr`.

    Reference values in `self.ref` are (mlat, mlon, mlt) triples for the
    geodetic inputs below, precomputed for 2015-01-01 with TRACE.
    """
    def setup(self):
        """Runs before every method to create a clean testing setup"""
        self.dtime = dt.datetime(2015, 1, 1, 0, 0, 0)
        self.ddate = dt.date(2015, 1, 1)
        self.lat_in = [60.0, 61.0]
        self.lon_in = [0.0, 0.0]
        self.alt_in = [300.0, 300.0]
        self.method = 'TRACE'
        self.out = None
        self.ref = [[58.22676, 59.31847], [81.16135, 81.60797],
                    [0.18880, 0.21857]]
        self.rtol = 1.0e-4
    def teardown(self):
        """Runs after every method to clean up previous testing"""
        del self.out, self.ref, self.lat_in, self.dtime, self.ddate
        del self.lon_in, self.alt_in, self.method, self.rtol
    def test_get_aacgm_coord_arr_single_val(self):
        """Test array AACGMV2 calculation for a single value"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
                                               self.alt_in[0], self.dtime,
                                               self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_list_single(self):
        """Test array AACGMV2 calculation for list input of single values"""
        self.out = aacgmv2.get_aacgm_coord_arr([self.lat_in[0]],
                                               [self.lon_in[0]],
                                               [self.alt_in[0]], self.dtime,
                                               self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_arr_single(self):
        """Test array AACGMV2 calculation for array with a single value"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array([self.lat_in[0]]),
                                               np.array([self.lon_in[0]]),
                                               np.array([self.alt_in[0]]),
                                               self.dtime, self.method)
        self.evaluate_output(ind=0)
    def test_get_aacgm_coord_arr_list(self):
        """Test array AACGMV2 calculation for list input"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.dtime,
                                               self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_arr(self):
        """Test array AACGMV2 calculation for an array"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
                                               np.array(self.lon_in),
                                               np.array(self.alt_in),
                                               self.dtime, self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_list_mix(self):
        """Test array AACGMV2 calculation for a list and floats"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
                                               self.alt_in[0], self.dtime,
                                               self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_arr_mix(self):
        """Test array AACGMV2 calculation for an array and floats"""
        self.out = aacgmv2.get_aacgm_coord_arr(np.array(self.lat_in),
                                               self.lon_in[0], self.alt_in[0],
                                               self.dtime, self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_badidea(self):
        """Test array AACGMV2 calculation for BADIDEA"""
        self.method = "|".join([self.method, "BADIDEA"])
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in[0], self.lon_in[0],
                                               [3000.0], self.dtime,
                                               self.method)
        self.ref = [[64.3481], [83.2885], [0.3306]]
        self.evaluate_output()
    def test_get_aacgm_coord_arr_location_failure(self):
        """Test array AACGMV2 calculation with a bad location"""
        self.out = aacgmv2.get_aacgm_coord_arr([0], [0], [0], self.dtime,
                                               self.method)
        np.testing.assert_equal(len(self.out), len(self.ref))
        # NOTE(review): asserting a non-empty list comprehension is always
        # True; this was presumably meant to be all(...) -- confirm intent
        assert [isinstance(oo, np.ndarray) and len(oo) == 1 for oo in self.out]
        assert np.any([np.isnan(oo) for oo in self.out])
    def test_get_aacgm_coord_arr_mult_failure(self):
        """Test aacgm_coord_arr failure with multi-dim array input"""
        with pytest.raises(ValueError):
            (self.mlat_out, self.mlon_out,
             self.mlt_out) = aacgmv2.get_aacgm_coord_arr(
                 np.array([[60, 61, 62], [63, 64, 65]]), 0, 300, self.dtime)
    def test_get_aacgm_coord_arr_time_failure(self):
        """Test array AACGMV2 calculation with a bad time"""
        with pytest.raises(ValueError):
            aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in, self.alt_in,
                                        None, self.method)
    def test_get_aacgm_coord_arr_mlat_failure(self):
        """Test error return for co-latitudes above 90 for an array"""
        self.lat_in = [91, 60, -91]
        with pytest.raises(ValueError):
            self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in[0],
                                                   self.alt_in[0], self.dtime,
                                                   self.method)
    def test_get_aacgm_coord_arr_datetime_date(self):
        """Test array AACGMV2 calculation with date and datetime input"""
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.ddate,
                                               self.method)
        # Reference is the datetime-based result; date input must match it
        self.ref = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.dtime,
                                               self.method)
        self.evaluate_output()
    def test_get_aacgm_coord_arr_maxalt_failure(self):
        """test aacgm_coord_arr failure for an altitude too high for coeff"""
        self.method = ""
        self.alt_in = [2001 for ll in self.lat_in]
        self.out = aacgmv2.get_aacgm_coord_arr(self.lat_in, self.lon_in,
                                               self.alt_in, self.dtime,
                                               self.method)
        np.testing.assert_equal(len(self.out), len(self.ref))
        # NOTE(review): same always-True list-comprehension assert as above
        assert [isinstance(oo, np.ndarray) and len(oo) == len(self.lat_in)
                for oo in self.out]
        assert np.all(np.isnan(np.array(self.out)))
class TestConvertCode:
    """Unit tests for conversions between method-code representations."""
    def setup(self):
        """Initialise clean conversion attributes before each test."""
        self.out = None
        self.ref_code = None
        self.c_method_code = None
    def teardown(self):
        """Remove the conversion attributes after each test."""
        del self.c_method_code, self.ref_code, self.out
    def set_c_code(self):
        """Resolve `ref_code` to the matching C-level constant, if set."""
        if self.ref_code is None:
            return
        self.ref_code = self.ref_code.upper()
        self.c_method_code = getattr(aacgmv2._aacgmv2, self.ref_code)
    def set_bad_c_code(self):
        """Confirm an unknown code name raises AttributeError."""
        self.ref_code = "not_a_valid_code"
        with pytest.raises(AttributeError):
            self.set_c_code()
    @pytest.mark.parametrize('method_code',
                             [('G2A'), ('A2G'), ('TRACE'), ('ALLOWTRACE'),
                              ('BADIDEA'), ('GEOCENTRIC'), ('g2a')])
    def test_standard_convert_str_to_bit(self, method_code):
        """Test conversion from string code to bit for standard cases"""
        self.ref_code = method_code
        self.set_c_code()
        self.out = aacgmv2.convert_str_to_bit(method_code)
        np.testing.assert_equal(self.out, self.c_method_code)
    @pytest.mark.parametrize('str_code,bit_ref',
                             [("G2A | trace",
                               aacgmv2._aacgmv2.G2A + aacgmv2._aacgmv2.TRACE),
                              ("ggoogg|", aacgmv2._aacgmv2.G2A)])
    def test_non_standard_convert_str_to_bit(self, str_code, bit_ref):
        """Test conversion from string code to bit for non-standard cases"""
        self.out = aacgmv2.convert_str_to_bit(str_code)
        np.testing.assert_equal(self.out, bit_ref)
    @pytest.mark.parametrize('bool_dict,method_code',
                             [({}, 'G2A'), ({'a2g': True}, 'A2G'),
                              ({'trace': True}, 'TRACE'),
                              ({'allowtrace': True}, 'ALLOWTRACE'),
                              ({'badidea': True}, 'BADIDEA'),
                              ({'geocentric': True}, 'GEOCENTRIC')])
    def test_convert_bool_to_bit(self, bool_dict, method_code):
        """Test conversion from Boolean code to bit"""
        self.ref_code = method_code
        self.set_c_code()
        self.out = aacgmv2.convert_bool_to_bit(**bool_dict)
        np.testing.assert_equal(self.out, self.c_method_code)
import numpy as np
from . import *
class Bystander(Participant):
    """A bystander (crowd participant) in the bodyguard environment.

    Wanders between randomly chosen landmarks.  While any bodyguard is
    within 0.3 units (or no goal is set) it freezes; after more than 50
    frozen steps it retargets the nearest landmark and moves off.
    """
    def __init__(self, scenario):
        super().__init__(scenario)
        # The scenario drives this agent through the callback each step
        self.action_callback = self.theaction
        self.color = np.array([0.8, 0.0, 0.0]) # red
        # Random start position in the unit square, zero initial velocity
        self.state.p_pos = np.random.uniform(-1,+1, scenario.world.dim_p)
        self.state.p_vel = np.zeros(scenario.world.dim_p)
        self.goal_a = None      # current landmark target (None = frozen/idle)
        self.wait_count = 0     # consecutive steps spent frozen
    def reset(self):
        super(Bystander, self).reset()
        self.goal_a=None
    def theaction(self, agent, world):
        """ The behavior of the bystanders. Implemented as callback function

        NOTE(review): 'agent' appears to be this same bystander -- the body
        mixes self.* and agent.* access on what is presumably one object;
        confirm before refactoring.
        """
        # If the agent finds itself out of range, jump to a random new location
        if self.out_of_bounds():
            self.reset()
        bystander_action = Action()
        # The bystanders freeze if they are near a bodyguard or have no goal
        if self.near_bodyguard(agent, world) or not self.goal_a:
            bystander_action.u = np.zeros(world.dim_p)
            self.wait_count += 1
            if self.wait_count > 50:
                # Waited long enough: escape towards the nearest landmark.
                # NOTE(review): this escape move is a unit vector, not scaled
                # by step_size like the normal move below -- confirm intended.
                agent.goal_a = self.nearest_landmark(world)
                relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
                bystander_action.u = (relative_position/np.linalg.norm(relative_position))
                self.wait_count = 0
            return bystander_action
        # If the agent reached its goal, picks a new goal randomly from the landmarks
        if self.reached_goal():
            agent.goal_a = np.random.choice(world.landmarks)
        # otherwise, move towards the landmark
        relative_position = (agent.goal_a.state.p_pos - agent.state.p_pos)
        bystander_action.u = (relative_position/np.linalg.norm(relative_position)) * self.step_size
        return bystander_action
    def near_bodyguard(self, agent, world):
        """Return whether any bodyguard is within 0.3 units of *agent*."""
        bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
        distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
        return np.any(0.3 > distance_between_all_bodyguards)
    def nearest_landmark(self, world):
        """Return the landmark closest to this bystander's position."""
        landmark_p_pos = np.array([landmark.state.p_pos for landmark in world.landmarks])
        idx = np.linalg.norm(landmark_p_pos-self.state.p_pos, axis=1).argsort()[0]
        return world.landmarks[idx]
class StreetBystander(Bystander):
""" A bystander (crowd participant) in the bodyguard environment, performing Vicsek Particle Motion. If the bystander is near a bodyguard, it stops...
"""
    def __init__(self, scenario):
        """Create a Vicsek-model bystander with a random initial heading.

        NOTE: the two np.random draws below are order-sensitive for seeded
        runs -- theta is drawn before the noise amplitude.
        """
        super().__init__(scenario)
        self.action_callback = self.theaction  # scenario invokes this each step
        self.theta = np.random.uniform(-np.pi,np.pi)  # heading angle, radians
        self.noise = np.random.rand()  # per-agent noise sample in [0, 1)
    def reset(self):
        """Re-randomise this agent's velocity and heading.

        NOTE(review): unlike Bystander.reset, this does not call the parent
        reset and does not re-seed p_pos -- confirm that is intentional.
        """
        self.state.p_vel = np.random.uniform(-.5, .5, self.scenario.world.dim_p)
        self.theta=np.random.uniform(-np.pi,np.pi)
def theaction(self, agent, world):
""" The behavior of the bystanders. Implemented as callback function
"""
#print("bystander action")
# If the agent finds itself out of range, jump to a random new location
bystander_action = Action()
#The bystanders freeze if they are near a bodyguard
if self.near_bodyguard(agent, world) or self.out_of_bounds():
bystander_action.u = np.array([-0.2, -0.2])
return bystander_action
# otherwise, move towards the landmark
relative_position= (self.vicsek_step() - agent.state.p_pos)
bystander_action.u = (relative_position/np.linalg.norm(relative_position))
return bystander_action
def near_bodyguard(self, agent, world):
bodyguard_p_pos = np.asarray([bodyguard.state.p_pos for bodyguard in self.scenario.bodyguards])
distance_between_all_bodyguards = np.linalg.norm(bodyguard_p_pos-agent.state.p_pos, axis=1)
return np.any(0.1 > distance_between_all_bodyguards)
def vicsek_step(self):
noise_increments = (self.noise - 0.5)
bystander_p_pos = np.asarray([bystander.state.p_pos for bystander in self.scenario.bystanders])
distance_between_all_crowd = np.linalg.norm(bystander_p_pos-self.state.p_pos, axis=1)
np.nan_to_num(distance_between_all_crowd, False)
near_range_bystanders = np.where((distance_between_all_crowd > 0) & (distance_between_all_crowd <=1.5))[0].tolist()
near_angles = [self.scenario.bystanders[idx].theta for idx in near_range_bystanders]
near_angles = np.array(near_angles)
mean_directions = np.arctan2(np.mean(np.sin(near_angles)), np.mean(np.cos(near_angles)))
self.theta = mean_directions + noise_increments
vel = np.multiply([np.cos(self.theta), | np.sin(self.theta) | numpy.sin |
import os
import json
import torch
import numpy as np
from torch.utils.data import Dataset
import pandas as pd
import argparse
import copy
from collections import Counter
from nltk.tokenize import TweetTokenizer
# import gensim
class _YELP_RESTAURANT(Dataset):
    def __init__(self, args, vocab_obj, df, item_boa_dict, user_boa_dict):
        """Build per-sample lists from the review dataframe.

        Parameters (assumptions flagged where not verifiable here):
            args: namespace providing data_dir, max_seq_length, batch_size.
            vocab_obj: vocabulary exposing sos/eos/pad indices and vocab_size.
            df: dataframe with userid, itemid and attr columns -- presumably
                one review per row; verify against the caller.
            item_boa_dict: maps str(item_id) -> {attribute id: frequency}.
            user_boa_dict: unused in this method -- TODO confirm droppable.
        """
        super().__init__()
        self.m_data_dir = args.data_dir
        self.m_max_seq_len = args.max_seq_length
        self.m_batch_size = args.batch_size
        # self.m_vocab_file = "amazon_vocab.json"
        self.m_max_line = 1e10
        self.m_sos_id = vocab_obj.sos_idx
        self.m_eos_id = vocab_obj.eos_idx
        self.m_pad_id = vocab_obj.pad_idx
        self.m_vocab_size = vocab_obj.vocab_size
        self.m_vocab = vocab_obj
        self.m_sample_num = len(df)
        print("sample num", self.m_sample_num)
        # Round the batch count up so a partial final batch is included
        self.m_batch_num = int(self.m_sample_num/self.m_batch_size)
        print("batch num", self.m_batch_num)
        if (self.m_sample_num/self.m_batch_size - self.m_batch_num) > 0:
            self.m_batch_num += 1
        # Parallel per-sample lists; index i across all of them is one sample
        self.m_input_batch_list = []
        self.m_input_freq_batch_list = []
        self.m_input_length_batch_list = []
        self.m_user_batch_list = []
        self.m_item_batch_list = []
        self.m_target_batch_list = []
        self.m_target_length_batch_list = []
        self.m_user2uid = {}
        self.m_item2iid = {}
        userid_list = df.userid.tolist()
        itemid_list = df.itemid.tolist()
        # review_list = df.review.tolist()
        # tokens_list = df.token_idxs.tolist()
        attr_list = df.attr.tolist()
        for sample_index in range(self.m_sample_num):
        # for sample_index in range(1000):
            user_id = userid_list[sample_index]
            item_id = itemid_list[sample_index]
            attrlist_i = attr_list[sample_index]
            # item_boa = item_boa_dict[str(item_id)]
            # input_boa = item_boa
            # input_boa_freq = [0 for i in range(len(item_boa))]
            # item_boa_boafreq = item_boa_dict[str(item_id)]
            # item_boa = item_boa_boafreq[0]
            # item_freq = item_boa_boafreq[1]
            item_attrdict_i = item_boa_dict[str(item_id)]
            item_attr_list_i = list(item_attrdict_i.keys())
            item_attrfreq_list_i = list(item_attrdict_i.values())
            """
            scale the item freq into the range [0, 1]
            """
            def max_min_scale(val_list):
                # Min-max scale, then shift by +1.0.
                # NOTE(review): the +1.0 puts values in [1, 2], not [0, 1]
                # as the string above claims -- confirm this is intended.
                vals = np.array(val_list)
                min_val = min(vals)
                max_val = max(vals)
                if max_val == min_val:
                    scale_vals = np.zeros_like(vals)
                    # print("scale_vals", scale_vals)
                else:
                    scale_vals = (vals-min_val)/(max_val-min_val)
                scale_vals = scale_vals+1.0
                scale_val_list = list(scale_vals)
                # if max_val-min_val == 0:
                #     print("--"*20)
                #     print("error max_val-min_val", max_val, min_val)
                #     print(item_id, val_list)
                return scale_val_list
            item_freq = max_min_scale(item_attrfreq_list_i)
            input_boa = item_attr_list_i
            input_boa_freq = item_freq
            # target_boa = boa
            target_boa = attrlist_i
            input_len = len(input_boa)
            target_len = len(target_boa)
            self.m_input_batch_list.append(input_boa)
            self.m_input_freq_batch_list.append(input_boa_freq)
            self.m_target_batch_list.append(target_boa)
            # uid = self.m_user2uid[user_id]
            self.m_user_batch_list.append(user_id)
            # iid = self.m_item2iid[item_id]
            self.m_item_batch_list.append(item_id)
            self.m_input_length_batch_list.append(input_len)
            self.m_target_length_batch_list.append(target_len)
        # exit()
        print("... load train data ...", len(self.m_item_batch_list), len(self.m_user_batch_list), len(self.m_input_batch_list), len(self.m_target_batch_list), len(self.m_input_length_batch_list), len(self.m_target_length_batch_list))
        # exit()
    def __len__(self):
        """Return the number of (user, item, attribute) samples loaded."""
        return len(self.m_input_batch_list)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
i = idx
input_i = self.m_input_batch_list[i]
input_freq_i = self.m_input_freq_batch_list[i]
input_length_i = self.m_input_length_batch_list[i]
user_i = self.m_user_batch_list[i]
item_i = self.m_item_batch_list[i]
target_i = self.m_target_batch_list[i]
target_length_i = self.m_target_length_batch_list[i]
return input_i, input_freq_i, input_length_i, user_i, item_i, target_i, target_length_i, self.m_pad_id, self.m_vocab_size
@staticmethod
def collate(batch):
batch_size = len(batch)
input_iter = []
input_freq_iter = []
input_length_iter = []
user_iter = []
item_iter = []
target_iter = []
target_length_iter = []
for i in range(batch_size):
sample_i = batch[i]
input_length_i = sample_i[2]
input_length_iter.append(input_length_i)
target_length_i = sample_i[6]
target_length_iter.append(target_length_i)
max_input_length_iter = max(input_length_iter)
max_target_length_iter = max(target_length_iter)
user_iter = []
item_iter = []
# freq_pad_id = float('-inf')
freq_pad_id = float(0)
for i in range(batch_size):
sample_i = batch[i]
input_i = copy.deepcopy(sample_i[0])
input_i = [int(i) for i in input_i]
input_freq_i = copy.deepcopy(sample_i[1])
input_length_i = sample_i[2]
# if input_i is None:
# print("error input is none", sample_i[0])
# print(input_i)
# print(len(input_i))
pad_id = sample_i[7]
vocab_size = sample_i[8]
input_i.extend([pad_id]*(max_input_length_iter-input_length_i))
input_iter.append(input_i)
input_freq_i.extend([freq_pad_id]*(max_input_length_iter-input_length_i))
input_freq_iter.append(input_freq_i)
user_i = sample_i[3]
user_iter.append(user_i)
item_i = sample_i[4]
item_iter.append(item_i)
# target_i = copy.deepcopy(sample_i[4])
# target_length_i = sample_i[5]
# target_i.extend([pad_id]*(max_target_length_iter-target_length_i))
# target_iter.append(target_i)
target_index_i = copy.deepcopy(sample_i[5])
target_i = np.zeros(vocab_size)
target_i[np.array(target_index_i, int)] = 1
# print("input_i", input_i)
# print("target_i", target_i)
target_i = target_i[input_i]
target_iter.append(target_i)
# exit()
# print("input_iter", input_iter)
input_iter_tensor = torch.from_numpy(np.array(input_iter)).long()
input_freq_iter_tensor = torch.from_numpy(np.array(input_freq_iter)).float()
input_length_iter_tensor = torch.from_numpy(np.array(input_length_iter)).long()
user_iter_tensor = torch.from_numpy(np.array(user_iter)).long()
item_iter_tensor = torch.from_numpy( | np.array(item_iter) | numpy.array |
# -*- coding: utf-8 -*-
from __future__ import division
from collections import namedtuple
import numpy as np
import torch
import time
# One stored environment step: episode-relative timestep, observation frame,
# action index, reward, and whether the *next* state is non-terminal.
Transition = namedtuple('Transition', ('timestep', 'state', 'action', 'reward', 'nonterminal'))
# blank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)
# Segment tree data structure where parent node values are sum/max of children node values
class SegmentTree():
    """Fixed-capacity cyclic buffer whose priorities live in a binary sum tree.

    ``sum_tree`` is a flat array of ``2 * size - 1`` nodes: the first
    ``size - 1`` are internal (each the sum of its two children) and the
    last ``size`` are leaves holding per-item priorities, so prefix-sum
    sampling and updates run in O(log size).  ``data`` is the matching
    wrap-around item store.
    """
    def __init__(self, size):
        self.index = 0     # next write position in the cyclic buffer
        self.size = size
        self.full = False  # becomes True once the buffer has wrapped around
        # All priorities start at zero
        self.sum_tree = np.zeros((2 * size - 1, ), dtype=np.float32)
        self.data = np.array([None] * size)  # wrap-around cyclic item store
        self.max = 1  # running maximum priority seen (1 = 1^omega)
    def _propagate(self, index, value):
        """Recompute ancestor sums after the node at *index* changed."""
        parent = (index - 1) // 2
        while True:
            left, right = 2 * parent + 1, 2 * parent + 2
            self.sum_tree[parent] = self.sum_tree[left] + self.sum_tree[right]
            if parent == 0:
                break
            parent = (parent - 1) // 2
    def update(self, index, value):
        """Write priority *value* at tree node *index* and repair the tree."""
        self.sum_tree[index] = value
        self._propagate(index, value)
        if value > self.max:
            self.max = value
    def append(self, data, value):
        """Store *data* with priority *value*, overwriting the oldest slot."""
        self.data[self.index] = data
        self.update(self.index + self.size - 1, value)  # matching leaf node
        self.index = (self.index + 1) % self.size
        self.full = self.full or self.index == 0
        if value > self.max:
            self.max = value
    def _retrieve(self, index, value):
        """Walk down from *index* to the leaf containing prefix-sum *value*."""
        while True:
            left = 2 * index + 1
            if left >= len(self.sum_tree):
                return index
            if value <= self.sum_tree[left]:
                index = left
            else:
                value -= self.sum_tree[left]
                index = left + 1
    def find_parallel(self, value):
        """Vectorised prefix-sum search.

        Maps each entry of *value* to a (priority, data index, tree index)
        triple; scalar inputs are unwrapped back to scalars on return.
        """
        if isinstance(value, float):
            value = np.array([value])
        assert 0 <= np.min(value)
        assert np.max(value) <= self.sum_tree[0] + 1e-5
        assert isinstance(value[0], float)
        idx = np.zeros(len(value), dtype=int)
        cont = np.ones(len(value), dtype=bool)
        # Descend all searches simultaneously until each reaches a leaf
        while np.any(cont):
            idx[cont] = 2 * idx[cont] + 1       # step active searches left
            left_sum = self.sum_tree[idx]
            remaining = np.where(left_sum <= value, value - left_sum, value)
            go_right = np.logical_and(cont, left_sum <= value)
            idx = np.where(go_right, idx + 1, idx)
            value = remaining
            cont = idx < self.size - 1          # still above the leaf level?
        data_index = idx - self.size + 1
        if len(data_index) == 1:
            idx = idx[0]
            data_index = data_index[0]
        return (self.sum_tree[idx], data_index, idx)
    def find(self, value):
        """Scalar search; returns (priority, data index, tree index)."""
        index = self._retrieve(0, value)
        data_index = index - self.size + 1
        return (self.sum_tree[index], data_index, index)
    def get(self, data_index):
        """Fetch the stored item for *data_index* (modulo capacity)."""
        return self.data[data_index % self.size]
    def total(self):
        """Sum of all priorities (the root of the tree)."""
        return self.sum_tree[0]
class ReplayMemory():
    def __init__(self, args, capacity):
        """Prioritised replay buffer backed by a SegmentTree.

        The env-specific blank transition pads episode edges; Atari frames
        are stored as uint8, other domains as float32 vectors.
        """
        if args.env_type == 'atari':
            self.blank_trans = Transition(0, torch.zeros(84, 84, dtype=torch.uint8), None, 0, False)
            self.state_int = True
        elif args.env_type == 'sepsis':
            self.blank_trans = Transition(0, torch.zeros(46, 1, 1, dtype=torch.float32), None, 0, False)
            self.state_int = False
        elif args.env_type == 'hiv':
            self.blank_trans = Transition(0, torch.zeros(6, 1, dtype=torch.float32), None, 0, False)
            self.state_int = False
        else:
            self.blank_trans = Transition(0, torch.zeros(args.state_dim, 1, dtype=torch.float32), None, 0, False)
            self.state_int = False
        self.device = args.device
        self.capacity = capacity
        self.history = args.history_length  # frames stacked to form a state
        self.discount = args.discount
        self.n = args.multi_step  # n-step return horizon
        self.priority_weight = args.priority_weight  # Initial importance sampling weight β, annealed to 1 over course of training
        self.priority_exponent = args.priority_exponent
        self.t = 0  # Internal episode timestep counter
        self.transitions = SegmentTree(capacity)  # Store transitions in a wrap-around cyclic buffer within a sum tree for querying priorities
    # Adds state and action at time t, reward and terminal at time t + 1.
    # New transitions enter with the current maximum priority so they are
    # guaranteed to be sampled at least once.
    def append(self, state, action, reward, terminal):
        # Only the newest frame is stored; states are rebuilt from history
        # at sample time.  Values are scaled by 255 here and divided back
        # by 255 when sampled.
        if self.state_int:
            state = state[-1].mul(255).to(dtype=torch.uint8, device=torch.device('cpu'))  # Only store last frame and discretise to save memory
        else:
            state = state[-1].mul(255).to(dtype=torch.float32, device=torch.device('cpu'))
        self.transitions.append(Transition(self.t, state, action, reward, not terminal), self.transitions.max)  # Store new transition with maximum priority
        self.t = 0 if terminal else self.t + 1  # Start new episodes with t = 0
    # Returns a transition with blank states where appropriate
    def _get_transition(self, idx):
        """Gather the history+n window around *idx*, padding across episode
        boundaries with the blank transition."""
        transition = np.array([None] * (self.history + self.n))
        transition[self.history - 1] = self.transitions.get(idx)
        # Walk backwards: pad with blanks once an episode start (t == 0) is hit
        for t in range(self.history - 2, -1, -1):  # e.g. 2 1 0
            if transition[t + 1].timestep == 0:
                transition[t] = self.blank_trans  # If future frame has timestep 0
            else:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
        # Walk forwards: pad with blanks once a terminal transition is hit
        for t in range(self.history, self.history + self.n):  # e.g. 4 5 6
            if transition[t - 1].nonterminal:
                transition[t] = self.transitions.get(idx - self.history + 1 + t)
            else:
                transition[t] = self.blank_trans  # If prev (next) frame is terminal
        return transition
    # Returns a valid sample from a segment
    def _get_sample_from_segment(self, segment, i):
        """Draw one prioritised sample whose prefix-sum lies in segment *i*.

        Falls back to sampling the whole tree after 10 failed draws so a
        sparse segment cannot loop forever.
        """
        valid = False
        num_fail = 0
        while not valid:
            if num_fail < 10:
                sample = np.random.uniform(i * segment, (i + 1) * segment)  # Uniformly sample an element from within a segment
            else:
                # Too many invalid draws in this segment: sample anywhere
                p_total = self.transitions.total()
                sample = np.random.uniform(1, p_total)
            prob, idx, tree_idx = self.transitions.find(sample)  # Retrieve sample from tree with un-normalised probability
            # Resample if transition straddled current index or probablity 0
            if (self.transitions.index - idx) % self.capacity > self.n and (idx - self.transitions.index) % self.capacity >= self.history and prob != 0:
                valid = True  # Note that conditions are valid but extra conservative around buffer index 0
            else:
                num_fail += 1
        # Retrieve all required transition data (from t - h to t + n)
        transition = self._get_transition(idx)
        # Create un-discretised state and nth next state
        state = torch.stack([trans.state for trans in transition[:self.history]]).to(device=self.device).to(dtype=torch.float32).div_(255)
        next_state = torch.stack([trans.state for trans in transition[self.n:self.n + self.history]]).to(device=self.device).to(dtype=torch.float32).div_(255)
        # Discrete action to be used as index
        action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)
        # Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)
        R = torch.tensor([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))], dtype=torch.float32, device=self.device)
        # Mask for non-terminal nth next states
        nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32, device=self.device)
        return prob, idx, tree_idx, state, action, R, next_state, nonterminal
    def _get_sample_from_idx(self, idx):
        """Build the training tuple for a known data index *idx*.

        Same unpacking as _get_sample_from_segment, but skips the
        priority-proportional search.
        """
        tree_idx = (idx % self.transitions.size) + self.transitions.size - 1
        # Retrieve all required transition data (from t - h to t + n)
        transition = self._get_transition(idx)
        # Create un-discretised state and nth next state
        state = torch.stack([trans.state for trans in transition[:self.history]]).to(device=self.device).to(dtype=torch.float32).div_(255)
        next_state = torch.stack([trans.state for trans in transition[self.n:self.n + self.history]]).to(device=self.device).to(dtype=torch.float32).div_(255)
        # Discrete action to be used as index
        action = torch.tensor([transition[self.history - 1].action], dtype=torch.int64, device=self.device)
        # Calculate truncated n-step discounted return R^n = Σ_k=0->n-1 (γ^k)R_t+k+1 (note that invalid nth next states have reward 0)
        R = torch.tensor([sum(self.discount ** n * transition[self.history + n - 1].reward for n in range(self.n))], dtype=torch.float32, device=self.device)
        # Mask for non-terminal nth next states
        nonterminal = torch.tensor([transition[self.history + self.n - 1].nonterminal], dtype=torch.float32, device=self.device)
        return idx, tree_idx, state, action, R, next_state, nonterminal
    def sample(self, batch_size):
        """Draw a prioritised batch via stratified segment sampling and
        return it with normalised importance-sampling weights."""
        p_total = self.transitions.total()  # Retrieve sum of all priorities (used to create a normalised probability distribution)
        segment = p_total / batch_size  # Batch size number of segments, based on sum over all probabilities
        batch = [self._get_sample_from_segment(segment, i) for i in range(batch_size)]  # Get batch of valid samples
        probs, idxs, tree_idxs, states, actions, returns, next_states, nonterminals = zip(*batch)
        states, next_states, = torch.stack(states), torch.stack(next_states)
        actions, returns, nonterminals = torch.cat(actions), torch.cat(returns), torch.stack(nonterminals)
        probs = np.array(probs, dtype=np.float32) / p_total  # Calculate normalised probabilities
        capacity = self.capacity if self.transitions.full else self.transitions.index
        weights = (capacity * probs) ** -self.priority_weight  # Compute importance-sampling weights w
        weights = torch.tensor(weights / weights.max(), dtype=torch.float32, device=self.device)  # Normalise by max importance-sampling weight from batch
        return tree_idxs, states, actions, returns, next_states, nonterminals, weights
    def sample2(self, batch_size):
        """Vectorised alternative to `sample` using `find_parallel`.

        NOTE(review): `_encode_sample` is not defined in this chunk --
        presumably provided elsewhere in the class; verify before use.
        """
        # import time
        # start_time = time.time()
        idxs, probs = self._sample_proportional(batch_size)
        # print("batch size", batch_size, "sample tree time", time.time() - start_time)
        p_total = self.transitions.total()
        probs = np.array(probs, dtype=np.float32) / p_total  # Calculate normalised probabilities
        capacity = self.capacity if self.transitions.full else self.transitions.index
        weights = (capacity * probs) ** -self.priority_weight  # Compute importance-sampling weights w
        weights = torch.tensor(weights / weights.max(), dtype=torch.float32,
                               device=self.device)  # Normalise by max importance-sampling weight from batch
        # start_time = time.time()
        encoded_sample = self._encode_sample(idxs)
        # print("batch size", batch_size, "encode sample time", time.time() - start_time)
        # encoded_sample = self._sample_from_idxs(idxs)[:-1]
        return tuple(list(encoded_sample) + [weights,])
# def _sample_proportional(self, batch_size):
# p_total = self.transitions.total()
# prefixs = np.random.uniform(size=batch_size) * p_total
# idxs = []
# probs = []
# for i in range(batch_size):
# prob, idx, _ = self.transitions.find(prefixs[i])
# idxs.append(idx)
# probs.append(prob)
# return idxs, probs
def _sample_proportional(self, batch_size):
idxs, probs = [], []
p_total = self.transitions.total()
segment = p_total / batch_size
samples = np.random.uniform(0, segment, size=batch_size) + np.arange(batch_size) * segment
probs, idxs, tree_idxs = self.transitions.find_parallel(samples)
valid_idx = np.logical_and(np.logical_and((self.transitions.index - idxs) % self.capacity > self.n,
(idxs - self.transitions.index) % self.capacity >= self.history), probs != 0)
# print(idxs[:10], self.transitions.index, self.capacity, self.n, self.history)
# print('original idx number', len(idxs), idxs)
# print('valid idx number', np.sum(valid_idx), valid_idx.shape)
probs = probs[valid_idx]
idxs = idxs[valid_idx]
if | np.sum(valid_idx) | numpy.sum |
import warnings
import numpy as np
from scipy.special import factorial
from .pyramid import SteerablePyramidBase
from .c.wrapper import pointOp
from ..tools.utils import rcosFn
class SteerablePyramidFreq(SteerablePyramidBase):
"""Steerable frequency pyramid.
Construct a steerable pyramid on matrix IM, in the Fourier domain.
This is similar to Spyr, except that:
+ Reconstruction is exact (within floating point errors)
+ It can produce any number of orientation bands.
- Typically slower, especially for non-power-of-two sizes.
- Boundary-handling is circular.
The squared radial functions tile the Fourier plane with a raised-cosine
falloff. Angular functions are cos(theta- k*pi/order+1)^(order).
Notes
-----
Transform described in [1]_, filter kernel design described in [2]_.
Parameters
----------
image : `array_like`
2d image upon which to construct to the pyramid.
height : 'auto' or `int`.
The height of the pyramid. If 'auto', will automatically determine based on the size of
`image`.
order : `int`.
The Gaussian derivative order used for the steerable filters. Default value is 3.
Note that to achieve steerability the minimum number of orientation is `order` + 1,
and is used here. To get more orientations at the same order, use the method `steer_coeffs`
twidth : `int`
The width of the transition region of the radial lowpass function, in octaves
is_complex : `bool`
Whether the pyramid coefficients should be complex or not. If True, the real and imaginary
parts correspond to a pair of even and odd symmetric filters. If False, the coefficients
only include the real part / even symmetric filter.
Attributes
----------
image : `array_like`
The input image used to construct the pyramid.
image_size : `tuple`
The size of the input image.
pyr_type : `str` or `None`
Human-readable string specifying the type of pyramid. For base class, is None.
pyr_coeffs : `dict`
Dictionary containing the coefficients of the pyramid. Keys are `(level, band)` tuples and
values are 1d or 2d numpy arrays (same number of dimensions as the input image)
pyr_size : `dict`
Dictionary containing the sizes of the pyramid coefficients. Keys are `(level, band)`
tuples and values are tuples.
is_complex : `bool`
Whether the coefficients are complex- or real-valued.
References
----------
    .. [1] E. P. Simoncelli and W. T. Freeman, "The Steerable Pyramid: A Flexible Architecture for
       Multi-Scale Derivative Computation," Second Int'l Conf on Image Processing, Washington, DC,
Oct 1995.
    .. [2] A. Karasaridis and E. P. Simoncelli, "A Filter Design Technique for Steerable Pyramid
Image Transforms", ICASSP, Atlanta, GA, May 1996.
"""
    def __init__(self, image, height='auto', order=3, twidth=1, is_complex=False):
        # in the Fourier domain, there's only one choice for how to do edge-handling:
        # circular. To emphasize that this isn't a choice, we use None here.
        super().__init__(image=image, edge_type=None)
        self.pyr_type = 'SteerableFrequency'
        self.is_complex = is_complex
        # SteerablePyramidFreq doesn't have filters, they're constructed in the frequency space
        self.filters = {}
        self.order = int(order)
        # we can't use the base class's _set_num_scales method because the max height is calculated
        # slightly differently
        max_ht = np.floor(np.log2(min(self.image.shape))) - 2
        if height == 'auto' or height is None:
            self.num_scales = int(max_ht)
        elif height > max_ht:
            raise Exception("Cannot build pyramid higher than %d levels." % (max_ht))
        else:
            self.num_scales = int(height)
        if self.order > 15 or self.order < 0:
            raise Exception("order must be an integer in the range [0,15]. Truncating.")
        # steerability requires order + 1 orientation bands
        self.num_orientations = int(order + 1)
        if twidth <= 0:
            warnings.warn("twidth must be positive. Setting to 1.")
            twidth = 1
        twidth = int(twidth)
        # Polar frequency coordinates for the full-size image: `angle` is the
        # orientation of each frequency sample, `log_rad` its log2 radius.
        dims = np.array(self.image.shape)
        ctr = np.ceil((np.array(dims)+0.5)/2).astype(int)
        (xramp, yramp) = np.meshgrid(np.linspace(-1, 1, dims[1]+1)[:-1],
                                     np.linspace(-1, 1, dims[0]+1)[:-1])
        angle = np.arctan2(yramp, xramp)
        log_rad = np.sqrt(xramp**2 + yramp**2)
        # avoid log2(0) at the DC sample by borrowing the neighbouring radius
        log_rad[ctr[0]-1, ctr[1]-1] = log_rad[ctr[0]-1, ctr[1]-2]
        log_rad = np.log2(log_rad)
        # Radial transition function (a raised cosine in log-frequency):
        (Xrcos, Yrcos) = rcosFn(twidth, (-twidth/2.0), np.array([0, 1]))
        Yrcos = np.sqrt(Yrcos)
        # complementary lowpass: lo**2 + hi**2 = 1, so the squared masks tile
        YIrcos = np.sqrt(1.0 - Yrcos**2)
        lo0mask = pointOp(log_rad, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
        self._lo0mask = lo0mask
        imdft = np.fft.fftshift(np.fft.fft2(self.image))
        hi0mask = pointOp(log_rad, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
        self._hi0mask = hi0mask
        # highpass residual: mask the DFT and transform back to the image domain
        hi0dft = imdft * hi0mask.reshape(imdft.shape[0], imdft.shape[1])
        hi0 = np.fft.ifft2(np.fft.ifftshift(hi0dft))
        self.pyr_coeffs['residual_highpass'] = np.real(hi0)
        self.pyr_size['residual_highpass'] = hi0.shape
        # lowpass branch that the recursive band construction operates on
        lo0mask = lo0mask.reshape(imdft.shape[0], imdft.shape[1])
        lodft = imdft * lo0mask
        # cache the masks so reconstruction can reuse them
        self._anglemasks = []
        self._himasks = []
        self._lomasks = []
        for i in range(self.num_scales):
            # shift the radial transition down one octave per scale
            Xrcos -= np.log2(2)
            lutsize = 1024
            Xcosn = np.pi * np.arange(-(2*lutsize+1), (lutsize+2)) / lutsize
            # normalisation constant for the cos^order angular windows
            const = (2**(2*self.order))*(factorial(self.order, exact=True)**2)/ float(self.num_orientations*factorial(2*self.order, exact=True))
            if self.is_complex:
                # TODO clean that up and give comments
                alfa = ((np.pi+Xcosn) % (2.0*np.pi)) - np.pi
                Ycosn = (2.0 * np.sqrt(const) * (np.cos(Xcosn) ** self.order) *
                         (np.abs(alfa) < np.pi/2.0).astype(int))
            else:
                Ycosn = np.sqrt(const) * (np.cos(Xcosn))**self.order
            log_rad_test = np.reshape(log_rad, (1, log_rad.shape[0] * log_rad.shape[1]))
            himask = pointOp(log_rad_test, Yrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
            himask = himask.reshape((lodft.shape[0], lodft.shape[1]))
            self._himasks.append(himask)
            anglemasks = []
            for b in range(self.num_orientations):
                # angular window for orientation band b, rotated by b*pi/num_orientations
                angle_tmp = np.reshape(angle, (1, angle.shape[0] * angle.shape[1]))
                anglemask = pointOp(angle_tmp, Ycosn, Xcosn[0]+np.pi*b/self.num_orientations,
                                    Xcosn[1]-Xcosn[0])
                anglemask = anglemask.reshape(lodft.shape[0], lodft.shape[1])
                anglemasks.append(anglemask)
                # that (-1j)**order term in the beginning will be 1, -j, -1, j for order 0, 1, 2,
                # 3, and will then loop again
                banddft = (-1j) ** self.order * lodft * anglemask * himask
                band = np.fft.ifft2(np.fft.ifftshift(banddft))
                if not self.is_complex:
                    self.pyr_coeffs[(i, b)] = np.real(band.copy())
                else:
                    self.pyr_coeffs[(i, b)] = band.copy()
                self.pyr_size[(i, b)] = band.shape
            self._anglemasks.append(anglemasks)
            # downsample in the frequency domain: crop the central half of the DFT
            dims = np.array(lodft.shape)
            ctr = np.ceil((dims+0.5)/2).astype(int)
            lodims = np.ceil((dims-0.5)/2).astype(int)
            loctr = np.ceil((lodims+0.5)/2).astype(int)
            lostart = ctr - loctr
            loend = lostart + lodims
            log_rad = log_rad[lostart[0]:loend[0], lostart[1]:loend[1]]
            angle = angle[lostart[0]:loend[0], lostart[1]:loend[1]]
            lodft = lodft[lostart[0]:loend[0], lostart[1]:loend[1]]
            # lowpass mask for the next (coarser) scale
            YIrcos = np.abs(np.sqrt(1.0 - Yrcos**2))
            log_rad_tmp = np.reshape(log_rad, (1, log_rad.shape[0] * log_rad.shape[1]))
            lomask = pointOp(log_rad_tmp, YIrcos, Xrcos[0], Xrcos[1]-Xrcos[0])
            lomask = lomask.reshape(lodft.shape[0], lodft.shape[1])
            self._lomasks.append(lomask)
            lodft = lodft * lomask
        # whatever is left in the lowpass branch is the lowpass residual
        lodft = np.fft.ifft2(np.fft.ifftshift(lodft))
        self.pyr_coeffs['residual_lowpass'] = np.real(np.array(lodft).copy())
        self.pyr_size['residual_lowpass'] = lodft.shape
def recon_pyr(self, levels='all', bands='all', twidth=1):
"""Reconstruct the image, optionally using subset of pyramid coefficients.
Parameters
----------
levels : `list`, `int`, or {`'all'`, `'residual_highpass'`}
If `list` should contain some subset of integers from `0` to `self.num_scales-1`
(inclusive) and `'residual_lowpass'`. If `'all'`, returned value will contain all
valid levels. Otherwise, must be one of the valid levels.
bands : `list`, `int`, or `'all'`.
If list, should contain some subset of integers from `0` to `self.num_orientations-1`.
If `'all'`, returned value will contain all valid orientations. Otherwise, must be one
of the valid orientations.
twidth : `int`
The width of the transition region of the radial lowpass function, in octaves
Returns
-------
recon : `np.array`
The reconstructed image.
"""
if twidth <= 0:
warnings.warn("twidth must be positive. Setting to 1.")
twidth = 1
recon_keys = self._recon_keys(levels, bands)
# make list of dims and bounds
bound_list = []
dim_list = []
# we go through pyr_sizes from smallest to largest
for dims in sorted(self.pyr_size.values()):
if dims in dim_list:
continue
dim_list.append(dims)
dims = np.array(dims)
ctr = np.ceil((dims+0.5)/2).astype(int)
lodims = np.ceil((dims-0.5)/2).astype(int)
loctr = np.ceil((lodims+0.5)/2).astype(int)
lostart = ctr - loctr
loend = lostart + lodims
bounds = (lostart[0], lostart[1], loend[0], loend[1])
bound_list.append(bounds)
bound_list.append((0, 0, dim_list[-1][0], dim_list[-1][1]))
dim_list.append((dim_list[-1][0], dim_list[-1][1]))
# matlab code starts here
dims = np.array(self.pyr_size['residual_highpass'])
ctr = np.ceil((dims+0.5)/2.0).astype(int)
(xramp, yramp) = np.meshgrid((np.arange(1, dims[1]+1)-ctr[1]) / (dims[1]/2.),
(np.arange(1, dims[0]+1)-ctr[0]) / (dims[0]/2.))
angle = np.arctan2(yramp, xramp)
log_rad = np.sqrt(xramp**2 + yramp**2)
log_rad[ctr[0]-1, ctr[1]-1] = log_rad[ctr[0]-1, ctr[1]-2]
log_rad = np.log2(log_rad)
# Radial transition function (a raised cosine in log-frequency):
(Xrcos, Yrcos) = rcosFn(twidth, (-twidth/2.0), np.array([0, 1]))
Yrcos = np.sqrt(Yrcos)
YIrcos = | np.sqrt(1.0 - Yrcos**2) | numpy.sqrt |
import pytest
import numpy as np
from .segment import (
partition,
partition_segment,
partition_segment_old,
closest_point_of_line_segment,
)
def test_partition_segment_old_raises_exception_for_invalid_partition_size_type():
    # A non-integer partition size must be rejected with a TypeError.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    with pytest.raises(TypeError):
        partition_segment_old(start, end, "foobar")
def test_partition_segment_old_raises_exception_for_invalid_partition_size_value():
    # A partition size of 1 is too small to produce interior points.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    with pytest.raises(ValueError):
        partition_segment_old(start, end, 1)
def test_partition_segment_old_returns_partition_for_odd_partition_size():
    # Size 4 yields the 3 interior points of [0, 2] on the x axis.
    start = np.zeros(3)
    end = np.array([2.0, 0.0, 0.0])
    expected = np.outer([0.5, 1.0, 1.5], [1.0, 0.0, 0.0])
    np.testing.assert_array_almost_equal(
        partition_segment_old(start, end, 4), expected, decimal=7
    )
def test_partition_segment_old_returns_partition_points_for_even_partition_size():
    # Size 5 yields the 4 interior points of the unit x-axis segment.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    expected = np.outer([0.2, 0.4, 0.6, 0.8], [1.0, 0.0, 0.0])
    np.testing.assert_array_almost_equal(
        partition_segment_old(start, end, 5), expected, decimal=7
    )
def test_partition_segment_old_returns_partition_points_in_oriented_order():
    # Swapping the endpoints must reverse the order of the interior points.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    expected = np.outer([0.8, 0.6, 0.4, 0.2], [1.0, 0.0, 0.0])
    np.testing.assert_array_almost_equal(
        partition_segment_old(end, start, 5), expected, decimal=7
    )
def test_partition_segment_old_returns_partition_points_for_diagonal_segment():
    # Interior points at 1/3 and 2/3 of the way along a diagonal segment.
    origin = np.zeros(3)
    corner = np.array([1.0, 1.0, 0.0])
    span = np.linalg.norm(corner - origin)
    direction = (corner - origin) / span
    expected = np.array(
        [origin + (1 / 3.0) * span * direction, origin + (2 / 3.0) * span * direction]
    )
    np.testing.assert_array_almost_equal(
        partition_segment_old(origin, corner, 3), expected, decimal=7
    )
def test_partition_segment_raises_exception_for_invalid_partition_size_type():
    # A non-integer partition size must be rejected with a TypeError.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    with pytest.raises(TypeError):
        partition_segment(start, end, "foobar")
def test_partition_segment_raises_exception_for_invalid_partition_size_value():
    # A partition size of 1 cannot describe a segment with two endpoints.
    start = np.zeros(3)
    end = np.array([1.0, 0.0, 0.0])
    with pytest.raises(ValueError):
        partition_segment(start, end, 1)
def test_partition_segment_returns_partition_for_odd_partition_size():
    # Unlike the old API, partition_segment includes both endpoints.
    start = np.zeros(3)
    end = np.array([2.0, 0.0, 0.0])
    expected = np.outer([0.0, 0.5, 1.0, 1.5, 2.0], [1.0, 0.0, 0.0])
    np.testing.assert_array_almost_equal(
        partition_segment(start, end, 5), expected, decimal=7
    )
def test_partition_segment_returns_partition_points_for_even_partition_size():
p1 = np.array([0.0, 0.0, 0.0])
p2 = | np.array([1.0, 0.0, 0.0]) | numpy.array |
'''
_*_coding:utf-8 _*_
@Time :2022/1/30 10:28
@Author : qiaofengsheng
@File :pytorch_onnx_infer.py
@Software :PyCharm
'''
import os
import sys
import numpy as np
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
import cv2
import onnxruntime
import argparse
from PIL import Image, ImageDraw, ImageFont
from torchvision import transforms
import torch
from model.utils import utils
# Command-line interface for ONNX-runtime inference: select an input source
# (image / video / camera) and point at the model, its config, and the device.
parse = argparse.ArgumentParser(description='onnx model infer!')
parse.add_argument('demo', type=str, help='推理类型支持:image/video/camera')  # inference mode: image/video/camera
parse.add_argument('--config_path', type=str, help='配置文件存放地址')  # path to the config file
parse.add_argument('--onnx_path', type=str, default=None, help='onnx包存放路径')  # path to the .onnx model file
parse.add_argument('--image_path', type=str, default='', help='图片存放路径')  # path to the input image
parse.add_argument('--video_path', type=str, default='', help='视频路径')  # path to the input video
parse.add_argument('--camera_id', type=int, default=0, help='摄像头id')  # camera device id
parse.add_argument('--device', type=str, default='cpu', help='默认设备cpu (暂未完善GPU代码)')  # default device is cpu (GPU path not implemented yet)
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array, detaching from autograd if needed."""
    if tensor.requires_grad:
        tensor = tensor.detach()
    return tensor.cpu().numpy()
def onnx_infer_image(args, config):
ort_session = onnxruntime.InferenceSession(args.onnx_path)
transform = transforms.Compose([transforms.ToTensor()])
image = Image.open(args.image_path)
image_data = utils.keep_shape_resize(image, config['image_size'])
image_data = transform(image_data)
image_data = torch.unsqueeze(image_data, dim=0)
if args.device == 'cpu':
ort_input = {ort_session.get_inputs()[0].name: to_numpy(image_data)}
ort_out = ort_session.run(None, ort_input)
out = | np.argmax(ort_out[0], axis=1) | numpy.argmax |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 29 7:07pm 2020
Meant to interface with Lv2_dj_lsp and functions from stingray.pulse.pulsar to
analyze Swift data pertaining to NGC 300 X-1 in one place, instead of having
the analysis spread between Lv2_dj_lsp.py and test.py
"""
from __future__ import division, print_function
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import Lv0_dirs,Lv2_dj_lsp,Lv2_swift_lc,Lv2_phase
import os
from scipy import stats
from scipy.optimize import curve_fit
from tqdm import tqdm
import subprocess
from matplotlib import cm
from PyAstronomy.pyasl import foldAt
from mpl_toolkits.mplot3d import Axes3D
import mplcursors
import pathlib
from stingray.pulse.pulsar import pulse_phase,phase_exposure,fold_events
#####
## Noting here first that all the barycentering, time-ordering, extracting events
## (with XSELECT), doing exposure corrections (xrtlccorr), and subsequently the
## background subtraction, are all done in Lv2_swift_lc. There's no need to do so here.
#####
##### Parameters
eventfile = '/Volumes/Samsung_T5/NGC300_ULX_Swift/xrt/event/ngc300x1/ngc300x1_merge_niceroverlap_all.evt' #1 year of data; overlaps with NICER
#eventfile = '/Volumes/Samsung_T5/NGC300_ULX_Swift/xrt/event/ngc300x1/ngc300x1_swift_dec16_may19.evt'
#eventfile = '/Volumes/Samsung_T5/NGC300_ULX_Swift/xrt/event/ngc300x1/ngc300x1_merge.evt' #all 14 years
eventfile_xmm = '/Volumes/Samsung_T5/NGC300_XMMdata/ngc300x1_pn.evt'
times = fits.open(eventfile)[1].data['TIME'] #getting array of times
times_xmm = fits.open(eventfile_xmm)[1].data['TIME']
gtis_data = fits.open(eventfile)[2].data #getting GTIs
gtis_data_xmm = fits.open(eventfile_xmm)[59].data #59 for pn, 15 for mos1, 19 for mos2
T = sum([ gtis_data[i]['STOP']-gtis_data[i]['START'] for i in range(len(gtis_data)) ]) #exposure time
T_xmm = sum([ gtis_data_xmm[i]['STOP']-gtis_data_xmm[i]['START'] for i in range(len(gtis_data_xmm)) ]) #exposure time
print(T_xmm)
T0_MJD = fits.open(eventfile)[1].header['MJDREFI'] + fits.open(eventfile)[1].header['MJDREFF'] + fits.open(eventfile)[1].header['TSTART']/86400 #SWIFT
T0_MJD_eclipse = 58239.3498 #mid-eclipse!
T0_MJD_xmm = fits.open(eventfile_xmm)[1].header['MJDREF'] + fits.open(eventfile_xmm)[1].header['TSTART']/86400 #XMM-NEWTON
MJDREFI = fits.open(eventfile)[1].header['MJDREFI'] #Swift
MJDREFF = fits.open(eventfile)[1].header['MJDREFF'] #Swift
MJDREF = fits.open(eventfile_xmm)[1].header['MJDREF'] #XMM-Newton
diff_swiftxmm = (MJDREFI+MJDREFF-MJDREF)*86400
##### Get the phase offset between Swift eclipse time and XMM's first event time:
Porb_days = (1/8.4712e-6)/86400
xmm_first = MJDREF + times_xmm[0]/86400
no_cycles = (T0_MJD_eclipse - T0_MJD_xmm)/Porb_days
xmm_ecl = T0_MJD_eclipse - int(no_cycles)*Porb_days #time of the mid-eclipse BEFORE the first XMM event
if xmm_ecl > xmm_first:
xmm_ecl -= Porb_days
phaseoff = (xmm_first-xmm_ecl)/Porb_days
print('Phase offset is ' + str(phaseoff))
##### Be careful here, as Swift and XMM have different MJDREFs!!!
gtis_conform = []
for i in range(len(gtis_data)):
gtis_conform.append([gtis_data[i][0],gtis_data[i][1]]) #conform to the input that Stingray uses
gtis_conform_xmm = []
for i in range(len(gtis_data_xmm)):
gtis_conform_xmm.append([gtis_data_xmm[i][0],gtis_data_xmm[i][1]]) #conform to the input that Stingray uses
#bary_outputfolder = '/Volumes/Samsung_T5/NGC300_ULX_Swift/xrt/event/lightcurve/'
#obsids = [str(i) for i in range(49834027,49834042)] + [str(i) for i in range(49834043,49834062)] + [str(i) for i in range(49834063,49834066)] + ['88810002'] + [str(i) for i in range(49834066,49834069)] + [str(i) for i in range(49834070,49834079)] + [str(i) for i in range(49834080,49834088)]
#corr_lc_files = [bary_outputfolder + 'sw000' + obsids[i] + '_corr.lc' for i in range(len(obsids))]
#corr_ulx1_files = [bary_outputfolder + 'sw000' + obsids[i] + '_ulx1_corr.lc' for i in range(len(obsids))]
#corr_bg_files = [bary_outputfolder + 'sw000' + obsids[i] + '_bg_corr.lc' for i in range(len(obsids))]
#bg_scale_x1 = (30/120)**2
#bg_scale_ulx1 = (35/120)**2
#completeness = np.array([0,10,20,30,40,50,60,70,80,90,100])/100
#rebinned_t, rebinned_rate, rebinned_err, rebinned_fracexp = Lv2_dj_lsp.rebin_lc(corr_lc_files,corr_bg_files,bg_scale_x1,100,0.5)
#rebinned_t_ulx1, rebinned_rate_ulx1, rebinned_err_ulx1, rebinned_fracexp_ulx1 = rebin_lc(corr_ulx1_files,corr_bg_files,bg_scale_ulx1,3600,0)
#tstart_49834027 = 546830295.758713
"""
### Writing the data from the light curves of X-1 and ULX-1 into text files; also plotting the light curve, This is mainly for 3600s bins
x1_text = open(bary_outputfolder + 'ngc300x1_bg_exp_corr_lc_3600s.txt','w')
ulx1_text = open(bary_outputfolder + 'ngc300ulx1_bg_exp_corr_lc_3600s.txt','w')
for i in range(len(rebinned_t)):
x1_text.write(str(51910 + 7.428703700000000E-04+(rebinned_t[i]+tstart_49834027)/86400) + ' ' + str(rebinned_rate[i]) + ' ' + str(rebinned_err[i]) + '\n')
x1_text.close()
for i in range(len(rebinned_t_ulx1)):
ulx1_text.write(str(51910 + 7.428703700000000E-04 + (rebinned_t_ulx1[i]+tstart_49834027)/86400) + ' ' + str(rebinned_rate_ulx1[i]) + ' ' + str(rebinned_err_ulx1[i]) + '\n')
ulx1_text.close()
mjd = 51910 + 7.428703700000000E-04 + (tstart_49834027+rebinned_t)/86400
mjd_ulx1 = 51910 + 7.428703700000000E-04 + (tstart_49834027+rebinned_t_ulx1)/86400
plt.errorbar(x=mjd[rebinned_err<=0.06],y=rebinned_rate[rebinned_err<=0.06],yerr=rebinned_err[rebinned_err<=0.06],fmt='rx')
plt.errorbar(x=mjd_ulx1[rebinned_err_ulx1<=0.06],y=rebinned_rate_ulx1[rebinned_err_ulx1<=0.06],yerr=rebinned_err_ulx1[rebinned_err_ulx1<=0.06],fmt='bx')
plt.legend(('X-1','ULX-1'),fontsize=12)
plt.xlabel('Time (MJD)',fontsize=12)
plt.ylabel('[Exposure-corrected] Count rate (c/s)',fontsize=12)
plt.axhline(y=0,color='k',lw=0.5,alpha=0.5)
plt.show()
"""
### Running Lv2_dj_lsp.lsp
"""
for i in range(len(completeness)):
rebinned_t, rebinned_rate, rebinned_err, rebinned_fracexp = Lv2_dj_lsp.rebin_lc(corr_lc_files,corr_bg_files,bg_scale_x1,100,completeness[i])
omega,psd,prob3,prob4,prob5 = Lv2_dj_lsp.lsp(rebinned_t,rebinned_rate)
nu_reg = omega/(2.0*np.pi)
freq = omega/(2*np.pi)
plt.figure()
plt.plot(freq,psd,'rx-')
#plt.yscale('log')
#plt.xscale('log')
plt.xlabel('Frequency (Hz)',fontsize=12)
plt.ylabel('Normalized Power',fontsize=12)
plt.axhline(y=prob3,lw=0.5,alpha=0.5)
plt.axhline(y=prob4,lw=0.5,alpha=0.5)
plt.axhline(y=prob5,lw=0.5,alpha=0.5)
#print(prob3,prob4,prob5)
print(np.max(psd),freq[psd==np.max(psd)][0])
#plt.show()
"""
### Doing FR/RSS
#for i in range(len(completeness)):
# rebinned_t, rebinned_rate, rebinned_err, rebinned_fracexp = Lv2_dj_lsp.rebin_lc(corr_lc_files,corr_bg_files,bg_scale_x1,100,completeness[i])
# freqs_list, psd_list = Lv2_dj_lsp.psd_error(rebinned_t,rebinned_rate,rebinned_err)
# print(str(completeness[i]) + '%')
# print('Median frequency: ' + str(np.median(freqs_list)))
# print('Error in frequency: ' + str(np.std(freqs_list)))
#print('Powers: ' + str(psd_list))
################################################################################
################################### FOLDING ####################################
################################################################################
"""
##### Folding using my routine; confirmed that the folding of the raw data agrees with Stingray's and foldAt
nbins = 20
freq = 8.4712e-6
offset = -0.215*nbins
#freq = 8.6088e-6
freqdot = 0
freqdotdot = 0
phase_frac = (T0_MJD_eclipse-T0_MJD)/((1/freq)/86400)
#print('MID ECLIPSE TIME:')
#print( fits.open(eventfile)[1].header['MJDREFI'] + fits.open(eventfile)[1].header['MJDREFF'] + (times[0] + 0.21569724*1/freq)/86400)
#T0_MJD = fits.open(eventfile)[1].header['MJDREF'] + times[0]/86400
##### Using Lv2_phase
plt.figure()
phase,profile,profile_error = Lv2_phase.pulse_folding(times,T,T0_MJD,freq,freqdot,freqdotdot,nbins,"SWIFT")
plt.errorbar(x=phase[:-1],y=profile,yerr=profile_error,color='r',drawstyle='steps-mid')
expos = Lv2_phase.phase_exposure(times[0]-times[0],times[-1]-times[0],1/freq,nbin=nbins,gtis=np.array(gtis_conform)-times[0])
total_expos = np.array(list(expos) + list(expos))
plt.errorbar(x=phase[:-1],y=profile/total_expos,yerr=profile_error/total_expos,color='b',drawstyle='steps-mid')
plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using Lv2_phase)',fontsize=12)
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
plt.legend(('Folded profile','Exposure-corrected profile'),loc='best',fontsize=12)
print('Original expos:')
print(expos)
##### Using stingray.pulse.pulsar's fold_events
phase_sr,prof_sr,err_sr = fold_events(times,freq,freqdot,freqdotdot,gtis=np.array(gtis_conform),ref_time=times[0],nbin=nbins)
phase_sr_expo,prof_sr_expo,err_sr_expo = fold_events(times,freq,freqdot,freqdotdot,gtis=np.array(gtis_conform),ref_time=times[0],expocorr=True,nbin=nbins)
total_phase_sr = list(phase_sr) + list(phase_sr+1)
total_prof_sr = list(prof_sr)*2
total_err_sr = list(err_sr)*2
total_phase_sr_expo = list(phase_sr_expo) + list(phase_sr_expo+1)
total_prof_sr_expo = list(prof_sr_expo)*2
total_err_sr_expo = list(err_sr_expo)*2
if nbins % 2 == 0:
fft_x = np.array(list(np.arange(int(nbins/2)+1)) + list(np.arange(int(nbins/2)-1) - (int(nbins/2)-1)))
else:
fft_x = np.array(list(np.arange(int(nbins/2)+1)) + list(np.arange(int(nbins/2)) - int(nbins/2)))
shift = np.exp(-2j*np.pi*fft_x*offset/nbins)
shifted_prof_sr = np.real(np.fft.ifft(np.fft.fft(prof_sr_expo)*shift)) #taking the real component of the inverse transform of the shifted Fourier transform of the original folded profile
shifted_err_sr = np.real(np.fft.ifft(np.fft.fft(err_sr_expo)*shift)) #taking the real component of the inverse transform of the shifted Fourier transform of the original folded profile
a = np.array(list(shifted_prof_sr)*2)/T
b = np.array(list(shifted_err_sr)*2)/T
swift_lc = open(Lv0_dirs.NGC300_2020 + 'swift_shifted_folded_curve.txt','w')
for i in range(len(total_expos)):
swift_lc.write(str(total_phase_sr[i]) + ' ' + str(a[i]) + ' ' + str(b[i]) + '\n')
swift_lc.close()
plt.figure()
plt.errorbar(x=total_phase_sr,y=total_prof_sr/T,yerr=total_err_sr/T,color='r',drawstyle='steps-mid')
plt.errorbar(x=total_phase_sr_expo,y=total_prof_sr_expo/T,yerr=total_err_sr_expo/T,color='b',drawstyle='steps-mid')
plt.legend(('Folded profile','Exposure-corrected'),loc='best',fontsize=12)
plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using Stingray fold_events)',fontsize=12)
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
plt.show()
"""
"""
##### Using foldAt by PyAstronomy
plt.figure()
phase_bins = np.linspace(0,1,21)
phases = foldAt(times,1/freq,T0=times[0]-(1-phase_frac)*1/freq)
expos = Lv2_phase.phase_exposure(times[0]-times[0],times[-1]-times[0],1/freq,nbin=nbins,gtis=np.array(gtis_conform)-times[0])
total_expos = np.array(list(expos) + list(expos))
expos_index = int(phase_frac/(phase_bins[1]-phase_bins[0])) #starting point for exposures
altered_expos = np.array(list(total_expos[expos_index:]) + list(total_expos[:expos_index]))
#print('Altered expos:')
#print(altered_expos)
profile,bin_edges,binnumber = stats.binned_statistic(phases,np.ones(len(phases)),statistic='sum',bins=phase_bins)
error = np.sqrt(profile)
phase_to_2 = np.array(list(phase_bins[:-1]) + list(phase_bins+1))
profile_to_2 = np.array(list(profile)*2)
error_to_2 = np.array(list(error)*2)
plt.errorbar(phase_to_2[:-1],profile_to_2/(T*altered_expos),yerr=error_to_2/(T*altered_expos),color='b',drawstyle='steps-mid')
plt.legend(('Folded profile','Exposure-corrected'),loc='best',fontsize=12)
plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using PyAstronomy foldAt)',fontsize=12)
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
##### Shifting pulse profiles through a shifted FT (see Deepto's 7/20/2020 email)
if nbins % 2 == 0:
fft_x = np.array(list(np.arange(int(nbins/2)+1)) + list(np.arange(int(nbins/2)-1) - (int(nbins/2)-1)))
else:
fft_x = np.array(list(np.arange(int(nbins/2)+1)) + list(np.arange(int(nbins/2)) - int(nbins/2)))
shift = np.exp(-2j*np.pi*fft_x*offset/nbins)
shifted_prof_sr = np.real(np.fft.ifft(np.fft.fft(prof_sr_expo)*shift)) #taking the real component of the inverse transform of the shifted Fourier transform of the original folded profile
shifted_err_sr = np.real(np.fft.ifft(np.fft.fft(err_sr_expo)*shift)) #taking the real component of the inverse transform of the shifted Fourier transform of the original folded profile
plt.figure()
plt.errorbar(x=total_phase_sr_expo,y=total_prof_sr_expo/T,yerr=total_err_sr_expo/T,color='b',drawstyle='steps-mid')
plt.errorbar(total_phase_sr,np.array(list(shifted_prof_sr)*2)/T,yerr=np.array(list(shifted_err_sr)*2)/T,color='r',drawstyle='steps-mid')
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
plt.title('Exposure-corrected, folded profiles for NGC 300 X-1 from Swift over May 2018 to May 2019')
plt.legend(('Folded with T0 = time of first event','Folded with T0 = inferred eclipse time/phase'),fontsize=12)
"""
"""
nbins_t = len(times)
offset = (1-0.215)*1/freq
##### Shifting pulse profiles through a shifted FT (see Deepto's 7/20/2020 email)
if nbins_t % 2 == 0:
fft_x = np.array(list(np.arange(int(nbins_t/2)+1)) + list(np.arange(int(nbins_t/2)-1) - (int(nbins_t/2)-1)))
else:
fft_x = np.array(list(np.arange(int(nbins_t/2)+1)) + list(np.arange(int(nbins_t/2)) - int(nbins_t/2)))
shift = np.exp(-2j*np.pi*fft_x*offset/nbins_t)
shifted_t = np.real(np.fft.ifft(np.fft.fft(times)*shift)) #taking the real component of the inverse transform of the shifted Fourier transform of the original folded profile
for i in range(20):
print(times[i],shifted_t[i])
phase_sr,prof_sr,err_sr = fold_events(shifted_t,freq,freqdot,freqdotdot,gtis=np.array(gtis_conform),ref_time=times[0],nbin=nbins)
phase_sr_expo,prof_sr_expo,err_sr_expo = fold_events(shifted_t,freq,freqdot,freqdotdot,gtis=np.array(gtis_conform),ref_time=times[0],expocorr=True,nbin=nbins)
plt.figure()
plt.errorbar(phase_sr,prof_sr/T,color='b',drawstyle='steps-mid')
plt.errorbar(phase_sr,prof_sr_expo/T,color='r',drawstyle='steps-mid')
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
"""
#plt.show()
"""
##### Fitting 6-model step-and-ramp parameters to the folded profile
plt.figure()
plt.errorbar(x=phase[:-1],y=profile,yerr=profile_error,color='r',drawstyle='steps-mid')
plt.errorbar(x=phase[:-1],y=profile/total_expos,yerr=profile_error/total_expos,color='b',drawstyle='steps-mid')
plt.title(str(pathlib.Path(eventfile).name) +', exposure-corrected (using Lv2_phase)',fontsize=12)
plt.xlabel('Phase',fontsize=12)
plt.ylabel('Counts/s',fontsize=12)
plt.legend(('Folded profile','Exposure-corrected profile'),loc='best',fontsize=12)
start_phase = 0.45
end_phase = 1.95
phase_model = np.linspace(start_phase,end_phase,1001)
x = phase[:-1][(phase[:-1]>=start_phase)&(phase[:-1]<=end_phase)]
y = profile[(phase[:-1]>=start_phase)&(phase[:-1]<=end_phase)]/total_expos[(phase[:-1]>=start_phase)&(phase[:-1]<=end_phase)]
y_err = profile_error[(phase[:-1]>=start_phase)&(phase[:-1]<=end_phase)]/total_expos[(phase[:-1]>=start_phase)&(phase[:-1]<=end_phase)]
def piecewise_linear(x,b1,b2,b3,b4,top,bottom):
return np.piecewise(x, [(x>=start_phase)&(x<=b1), (x>b1)&(x<=b2), (x>b2)&(x<=b3), (x>b3)&(x<=b4), (x>b4)&(x<=end_phase)], [lambda x:top, lambda x:((bottom-top)/(b2-b1)*x+bottom-(bottom-top)/(b2-b1)*b2), lambda x:bottom, lambda x:((top-bottom)/(b4-b3)*x+top-(top-bottom)/(b4-b3)*b4), lambda x:top])
pguess = np.array([1.05,1.15,1.30,1.45,0.0011,0.0003])
popt,pcov = curve_fit(piecewise_linear,x,y,p0=pguess)#,sigma=y_err)
print(popt)
print(np.diag(np.sqrt(pcov))/popt*100)
plt.plot(phase_model,piecewise_linear(phase_model,*popt),'k-')
"""
#plt.show()
########################### DOING CHI^2 EXPLORATION ############################
def lorentzian(f, f0, a, gam, const):
    """Lorentzian of amplitude `a`, centre `f0`, FWHM `gam`, on baseline `const`."""
    u = (f - f0) / (gam / 2)
    return a * 1 / (1 + u ** 2) + const
def gaussian(f, f0, a, sig, const):
    """Gaussian of amplitude `a`, centre `f0`, width `sig`, on baseline `const`."""
    d = f - f0
    return a * np.exp(-d ** 2 / (2 * sig ** 2)) + const
def sum(f, f0, a, gam, b, sig, const):
    """Lorentzian + Gaussian sharing centre `f0`, on a single baseline `const`.

    NOTE(review): this shadows the builtin ``sum`` within this module.
    """
    u = (f - f0) / (gam / 2)
    lor_part = a * 1 / (1 + u ** 2)
    gauss_part = b * np.exp(-(f - f0) ** 2 / (2 * sig ** 2))
    return lor_part + gauss_part + const
"""
nbins=20
chi2 = []
freqs = np.arange(8.25e-6,8.7e-6,0.01e-6)
#freqs = np.arange(-9e-17,-1e-18,1e-20)
for i in tqdm(range(len(freqs))):
phase_sr_expo,prof_sr_expo,err_sr_expo = fold_events(times,freqs[i],gtis=np.array(gtis_conform),ref_time=times[0],expocorr=True,nbins=nbins)
chi2_freq = Lv2_phase.get_chi2(prof_sr_expo,err_sr_expo)
chi2.append( chi2_freq )
"""
"""
freqs_filter = freqs[(freqs>=8.4693e-6)&(freqs<=8.472e-6)] #8.47 to 8.47275 for 1-year data
chi2_filter = np.array(chi2)[(freqs>=8.4693e-6)&(freqs<=8.472e-6)]
freq_model = np.linspace(8.4693e-6,8.472e-6,1001)
pguess_l = np.array([8.4706e-6,650,0.002e-6])
popt_l,pcov_l = curve_fit(lorentzian,freqs_filter,chi2_filter,p0=pguess_l)
print(popt_l)
print(np.sqrt(np.diag(pcov_l)))
pguess_g = np.array([8.4706e-6,650,0.002e-6])
popt_g,pcov_g = curve_fit(gaussian,freqs_filter,chi2_filter,p0=pguess_g)
print(popt_g)
print(np.sqrt(np.diag(pcov_g)))
pguess_s = np.array([8.4706e-6,650,0.002e-6,600,0.002e-6])
popt_s,pcov_s = curve_fit(sum,freqs_filter,chi2_filter,p0=pguess_s)
print(popt_s)
print(np.sqrt(np.diag(pcov_s)))
"""
"""
fig,ax = plt.subplots()
def pdot_to_fdot(pdot):
return -pdot/(1/8.4712e-6)**2
def fdot_to_pdot(fdot):
return (-fdot/(8.4712e-6)**2)/1e-7
chi2 = np.array(chi2)
#secax = ax.secondary_xaxis('top',functions=(fdot_to_pdot,pdot_to_fdot))
#secax.set_xlabel('Period Derivative (1E-7 s/s)',fontsize=12)
print(np.max(chi2),freqs[chi2==np.max(chi2)])
ax.plot(freqs,chi2,'rx-')
#ax.axvline(x=-5.60e-17,lw=0.5,alpha=0.5,color='k')
#ax.axvline(x=-2.80e-17,lw=0.5,alpha=0.5,color='k')
ax.axhline(y=869.357,lw=0.5,alpha=0.5,color='b')
#plt.plot(freq_model,lorentzian(freq_model,popt_l[0],popt_l[1],popt_l[2]),'b-')
#plt.plot(freq_model,gaussian(freq_model,popt_g[0],popt_g[1],popt_g[2]),'k-')
#plt.plot(freq_model,sum(freq_model,popt_s[0],popt_s[1],popt_s[2],popt_s[3],popt_s[4]),'m-')
ax.set_xlabel('Frequency Derivative (Hz/s)',fontsize=12)
ax.set_ylabel('chi^2 [ sum( (profile-mean)^2/error^2) ]',fontsize=12)
#plt.legend(('manual chi^2','Lorentzian fit','Gaussian fit','L+G'),fontsize=12)
plt.show()
"""
def sinecurve(x, a, T, phi, c):
    """Sinusoid of amplitude `a`, period `T`, phase `phi` and vertical offset `c`."""
    omega = 2 * np.pi / T
    return c + a * np.sin(omega * x + phi)
import numpy
import xraylib
import scipy.constants as codata
# needed by bragg_calc
from xoppylib.crystals.bragg_preprocessor_file_io import bragg_preprocessor_file_v2_write
from dabax.common_tools import f0_xop, f0_xop_with_fractional_charge
from dabax.common_tools import bragg_metrictensor, lorentz, atomic_symbols
import sys
import os
import platform
from xoppylib.xoppy_util import locations
from dabax.dabax_xraylib import DabaxXraylib
#
#
#
def bragg_metrictensor(a,b,c,a1,a2,a3,RETURN_REAL_SPACE=0,RETURN_VOLUME=0,HKL=None):
    """
    Returns the metric tensor in the reciprocal space.

    NOTE: this local definition shadows the `bragg_metrictensor` imported from
    dabax.common_tools at the top of the file — presumably intentional; confirm.

    :param a: unit cell a
    :param b: unit cell b
    :param c: unit cell c
    :param a1: unit cell alpha (degrees)
    :param a2: unit cell beta (degrees)
    :param a3: unit cell gamma (degrees)
    :param RETURN_REAL_SPACE: set to 1 for returning metric tensor in real space
    :param RETURN_VOLUME: set to 1 to return the unit cell volume in Angstroms^3
    :param HKL: if not None, returns the d-spacing for the corresponding [H,K,L] reflection
    :return: the returned value depends on the keywords used. If RETURN_REAL_SPACE=0,
        RETURN_VOLUME=0, and HKL=None then returns the metric tensor in reciprocal space.
    """
    # input cell a,b,c,alpha,beta,gamma; angles arrive in degrees, convert to rad
    a1 *= numpy.pi / 180.0
    a2 *= numpy.pi / 180.0
    a3 *= numpy.pi / 180.0

    # metric tensor in real space
    g = numpy.array( [ [a*a, a*b*numpy.cos(a3), a*c*numpy.cos(a2)], \
                       [a*b*numpy.cos(a3), b*b, b*c*numpy.cos(a1)], \
                       [a*c*numpy.cos(a2), b*c*numpy.cos(a1), c*c]] )
    if RETURN_REAL_SPACE: return g

    # volume of the lattice: V = sqrt(det(g))
    volume2 = numpy.linalg.det(g)
    volume = numpy.sqrt(volume2)
    if RETURN_VOLUME: return volume

    # tensor in reciprocal space is the inverse of the real-space one
    ginv = numpy.linalg.inv(g)

    # zero out numerical noise in the inverse
    itmp = numpy.where(numpy.abs(ginv) < 1e-8)
    ginv[itmp] = 0.0

    # fixed: was "if HKL != None" — element-wise (thus ambiguous) for array inputs
    if HKL is not None:
        # computes d-spacing: 1/d^2 = h^T . G* . h
        dd = numpy.dot( numpy.array(HKL) , numpy.dot( ginv , numpy.array(HKL)))
        dd1 = 1.0 / numpy.sqrt(dd)
        return dd1
    else:
        return ginv
def lorentz(theta_bragg_deg,return_what=0):
    """
    This function returns the Lorentz factor, polarization factor (unpolarized beam),
    geometric factor, or a combination of them.

    :param theta_bragg_deg: Bragg angle in degrees
    :param return_what: A flag indicating the returned variable:
        0: (default) PolFac*lorentzFac
        1: PolFac
        2: lorentzFac
        3: geomFac
        4: PolFac*lorentzFac*geomFac
    :return: a scalar value (implicitly None for any other flag value)
    """
    tr = theta_bragg_deg * numpy.pi / 180.
    polarization_factor = 0.5 * (1.0 + (numpy.cos(2.0 * tr))**2)
    lorentz_factor = 1.0 / numpy.sin(2.0 * tr)
    geometrical_factor = 1.0 * numpy.cos(tr) / numpy.sin(2.0 * tr)
    if return_what == 0:
        return polarization_factor*lorentz_factor
    elif return_what == 1:
        return polarization_factor
    elif return_what == 2:
        return lorentz_factor
    elif return_what == 3:
        return geometrical_factor
    elif return_what == 4:
        return polarization_factor*lorentz_factor*geometrical_factor
# OBSOLETE.... USE bragg_calc2() INSTEAD!
def bragg_calc(descriptor="Si",hh=1,kk=1,ll=1,temper=1.0,emin=5000.0,emax=15000.0,estep=100.0,fileout=None,
               material_constants_library=xraylib):
    """
    Preprocessor for Structure Factor (FH) calculations. It calculates the basic ingredients of FH.

    :param descriptor: crystal name (as in xraylib)
    :param hh: miller index H
    :param kk: miller index K
    :param ll: miller index L
    :param temper: temperature factor (scalar <=1.0 )
    :param emin: photon energy minimum (eV)
    :param emax: photon energy maximum (eV)
    :param estep: photon energy step (eV)
    :param fileout: name for the output file (default=None, no output file)
    :param material_constants_library: object exposing the xraylib API (default: xraylib)
    :raises Exception: if the crystal descriptor is unknown to the library
    :return: a dictionary with all ingredients of the structure factor.
    """
    output_dictionary = {}

    # e^2/(m c^2) in m (classical electron radius)
    codata_e2_mc2 = codata.e**2 / codata.m_e / codata.c**2 / (4*numpy.pi*codata.epsilon_0)

    version = "2.5"
    output_dictionary["version"] = version

    # todo: txt not longer used here... can be removed
    txt = ""
    txt += "# Bragg version, Data file type\n"
    txt += "%s 1\n" % version

    cryst = material_constants_library.Crystal_GetCrystal(descriptor)

    if cryst is None:
        raise Exception("Crystal not found in xraylib: %s" % descriptor )

    volume = cryst['volume']

    # crystal data - debug printout only, not needed
    icheck = 0
    if icheck:
        print (" Unit cell dimensions are %f %f %f" % (cryst['a'],cryst['b'],cryst['c']))
        print (" Unit cell angles are %f %f %f" % (cryst['alpha'],cryst['beta'],cryst['gamma']))
        print (" Unit cell volume is %f A^3" % volume )
        print (" Atoms at:")
        print (" Z fraction X Y Z")
        for i in range(cryst['n_atom']):
            atom = cryst['atom'][i]
            print (" %3i %f %f %f %f" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']) )
        print (" ")

    volume = volume*1e-8*1e-8*1e-8  # in cm^3
    dspacing = material_constants_library.Crystal_dSpacing(cryst, hh, kk, ll)
    rn = (1e0/volume)*(codata_e2_mc2*1e2)
    dspacing *= 1e-8  # in cm

    txt += "# RN = (e^2/(m c^2))/V) [cm^-2], d spacing [cm]\n"
    txt += "%e %e \n" % (rn , dspacing)

    output_dictionary["rn"] = rn
    output_dictionary["dspacing"] = dspacing

    atom = cryst['atom']
    list_Zatom = [ atom[i]['Zatom'] for i in range(len(atom))]
    number_of_atoms = len(list_Zatom)
    list_fraction = [ atom[i]['fraction'] for i in range(len(atom))]
    try:
        list_charge = [atom[i]['charge'] for i in range(len(atom))]
    except Exception:  # fixed: bare "except:" also swallowed KeyboardInterrupt/SystemExit
        # charge is not available in (vanilla) xraylib structures
        list_charge = [0.0] * number_of_atoms
    list_x = [ atom[i]['x'] for i in range(len(atom))]
    list_y = [ atom[i]['y'] for i in range(len(atom))]
    list_z = [ atom[i]['z'] for i in range(len(atom))]

    # creates an id that contains Z, occupation and charge, that will
    # define the different sites.
    IDs = []
    number_of_atoms = len(list_Zatom)
    for i in range(number_of_atoms):
        IDs.append("Z:%2d-F:%g-C:%g" % (list_Zatom[i],list_fraction[i], list_charge[i]))

    # calculate indices of unique Id's sorted by Z
    unique_indexes1 = numpy.unique(IDs, return_index=True) [1]
    unique_Zatom1 = [list_Zatom[i] for i in unique_indexes1]
    # sort by Z
    ii = numpy.argsort(unique_Zatom1)
    unique_indexes = unique_indexes1[ii]

    unique_Zatom = [list_Zatom[i] for i in unique_indexes]
    unique_charge = [list_charge[i] for i in unique_indexes]
    unique_scattering_electrons = []
    for i, Zi in enumerate(unique_Zatom):
        unique_scattering_electrons.append(Zi - unique_charge[i])

    nbatom = (len(unique_Zatom))
    txt += "# Number of different element-sites in unit cell NBATOM:\n%d \n" % nbatom
    output_dictionary["nbatom"] = nbatom

    # NOTE(review): txt writes the bare Z values while the dictionary stores
    # Z - charge; txt is unused downstream, so this mismatch is harmless.
    txt += "# for each element-site, the number of scattering electrons (Z_i + charge_i)\n"
    for i in unique_Zatom:
        txt += "%d "%i
    txt += "\n"
    output_dictionary["atnum"] = list(unique_scattering_electrons)

    txt += "# for each element-site, the occupation factor\n"
    unique_fraction = []
    for i in range(len(unique_indexes)):
        unique_fraction.append(list_fraction[unique_indexes[i]])
        txt += "%g "%(unique_fraction[i])
    txt += "\n"
    output_dictionary["fraction"] = unique_fraction

    txt += "# for each element-site, the temperature factor\n"  # temperature parameter
    list_temper = []
    for i in range(len(unique_indexes)):
        txt += "%5.3f "%temper
        list_temper.append(temper)
    txt += "\n"
    output_dictionary["temper"] = list_temper

    #
    # Geometrical part of structure factor:  G and G_BAR
    #
    txt += "# for each type of element-site, COOR_NR=G_0\n"
    list_multiplicity = []
    for i in range(len(unique_indexes)):
        site_id = IDs[unique_indexes[i]]  # renamed from "id" (shadowed builtin)
        txt += "%d "%IDs.count(site_id)
        list_multiplicity.append(IDs.count(site_id))
    txt += "\n"
    output_dictionary["G_0"] = list_multiplicity

    txt += "# for each type of element-site, G and G_BAR (both complex)\n"
    list_g = []
    list_g_bar = []
    for i in range(len(unique_indexes)):
        site_id = IDs[unique_indexes[i]]
        ga = 0.0 + 0j
        # geometric sum over all atoms belonging to this site group
        # (inner index renamed from "i", which clobbered the outer loop variable)
        for k, zz in enumerate(IDs):
            if zz == site_id:
                ga += numpy.exp(2j*numpy.pi*(hh*list_x[k]+kk*list_y[k]+ll*list_z[k]))
        txt += "(%g,%g) \n"%(ga.real,ga.imag)
        txt += "(%g,%g) \n"%(ga.real,-ga.imag)
        list_g.append(ga)
        list_g_bar.append(ga.conjugate())
    output_dictionary["G"] = list_g
    output_dictionary["G_BAR"] = list_g_bar

    #
    # F0 part
    #
    txt += "# for each type of element-site, the number of f0 coefficients followed by them\n"
    list_f0 = []
    for i in range(len(unique_indexes)):
        zeta = list_Zatom[unique_indexes[i]]
        tmp = f0_xop(zeta)
        txt += ("11 "+"%g "*11+"\n")%(tuple(tmp))
        list_f0.append(tmp.tolist())
    output_dictionary["f0coeff"] = list_f0

    # NOTE(review): bragg_calc2 uses numpy.ceil here to guarantee emax coverage;
    # this (obsolete) version keeps the historical truncating behavior.
    npoint = int( (emax - emin)/estep + 1 )
    txt += "# The number of energy points NPOINT: \n"
    txt +=  ("%i \n") % npoint
    output_dictionary["npoint"] = npoint

    txt += "# for each energy point, energy, F1(1),F2(1),...,F1(nbatom),F2(nbatom)\n"
    list_energy = []
    out_f1 = numpy.zeros( (len(unique_indexes),npoint), dtype=float)
    out_f2 = numpy.zeros( (len(unique_indexes),npoint), dtype=float)
    out_fcompton = numpy.zeros( (len(unique_indexes),npoint), dtype=float)  # todo is complex?
    for i in range(npoint):
        energy = (emin+estep*i)
        txt += ("%20.11e \n") % (energy)
        list_energy.append(energy)
        for j in range(len(unique_indexes)):
            zeta = list_Zatom[unique_indexes[j]]
            f1a = material_constants_library.Fi(int(zeta),energy*1e-3)
            f2a = -material_constants_library.Fii(int(zeta),energy*1e-3)  # TODO: check the sign!!
            txt += (" %20.11e %20.11e 1.000 \n")%(f1a, f2a)
            out_f1[j,i] = f1a
            out_f2[j,i] = f2a
            out_fcompton[j,i] = 1.0
    output_dictionary["energy"] = list_energy
    output_dictionary["f1"] = out_f1
    output_dictionary["f2"] = out_f2
    output_dictionary["fcompton"] = out_fcompton

    # fixed: was "if fileout != None"
    if fileout is not None:
        bragg_preprocessor_file_v2_write(output_dictionary, fileout)

    return output_dictionary
#
#
#
def crystal_fh(input_dictionary,phot_in,theta=None,forceratio=0):
    """
    Computes the structure factor FH and derived quantities (psi factors, Darwin
    widths, refractive index) for the crystal described by input_dictionary.

    :param input_dictionary: as resulting from bragg_calc()
    :param phot_in: photon energy in eV (scalar or array-like)
    :param theta: incident angle (half of scattering angle) in rad; if None the
        Bragg angle for each photon energy is used
    :param forceratio: if nonzero, compute sin(theta)/lambda from the d-spacing
        (1/(2d)) instead of from the angle
    :return: a dictionary with structure factor results and an "info" text block

    NOTE(review): when phot_in contains several energies, only itheta is filled
    per-energy; all quantities computed after that loop use the value of `phot`
    from the last iteration — confirm this is intended for vectorized input.
    """
    # outfil    = input_dictionary["outfil"]
    # fract     = input_dictionary["fract"]
    # unpack the preprocessor dictionary produced by bragg_calc()
    rn = input_dictionary["rn"]
    dspacing = numpy.array(input_dictionary["dspacing"])
    nbatom = numpy.array(input_dictionary["nbatom"])
    atnum = numpy.array(input_dictionary["atnum"])
    temper = numpy.array(input_dictionary["temper"])
    G_0 = numpy.array(input_dictionary["G_0"])
    G = numpy.array(input_dictionary["G"])
    G_BAR = numpy.array(input_dictionary["G_BAR"])
    f0coeff = numpy.array(input_dictionary["f0coeff"])
    npoint = numpy.array(input_dictionary["npoint"])
    energy = numpy.array(input_dictionary["energy"])
    fp = numpy.array(input_dictionary["f1"])
    fpp = numpy.array(input_dictionary["f2"])
    fraction = numpy.array(input_dictionary["fraction"])

    phot_in = numpy.array(phot_in,dtype=float).reshape(-1)

    # conversion factor eV <-> Angstrom (h c / e in Angstrom*eV)
    toangstroms = codata.h * codata.c / codata.e * 1e10

    itheta = numpy.zeros_like(phot_in)
    for i,phot in enumerate(phot_in):

        if theta is None:
            # Bragg angle from Bragg's law: sin(theta) = lambda / (2 d)
            itheta[i] = numpy.arcsin(toangstroms*1e-8/phot/2/dspacing)
        else:
            itheta[i] = theta

        # print("energy= %g eV, theta = %15.13g deg"%(phot,itheta[i]*180/numpy.pi))
        if phot < energy[0] or phot > energy[-1]:
            raise Exception("Photon energy %g eV outside of valid limits [%g,%g]"%(phot,energy[0],energy[-1]))

        if forceratio == 0:
            ratio = numpy.sin(itheta[i]) / (toangstroms / phot)
        else:
            ratio = 1 / (2 * dspacing * 1e8)
        # print("Ratio: ",ratio)

    # evaluate f0 at the working sin(theta)/lambda from the tabulated coefficients
    F0 = numpy.zeros(nbatom)
    F000 = numpy.zeros(nbatom)
    for j in range(nbatom):
        #icentral = int(f0coeff.shape[1]/2)
        #F0[j] = f0coeff[j,icentral]
        icentral = int(len(f0coeff[j])/2)
        F0[j] = f0coeff[j][icentral]
        # F000[j] = F0[j]
        for i in range(icentral):
            #F0[j] += f0coeff[j,i] * numpy.exp(-1.0*f0coeff[j,i+icentral+1]*ratio**2)
            F0[j] += f0coeff[j][i] * numpy.exp(-1.0*f0coeff[j][i+icentral+1]*ratio**2)
        #srio F000[j] += f0coeff[j][i] #actual number of electrons carried by each atom, <NAME>, <EMAIL>
        F000[j] = atnum[j] # srio

    # ;C
    # ;C Interpolate for the atomic scattering factor.
    # ;C
    # find the tabulated energy interval bracketing phot (linear search)
    for j,ienergy in enumerate(energy):
        if ienergy > phot:
            break
    nener = j - 1

    F1 = numpy.zeros(nbatom,dtype=float)
    F2 = numpy.zeros(nbatom,dtype=float)
    F = numpy.zeros(nbatom,dtype=complex)

    # linear interpolation of f' (fp) and f'' (fpp) at the photon energy
    for j in range(nbatom):
        F1[j] = fp[j,nener] + (fp[j,nener+1] - fp[j,nener]) * \
        (phot - energy[nener]) / (energy[nener+1] - energy[nener])
        F2[j] = fpp[j,nener] + (fpp[j,nener+1] - fpp[j,nener]) * \
        (phot - energy[nener]) / (energy[nener+1] - energy[nener])

    r_lam0 = toangstroms * 1e-8 / phot  # wavelength in cm
    for j in range(nbatom):
        F[j] = F0[j] + F1[j] + 1j * F2[j]
    # print("F",F)

    F_0 = 0.0 + 0.0j
    FH = 0.0 + 0.0j
    FH_BAR = 0.0 + 0.0j
    FHr = 0.0 + 0.0j
    FHi = 0.0 + 0.0j
    FH_BARr = 0.0 + 0.0j
    FH_BARi = 0.0 + 0.0j

    # accumulate the structure factors over the element-sites
    TEMPER_AVE = 1.0
    for j in range(nbatom):
        FH += fraction[j] * (G[j] * F[j] * 1.0) * temper[j]
        FHr += fraction[j] * (G[j] * (F0[j] + F1[j])* 1.0) * temper[j]
        FHi += fraction[j] * (G[j] * F2[j] * 1.0) * temper[j]
        FN = F000[j] + F1[j] + 1j * F2[j]
        F_0 += fraction[j] * (G_0[j] * FN * 1.0)
        # TEMPER_AVE *= (temper[j])**(G_0[j]/(G_0.sum()))

        FH_BAR += fraction[j] * ((G_BAR[j] * F[j] * 1.0)) * temper[j]
        FH_BARr += fraction[j] * ((G_BAR[j] * (F0[j] + F1[j]) *1.0)) * temper[j]
        FH_BARi += fraction[j] * ((G_BAR[j] * F2[j] * 1.0)) * temper[j]
        # print("TEMPER_AVE: ",TEMPER_AVE)

    # ;C
    # ;C multiply by the average temperature factor
    # ;C
    # FH *= TEMPER_AVE
    # FHr *= TEMPER_AVE
    # FHi *= TEMPER_AVE
    # FH_BAR *= TEMPER_AVE
    # FH_BARr *= TEMPER_AVE
    # FH_BARi *= TEMPER_AVE

    STRUCT = numpy.sqrt(FH * FH_BAR)

    # ;C
    # ;C   PSI_CONJ = F*( note: PSI_HBAR is PSI at -H position and is
    # ;C   proportional to fh_bar but PSI_CONJ is complex conjugate os PSI_H)
    # ;C
    psi_over_f = rn * r_lam0**2 / numpy.pi
    psi_h = rn * r_lam0**2 / numpy.pi * FH
    psi_hr = rn * r_lam0**2 / numpy.pi * FHr
    psi_hi = rn * r_lam0**2 / numpy.pi * FHi
    psi_hbar = rn * r_lam0**2 / numpy.pi * FH_BAR
    psi_hbarr = rn * r_lam0**2 / numpy.pi * FH_BARr
    psi_hbari = rn * r_lam0**2 / numpy.pi * FH_BARi
    psi_0 = rn * r_lam0**2 / numpy.pi * F_0
    psi_conj = rn * r_lam0**2 / numpy.pi * FH.conjugate()

    # ;
    # ; Darwin width
    # ;
    # print(rn,r_lam0,STRUCT,itheta)
    ssvar = rn * (r_lam0**2) * STRUCT / numpy.pi / numpy.sin(2.0*itheta)
    spvar = ssvar * numpy.abs((numpy.cos(2.0*itheta)))
    ssr = ssvar.real
    spr = spvar.real

    # ;C
    # ;C computes refractive index.
    # ;C ([3.171] of Zachariasen's book)
    # ;C
    REFRAC = (1.0+0j) - r_lam0**2 * rn * F_0 / 2/ numpy.pi
    DELTA_REF = 1.0 - REFRAC.real
    ABSORP = 4.0 * numpy.pi * (-REFRAC.imag) / r_lam0

    # assemble the human-readable report returned under "info"
    txt = ""
    txt += '\n******************************************************'
    txt += '\n at energy = '+repr(phot)+' eV'
    txt += '\n = '+repr(r_lam0*1e8)+' Angstroms'
    txt += '\n and at angle = '+repr(itheta*180.0/numpy.pi)+' degrees'
    txt += '\n = '+repr(itheta)+' rads'
    txt += '\n******************************************************'
    for j in range(nbatom):
        txt += '\n '
        txt += '\nFor atom '+repr(j+1)+':'
        txt += '\n fo + fp+ i fpp = '
        txt += '\n '+repr(F0[j])+' + '+ repr(F1[j].real)+' + i'+ repr(F2[j])+" ="
        txt += '\n '+repr(F0[j] + F1[j] + 1j * F2[j])
        txt += '\n Z = '+repr(atnum[j])
        txt += '\n Temperature factor = '+repr(temper[j])
    txt += '\n '
    txt += '\n Structure factor F(0,0,0) = '+repr(F_0)
    txt += '\n Structure factor FH = ' +repr(FH)
    txt += '\n Structure factor FH_BAR = ' +repr(FH_BAR)
    txt += '\n Structure factor F(h,k,l) = '+repr(STRUCT)
    txt += '\n '
    txt += '\n Psi_0 = ' +repr(psi_0)
    txt += '\n Psi_H = ' +repr(psi_h)
    txt += '\n Psi_HBar = '+repr(psi_hbar)
    txt += '\n '
    txt += '\n Psi_H(real) Real and Imaginary parts = ' + repr(psi_hr)
    txt += '\n Psi_H(real) Modulus = ' + repr(numpy.abs(psi_hr))
    txt += '\n Psi_H(imag) Real and Imaginary parts = ' + repr(psi_hi)
    txt += '\n Psi_H(imag) Modulus = ' + repr(abs(psi_hi))
    txt += '\n Psi_HBar(real) Real and Imaginary parts = '+ repr(psi_hbarr)
    txt += '\n Psi_HBar(real) Modulus = ' + repr(abs(psi_hbarr))
    txt += '\n Psi_HBar(imag) Real and Imaginary parts = '+ repr(psi_hbari)
    txt += '\n Psi_HBar(imag) Modulus = ' + repr(abs(psi_hbari))
    txt += '\n '
    txt += '\n Psi/F factor = ' + repr(psi_over_f)
    txt += '\n '
    txt += '\n Average Temperature factor = ' + repr(TEMPER_AVE)
    txt += '\n Refraction index = 1 - delta - i*beta'
    txt += '\n delta = ' + repr(DELTA_REF)
    txt += '\n beta = ' + repr(1.0e0*REFRAC.imag)
    txt += '\n Absorption coeff = ' + repr(ABSORP)+' cm^-1'
    txt += '\n '
    txt += '\n e^2/(mc^2)/V = ' + repr(rn)+' cm^-2'
    txt += '\n d-spacing = ' + repr(dspacing*1.0e8)+' Angstroms'
    txt += '\n SIN(theta)/Lambda = ' + repr(ratio)
    txt += '\n '
    txt += '\n Darwin width for symmetric s-pol [microrad] = ' + repr(2.0e6*ssr)
    txt += '\n Darwin width for symmetric p-pol [microrad] = ' + repr(2.0e6*spr)

    return {"PHOT":phot, "WAVELENGTH":r_lam0*1e-2 ,"THETA":itheta, "F_0":F_0, "FH":FH, "FH_BAR":FH_BAR,
            "STRUCT":STRUCT, "psi_0":psi_0, "psi_h":psi_h, "psi_hbar":psi_hbar,
            "DELTA_REF":DELTA_REF, "REFRAC":REFRAC, "ABSORP":ABSORP, "RATIO":ratio,
            "ssr":ssr, "spr":spr, "psi_over_f":psi_over_f, "info":txt}
#
#
#
def bragg_calc2(descriptor="YB66", hh=1, kk=1, ll=1, temper=1.0,
                emin=5000.0, emax=15000.0, estep=100.0, ANISO_SEL=0,
                fileout=None,
                do_not_prototype=0, # 0=use site groups (recommended), 1=use all individual sites
                verbose=True,
                material_constants_library=xraylib,
                ):
    """
    Preprocessor for Structure Factor (FH) calculations. It calculates the basic ingredients of FH.

    :param descriptor: crystal name (as in xraylib)
    :param hh: miller index H
    :param kk: miller index K
    :param ll: miller index L
    :param temper: temperature factor (scalar <=1.0 )
    :param emin: photon energy minimum (eV)
    :param emax: photon energy maximum (eV)
    :param estep: photon energy step (eV)
    :param ANISO_SEL: source of temperature factor:
                0: use scalar value defined in temper
                1: use isotropic value calculated from keyword UNIANISO_COFF in dabax Crystal.dat file
                2: use anisotropic value calculated from keyword UNIANISO_COFF in dabax Crystal.dat file
    :param fileout: name for the output file (default=None, no output file)
    :param do_not_prototype: 0=group equivalent sites (recommended), 1=treat every site individually
    :param verbose: kept for API compatibility (currently unused)
    :param material_constants_library: xraylib or a DabaxXraylib instance
    :raises Exception: unknown descriptor, or ANISO_SEL!=0 without anisotropic data
    :return: a dictionary with all ingredients of the structure factor.
    """
    output_dictionary = {}

    # e^2/(m c^2) in m (classical electron radius)
    codata_e2_mc2 = codata.e ** 2 / codata.m_e / codata.c ** 2 / (4 * numpy.pi * codata.epsilon_0)

    version = "2.6"
    output_dictionary["version"] = version

    txt = ""
    txt += "# Bragg version, Data file type\n"
    txt += "%s\n" % version

    cryst = material_constants_library.Crystal_GetCrystal(descriptor)

    if cryst is None:
        raise Exception("Crystal descriptor %s not found in material constants library" % descriptor)

    volume = cryst['volume']

    # test crystal data - debug printout only, not needed
    icheck = 0
    if icheck:
        print(" Unit cell dimensions are %f %f %f" % (cryst['a'], cryst['b'], cryst['c']))
        print(" Unit cell angles are %f %f %f" % (cryst['alpha'], cryst['beta'], cryst['gamma']))
        print(" Unit cell volume is %f A^3" % volume)
        print(" Atoms at:")
        print(" Z fraction X Y Z")
        for i in range(cryst['n_atom']):
            atom = cryst['atom'][i]
            print(" %3i %f %f %f %f" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']))
        print(" ")

    volume = volume * 1e-8 * 1e-8 * 1e-8  # in cm^3
    rn = (1e0 / volume) * (codata_e2_mc2 * 1e2)
    dspacing = bragg_metrictensor(cryst['a'], cryst['b'], cryst['c'], cryst['alpha'], cryst['beta'], cryst['gamma'], HKL=[hh, kk, ll])
    dspacing *= 1e-8  # in cm

    txt += "# RN = (e^2/(m c^2))/V) [cm^-2], d spacing [cm]\n"
    txt += "%e %e \n" % (rn, dspacing)

    output_dictionary["rn"] = rn
    output_dictionary["dspacing"] = dspacing

    atom = cryst['atom']
    number_of_atoms = len(atom)
    list_Zatom = [atom[i]['Zatom'] for i in range(len(atom))]

    list_fraction = [atom[i]['fraction'] for i in range(number_of_atoms)]
    try:
        list_charge = [atom[i]['charge'] for i in range(number_of_atoms)]
    except Exception:  # fixed: bare "except:" also swallowed KeyboardInterrupt/SystemExit
        # charge is not available in (vanilla) xraylib structures
        list_charge = [0.0] * number_of_atoms
    list_x = [atom[i]['x'] for i in range(number_of_atoms)]
    list_y = [atom[i]['y'] for i in range(number_of_atoms)]
    list_z = [atom[i]['z'] for i in range(number_of_atoms)]

    # calculate array of temperature factor for all atoms
    #
    # Consider anisotropic temperature factor
    # A dummy dictionary Aniso with start=0 if no aniso temperature factor input
    if 'Aniso' in cryst and cryst['Aniso'][0]['start'] > 0:  # most crystals have no Anisotropic input
        TFac = TemperFactor(1.0 / (2.0 * dspacing * 1e8), cryst['Aniso'], Miller={'h': hh, 'k': kk, 'l': ll}, \
                            cell={'a': cryst['a'], 'b': cryst['b'], 'c': cryst['c']}, n=len(atom))
        B_TFac = 1
    else:
        B_TFac = 0

    # per-atom temperature factors (and a label used for site prototyping)
    list_temper = []
    list_temper_label = []
    if ANISO_SEL == 0:
        for i in range(number_of_atoms):
            list_temper.append(temper)
            list_temper_label.append(-1)
    elif ANISO_SEL == 1:
        if B_TFac:
            for i in range(number_of_atoms):
                list_temper.append(TFac[0, i])
                list_temper_label.append(TFac[2, i])
        else:
            raise Exception("No crystal data to calculate isotropic temperature factor for crystal %s" % descriptor)
    elif ANISO_SEL == 2:
        if B_TFac:
            for i in range(number_of_atoms):
                list_temper.append(TFac[1, i])
                list_temper_label.append(TFac[2, i])
        else:
            raise Exception("No crystal data to calculate anisotropic temperature factor for crystal %s" % descriptor)

    list_AtomicName = []
    for i in range(number_of_atoms):
        s = atomic_symbols()[atom[i]['Zatom']]
        try:  # charge is not available in xraylib
            if atom[i]['charge'] != 0.0:  # if charge is 0, s is symbol only, not B0, etc
                s = s + '%+.6g' % atom[i]['charge']  # (dropped pointless f-string prefix)
        except Exception:
            pass
        list_AtomicName.append(s)

    # identify the prototypical atoms
    labels_prototypical = []
    for i in range(number_of_atoms):
        labels_prototypical.append("Z=%d C=%g F=%g T=%g" % (list_Zatom[i], list_charge[i], list_fraction[i], list_temper_label[i]))

    if do_not_prototype:
        indices_prototypical = numpy.arange(number_of_atoms)  # different with diff_pat for complex crystal
    else:
        indices_prototypical = numpy.unique(labels_prototypical, return_index=True)[1]

    number_of_prototypical_atoms = len(indices_prototypical)

    #
    # get f0 coefficients (the fractional-charge DABAX variant handles charge==0 too;
    # historical per-source selection code removed)
    #
    f0coeffs = []
    for i in indices_prototypical:
        try:
            charge = atom[i]['charge']
        except Exception:  # fixed: bare "except:"
            charge = 0.0
        f0coeffs.append(f0_xop_with_fractional_charge(atom[i]['Zatom'], charge))

    txt += "# Number of different element-sites in unit cell NBATOM:\n%d \n" % number_of_prototypical_atoms
    output_dictionary["nbatom"] = number_of_prototypical_atoms

    txt += "# for each element-site, the number of scattering electrons (Z_i + charge_i)\n"
    atnum_list = []
    for i in indices_prototypical:
        txt += "%f " % (list_Zatom[i] - list_charge[i])
        atnum_list.append(list_Zatom[i] - list_charge[i])
    txt += "\n"
    output_dictionary["atnum"] = atnum_list

    txt += "# for each element-site, the occupation factor\n"
    unique_fraction = [list_fraction[i] for i in indices_prototypical]
    for z in unique_fraction:
        txt += "%g " % (z)
    txt += "\n"
    output_dictionary["fraction"] = unique_fraction

    txt += "# for each element-site, the temperature factor\n"  # temperature parameter
    unique_temper = []
    for i in indices_prototypical:
        txt += "%g " % list_temper[i]
        unique_temper.append(list_temper[i])
    txt += "\n"
    output_dictionary["temper"] = unique_temper

    #
    # Geometrical part of structure factor:  G and G_BAR
    #
    txt += "# for each type of element-site, COOR_NR=G_0\n"
    list_multiplicity = []
    for i in indices_prototypical:
        if do_not_prototype:
            txt += "%d " % 1
            list_multiplicity.append(1)
        else:
            # multiplicity = number of sites sharing this prototype label
            count = 0
            for j in range(number_of_atoms):
                if labels_prototypical[j] == labels_prototypical[i]: count += 1
            txt += "%d " % count
            list_multiplicity.append(count)
    txt += "\n"
    output_dictionary["G_0"] = list_multiplicity

    txt += "# for each type of element-site, G and G_BAR (both complex)\n"
    list_g = []
    list_g_bar = []
    for i in indices_prototypical:
        if do_not_prototype:
            ga = numpy.exp(2j * numpy.pi * (hh * list_x[i] + kk * list_y[i] + ll * list_z[i]))
        else:
            # geometric sum over all sites belonging to this prototype
            ga = 0.0 + 0j
            for j in range(number_of_atoms):
                if labels_prototypical[j] == labels_prototypical[i]:
                    ga_item = numpy.exp(2j * numpy.pi * (hh * list_x[j] + kk * list_y[j] + ll * list_z[j]))
                    ga += ga_item
        txt += "(%g,%g) \n" % (ga.real, ga.imag)
        txt += "(%g,%g) \n" % (ga.real, -ga.imag)
        list_g.append(ga)
        list_g_bar.append(ga.conjugate())
    output_dictionary["G"] = list_g
    output_dictionary["G_BAR"] = list_g_bar

    #
    # F0 part
    #
    txt += "# for each type of element-site, the number of f0 coefficients followed by them\n"
    for f0coeffs_item in f0coeffs:
        txt += "%d " % len(f0coeffs_item)
        for cc in f0coeffs_item:
            txt += "%g " % cc
        txt += "\n"
    output_dictionary["f0coeff"] = f0coeffs

    # use ceil to round up, otherwise we may get actual max energy less than emax
    npoint = int(numpy.ceil(((emax - emin) / estep + 1)))
    txt += "# The number of energy points NPOINT: \n"
    txt += ("%i \n") % npoint
    output_dictionary["npoint"] = npoint

    txt += "# for each energy point, energy, F1(1),F2(1),...,F1(nbatom),F2(nbatom)\n"
    list_energy = []
    out_f1 = numpy.zeros((len(indices_prototypical), npoint), dtype=float)
    out_f2 = numpy.zeros((len(indices_prototypical), npoint), dtype=float)
    out_fcompton = numpy.zeros((len(indices_prototypical), npoint), dtype=float)  # todo: is complex?

    if isinstance(material_constants_library, DabaxXraylib):
        # vectorize with DABAX: one FiAndFii call per site for the full energy grid
        energies = numpy.zeros(npoint)
        for i in range(npoint):
            energies[i] = (emin + estep * i)
        DABAX_F_RESULTS = []
        for j, jj in enumerate(indices_prototypical):
            DABAX_F_RESULTS.append(numpy.array(material_constants_library.FiAndFii(list_Zatom[jj], energies * 1e-3)))
        for i in range(npoint):
            energy = (emin + estep * i)
            txt += ("%20.11e \n") % (energy)
            list_energy.append(energy)
            for j, jj in enumerate(indices_prototypical):
                f1a = (DABAX_F_RESULTS[j])[0, i]   # material_constants_library.Fi(list_Zatom[jj], energy * 1e-3)
                f2a = -(DABAX_F_RESULTS[j])[1, i]  # -material_constants_library.Fii(list_Zatom[jj], energy * 1e-3)
                txt += (" %20.11e %20.11e 1.000 \n") % (f1a, f2a)
                out_f1[j, i] = f1a
                out_f2[j, i] = f2a
                out_fcompton[j, i] = 1.0
    else:
        # make a simple loop with xraylib (fast)
        for i in range(npoint):
            energy = (emin + estep * i)
            txt += ("%20.11e \n") % (energy)
            list_energy.append(energy)
            for j, jj in enumerate(indices_prototypical):
                f1a = material_constants_library.Fi(list_Zatom[jj], energy * 1e-3)
                f2a = -material_constants_library.Fii(list_Zatom[jj], energy * 1e-3)
                txt += (" %20.11e %20.11e 1.000 \n") % (f1a, f2a)
                out_f1[j, i] = f1a
                out_f2[j, i] = f2a
                out_fcompton[j, i] = 1.0

    output_dictionary["energy"] = list_energy
    output_dictionary["f1"] = out_f1
    output_dictionary["f2"] = out_f2
    output_dictionary["fcompton"] = out_fcompton

    # fixed: was "if fileout != None"
    if fileout is not None:
        bragg_preprocessor_file_v2_write(output_dictionary, fileout)

    return output_dictionary
# todo: rename
def TemperFactor(sinTheta_lambda, anisos, Miller=None, cell=None, n=1936):
    """
    Calculation of isotropic & anisotropic temperature (Debye-Waller) factors.

    Originally from the Singapore Synchrotron Light Source (SSLS).

    :param sinTheta_lambda: sin(theta)/lambda, lambda in units of Angstrom
    :param anisos: sequence of dicts of anisotropic coefficients with keys
        'start', 'end' (1-based site index range), 'beta11', 'beta22', 'beta33',
        'beta12', 'beta13', 'beta23'. If beta11 >= 1, beta22 is interpreted as
        the isotropic Beq and the remaining coefficients are unused.
    :param Miller: Miller indices as {'h':..,'k':..,'l':..}
        (default: {'h':1,'k':1,'l':1})
    :param cell: lattice constants {'a':..,'b':..,'c':..} in Angstrom
        (default: {'a':23.44,'b':23.44,'c':23.44})
    :param n: number of atomic sites
    :return: numpy array of shape (3, n): row 0 = isotropic factors,
        row 1 = anisotropic factors, row 2 = 0-based start index of the
        aniso range each site belongs to. (Docstring fixed: previous text
        claimed a 2-element list; a third row was added by srio.)
    """
    # avoid shared mutable default arguments (former dict defaults)
    if Miller is None:
        Miller = {'h': 1, 'k': 1, 'l': 1}
    if cell is None:
        cell = {'a': 23.44, 'b': 23.44, 'c': 23.44}

    # row 0: isotropic, row 1: anisotropic temperature factors, row 2: range start
    results = numpy.zeros([3, n])
    for aniso in anisos:
        s = aniso['start'] - 1  # input indices are 1-based
        e = aniso['end']
        if aniso['beta11'] >= 1:
            # if beta11>=1, then beta22 is Beq, the other fields are unused;
            # anisotropic temperature factor equals the isotropic one
            Beq = aniso['beta22']
            results[1, s:e] = numpy.exp(-sinTheta_lambda * sinTheta_lambda * Beq)
        else:
            # true only for cubic, tetragonal and orthorhombic lattices
            # (Giacovazzo p. 188)
            Beq = 4.0 / 3.0 * (aniso['beta11'] * cell['a'] * cell['a'] +
                               aniso['beta22'] * cell['b'] * cell['b'] +
                               aniso['beta33'] * cell['c'] * cell['c'])
            results[1, s:e] = numpy.exp(-(aniso['beta11'] * Miller['h'] * Miller['h'] +
                                          aniso['beta22'] * Miller['k'] * Miller['k'] +
                                          aniso['beta33'] * Miller['l'] * Miller['l'] +
                                          2.0 * Miller['h'] * Miller['k'] * aniso['beta12'] +
                                          2.0 * Miller['h'] * Miller['l'] * aniso['beta13'] +
                                          2.0 * Miller['k'] * Miller['l'] * aniso['beta23']))
        results[0, s:e] = numpy.exp(-sinTheta_lambda * sinTheta_lambda * Beq)
        results[2, s:e] = s
    return results
def mare_calc(descriptor,H,K,L,HMAX,KMAX,LMAX,FHEDGE,DISPLAY,lambda1,deltalambda,PHI,DELTAPHI,
material_constants_library=xraylib,verbose=0):
"""
Calculates:
- Spaghetti plots (lambda versis Psi for multiple crystal reflection)
- The Umweganregung peak location plot (the diffracted wavelength lambda vs. Psi) for a given primary
reflection,i.e., an horizontal cut of the spaghetti plot.
- The Glitches spectrum (the negative intensity for versus the wavelength) or a vertical cut of the spaghetti plot.
Psi is the azimutal angle of totation, i.e., the totation around
the H vector (main reflection)
In other words, if a crystal is set with a particular Bragg angle to match a given reflection (inputs: H,K,L) at
a given wavelength (input: WaveLength), many other (secondary) reflections are excited when the crystal is rotated
around the azimutal angle Psi, without changing the Bragg angle.
The plot (WaveLength,Psi) of the possible reflections is calculated and contains all possible reflection curves
up to a maximum reflection (input: H Max, K Max, L Max).
Umweg plot:
The intersection of these curves with an horizontal line at the wavelength of the primary reflection
(input: WaveLength) gives the position of the peaks in the unweg plot. The width of each peak depends on the
pendent of the curve at the intersection. For that, the Psi1 and Psi2 intersection angles with a band of width
(input: DeltaWaveLength) are calculated. With this width and the intensity of the diffraction line, it is possible
to compute a Gaussian that "roughly" describe the peak.
Glitches plot:
The intersection of these curves with a vertical line at a given Psi gives the position of the peaks in the
glitches plot. The width of each peak is the difference between the wavelength values for Psi+/-DeltaPsi
With this width and the intensity of the diffraction line, it is possible to compute a Gaussian that "roughly"
describe the peak.
:param descriptor: a valid crystal name for xraylib
:param H: the miller index H
:param K: the miller index K
:param L: the miller index L
:param HMAX: the maximum miller index H
:param KMAX: the maximum miller index K
:param LMAX: the maximum miller index L
:param FHEDGE: below this edge (structure factor value) the reflections are discarded
:param DISPLAY:
0: Create spaghetti plot script
0: Create spaghetti+Umweg plot scripts
0: Create spaghetti+Glitches plot scripts
0: Create spaghetti+Umweg+Glitches plot scripts
:param lambda1: wavelength in Angstroms for Umweg plot
:param deltalambda: delta wavelength in Angstroms for Umweg plot
:param PHI: phi angle in deg for the Glitches plot
:param DELTAPHI: delta phi angle in deg for the Glitches plot
:param verbose: set to 1 for a more verbose output
:return:
"""
list_of_scripts = []
cryst = material_constants_library.Crystal_GetCrystal(descriptor)
# volume = cryst['volume']
#
# # crystal data - not needed
#
# print (" Unit cell dimensions are %f %f %f" % (cryst['a'],cryst['b'],cryst['c']))
# print (" Unit cell angles are %f %f %f" % (cryst['alpha'],cryst['beta'],cryst['gamma']))
# print (" Unit cell volume is %f A^3" % volume )
# print (" Atoms at:")
# print (" Z fraction X Y Z")
# for i in range(cryst['n_atom']):
# atom = cryst['atom'][i]
# print (" %3i %f %f %f %f" % (atom['Zatom'], atom['fraction'], atom['x'], atom['y'], atom['z']) )
# print (" ")
fhEdge = FHEDGE
fhMax = -1e0
fhMaxIndex = -1
flg_s = 0
flg_u = 0
flg_g = 0
if DISPLAY == 0:
flg_s = 1
elif DISPLAY == 1:
flg_s = 1
flg_u = 1
elif DISPLAY == 2:
flg_s = 1
flg_g = 1
elif DISPLAY == 3:
flg_s = 1
flg_u = 1
flg_g = 1
# ;
# ; compute the metric tensor in the reciprocal space
# ;
ginv = bragg_metrictensor(cryst['a'],cryst['b'],cryst['c'],cryst['alpha'],cryst['beta'],cryst['gamma'])
# ;
# ; wavelength (for intersections: unweg pattern)
# ;
# lambda1 = LAMBDA # ; for intersections
# deltalambda = DELTALAMBDA
lambdas = numpy.array([lambda1-deltalambda, lambda1, lambda1+deltalambda])
# ;
# ; phi (for intersections: glitches pattern)
# ;
phi = PHI
deltaPhi = DELTAPHI
phis = numpy.array([phi-deltaPhi, phi, phi+deltaPhi])
# ;
# ; Main reflection
# ;
P = numpy.array([H,K,L],dtype=int)
p2 = (P[0]**2 + P[1]**2 + P[2]**2)
pn = numpy.sqrt(p2)
# ;
# ; Calculate Reference axis (corresponding to phi =0)
# ; This is a vector perpendicular to P
# ;
mm1 = numpy.dot(ginv,P.T)
mm2 = [mm1[1],-mm1[0],0]
mm3 = numpy.min(numpy.abs( mm1[ | numpy.where(mm1 != 0) | numpy.where |
import torch
import torch.nn as nn
import numpy as np
def gather_feat(feat, ind):
    """Select K feature vectors from a flattened spatial map.

    Given feat of shape [b, h*w, c] and ind of shape [b, k], expands the
    indices to [b, k, c] and gathers along dim 1, returning [b, k, c].
    """
    channels = feat.size(2)
    expanded = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), channels)
    return feat.gather(1, expanded)
def transpose_and_gather_feat(feat, ind):
    """Pick per-index feature vectors from a [b, c, h, w] map.

    Transposes feat to [b, h, w, c], flattens the spatial dims to
    [b, h*w, c], and gathers the rows addressed by ind ([b, k]),
    returning [b, k, c].
    """
    batch = feat.size(0)
    # [b, c, h, w] -> [b, h, w, c] (contiguous deep copy) -> [b, h*w, c]
    flat = feat.permute(0, 2, 3, 1).contiguous().view(batch, -1, feat.size(1))
    idx = ind.unsqueeze(2).expand(batch, ind.size(1), flat.size(2))
    return flat.gather(1, idx)
def sigmoid(x):
    """Clamped sigmoid used for heatmap outputs.

    Returns sigmoid(x) clamped to [1e-4, 1 - 1e-4] so that downstream
    log() calls never see exactly 0 or 1.

    Fix: the original used the in-place ``x.sigmoid_()``, silently
    mutating the caller's tensor; the out-of-place ``x.sigmoid()``
    returns the same value without that side effect.
    """
    return torch.clamp(x.sigmoid(), min=1e-4, max=1 - 1e-4)
def nms(heat, kernel=3, ch_pool=False):
    """Suppress non-maximum heatmap responses.

    A location survives (all others are zeroed) iff it equals the max of
    its kernel x kernel neighborhood. ``ch_pool`` is currently unused and
    is kept only for interface compatibility.
    """
    padding = (kernel - 1) // 2
    pooled = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=padding)
    mask = (pooled == heat).float()
    return heat * mask
def topK(scores, K):
    """Top-K peak selection on a [b, c, h, w] score map.

    Takes the top K per channel first, then the top K across all channel
    candidates, returning (scores, flat spatial indices, class ids, ys,
    xs), each of shape [b, K].
    """
    batch, num_classes, height, width = scores.size()
    # Stage 1: top K within each channel over the flattened h*w axis.
    cls_scores, cls_indices = torch.topk(scores.view(batch, num_classes, -1), K)
    ys = (cls_indices / width).int().float()
    xs = (cls_indices % width).int().float()
    # Stage 2: top K across all channels' candidates.
    top_scores, flat_idx = torch.topk(cls_scores.view(batch, -1), K)
    classes = (flat_idx / K).int()  # each channel contributed K candidates

    def pick(t):
        # Reindex a per-channel quantity ([b, c, K]) by the stage-2 winners.
        return torch.gather(t.view(batch, -1), 1, flat_idx)

    top_indices = pick(cls_indices)
    top_ys = pick(ys)
    top_xs = pick(xs)
    return top_scores, top_indices, classes, top_ys, top_xs
def get_multi_scale_topk_dets(dets, K):
    """Return the K highest-scoring detections.

    Note: ``dets`` is sorted in place (descending by 'score'), matching
    the original behavior.
    """
    dets.sort(key=lambda det: det['score'], reverse=True)
    return dets[:K]
def get_clsreg_alpha(rot, num_bins, opt=None):
"""
rot: (2*num_bins)
"""
bins = rot[0:num_bins]
reg = rot[num_bins:]
ps = np.exp(bins)
ps /= np.sum(ps)
head_bin = | np.argmax(bins) | numpy.argmax |
# Copyright (C) 2018, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>
"""Controllers."""
import six
import abc
import warnings
import numpy as np
from scipy import optimize
from scipy.stats import entropy
@six.add_metaclass(abc.ABCMeta)
class BaseController():
    """Abstract base class for trajectory-optimization controllers.

    Subclasses implement ``fit``, which solves for a locally optimal
    state/control trajectory given an initial state and a control guess.
    (``six.add_metaclass`` keeps the ABC machinery Python 2/3 compatible.)
    """
    @abc.abstractmethod
    def fit(self, x0, us_init, *args, **kwargs):
        """Compute the optimal controls.

        Args:
            x0: Initial state [state_size].
            us_init: Initial control path [N, action_size].
            *args, **kwargs: Additional positional and key-word arguments.

        Returns:
            Tuple of
                xs: optimal state path [N+1, state_size].
                us: optimal control path [N, action_size].
        """
        raise NotImplementedError
class iLQR(BaseController):
    """Finite Horizon Iterative Linear Quadratic Regulator."""
    def __init__(self, dynamics, cost, N, max_reg=1e10, hessians=False):
        """Constructs an iLQR solver.
        Args:
            dynamics: Plant dynamics.
            cost: Cost function.
            N: Horizon length.
            max_reg: Maximum regularization term to break early due to
                divergence. This can be disabled by setting it to None.
            hessians: Use the dynamic model's second order derivatives.
                Default: only use first order derivatives. (i.e. iLQR instead
                of DDP).
        """
        self.dynamics = dynamics
        self.cost = cost
        self.N = N
        self._use_hessians = hessians and dynamics.has_hessians
        if hessians and not dynamics.has_hessians:
            warnings.warn("hessians requested but are unavailable in dynamics")
        # Regularization terms: Levenberg-Marquardt parameter.
        # See II F. Regularization Schedule.
        self._mu = 1.0
        self._mu_min = 1e-6
        self._mu_max = max_reg
        self._delta_0 = 2.0
        self._delta = self._delta_0
        # Feedforward (k) and feedback (K) gains, overwritten by each
        # backward pass; shapes [N, action] and [N, action, state].
        self._k = np.zeros((N, dynamics.action_size))
        self._K = np.zeros((N, dynamics.action_size, dynamics.state_size))
        super(iLQR, self).__init__()
    def fit(self, x0, us_init, n_iterations=100, tol=1e-6, on_iteration=None):
        """Computes the optimal controls.
        Args:
            x0: Initial state [state_size].
            us_init: Initial control path [N, action_size].
            n_iterations: Maximum number of interations. Default: 100.
            tol: Tolerance. Default: 1e-6.
            on_iteration: Callback at the end of each iteration with the
                following signature:
                (iteration_count, x, J_opt, accepted, converged) -> None
                where:
                    iteration_count: Current iteration count.
                    xs: Current state path.
                    us: Current action path.
                    J_opt: Optimal cost-to-go.
                    accepted: Whether this iteration yielded an accepted result.
                    converged: Whether this iteration converged successfully.
                Default: None.
        Returns:
            Tuple of
                xs: optimal state path [N+1, state_size].
                us: optimal control path [N, action_size].
        """
        # Reset regularization term.
        self._mu = 1.0
        self._delta = self._delta_0
        # Backtracking line search candidates 0 < alpha <= 1.
        # Geometric schedule: 1, 1.1^-1, 1.1^-4, 1.1^-9, ...
        alphas = 1.1**(-np.arange(10)**2)
        us = us_init.copy()
        k = self._k
        K = self._K
        changed = True
        converged = False
        for iteration in range(n_iterations):
            accepted = False
            # Forward rollout only if it needs to be recomputed.
            if changed:
                (xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux,
                 F_uu) = self._forward_rollout(x0, us, us_init)
                J_opt = L.sum()
                changed = False
            try:
                # Backward pass.
                k, K = self._backward_pass(F_x, F_u, L_x, L_u, L_xx, L_ux, L_uu,
                                           F_xx, F_ux, F_uu)
                # Backtracking line search.
                for alpha in alphas:
                    xs_new, us_new = self._control(xs, us, k, K, alpha)
                    J_new = self._trajectory_cost(xs_new, us_new)
                    if J_new < J_opt:
                        # Declare convergence once the relative cost
                        # improvement drops below the tolerance.
                        if np.abs((J_opt - J_new) / J_opt) < tol:
                            converged = True
                        J_opt = J_new
                        xs = xs_new
                        us = us_new
                        changed = True
                        # Decrease regularization term.
                        self._delta = min(1.0, self._delta) / self._delta_0
                        self._mu *= self._delta
                        if self._mu <= self._mu_min:
                            self._mu = 0.0
                        # Accept this.
                        accepted = True
                        break
            except np.linalg.LinAlgError as e:
                # Quu was not positive-definite and this diverged.
                # Try again with a higher regularization term.
                warnings.warn(str(e))
            if not accepted:
                # Increase regularization term.
                self._delta = max(1.0, self._delta) * self._delta_0
                self._mu = max(self._mu_min, self._mu * self._delta)
                if self._mu_max and self._mu >= self._mu_max:
                    warnings.warn("exceeded max regularization term")
                    break
            if on_iteration:
                on_iteration(iteration, xs, us, J_opt, accepted, converged)
            if converged:
                break
        # Store fit parameters.
        self._k = k
        self._K = K
        self._nominal_xs = xs
        self._nominal_us = us
        return xs, us
    def _control(self, xs, us, k, K, alpha=1.0):
        """Applies the controls for a given trajectory.
        Args:
            xs: Nominal state path [N+1, state_size].
            us: Nominal control path [N, action_size].
            k: Feedforward gains [N, action_size].
            K: Feedback gains [N, action_size, state_size].
            alpha: Line search coefficient.
        Returns:
            Tuple of
                xs: state path [N+1, state_size].
                us: control path [N, action_size].
        """
        xs_new = np.zeros_like(xs)
        us_new = np.zeros_like(us)
        xs_new[0] = xs[0].copy()
        for i in range(self.N):
            # Eq (12).
            # Applying alpha only on k[i] as in the paper for some reason
            # doesn't converge.
            us_new[i] = us[i] + alpha * (k[i] + K[i].dot(xs_new[i] - xs[i]))
            # Eq (8c).
            xs_new[i + 1] = self.dynamics.f(xs_new[i], us_new[i], i)
        return xs_new, us_new
    def _trajectory_cost(self, xs, us):
        """Computes the given trajectory's cost.
        Args:
            xs: State path [N+1, state_size].
            us: Control path [N, action_size].
        Returns:
            Trajectory's total cost.
        """
        # Running cost over the first N states/controls plus terminal cost.
        J = map(lambda args: self.cost.l(*args), zip(xs[:-1], us, range(self.N)))
        return sum(J) + self.cost.l(xs[-1], None, self.N, terminal=True)
    def _forward_rollout(self, x0, us, local_policy):
        """Apply the forward dynamics to have a trajectory from the starting
        state x0 by applying the control path us.
        Args:
            x0: Initial state [state_size].
            us: Control path [N, action_size].
        Returns:
            Tuple of:
                xs: State path [N+1, state_size].
                F_x: Jacobian of state path w.r.t. x
                    [N, state_size, state_size].
                F_u: Jacobian of state path w.r.t. u
                    [N, state_size, action_size].
                L: Cost path [N+1].
                L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
                L_u: Jacobian of cost path w.r.t. u [N, action_size].
                L_xx: Hessian of cost path w.r.t. x, x
                    [N+1, state_size, state_size].
                L_ux: Hessian of cost path w.r.t. u, x
                    [N, action_size, state_size].
                L_uu: Hessian of cost path w.r.t. u, u
                    [N, action_size, action_size].
                F_xx: Hessian of state path w.r.t. x, x if Hessians are used
                    [N, state_size, state_size, state_size].
                F_ux: Hessian of state path w.r.t. u, x if Hessians are used
                    [N, state_size, action_size, state_size].
                F_uu: Hessian of state path w.r.t. u, u if Hessians are used
                    [N, state_size, action_size, action_size].
        """
        # NOTE(review): ``local_policy`` is accepted but never used in this
        # method (fit() passes us_init here) — TODO confirm it can be dropped.
        state_size = self.dynamics.state_size
        action_size = self.dynamics.action_size
        N = us.shape[0]
        xs = np.empty((N + 1, state_size))
        F_x = np.empty((N, state_size, state_size))
        F_u = np.empty((N, state_size, action_size))
        if self._use_hessians:
            F_xx = np.empty((N, state_size, state_size, state_size))
            F_ux = np.empty((N, state_size, action_size, state_size))
            F_uu = np.empty((N, state_size, action_size, action_size))
        else:
            F_xx = None
            F_ux = None
            F_uu = None
        L = np.empty(N + 1)
        L_x = np.empty((N + 1, state_size))
        L_u = np.empty((N, action_size))
        L_xx = np.empty((N + 1, state_size, state_size))
        L_ux = np.empty((N, action_size, state_size))
        L_uu = np.empty((N, action_size, action_size))
        xs[0] = x0
        for i in range(N):
            x = xs[i]
            u = us[i]
            # Propagate dynamics and record first-order (and optionally
            # second-order) derivatives of dynamics and cost at (x, u, i).
            xs[i + 1] = self.dynamics.f(x, u, i)
            F_x[i] = self.dynamics.f_x(x, u, i)
            F_u[i] = self.dynamics.f_u(x, u, i)
            L[i] = self.cost.l(x, u, i, terminal=False)
            L_x[i] = self.cost.l_x(x, u, i, terminal=False)
            L_u[i] = self.cost.l_u(x, u, i, terminal=False)
            L_xx[i] = self.cost.l_xx(x, u, i, terminal=False)
            L_ux[i] = self.cost.l_ux(x, u, i, terminal=False)
            L_uu[i] = self.cost.l_uu(x, u, i, terminal=False)
            if self._use_hessians:
                F_xx[i] = self.dynamics.f_xx(x, u, i)
                F_ux[i] = self.dynamics.f_ux(x, u, i)
                F_uu[i] = self.dynamics.f_uu(x, u, i)
        # Terminal cost terms (no control at the final state).
        x = xs[-1]
        L[-1] = self.cost.l(x, None, N, terminal=True)
        L_x[-1] = self.cost.l_x(x, None, N, terminal=True)
        L_xx[-1] = self.cost.l_xx(x, None, N, terminal=True)
        return xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux, F_uu
    def _backward_pass(self,
                       F_x,
                       F_u,
                       L_x,
                       L_u,
                       L_xx,
                       L_ux,
                       L_uu,
                       F_xx=None,
                       F_ux=None,
                       F_uu=None):
        """Computes the feedforward and feedback gains k and K.
        Args:
            F_x: Jacobian of state path w.r.t. x [N, state_size, state_size].
            F_u: Jacobian of state path w.r.t. u [N, state_size, action_size].
            L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
            L_u: Jacobian of cost path w.r.t. u [N, action_size].
            L_xx: Hessian of cost path w.r.t. x, x
                [N+1, state_size, state_size].
            L_ux: Hessian of cost path w.r.t. u, x [N, action_size, state_size].
            L_uu: Hessian of cost path w.r.t. u, u
                [N, action_size, action_size].
            F_xx: Hessian of state path w.r.t. x, x if Hessians are used
                [N, state_size, state_size, state_size].
            F_ux: Hessian of state path w.r.t. u, x if Hessians are used
                [N, state_size, action_size, state_size].
            F_uu: Hessian of state path w.r.t. u, u if Hessians are used
                [N, state_size, action_size, action_size].
        Returns:
            Tuple of
                k: feedforward gains [N, action_size].
                K: feedback gains [N, action_size, state_size].
        """
        # Start the value-function expansion from the terminal cost terms.
        V_x = L_x[-1]
        V_xx = L_xx[-1]
        k = np.empty_like(self._k)
        K = np.empty_like(self._K)
        # Recurse backwards in time from step N-1 to 0.
        for i in range(self.N - 1, -1, -1):
            if self._use_hessians:
                Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q(
                    F_x[i], F_u[i], L_x[i], L_u[i], L_xx[i], L_ux[i], L_uu[i],
                    V_x, V_xx, F_xx[i], F_ux[i], F_uu[i])
            else:
                Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q(F_x[i], F_u[i], L_x[i],
                                                     L_u[i], L_xx[i], L_ux[i],
                                                     L_uu[i], V_x, V_xx)
            # Eq (6).
            k[i] = -np.linalg.solve(Q_uu, Q_u)
            K[i] = -np.linalg.solve(Q_uu, Q_ux)
            # Eq (11b).
            V_x = Q_x + K[i].T.dot(Q_uu).dot(k[i])
            V_x += K[i].T.dot(Q_u) + Q_ux.T.dot(k[i])
            # Eq (11c).
            V_xx = Q_xx + K[i].T.dot(Q_uu).dot(K[i])
            V_xx += K[i].T.dot(Q_ux) + Q_ux.T.dot(K[i])
            V_xx = 0.5 * (V_xx + V_xx.T)  # To maintain symmetry.
        return np.array(k), np.array(K)
    def _Q(self,
           f_x,
           f_u,
           l_x,
           l_u,
           l_xx,
           l_ux,
           l_uu,
           V_x,
           V_xx,
           f_xx=None,
           f_ux=None,
           f_uu=None):
        """Computes second order expansion.
        Args:
            F_x: Jacobian of state w.r.t. x [state_size, state_size].
            F_u: Jacobian of state w.r.t. u [state_size, action_size].
            L_x: Jacobian of cost w.r.t. x [state_size].
            L_u: Jacobian of cost w.r.t. u [action_size].
            L_xx: Hessian of cost w.r.t. x, x [state_size, state_size].
            L_ux: Hessian of cost w.r.t. u, x [action_size, state_size].
            L_uu: Hessian of cost w.r.t. u, u [action_size, action_size].
            V_x: Jacobian of the value function at the next time step
                [state_size].
            V_xx: Hessian of the value function at the next time step w.r.t.
                x, x [state_size, state_size].
            F_xx: Hessian of state w.r.t. x, x if Hessians are used
                [state_size, state_size, state_size].
            F_ux: Hessian of state w.r.t. u, x if Hessians are used
                [state_size, action_size, state_size].
            F_uu: Hessian of state w.r.t. u, u if Hessians are used
                [state_size, action_size, action_size].
        Returns:
            Tuple of
                Q_x: [state_size].
                Q_u: [action_size].
                Q_xx: [state_size, state_size].
                Q_ux: [action_size, state_size].
                Q_uu: [action_size, action_size].
        """
        # Eqs (5a), (5b) and (5c).
        Q_x = l_x + f_x.T.dot(V_x)
        Q_u = l_u + f_u.T.dot(V_x)
        Q_xx = l_xx + f_x.T.dot(V_xx).dot(f_x)
        # Eqs (11b) and (11c).
        # Levenberg-Marquardt regularization of V_xx for the control-coupled
        # terms keeps Q_uu invertible/solvable.
        reg = self._mu * np.eye(self.dynamics.state_size)
        Q_ux = l_ux + f_u.T.dot(V_xx + reg).dot(f_x)
        Q_uu = l_uu + f_u.T.dot(V_xx + reg).dot(f_u)
        if self._use_hessians:
            # Full DDP: fold the dynamics' second-order terms in.
            Q_xx += np.tensordot(V_x, f_xx, axes=1)
            Q_ux += np.tensordot(V_x, f_ux, axes=1)
            Q_uu += np.tensordot(V_x, f_uu, axes=1)
        return Q_x, Q_u, Q_xx, Q_ux, Q_uu
'''
iLQR_GPS: a variant of iLQR adapted for Guided Policy Search (GPS),
with an entropy-based policy-deviation penalty and a dual variable eta.
'''
class iLQR_GPS(BaseController):
"""Finite Horizon Iterative Linear Quadratic Regulator."""
    def __init__(self, dynamics, cost_GPS, N, A, B, C, max_reg=1e10, hessians=False, epsilon=1):
        """Constructs an iLQR solver for Guided Policy Search.
        Args:
            dynamics: Plant dynamics.
            cost_GPS: GPS cost function.
            N: Horizon length.
            A: Linear term of the fitted augmented-state dynamics
                (used as ``A @ [x; u] + B`` in generate_mean_cov).
            B: Affine term of the fitted dynamics.
            C: Additive covariance of the fitted dynamics (process noise).
            max_reg: Maximum regularization term to break early due to
                divergence. This can be disabled by setting it to None.
            hessians: Use the dynamic model's second order derivatives.
                Default: only use first order derivatives. (i.e. iLQR instead
                of DDP).
            epsilon: Step-size bound added to the policy-deviation penalty
                in cost_estimation.
        """
        self.dynamics = dynamics
        self.cost_GPS = cost_GPS
        self.N = N
        self._use_hessians = hessians and dynamics.has_hessians
        if hessians and not dynamics.has_hessians:
            warnings.warn("hessians requested but are unavailable in dynamics")
        # Regularization terms: Levenberg-Marquardt parameter.
        # See II F. Regularization Schedule.
        self._mu = 1.0
        self._mu_min = 1e-6
        self._mu_max = max_reg
        self._delta_0 = 2.0
        self._delta = self._delta_0
        # Unlike iLQR, the gains are randomly initialized (small values)
        # rather than zeroed.
        self._k = np.random.uniform(-0.1, 0.1, (N, dynamics.action_size))
        self._K = 0.01 * np.random.normal(0, np.eye(dynamics.action_size, dynamics.state_size),
                                          (N, dynamics.action_size, dynamics.state_size))
        # Per-step control covariance, replicated N times -> [N, action, action].
        cov_u = []
        temp = 0.01 * np.eye(dynamics.action_size)
        cov_u.append(temp)
        self.cov_u = np.array(cov_u*self.N)
        ### New params
        self.epsilon = epsilon
        self.A = A
        self.B = B
        self.C = C
        super(iLQR_GPS, self).__init__()
def generate_mean_cov(self, x, u, k, K, A, B, C, mean_old, cov_old, Q_uu):
### EQUATION 2.54, 2.55
mean_xu = []
cov_xu = []
mean = [mean_old[0]]
cov = [cov_old[0]]
for i in range(self.N):
temp = u[i][:, np.newaxis] + k[i][:, np.newaxis] + K[i].dot(mean[-1] - x[i][:, np.newaxis])
temp1 = np.concatenate((mean[-1], temp), axis=0)
mean_new = np.matmul(A, temp1) + B
temp2 = np.matmul(cov[-1], K[i].T)
temp3 = np.linalg.inv(Q_uu[i]) + np.matmul(K[i], temp2)
temp4 = np.matmul(np.matmul(A, np.block([[cov[-1], temp2], [temp2.T, temp3]])), A.T)
cov_new = temp4 + C
mean.append(mean_new)
cov.append(cov_new)
mean_xu.append(temp1)
cov_xu.append(np.block([[cov_old[i], temp2], [temp2.T, temp3]]))
return np.array(mean_xu), np.array(cov_xu), np.array(mean), np.array(cov)
def cost_estimation(self, eta, mean_xu, cov_xu, us, us_old):
### EQUATION 2.50 to 2.57
J_estimate_1 = self._trajectory_cost_estimate(mean_xu)
J_estimate_2 = 0
for i in range(mean_xu.shape[0]):
temp = np.trace(np.matmul(self.cost_GPS.Q, cov_xu[i]))
J_estimate_2 = J_estimate_2 + temp
J_estimate_3 = np.sum(entropy(np.abs(us), np.abs(us_old))) + self.epsilon
J_estimate = J_estimate_1/eta + J_estimate_2/eta - eta * J_estimate_3
return J_estimate
def eta_estimation(self, mean_xu, cov_xu, us, us_old):
### Page 54, 55 eta = [0.001, 10]
eta_max = self.cost_estimation(0.001, mean_xu, cov_xu, us, us_old)
eta_min = self.cost_estimation(10, mean_xu, cov_xu, us, us_old)
if eta_max*eta_min < 0:
print('Doing Brentq')
eta = optimize.brentq(self.cost_estimation, 0.001, 10, args=(mean_xu, cov_xu, us, us_old))
print('New eta ',eta)
else:
print('Doing Log search')
param_range = np.geomspace(0.001, 10, 30)
loss = []
for i in param_range:
temp = self.cost_estimation(i, mean_xu, cov_xu, us, us_old)
loss.append(temp)
opt_index = loss.index(min(loss))
eta = param_range[opt_index]
print('New eta ',eta)
return eta
def _control_GPS(self, xs, us, k, K, alpha=1.0):
"""Applies the controls for a given trajectory.
Args:
xs: Nominal state path [N+1, state_size + action_size].
us: Nominal control path [N, action_size].
k: Feedforward gains [N, action_size].
K: Feedback gains [N, action_size, state_size].
alpha: Line search coefficient.
Returns:
Tuple of
xs: state path [N+1, state_size + action_size].
us: control path [N, action_size].
"""
xs_new = np.zeros_like(xs)
us_new = np.zeros_like(us)
xs_new[0] = xs[0].copy()
state_size = self.dynamics.state_size
action_size = self.dynamics.action_size
for i in range(self.N):
# Applying alpha only on k[i] as in the paper for some reason
# doesn't converge.
us_new[i] = us[i] + alpha * (k[i]) + K[i].dot(xs_new[i][:state_size] - xs[i][:state_size])
xs_new[i + 1][:state_size] = self.dynamics.f(xs_new[i][:state_size], us_new[i], i)
xs_new[state_size:] = us_new[i]
return xs_new, us_new
def _trajectory_cost_GPS(self, x, x_old, u_old, k, K, cov_u):
"""Computes the given trajectory's cost.
Args:
xs: State path [N+1, state_size + action_size].
u_old: Old control path [N, action_size].
Returns:
Trajectory's total cost.
"""
J = map(lambda args: self.cost_GPS.l(*args), zip(x, x_old, u_old, k, K, cov_u, range(self.N)))
return sum(J) + self.cost_GPS.l(x[-1], None, None, None, None, None, self.N, terminal=True)
def _trajectory_cost_estimate(self, xs):
"""Computes the given trajectory's cost.
Args:
xs: State path [N+1, state_size + action_size].
u_old: Old control path [N, action_size].
Returns:
Trajectory's total cost.
"""
J = map(lambda args: self.cost_GPS.l_estimate(*args), zip(xs[:-1], range(self.N)))
return sum(J) + self.cost_GPS.l_estimate(xs[-1], self.N, terminal=True)
    def _forward_rollout_GPS(self, x0, x_old, us_current, us_old, k, K, cov_u):
        """Apply the forward dynamics to have a trajectory from the starting
        state x0 by applying the control path us_current, recording the
        first-order (and optionally second-order) derivatives of dynamics
        and GPS cost along the way.
        Args:
            x0: Initial augmented state [state_size + action_size].
            x_old: Previous iteration's state path.
            us_current: Control path to roll out [N, action_size].
            us_old: Previous iteration's control path [N, action_size].
            k: Feedforward gains [N, action_size].
            K: Feedback gains [N, action_size, state_size].
            cov_u: Per-step control covariances.
        Returns:
            Tuple of:
                xs: Augmented state path [N+1, state_size+action_size].
                F_x: Jacobian of state path w.r.t. x
                    [N, state_size, state_size].
                F_u: Jacobian of state path w.r.t. u
                    [N, state_size, action_size].
                L: Cost path [N+1].
                L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
                L_u: Jacobian of cost path w.r.t. u [N, action_size].
                L_xx: Hessian of cost path w.r.t. x, x
                    [N+1, state_size, state_size].
                L_ux: Hessian of cost path w.r.t. u, x
                    [N, action_size, state_size].
                L_uu: Hessian of cost path w.r.t. u, u
                    [N, action_size, action_size].
                F_xx: Hessian of state path w.r.t. x, x if Hessians are used
                    [N, state_size, state_size, state_size].
                F_ux: Hessian of state path w.r.t. u, x if Hessians are used
                    [N, state_size, action_size, state_size].
                F_uu: Hessian of state path w.r.t. u, u if Hessians are used
                    [N, state_size, action_size, action_size].
        """
        state_size = self.dynamics.state_size
        action_size = self.dynamics.action_size
        # NOTE(review): N is taken from us_current but the loop below runs
        # over self.N; these are presumably always equal — TODO confirm.
        N = us_current.shape[0]
        xs = np.empty((N + 1, state_size + action_size))
        F_x = np.empty((N, state_size, state_size))
        F_u = np.empty((N, state_size, action_size))
        if self._use_hessians:
            F_xx = np.empty((N, state_size, state_size, state_size))
            F_ux = np.empty((N, state_size, action_size, state_size))
            F_uu = np.empty((N, state_size, action_size, action_size))
        else:
            F_xx = None
            F_ux = None
            F_uu = None
        L = np.empty(N + 1)
        L_x = np.empty((N + 1, state_size))
        L_u = np.empty((N, action_size))
        L_xx = np.empty((N + 1, state_size, state_size))
        L_ux = np.empty((N, action_size, state_size))
        L_uu = np.empty((N, action_size, action_size))
        xs[0] = x0
        for i in range(self.N):
            # Augmented state: control stored alongside the state columns.
            xs[i, state_size:] = us_current[i]
            x = xs[i]
            xs[i + 1][:state_size] = self.dynamics.f(x[:state_size], x[state_size:], i)
            F_x[i] = self.dynamics.f_x(x[:state_size], x[state_size:], i)
            F_u[i] = self.dynamics.f_u(x[:state_size], x[state_size:], i)
            L[i] = np.asarray(self.cost_GPS.l(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False))
            L_x[i] = self.cost_GPS.l_x(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False)
            L_u[i] = self.cost_GPS.l_u(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False)
            L_xx[i] = self.cost_GPS.l_xx(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False)
            L_ux[i] = self.cost_GPS.l_ux(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False)
            L_uu[i] = self.cost_GPS.l_uu(x, x_old[i, :state_size], us_old[i], k[i], K[i], cov_u[i], i, terminal=False)
            if self._use_hessians:
                # NOTE(review): ``x`` is a 1-D row here, so the 2-D indexing
                # ``x[i, :state_size]`` would raise; the non-hessian path
                # above uses ``x[:state_size]`` — TODO confirm and fix if the
                # hessian path is ever enabled.
                F_xx[i] = self.dynamics.f_xx(x[i, :state_size], x[i, state_size:], i)
                F_ux[i] = self.dynamics.f_ux(x[i, :state_size], x[i, state_size:], i)
                F_uu[i] = self.dynamics.f_uu(x[i, :state_size], x[i, state_size:], i)
        # Terminal cost terms (no control at the final state).
        x = xs[-1]
        L[-1] = self.cost_GPS.l(x, None, None, None, None, None, N, terminal=True)
        L_x[-1] = self.cost_GPS.l_x(x, None, None, None, None, None, N, terminal=True)
        L_xx[-1] = self.cost_GPS.l_xx(x, None, None, None, None, None, N, terminal=True)
        return xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux, F_uu
def _backward_pass_GPS(self,
F_x,
F_u,
L_x,
L_u,
L_xx,
L_ux,
L_uu,
F_xx=None,
F_ux=None,
F_uu=None):
"""Computes the feedforward and feedback gains k and K.
Args:
F_x: Jacobian of state path w.r.t. x [N, state_size, state_size].
F_u: Jacobian of state path w.r.t. u [N, state_size, action_size].
L_x: Jacobian of cost path w.r.t. x [N+1, state_size].
L_u: Jacobian of cost path w.r.t. u [N, action_size].
L_xx: Hessian of cost path w.r.t. x, x
[N+1, state_size, state_size].
L_ux: Hessian of cost path w.r.t. u, x [N, action_size, state_size].
L_uu: Hessian of cost path w.r.t. u, u
[N, action_size, action_size].
F_xx: Hessian of state path w.r.t. x, x if Hessians are used
[N, state_size, state_size, state_size].
F_ux: Hessian of state path w.r.t. u, x if Hessians are used
[N, state_size, action_size, state_size].
F_uu: Hessian of state path w.r.t. u, u if Hessians are used
[N, state_size, action_size, action_size].
Returns:
Tuple of
k: feedforward gains [N, action_size].
K: feedback gains [N, action_size, state_size].
"""
V_x = L_x[-1]
V_xx = L_xx[-1]
k = np.empty_like(self._k)
K = np.empty_like(self._K)
Q = []
for i in range(self.N - 1, -1, -1):
if self._use_hessians:
Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q_GPS(
F_x[i], F_u[i], L_x[i], L_u[i], L_xx[i], L_ux[i], L_uu[i],
V_x, V_xx, F_xx[i], F_ux[i], F_uu[i])
Q.append(Q_uu)
else:
Q_x, Q_u, Q_xx, Q_ux, Q_uu = self._Q_GPS(F_x[i], F_u[i], L_x[i],
L_u[i], L_xx[i], L_ux[i],
L_uu[i], V_x, V_xx)
Q.append(Q_uu)
# Eq (6).
k[i] = -np.linalg.solve(Q_uu, Q_u)
K[i] = -np.linalg.solve(Q_uu, Q_ux)
# Eq (11b).
V_x = Q_x + K[i].T.dot(Q_uu).dot(k[i])
V_x += K[i].T.dot(Q_u) + Q_ux.T.dot(k[i])
# Eq (11c).
V_xx = Q_xx + K[i].T.dot(Q_uu).dot(K[i])
V_xx += K[i].T.dot(Q_ux) + Q_ux.T.dot(K[i])
V_xx = 0.5 * (V_xx + V_xx.T) # To maintain symmetry.
return np.array(k), np.array(K), np.array(Q)
def _Q_GPS(self,
f_x,
f_u,
l_x,
l_u,
l_xx,
l_ux,
l_uu,
V_x,
V_xx,
f_xx=None,
f_ux=None,
f_uu=None):
"""Computes second order expansion.
Args:
F_x: Jacobian of state w.r.t. x [state_size, state_size].
F_u: Jacobian of state w.r.t. u [state_size, action_size].
L_x: Jacobian of cost w.r.t. x [state_size].
L_u: Jacobian of cost w.r.t. u [action_size].
L_xx: Hessian of cost w.r.t. x, x [state_size, state_size].
L_ux: Hessian of cost w.r.t. u, x [action_size, state_size].
L_uu: Hessian of cost w.r.t. u, u [action_size, action_size].
V_x: Jacobian of the value function at the next time step
[state_size].
V_xx: Hessian of the value function at the next time step w.r.t.
x, x [state_size, state_size].
F_xx: Hessian of state w.r.t. x, x if Hessians are used
[state_size, state_size, state_size].
F_ux: Hessian of state w.r.t. u, x if Hessians are used
[state_size, action_size, state_size].
F_uu: Hessian of state w.r.t. u, u if Hessians are used
[state_size, action_size, action_size].
Returns:
Tuple of
Q_x: [state_size].
Q_u: [action_size].
Q_xx: [state_size, state_size].
Q_ux: [action_size, state_size].
Q_uu: [action_size, action_size].
"""
# Eqs (5a), (5b) and (5c).
Q_x = l_x + f_x.T.dot(V_x)
Q_u = l_u + f_u.T.dot(V_x)
Q_xx = l_xx + f_x.T.dot(V_xx).dot(f_x)
# Eqs (11b) and (11c).
reg = self._mu * np.eye(self.dynamics.state_size)
Q_ux = l_ux + f_u.T.dot(V_xx + reg).dot(f_x)
Q_uu = l_uu + f_u.T.dot(V_xx + reg).dot(f_u)
if self._use_hessians:
Q_xx += np.tensordot(V_x, f_xx, axes=1)
Q_ux += np.tensordot(V_x, f_ux, axes=1)
Q_uu += np.tensordot(V_x, f_uu, axes=1)
return Q_x, Q_u, Q_xx, Q_ux, Q_uu
def fit_GPS(self, x0, us_init, us_local, n_iterations=100, tol=1e-6, cov_method='MEM', on_iteration=None):
"""Computes the optimal controls.
Args:
x0: Initial state [state_size + action_size].
us_init: Initial control path, more precisely local policy [N, action_size].
n_iterations: Maximum number of interations. Default: 100.
tol: Tolerance. Default: 1e-6.
cov_method: Defines the way covariances should be calculated
MEM: Maximum Entropy Method. Cov = (Q_uu)^-1
FCM: Fixed Covariance Method. Cov = 0.01*eye(state_size + action_size, state_size + action_size)
on_iteration: Callback at the end of each iteration with the
following signature:
(iteration_count, x, J_opt, accepted, converged) -> None
where:
iteration_count: Current iteration count.
xs: Current state path.
us: Current action path.
J_opt: Optimal cost-to-go.
accepted: Whether this iteration yielded an accepted result.
converged: Whether this iteration converged successfully.
Default: None.
Returns:
Tuple of
xs: optimal state path [N+1, state_size].
us: optimal control path [N, action_size].
Note:
eta: Range of eta is [0.001, 10].
epsilon: Range of epsilon is [0.25, 1].
alpha: Backtracking line search parameter alpha is set to 1 always. If you want to the search then backtracking
line search candidates 0 < alpha <= 1. Code: alphas = 1.1**(-np.arange(10)**2)
"""
# Determine state size and action size
# Reset regularization term.
self._mu = 1.0
self._delta = self._delta_0
# Make a copy of initial guess
us = us_init.copy()
xs = np.zeros((self.N + 1, self.dynamics.state_size + self.dynamics.action_size))
# Control parameter
# k: Open loop controller gain matrix and
# K: Closed loop feedback controller gain matrix
k = self._k
K = self._K
changed = True
converged = False
for iteration in range(n_iterations):
accepted = False
if iteration == 0:
# Set eta for the first iteration equal to one
self.cost_GPS.eta = 10.0
mean = []
cov = []
temp = np.matmul(self.A, x0[:, np.newaxis]) + self.B
temp1 = self.C
mean.append(temp)
cov.append(temp1)
mean = np.array(mean*(self.N+1))
cov = np.array(cov*(self.N+1))
# # else:
# # # Estimate the eta
# # mean_xu, cov_xu, mean, cov = self.generate_mean_cov(xs[:, :self.dynamics.state_size], xs[:, self.dynamics.state_size:], k, K,
# # self.A, self.B, self.C, mean, cov, Q_uu)
# # self.cost_GPS.eta = self.eta_estimation(mean_xu, cov_xu, us, us_init)
# Forward rollout only if it needs to be recomputed.
if changed:
(xs, F_x, F_u, L, L_x, L_u, L_xx, L_ux, L_uu, F_xx, F_ux,
F_uu) = self._forward_rollout_GPS(x0, xs, xs[:-1, self.dynamics.state_size:], us, k, K, self.cov_u)
J_opt = L.sum()
changed = False
try:
# Backward pass.
k, K, Q_uu = self._backward_pass_GPS(F_x, F_u, L_x, L_u, L_xx, L_ux, L_uu,
F_xx, F_ux, F_uu)
xs_new, us_new = self._control_GPS(xs, us, k, K, alpha=1.0)
J_new = self._trajectory_cost_GPS(xs_new, xs, us, k, K, self.cov_u)
if J_new < J_opt:
if np.abs((J_opt - J_new) / J_opt) < tol:
converged = True
else:
mean_xu, cov_xu, mean, cov = self.generate_mean_cov(xs[:, :self.dynamics.state_size], xs[:-1, self.dynamics.state_size:],
k, K, self.A, self.B, self.C, mean, cov, Q_uu)
self.cost_GPS.eta = self.eta_estimation(mean_xu, cov_xu, us, us_new)
J_opt = J_new
xs = xs_new
us = us_new
cov_u = []
if cov_method == 'MEM':
for i in range(self.N):
cov_u.append(np.linalg.inv(Q_uu[i]))
self.cov_u = np.array(cov_u)
elif cov_method == 'FCM':
temp = 0.01 * np.eye(self.dynamics.action_size)
cov_u.append(temp)
self.cov_u = | np.array(cov_u*self.N) | numpy.array |
#!/usr/bin/env python
# TF KOMPAS: Site Caller
# Author: <NAME>
# Version: 5/18/2020
import argparse
# Command-line interface: required parameters, mutually exclusive search-space
# inputs (-b/-g together, or -f alone), and optional tuning flags.
programDescription = 'Calls TFBS from bed/genome or fasta files'
parser = argparse.ArgumentParser(description=programDescription,add_help=False)
req = parser.add_argument_group('parameter arguments')
req.add_argument('-k', '--kmerFile',type = argparse.FileType('r'), default = '-', help='aligned kmer file to use, stdin if not specified')
req.add_argument('-c','--core', type=int,required = True, nargs=2, help='Start and end positions of the core, relative to the model (2 values)')
req.add_argument('-o','--output', type=str,required = True, help='Output file')
# Bed or fasta
search = parser.add_argument_group('search space arguments [-b/-g or -f]')
search.add_argument('-b','--bed', type=str, help='Coordinates to search (.bed)')
search.add_argument('-g','--genome', type=str,help='Genome file (.fasta/.fa)')
search.add_argument('-f','--fasta', type=str, help='Fasta file (.fasta/.fa)')
# Optional
opt = parser.add_argument_group('optional arguments')
opt.add_argument('-gc','--gcParse',action='store_true', help='If using a fasta input with genomic coordinates, returns bed file of coordinates')
opt.add_argument('-t','--threshold', type=float,default = 0.4, help='Escore threshold for calling (float, -0.5 to 0.5)')
opt.add_argument('-l', '--log',type=str, help='Generate a log file')
opt.add_argument('-rM','--rankModel' ,action='store_true', help='add alignment model score')
opt.add_argument('-h', '--help', action='help', default=argparse.SUPPRESS, help='Show this help message and exit.')
args = parser.parse_args()
# Validate that exactly one search-space combination was supplied.
if args.bed is None and args.genome is None and args.fasta is None:
    parser.error("KOMPAS requires either (-b and -g) or (-f)")
if (args.bed and args.genome is None) or (args.genome and args.bed is None):
    parser.error("-b and -g need to be specified together")
if (args.fasta and args.genome) or (args.fasta and args.bed):
    parser.error("-f argument cannot be used with -b or -g")
if (args.gcParse and args.genome) or (args.gcParse and args.bed):
    parser.error("-gc argument cannot be used with -b or -g")
# Unpack parsed arguments into the module-level names used by the rest
# of the script.
output = args.output
kmerFile = args.kmerFile
isFasta = args.fasta
if args.bed:
    peakFile = args.bed
    genomeFile = args.genome
elif args.fasta:
    gcParse = args.gcParse
    fastaFile = args.fasta
userCore = args.core
threshold = args.threshold
logFile = args.log
rM = args.rankModel
# Imports
import pandas as pd
import numpy as np
import os
import sys
from io import StringIO
import itertools
from pyfaidx import Fasta
# Store input data and parse
inputData = kmerFile.read()
kmerFile.close()
# Parse palindrome setting: the first line of a KOMPAS alignment file is a
# '#Palindrome...' or '#Non...' marker; anything else is not a KOMPAS file.
f = StringIO(inputData)
palSetting = f.readline().strip()
if not palSetting.startswith('#Palindrome') and not palSetting.startswith('#Non'):
    raise ValueError('Invalid input, file needs to be the output from a KOMPAS alignment tool')
if palSetting.startswith('#Palindrome'):
    isPalindrome = True
else:
    isPalindrome = False
f.close()
# Read in kmer data
def convList(string):
    """
    Input: String of a list of integers, e.g. "[1, 2, 3]"
    Output: List of integers
    """
    # Drop the surrounding brackets, split on the ", " separators produced
    # by Python's list repr, and cast each piece to int.
    return [int(piece) for piece in string[1:-1].split(', ')]
# Read the aligned kmer table (kposition column is parsed into int lists).
kmer = pd.read_csv(StringIO(inputData), sep = '\t',converters={'kposition': lambda x: convList(x)}, skiprows = 7)
k = len(kmer['kmer'][0])  # kmer length, taken from the first kmer entry
# Read in model length
PWM = pd.read_csv(StringIO(inputData), delim_whitespace=True, skiprows = 2, nrows = 4, header = None)
mStart = k  # model start offset in kPosition coordinates
mLength = (len(PWM.columns) -1)
mEnd = mStart + mLength
# Check if given positions are valid
if userCore[0] > mLength or userCore[1] > mLength:
    raise ValueError('Core position(s) greater than model length')
if userCore[1] <= userCore[0]:
    raise ValueError('Core start must be greater than core end')
# Define core in kPosition
core = []
core.append(mStart + (userCore[0] -1))
core.append(mStart + (userCore[1]))
coreLen = core[1] - core[0]
centerPos = mStart
# Find the kPositions required, any would be sufficient to call
# (kmer longer than the core: any single kmer spanning the core suffices)
if k > coreLen:
    searchEnd = core[1]
    checkK = 0
    ReqKpos = set() #
    while checkK != core[0]:
        checkK = searchEnd - k
        if checkK <= core[0]:
            ReqKpos.add(checkK)
        searchEnd = searchEnd + 1
# Or find the group of all kPositions that are needed, all or none
# (kmer shorter than or equal to the core: every tiling position is required)
else:
    searchStart = core[0]
    checkK = 0
    ReqKpos = set()
    while searchStart + k <= core[1]:
        ReqKpos.add(searchStart)
        searchStart = searchStart + 1
# Determine flanks of ReqKPos for threshold score reporting
ScoredKpos = ReqKpos.copy()
if k >= coreLen:
    ScoredKpos.add(min(ReqKpos) - 1)
    ScoredKpos.add(max(ReqKpos) + 1)
# Filter dataframe for kmers with ScoredKpos
def filtPos(inputList, ScoredKpos = ScoredKpos):
    """
    Input: List of integers (kPosition)
    Output: Intersected list of ScoredKPos, preserving input order
    """
    # The default argument snapshots the module-level ScoredKpos at
    # definition time, which is the intended behavior here.
    return [position for position in inputList if position in ScoredKpos]
# Filter the kmer table down to classified kmers that (a) map to at least one
# scored kPosition and (b) meet the E-score threshold, then index them by
# kmer string for O(1) lookup during sequence scanning.
thrKmers = kmer.copy(deep = True)
thrKmers = thrKmers.query("classified == 1") # Use classified kmers
thrKmers['kposition'] = thrKmers['kposition'].apply(lambda x: filtPos(x)) # Only ScoredKpos
thrKmers = thrKmers[thrKmers['kposition'].apply(lambda x: len(x) != 0)] # Remove empty lists
thrKmers = thrKmers[thrKmers['Escore'] >= threshold]
# kDict: kmer string -> (list of kPositions, E-score)
kDict = dict(zip(thrKmers['kmer'],zip(thrKmers['kposition'],thrKmers['Escore'])))
# Sequence Manipulation Functions
def revComp(sequence):
    """
    Input: String of DNA sequences in any case
    Output: Reverse Complement in upper case
    Raises: ValueError if the sequence contains a character other than
        A, C, G, T or N (case-insensitive)
    """
    rcDict = {'A':'T', 'T':'A', 'C':'G', 'G':'C', 'N':'N'}
    seqUp = sequence.upper()
    try:
        # ''.join over a generator is O(n); the original += loop was O(n^2).
        rcSeq = ''.join(rcDict[letter] for letter in seqUp)
    except KeyError:
        raise ValueError('Error: nucleotide not A, C, G, T, N found in string')
    return(rcSeq[::-1])
# Scoring and Calling Functions
def kmerMatch(seq):
    """
    Input: Sequence to search for kPos and Escore
    Output[0] = Lists of all possible kPosition lists
    Output[1] = Lists of all possible Escore lists

    Scans seq with a width-k window against the module-level kDict.  When a
    window maps to multiple kPositions the search forks: one recursive call
    per candidate position, each with its own copy of the running lists.
    Windows absent from kDict contribute the sentinel pair (0, -0.5).
    """
    kPosLists = []
    escoreLists = []
    def recursiveKmerMatch(seq, crntKposList,crntEList, seqPos):
        """
        Input: DNA sequence, list of previous kPositions, Escores, and
            current position in the sequence
        Output: Appends kPositions and Escores to initiallized empty list
            in kmerMatch
        """
        for i in range(len(seq) - k + 1 - seqPos): #range of current fork
            iPos = i + seqPos # start from the current fork
            window = seq[iPos:iPos+k]
            if window in kDict:
                if len(kDict[window][0]) == 1: # If only 1 kPos
                    # Unambiguous window: extend the current branch in place.
                    crntKposList.append(kDict[window][0][0])
                    crntEList.append(kDict[window][1])
                else:
                    # Ambiguous window: fork one branch per candidate
                    # kPosition, each continuing from the next offset.
                    for j in kDict[window][0]:
                        frkdKposList = crntKposList.copy()
                        frkdEList = crntEList.copy()
                        frkdKposList.append(j)
                        frkdEList.append(kDict[window][1])
                        recursiveKmerMatch(seq, frkdKposList,frkdEList, iPos+1)
                    # The forks own the remainder of the scan; end this branch.
                    return None
            else:
                # Unmatched window: record the sentinel values.
                crntKposList.append(0)
                crntEList.append(-0.5)
        # Scan finished without forking further: record this branch's result.
        kPosLists.append(crntKposList)
        escoreLists.append(crntEList)
    recursiveKmerMatch(seq,[],[], 0)
    return((kPosLists, escoreLists))
def findConsArrays(matchOutput):
"""
Input: Output from kmerMatch
Output: Sequence positions, kPositions
"""
consArrays = []
for kpos, kscore in zip(matchOutput[0], matchOutput[1]):
kpos = np.array(kpos)
kscore = np.array(kscore)
if k >= coreLen:
position = list(filter(lambda x: len(x) != 1,np.split(np.r_[:len(kpos)], np.where(np.diff(kpos) != 1)[0]+1)))
kpos = list(filter(lambda x: len(x) != 1,np.split(kpos, np.where( | np.diff(kpos) | numpy.diff |
import pytest
import numpy as np
import tensorflow as tf
from tensorflow.errors import FailedPreconditionError
from neupy import init
from neupy.utils import tf_utils, asfloat
from base import BaseTestCase
@pytest.mark.parametrize("in_shape,out_shape", [
    ((10,), (10,)),
    ((10, 2), (20,)),
    ((10, 2, 4), (80,)),
])
def test_flatten(in_shape, out_shape):
    """flatten() must collapse an array of any rank into a 1-D vector."""
    data = np.random.random(in_shape)
    flattened = tf_utils.tensorflow_eval(tf_utils.flatten(data))
    assert flattened.shape == out_shape
class TFUtilsTestCase(BaseTestCase):
def test_outer(self):
actual = self.eval(tf_utils.outer(np.ones(10), np.ones(10)))
np.testing.assert_array_almost_equal(actual, np.ones((10, 10)))
def test_dot(self):
actual = self.eval(tf_utils.dot(
np.arange(10).astype(np.float32),
2 * np.arange(10).astype(np.float32),
))
self.assertEqual(actual, 570)
def test_repeat(self):
matrix = np.array([
[1, 2],
[3, 4],
])
actual = self.eval(tf_utils.repeat(matrix, (2, 3)))
expected = np.array([
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4],
[3, 3, 3, 4, 4, 4],
])
np.testing.assert_array_equal(actual, expected)
def test_make_single_vector(self):
w1 = tf.Variable(np.ones((4, 3)))
b1 = tf.Variable(np.zeros((3,)))
w2 = tf.Variable(np.ones((3, 2)))
actual = self.eval(tf_utils.make_single_vector([w1, b1, w2]))
expected = np.array([1] * 12 + [0] * 3 + [1] * 6)
np.testing.assert_array_equal(actual, expected)
    def test_setup_parameter_updates(self):
        """Applying the (parameter, new_value) pairs updates each variable."""
        w1 = tf.Variable(np.ones((4, 3)))
        b1 = tf.Variable(np.zeros((3,)))
        w2 = tf.Variable(np.ones((3, 2)))
        tf_utils.initialize_uninitialized_variables([w1, b1, w2])
        # New values for all parameters packed as one flat vector: x -> 2x + 1.
        updates = 2 * tf_utils.make_single_vector([w1, b1, w2]) + 1
        updates = tf_utils.setup_parameter_updates([w1, b1, w2], updates)
        sess = tf_utils.tensorflow_session()
        # Run each assignment in the session before checking final values.
        for parameter, new_value in updates:
            sess.run(parameter.assign(new_value))
        # ones -> 3, zeros -> 1 under x -> 2x + 1.
        np.testing.assert_array_almost_equal(
            self.eval(w1),
            3 * np.ones((4, 3)),
        )
        np.testing.assert_array_almost_equal(
            self.eval(b1),
            np.ones(3),
        )
        np.testing.assert_array_almost_equal(
            self.eval(w2),
            3 * np.ones((3, 2)),
        )
def test_function_name_scope(self):
@tf_utils.function_name_scope
def new_variable():
return tf.Variable(0, name='myvar')
variable = new_variable()
self.assertEqual(variable.name, 'new_variable/myvar:0')
def test_class_method_name_scope(self):
class MyRelu(object):
@tf_utils.class_method_name_scope
def output(self):
return tf.Variable(0, name='weights')
variable = MyRelu().output()
self.assertEqual(variable.name, 'MyRelu/weights:0')
def test_function_without_updates(self):
x = tf.placeholder(name='x', dtype=tf.float32)
w = tf.Variable(asfloat(np.random.random((4, 3))), name='w')
b = tf.Variable(asfloat(np.random.random((3,))), name='b')
y = tf.matmul(x, w) + b
prediction = tf_utils.function([x], y)
tf_utils.initialize_uninitialized_variables()
actual = prediction(np.random.random((7, 4)))
self.assertEqual(actual.shape, (7, 3))
def test_function_with_updates(self):
x = tf.placeholder(name='x', dtype=tf.float32)
w = tf.Variable(asfloat(np.ones((4, 3))), name='w')
b = tf.Variable(asfloat( | np.ones((3,)) | numpy.ones |
import numpy as np
from .base import EvaluationMethod
import ot
import json
import scipy
def wasserstein(X, Y, metric):
    """Earth mover's distance between the empirical distributions of X and Y.

    The pairwise cost matrix is rescaled so its largest entry is 1 before
    solving the transport problem with uniform sample weights.
    """
    cost = ot.dist(X, Y, metric=metric)
    cost /= np.max(cost)
    n1, n2 = cost.shape
    # 1d histograms: uniform distribution over the samples of each set.
    weights_x = np.ones(n1) / n1
    weights_y = np.ones(n2) / n2
    return ot.emd2(weights_x, weights_y, cost)
def frechet(u1, u2, C1, C2):
    """Frechet distance between the Gaussians N(u1, C1) and N(u2, C2).

    Computed as |u1 - u2|^2 + tr(C1 + C2 - 2 * sqrtm(C1 @ C2)), where the
    matrix square root comes from the eigendecomposition of C1 @ C2.
    """
    eigvals, eigvecs = np.linalg.eig(C1 @ C2)
    eigvals[eigvals < 0] = 0  # C1 and C2 are positive semi-definit
    eigvals = np.sqrt(eigvals)
    # Reassemble the (real part of the) matrix square root of C1 @ C2.
    sqrt_product = np.real(eigvecs @ np.diag(eigvals) @ np.linalg.inv(eigvecs))
    diff = u1 - u2
    return np.dot(diff, diff) + np.trace(C1 + C2 - 2 * sqrt_product)
def rv_coefficient(X, Y):
    """RV coefficient between X and Y (rows = samples, columns = features).

    A matrix-valued analogue of squared correlation:
        tr(C_xy C_yx) / sqrt(tr(C_xx^2) * tr(C_yy^2)).
    """
    K1 = X.shape[1]
    joint_cov = np.cov(X, Y, rowvar=False)
    # Partition the joint covariance into its four blocks.
    cov_xx = joint_cov[:K1, :K1]
    cov_yy = joint_cov[K1:, K1:]
    cov_xy = joint_cov[:K1, K1:]
    cov_yx = joint_cov[K1:, :K1]
    covariance_term = np.trace(cov_xy @ cov_yx)
    variance_x = np.trace(cov_xx @ cov_xx)
    variance_y = np.trace(cov_yy @ cov_yy)
    return covariance_term / np.sqrt(variance_x * variance_y)
class Project(EvaluationMethod):
    """Accumulates low-dimensional projections of super-resolved batches.

    Each batch is normalized with the provided mean/std, projected either
    through `transform` (when given) or through `proj_matrix`, and buffered;
    finalize() stacks the buffered projections and writes them to disk.
    """

    def __init__(self, proj_matrix, mean, std, transform=None):
        """
        proj_matrix: Projection matrix of shape [H*W*C, K]
        mean: mean vector of data
        std: standard deviation of data
        transform: optional callable applied to the normalized batch,
            used instead of the matrix projection when provided
        """
        super(Project, self).__init__()
        self.proj_matrix = proj_matrix
        self.mean = mean
        self.std = std
        self.transform = transform
        self.projections = []

    def evaluate_SR(self, i, LR, SR):
        N, H, W, C = SR.shape  # batches are NHWC
        normed = (SR - self.mean) / self.std
        if self.transform:
            projected = self.transform(normed).reshape(N, -1)
        else:
            projected = normed.reshape(N, -1) @ self.proj_matrix  # N x K
        self.projections.append(projected)

    def finalize(self):
        stacked = np.concatenate(self.projections, axis=0)
        np.save(self.dir / 'projected.npy', stacked, allow_pickle=False)
import argparse
import numpy as np
import os
import sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from saliency.visualizer.smiles_visualizer import SmilesVisualizer
def visualize(dir_path):
parent_dir = os.path.dirname(dir_path)
saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy"))
saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy"))
saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy"))
visualizer = SmilesVisualizer()
os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True)
os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True)
test_idx = np.load(os.path.join(dir_path, "test_idx.npy"))
answer = np.load(os.path.join(dir_path, "answer.npy"))
output = np.load(os.path.join(dir_path, "output.npy"))
smiles_all = np.load(os.path.join(parent_dir, "smiles.npy"))
def calc_range(saliency):
vmax = float('-inf')
vmin = float('inf')
for v in saliency:
vmax = max(vmax, | np.max(v) | numpy.max |
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division, print_function, unicode_literals
import scipy.signal
import numpy as np
from phasor.utilities.print import pprint
def cheby_boost_7(
        F_center = 1.,
        shift = 5.,
):
    """Build a 7th-order analog boost filter in zpk form.

    A Chebyshev-I lowpass prototype is inverted (its poles become the
    boost's zeros and its gain is inverted), extra shaping singularities
    are appended, everything is scaled by F_center/shift, and the gain is
    normalized so |H(j*F_center)| = 1.

    Returns the tuple (z, p, k).
    """
    F_center = float(F_center)
    order = 7
    proto_z, proto_p, proto_k = scipy.signal.cheby1(
        order, .2, 1, analog=True, output='zpk')
    # Invert the prototype: poles -> zeros, zeros -> poles, gain -> 1/gain.
    zeros = list(proto_p)
    poles = list(proto_z)
    gain = 1 / proto_k
    # Additional shaping: one complex zero/pole pair plus slow real poles.
    zeros.append(-1 + 1j)
    poles.append(-.1 + 1j)
    poles.extend([-.01] * order)
    # Scale the whole design from the prototype frequency by F_center/shift.
    scale = F_center / shift
    zeros = scale * np.asarray(zeros)
    poles = scale * np.asarray(poles)
    gain = scale * np.asarray(gain)
    # Normalize so the magnitude response is exactly unity at F_center.
    _, response = scipy.signal.freqresp(
        (zeros, poles, gain,),
        F_center
    )
    gain = gain / abs(response)
    return zeros, poles, gain
def ledge_controller(
F_center = 1.,
shift = 5.,
N = 3,
):
F_center = float(F_center)
z = []
p = []
k = 1
zpk_cheby = scipy.signal.cheby1(N, .3, 1, analog = True, output='zpk')
z.extend(zpk_cheby[1])
p.extend(zpk_cheby[0])
k = k / zpk_cheby[2]
zpk_cheby = scipy.signal.cheby1(N+2, .3, 1.00, analog = True, output='zpk')
z.extend(zpk_cheby[0])
p.extend(zpk_cheby[1])
k = k * zpk_cheby[2]
zpk_cheby = scipy.signal.butter(2, 2, analog = True, output='zpk')
z.extend(zpk_cheby[1])
p.extend(zpk_cheby[0])
k = k / zpk_cheby[2]
zpk_cheby = scipy.signal.cheby1(1, 1, 3, analog = True, output='zpk')
z.extend(zpk_cheby[0])
p.extend(zpk_cheby[1])
k = k * zpk_cheby[2]
z = F_center/shift * np.asarray(z)
p = F_center/shift * | np.asarray(p) | numpy.asarray |
import numpy as np
import mpmath
from abc import ABC, abstractmethod
from math import factorial
from desc.utils import flatten_list
from desc.io import IOAble
from desc.backend import jnp, jit, sign, fori_loop, gammaln
__all__ = [
"PowerSeries",
"FourierSeries",
"DoubleFourierSeries",
"ZernikePolynomial",
"FourierZernikeBasis",
]
class Basis(IOAble, ABC):
    """Basis is an abstract base class for spectral basis sets."""

    # Attributes persisted by the IOAble save/load machinery.
    _io_attrs_ = [
        "_L",
        "_M",
        "_N",
        "_NFP",
        "_modes",
        "_sym",
        "_spectral_indexing",
    ]

    def __init__(self):
        self._enforce_symmetry()
        self._sort_modes()
        self._create_idx()

    def _set_up(self):
        """Called after loading or changing resolution."""
        self._enforce_symmetry()
        self._sort_modes()
        self._create_idx()

    def _enforce_symmetry(self):
        """Enforce stellarator symmetry."""
        assert self.sym in [
            "sin",
            "sine",
            "cos",
            "cosine",
            False,
        ], f"Unknown symmetry type {self.sym}"
        if self.sym in ["cos", "cosine"]:  # cos(m*t-n*z) symmetry
            # Remove modes where sign(m) != sign(n); those are the sine terms.
            non_sym_idx = np.where(sign(self.modes[:, 1]) != sign(self.modes[:, 2]))
            self._modes = np.delete(self.modes, non_sym_idx, axis=0)
        elif self.sym in ["sin", "sine"]:  # sin(m*t-n*z) symmetry
            non_sym_idx = np.where(sign(self.modes[:, 1]) == sign(self.modes[:, 2]))
            self._modes = np.delete(self.modes, non_sym_idx, axis=0)

    def _sort_modes(self):
        """Sorts modes for use with FFT."""
        # np.lexsort takes the LAST key as primary: sort by n, then l, then m.
        sort_idx = np.lexsort((self.modes[:, 1], self.modes[:, 0], self.modes[:, 2]))
        self._modes = self.modes[sort_idx]

    def _create_idx(self):
        """Create index for use with self.get_idx()."""
        # Nested dict keyed as _idx[L][M][N] -> row index into self.modes.
        self._idx = {}
        for idx, (L, M, N) in enumerate(self.modes):
            if L not in self._idx:
                self._idx[L] = {}
            if M not in self._idx[L]:
                self._idx[L][M] = {}
            self._idx[L][M][N] = idx

    def get_idx(self, L=0, M=0, N=0):
        """Get the index of the ``'modes'`` array corresponding to given mode numbers.

        Parameters
        ----------
        L : int
            Radial mode number.
        M : int
            Poloidal mode number.
        N : int
            Toroidal mode number.

        Returns
        -------
        idx : int
            Index of given mode numbers.

        Raises
        ------
        ValueError
            If the mode (L, M, N) is not present in this basis.

        """
        try:
            return self._idx[L][M][N]
        except KeyError as e:
            raise ValueError(
                "mode ({}, {}, {}) is not in basis {}".format(L, M, N, str(self))
            ) from e

    @abstractmethod
    def _get_modes(self):
        """ndarray: Mode numbers for the basis."""

    @abstractmethod
    def evaluate(
        self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False
    ):
        """Evaluate basis functions at specified nodes.

        Parameters
        ----------
        nodes : ndarray of float, size(num_nodes,3)
            node coordinates, in (rho,theta,zeta)
        derivatives : ndarray of int, shape(3,)
            order of derivatives to compute in (rho,theta,zeta)
        modes : ndarray of int, shape(num_modes,3), optional
            basis modes to evaluate (if None, full basis is used)
        unique : bool, optional
            whether to reduce workload by only calculating for unique values of
            nodes, modes; can be faster, but doesn't work with jit or autodiff

        Returns
        -------
        y : ndarray, shape(num_nodes,num_modes)
            basis functions evaluated at nodes

        """

    @abstractmethod
    def change_resolution(self):
        """Change resolution of the basis to the given resolutions."""

    @property
    def L(self):
        """int: Maximum radial resolution."""
        # setdefault supplies a default for instances loaded without the
        # attribute (and inserts that default into __dict__ on first access).
        return self.__dict__.setdefault("_L", 0)

    @property
    def M(self):
        """int: Maximum poloidal resolution."""
        return self.__dict__.setdefault("_M", 0)

    @property
    def N(self):
        """int: Maximum toroidal resolution."""
        return self.__dict__.setdefault("_N", 0)

    @property
    def NFP(self):
        """int: Number of field periods."""
        return self.__dict__.setdefault("_NFP", 1)

    @property
    def sym(self):
        """str: {``'cos'``, ``'sin'``, ``False``} Type of symmetry."""
        return self.__dict__.setdefault("_sym", False)

    @property
    def modes(self):
        """ndarray: Mode numbers [l,m,n]."""
        return self.__dict__.setdefault("_modes", np.array([]).reshape((0, 3)))

    @modes.setter
    def modes(self, modes):
        self._modes = modes

    @property
    def num_modes(self):
        """int: Total number of modes in the spectral basis."""
        return self.modes.shape[0]

    @property
    def spectral_indexing(self):
        """str: Type of indexing used for the spectral basis."""
        return self.__dict__.setdefault("_spectral_indexing", "linear")

    def __repr__(self):
        """String form of the object."""
        return (
            type(self).__name__
            + " at "
            + str(hex(id(self)))
            + " (L={}, M={}, N={}, NFP={}, sym={}, spectral_indexing={})".format(
                self.L, self.M, self.N, self.NFP, self.sym, self.spectral_indexing
            )
        )
class PowerSeries(Basis):
    """1D basis set for flux surface quantities.

    Power series in the radial coordinate.

    Parameters
    ----------
    L : int
        Maximum radial resolution.

    """

    def __init__(self, L):
        self._L = L
        self._M = 0
        self._N = 0
        self._NFP = 1
        self._sym = False
        self._spectral_indexing = "linear"
        self._modes = self._get_modes(L=self.L)
        super().__init__()

    def _get_modes(self, L=0):
        """Get mode numbers for power series.

        Parameters
        ----------
        L : int
            Maximum radial resolution.

        Returns
        -------
        modes : ndarray of int, shape(num_modes,3)
            Array of mode numbers [l,m,n].
            Each row is one basis function with modes (l,m,n).

        """
        # Radial mode numbers 0..L; m and n are always 0 for a power series.
        l = np.arange(L + 1).reshape((-1, 1))
        z = np.zeros((L + 1, 2))
        return np.hstack([l, z])

    def evaluate(
        self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False
    ):
        """Evaluate basis functions at specified nodes.

        Parameters
        ----------
        nodes : ndarray of float, size(num_nodes,3)
            Node coordinates, in (rho,theta,zeta).
        derivatives : ndarray of int, shape(num_derivatives,3)
            Order of derivatives to compute in (rho,theta,zeta).
        modes : ndarray of int, shape(num_modes,3), optional
            Basis modes to evaluate (if None, full basis is used)
        unique : bool, optional
            whether to reduce workload by only calculating for unique values of
            nodes, modes; can be faster, but doesn't work with jit or autodiff

        Returns
        -------
        y : ndarray, shape(num_nodes,num_modes)
            basis functions evaluated at nodes

        """
        if modes is None:
            modes = self.modes
        if not len(modes):
            return np.array([]).reshape((len(nodes), 0))
        r, t, z = nodes.T
        l, m, n = modes.T
        if unique:
            # Evaluate only at unique rho values / unique radial mode numbers,
            # then scatter back with the inverse indices below.
            _, ridx, routidx = np.unique(
                r, return_index=True, return_inverse=True, axis=0
            )
            _, lidx, loutidx = np.unique(
                l, return_index=True, return_inverse=True, axis=0
            )
            r = r[ridx]
            l = l[lidx]
        radial = powers(r, l, dr=derivatives[0])
        if unique:
            radial = radial[routidx][:, loutidx]
        return radial

    def change_resolution(self, L):
        """Change resolution of the basis to the given resolution.

        Parameters
        ----------
        L : int
            Maximum radial resolution.

        """
        if L != self.L:
            self._L = L
            self._modes = self._get_modes(self.L)
            self._set_up()
class FourierSeries(Basis):
    """1D basis set for use with the magnetic axis.

    Fourier series in the toroidal coordinate.

    Parameters
    ----------
    N : int
        Maximum toroidal resolution.
    NFP : int
        number of field periods
    sym : {``'cos'``, ``'sin'``, False}
        * ``'cos'`` for cos(m*t-n*z) symmetry
        * ``'sin'`` for sin(m*t-n*z) symmetry
        * ``False`` for no symmetry (Default)

    """

    def __init__(self, N, NFP=1, sym=False):
        self._L = 0
        self._M = 0
        self._N = N
        self._NFP = NFP
        self._sym = sym
        self._spectral_indexing = "linear"
        self._modes = self._get_modes(N=self.N)
        super().__init__()

    def _get_modes(self, N=0):
        """Get mode numbers for Fourier series.

        Parameters
        ----------
        N : int
            Maximum toroidal resolution.

        Returns
        -------
        modes : ndarray of int, shape(num_modes,3)
            Array of mode numbers [l,m,n].
            Each row is one basis function with modes (l,m,n).

        """
        dim_tor = 2 * N + 1
        # Toroidal mode numbers run from -N to +N; l and m are always 0.
        n = np.arange(dim_tor).reshape((-1, 1)) - N
        z = np.zeros((dim_tor, 2))
        return np.hstack([z, n])

    def evaluate(
        self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False
    ):
        """Evaluate basis functions at specified nodes.

        Parameters
        ----------
        nodes : ndarray of float, size(num_nodes,3)
            Node coordinates, in (rho,theta,zeta).
        derivatives : ndarray of int, shape(num_derivatives,3)
            Order of derivatives to compute in (rho,theta,zeta).
        modes : ndarray of int, shape(num_modes,3), optional
            Basis modes to evaluate (if None, full basis is used).
        unique : bool, optional
            Whether to reduce workload by only calculating for unique values of
            nodes, modes; can be faster, but doesn't work with jit or autodiff.

        Returns
        -------
        y : ndarray, shape(num_nodes,num_modes)
            Basis functions evaluated at nodes.

        """
        if modes is None:
            modes = self.modes
        if not len(modes):
            return np.array([]).reshape((len(nodes), 0))
        r, t, z = nodes.T
        n = modes[:, 2]
        if unique:
            # Evaluate only at unique zeta values / unique mode numbers, then
            # scatter the results back with the inverse indices below.
            _, zidx, zoutidx = np.unique(
                z, return_index=True, return_inverse=True, axis=0
            )
            _, nidx, noutidx = np.unique(
                n, return_index=True, return_inverse=True, axis=0
            )
            z = z[zidx]
            n = n[nidx]
        toroidal = fourier(z[:, np.newaxis], n, self.NFP, derivatives[2])
        if unique:
            toroidal = toroidal[zoutidx][:, noutidx]
        return toroidal

    def change_resolution(self, N):
        """Change resolution of the basis to the given resolutions.

        Parameters
        ----------
        N : int
            Maximum toroidal resolution.

        """
        if N != self.N:
            self._N = N
            self._modes = self._get_modes(self.N)
            self._set_up()
class DoubleFourierSeries(Basis):
"""2D basis set for use on a single flux surface.
Fourier series in both the poloidal and toroidal coordinates.
Parameters
----------
M : int
Maximum poloidal resolution.
N : int
Maximum toroidal resolution.
NFP : int
Number of field periods.
sym : {``'cos'``, ``'sin'``, ``False``}
* ``'cos'`` for cos(m*t-n*z) symmetry
* ``'sin'`` for sin(m*t-n*z) symmetry
* ``False`` for no symmetry (Default)
"""
def __init__(self, M, N, NFP=1, sym=False):
self._L = 0
self._M = M
self._N = N
self._NFP = NFP
self._sym = sym
self._spectral_indexing = "linear"
self._modes = self._get_modes(M=self.M, N=self.N)
super().__init__()
def _get_modes(self, M=0, N=0):
"""Get mode numbers for double Fourier series.
Parameters
----------
M : int
Maximum poloidal resolution.
N : int
Maximum toroidal resolution.
Returns
-------
modes : ndarray of int, shape(num_modes,3)
Array of mode numbers [l,m,n].
Each row is one basis function with modes (l,m,n).
"""
dim_pol = 2 * M + 1
dim_tor = 2 * N + 1
m = np.arange(dim_pol) - M
n = | np.arange(dim_tor) | numpy.arange |
# License: MIT
import abc
import time
import typing
import numpy as np
from typing import List
from openbox.utils.util_funcs import get_types
from openbox.core.base import build_surrogate
from openbox.utils.constants import VERY_SMALL_NUMBER
from openbox.utils.config_space import ConfigurationSpace
from openbox.utils.config_space.util import convert_configurations_to_array
from openbox.utils.normalization import zero_mean_unit_var_normalization, zero_one_normalization
from openbox.utils.logging_utils import get_logger
class BaseTLSurrogate(object):
    def __init__(self, config_space: ConfigurationSpace,
                 source_hpo_data: List,
                 seed: int,
                 history_dataset_features: List = None,
                 num_src_hpo_trial: int = 50,
                 surrogate_type='rf'):
        """Base class for transfer-learning surrogates.

        Parameters
        ----------
        config_space : ConfigurationSpace
            Search space of the target task.
        source_hpo_data : List
            One {configuration: performance} mapping per source task.
        seed : int
            Random seed for the underlying surrogate models.
        history_dataset_features : List, optional
            Meta-features of the source tasks; when given, its length must
            equal the number of source tasks.
        num_src_hpo_trial : int
            Number of source observations to use per task (-1 means all).
        surrogate_type : str
            Surrogate model type identifier (e.g. 'rf').
        """
        self.method_id = None  # set by concrete subclasses
        self.config_space = config_space
        self.random_seed = seed
        self.num_src_hpo_trial = num_src_hpo_trial
        self.source_hpo_data = source_hpo_data
        self.source_surrogates = None
        self.target_surrogate = None
        self.history_dataset_features = history_dataset_features
        # The number of source (previously seen) problems.
        if source_hpo_data is not None:
            self.K = len(source_hpo_data)
            if history_dataset_features is not None:
                assert len(history_dataset_features) == self.K
        self.surrogate_type = surrogate_type
        self.types, self.bounds = get_types(config_space)
        self.instance_features = None
        self.var_threshold = VERY_SMALL_NUMBER  # floor for predicted variances
        self.w = None  # per-surrogate combination weights (set elsewhere)
        self.eta_list = list()  # best (minimum) normalized y per source task
        # meta features.
        self.meta_feature_scaler = None
        self.meta_feature_imputer = None
        self.target_weight = list()
        self.logger = get_logger(self.__class__.__name__)
    @abc.abstractmethod
    def train(self, X: np.ndarray, y: np.ndarray):
        # Fit the surrogate on target-task observations (X: config array,
        # y: objective values). Implemented by concrete subclasses.
        pass

    @abc.abstractmethod
    def predict(self, X: np.ndarray):
        # Return predictions for configurations X; subclasses are expected to
        # return a (mean, variance) pair (see predict_marginalized_over_instances).
        pass
    def build_source_surrogates(self, normalize):
        """Fit one surrogate per source task on its (config, perf) history.

        Parameters
        ----------
        normalize : {'standardize', 'scale'}
            How to normalize each task's objective values before fitting;
            any other value raises ValueError.
        """
        if self.source_hpo_data is None:
            self.logger.warning('No history BO data provided, resort to naive BO optimizer without TL.')
            return
        self.logger.info('Start to train base surrogates.')
        start_time = time.time()
        self.source_surrogates = list()
        for hpo_evaluation_data in self.source_hpo_data:
            print('.', end='')  # lightweight progress indicator
            model = build_surrogate(self.surrogate_type, self.config_space,
                                    np.random.RandomState(self.random_seed))
            _X, _y = list(), list()
            for _config, _config_perf in hpo_evaluation_data.items():
                _X.append(_config)
                _y.append(_config_perf)
            X = convert_configurations_to_array(_X)
            y = np.array(_y, dtype=np.float64)
            if self.num_src_hpo_trial != -1:
                # Cap the amount of training data used per source task.
                X = X[:self.num_src_hpo_trial]
                y = y[:self.num_src_hpo_trial]
            if normalize == 'standardize':
                if (y == y[0]).all():
                    y[0] += 1e-4  # jitter constant objectives: avoid zero variance
                y, _, _ = zero_mean_unit_var_normalization(y)
            elif normalize == 'scale':
                if (y == y[0]).all():
                    y[0] += 1e-4
                y, _, _ = zero_one_normalization(y)
                y = 2 * y - 1.  # map [0, 1] onto [-1, 1]
            else:
                raise ValueError('Invalid parameter in norm.')
            # Record the best normalized value of each task for later use.
            self.eta_list.append(np.min(y))
            model.train(X, y)
            self.source_surrogates.append(model)
        self.logger.info('Building base surrogates took %.3fs.' % (time.time() - start_time))
def build_single_surrogate(self, X: np.ndarray, y: np.array, normalize):
assert normalize in ['standardize', 'scale', 'none']
model = build_surrogate(self.surrogate_type, self.config_space, np.random.RandomState(self.random_seed))
if normalize == 'standardize':
if (y == y[0]).all():
y[0] += 1e-4
y, _, _ = zero_mean_unit_var_normalization(y)
elif normalize == 'scale':
if (y == y[0]).all():
y[0] += 1e-4
y, _, _ = zero_one_normalization(y)
else:
pass
model.train(X, y)
return model
def predict_marginalized_over_instances(self, X: np.ndarray) -> typing.Tuple[np.ndarray, np.ndarray]:
"""Predict mean and variance marginalized over all instances.
Returns the predictive mean and variance marginalised over all
instances for a set of configurations.
Parameters
----------
X : np.ndarray
[n_samples, n_features (config)]
Returns
-------
means : np.ndarray of shape = [n_samples, 1]
Predictive mean
vars : np.ndarray of shape = [n_samples, 1]
Predictive variance
"""
if len(X.shape) != 2:
raise ValueError('Expected 2d array, got %dd array!' % len(X.shape))
if X.shape[1] != len(self.bounds):
raise ValueError('Rows in X should have %d entries but have %d!' %
(len(self.bounds), X.shape[1]))
if self.instance_features is None or \
len(self.instance_features) == 0:
mean, var = self.predict(X)
assert var is not None # please mypy
var[var < self.var_threshold] = self.var_threshold
var[np.isnan(var)] = self.var_threshold
return mean, var
raise ValueError('Unexpected case happened.')
def combine_predictions(self, X: np.array,
combination_method: str = 'idp_lc',
weight: np.array = None):
n, m = X.shape[0], len(self.w)
mu, var = np.zeros((n, 1)), np.zeros((n, 1))
if weight is None:
w = self.w
else:
w = weight
var_buf = | np.zeros((n, m)) | numpy.zeros |
from __future__ import division, print_function

import os
import sys

import numpy as np
import scipy.linalg
import scipy.optimize
from numpy import dot, newaxis
from numpy.linalg import norm, pinv, solve
from scipy.linalg import solve_triangular

import lib
from training import print_dict, training_data
def col_square_norm(A):
    """Return the squared Euclidean norm of every column of A."""
    # Equivalent to np.einsum('ij, ij->j', A, A).
    return (A * A).sum(axis=0)
def row_square_norm(A):
    """Return the squared Euclidean norm of every row of A."""
    # Equivalent to np.einsum('ij, ij->i', A, A).
    return (A * A).sum(axis=1)
# Optimize B in-place, using Lagrange dual method of:
# Lee et al., Efficient Sparse Coding Algorithms.
# with c=1.
@lib.timeit
def optimize_dictionary(X_T, S_T, B_T, Lam_0=None):
    """Update the dictionary B_T in place via the Lagrange dual method.

    Follows Lee et al., "Efficient Sparse Coding Algorithms" (with c=1):
    maximizes the dual objective D(Lam) over the per-atom dual variables
    Lam using L-BFGS-B, then recovers the optimal dictionary in closed form.

    Parameters
    ----------
    X_T : data matrix, one sample per row.
    S_T : sparse codes, one sample per row.
    B_T : dictionary (atoms in rows); overwritten with the optimized value.
    Lam_0 : optional warm start for the dual variables.

    Returns
    -------
    The optimized dual vector, so callers can warm-start the next round.

    NOTE(review): relies on `scipy.optimize` being imported at module scope.
    """
    # Precompute the fixed matrices appearing in the dual objective.
    SST = dot(S_T.T, S_T)
    XST = dot(X_T.T, S_T)
    XST_T = XST.T.copy()
    XTX = dot(X_T, X_T.T)
    XSTTXST = dot(XST_T, XST)
    # Optimal dictionary for a given dual vector (closed form).
    def B(Lam_vec):
        Lam = np.diag(Lam_vec)
        return solve(SST + Lam, XST_T)
    # Dual objective to be maximized.
    def D(Lam_vec):
        Lam = np.diag(Lam_vec)
        return np.trace(XTX) - np.trace(Lam) \
            - np.trace(XST.dot(solve(SST + Lam, XST_T)))
    # Gradient of the dual objective with respect to Lam.
    def grad(Lam_vec):
        Lam = np.diag(Lam_vec)
        return row_square_norm(solve(SST + Lam, XST_T)) - 1
    # Analytic Hessian (currently unused by the L-BFGS-B solver below).
    def hessian(Lam, inv_SST_Lam):
        return -2 * inv_SST_Lam \
            * (inv_SST_Lam.dot(XSTTXST).dot(inv_SST_Lam))
    # last_B_T = None
    Lam_vec = np.ones(S_T.shape[1]) if Lam_0 is None else Lam_0.copy()
    print('current D:', D(Lam_vec))
    # Maximize D by minimizing -D, with Lam constrained non-negative.
    Lam_vec, _, _ = scipy.optimize.fmin_l_bfgs_b(
        func=lambda x: -D(x),
        bounds=[(0, np.inf) for l in Lam_vec],
        fprime=lambda x: -grad(x),
        x0=Lam_vec
    )
    print('final D:', D(Lam_vec))
    B_T[...] = B(Lam_vec)
    print(B_T)
    return Lam_vec
def solve_cholesky(L, b):
    """Solve (L L*) x = b given the lower-triangular Cholesky factor L.

    Two triangular solves: forward substitution with L, then back
    substitution with L.T. (Fix: `solve_triangular` was used here without
    ever being imported; it now comes from scipy.linalg at module scope.)
    """
    # solve L L* x = b
    y = solve_triangular(L, b, lower=True)
    return solve_triangular(L.T, y)
@lib.timeit
# @profile
def feature_sign_search_vec(Y_T, X_T, A_T, gamma):
    """Vectorized feature-sign search for the sparse-coding step.

    Minimizes ||Y - A.X||^2 + gamma*||X||_1 over the codes X for all samples
    (columns) at once, following Lee et al., "Efficient Sparse Coding
    Algorithms". X_T serves as both the initial guess and the output: it is
    overwritten in place with the optimized codes.

    Parameters
    ----------
    Y_T : target patches, one sample per row.
    X_T : sparse codes, one sample per row; updated in place.
    A_T : dictionary with atoms in rows.
    gamma : weight of the L1 sparsity penalty.

    Fix: the final write-back used `X_T.T = X[:]`, which raises
    AttributeError at runtime (ndarray.T is a read-only attribute); it is
    now `X_T[...] = X.T`.
    """
    Y = Y_T.T.copy()
    A = A_T.T.copy()
    X = X_T.T.copy()
    ATA = dot(A_T, A)
    # NOTE(review): X was copied *before* this thresholding, so `active_set`
    # below still treats sub-threshold entries as nonzero -- possibly the
    # intent was to zero X as well; confirm.
    X_T[abs(X_T) < 1e-7] = 0
    active_set = X != 0
    theta = np.sign(X)
    A_T_Y = dot(A_T, Y)
    first_step_2 = True
    last_Is = None
    # shape same as X
    L2_partials = 2 * (dot(ATA, X) - A_T_Y)
    L2_partials_abs = np.abs(L2_partials)
    while True:
        print()
        print('==== STEP 2 ====')
        # Activate, per column, the zero coefficient with the largest
        # violating gradient (if it exceeds gamma).
        L2_partials_abs[np.abs(X) >= 1e-7] = 0  # rule out zero elements of X
        Is = L2_partials_abs.argmax(axis=0)  # max for each column
        activate_rows, = np.nonzero(L2_partials_abs.max(axis=0) > gamma)
        index = (Is[activate_rows], activate_rows)
        active_set[index] = True
        theta[index] = -np.sign(L2_partials[index])
        print('mean active:', active_set.sum(axis=0).mean())
        print('activating rows:', activate_rows.shape[0])
        if activate_rows.shape[0] == 0:
            print('WARNING: activating nothing')
            assert last_Is is None or \
                not np.all(last_Is == Is[activate_rows])
        last_Is = Is[activate_rows]
        working_rows = np.arange(X.shape[1]) if first_step_2 else activate_rows
        first_step_2 = False
        while True:
            print('---- STEP 3 ----')
            print('working rows:', working_rows.shape[0])
            Q = A_T_Y[:, working_rows] - gamma / 2 * theta[:, working_rows]
            X_working = X[:, working_rows]
            X_new = X_working.copy()
            Y_working = Y[:, working_rows]
            active_set_working = active_set[:, working_rows]
            # Solve the unconstrained quadratic over each column's active set.
            for idx, active in enumerate(active_set_working.T):
                active_idxs, = active.nonzero()
                q_hat = Q[active_idxs, idx]
                ATA_hat = ATA[np.ix_(active_idxs, active_idxs)]
                _, x_new_hat, info = scipy.linalg.lapack.dposv(ATA_hat, q_hat)
                if info != 0:
                    # Singular system: fall back to the pseudoinverse.
                    x_new_hat = dot(pinv(ATA_hat), q_hat)
                    if np.abs(dot(ATA_hat, x_new_hat) - q_hat).mean() > 0.1:
                        # no good. try null-space zero crossing.
                        active = active_set[:, idx]
                        x_hat = X[active_idxs, idx]
                        theta_hat = theta[active_idxs, idx]
                        u, s, v = np.linalg.svd(ATA_hat)
                        assert s[s.shape[0] - 1] < 1e-7
                        z = v[v.shape[0] - 1]
                        assert np.abs(dot(ATA_hat, z)).sum() < 1e-7
                        # [x_hat + t_i * z]_i = 0
                        # want to reduce theta dot (x + tz) => t * theta dot z
                        # so t should have opposite sign of theta dot z
                        direction = -np.sign(dot(theta_hat, z))
                        null_ts = -x_hat / z
                        null_ts[np.sign(null_ts) != direction] = np.inf
                        null_ts[np.abs(null_ts) < 1e-7] = np.inf
                        first_change = np.abs(null_ts).argmin()
                        x_new_hat = x_hat + null_ts[first_change] * z
                X_new[active_idxs, idx] = x_new_hat
            # Discrete line search over every sign change between X and X_new.
            # sign_changes = np.logical_xor(x_new_hat > 0, x_hat > 0)
            sign_changes = np.logical_and.reduce([
                np.logical_xor(X_new > 0, X_working > 0),
                np.abs(X_working) >= 1e-7,
                # np.abs(X_new) >= 1e-7,
                # np.abs((X_new - X_working) / X_working) >= 1e-9,
            ])
            # (1 - t) * x + t * x_new
            count_sign_changes = sign_changes.sum(axis=0)
            max_sign_changes = count_sign_changes.max()
            has_sign_changes, = np.nonzero(count_sign_changes > 0)
            print('max sign changes:', max_sign_changes)
            print('rows with sign changes:', has_sign_changes.shape[0])
            if max_sign_changes > 0:
                sign_changes = sign_changes[:, has_sign_changes]
                count_sign_changes = count_sign_changes[has_sign_changes]
                Y_sign = Y_working[:, has_sign_changes]
                X_new_sign = X_new[:, has_sign_changes]
                X_sign = X_working[:, has_sign_changes]
                # Pack each column's crossing points into a dense
                # (max_sign_changes, n_columns) array with a validity mask.
                compressed_ts = np.zeros((max_sign_changes, has_sign_changes.shape[0]))
                compressed_mask = np.tile(np.arange(max_sign_changes),
                                          (compressed_ts.shape[1], 1)).T < count_sign_changes
                assert compressed_mask.shape == compressed_ts.shape
                assert compressed_mask.sum() == sign_changes.sum()
                # ts = -x_hat_sign / (x_new_hat_sign - x_hat_sign)
                # NB: only faster to use where= on slow ops like divide.
                all_ts = np.divide(-X_sign, X_new_sign - X_sign, where=sign_changes)
                # transpose necessary to get order right.
                compressed_ts.T[compressed_mask.T] = all_ts.T[sign_changes.T]
                ts = compressed_ts[:, newaxis, :]  # broadcast over components.
                test_X_ts = np.multiply(1 - ts, X_sign, where=compressed_mask[:, newaxis, :]) \
                    + np.multiply(ts, X_new_sign, where=compressed_mask[:, newaxis, :])
                test_X = np.concatenate([test_X_ts, X_new_sign[newaxis, :, :]], axis=0)
                # assert np.sum(test_X[0, X_new_sign != 0] == 0) > 0
                A_X_sign = dot(A, X_sign)
                A_X_new_sign = dot(A, X_new_sign)
                test_A_X_ts = np.multiply(1 - ts, A_X_sign, where=compressed_mask[:, newaxis, :]) \
                    + np.multiply(ts, A_X_new_sign, where=compressed_mask[:, newaxis, :])
                test_A_X = np.concatenate([test_A_X_ts, A_X_new_sign[newaxis, :, :]], axis=0)
                test_mask = np.concatenate([
                    compressed_mask,
                    np.full((1, compressed_mask.shape[1]), True),
                ])
                # Evaluate the full objective at every candidate point and
                # keep, per column, the best one.
                objectives = np.square(Y_sign - test_A_X, where=test_mask[:, newaxis, :]).sum(axis=1) \
                    + gamma * np.abs(test_X).sum(axis=1)
                objectives[~test_mask] = np.inf
                lowest_objective = objectives.argmin(axis=0)
                best_X = test_X[lowest_objective, :, np.arange(test_X.shape[2])].T
                assert np.all(best_X[:, 0] == test_X[lowest_objective[0], :, 0])
                X_new[:, has_sign_changes] = best_X
            # update x, theta, active set.
            zero_coeffs_mask = np.abs(X_new) < 1e-7
            zero_coeffs = np.nonzero(zero_coeffs_mask)
            X_new[zero_coeffs] = 0
            X[:, working_rows] = X_new
            active_set[:, working_rows] = ~zero_coeffs_mask
            theta[:, working_rows] = np.sign(X_new)
            # objective = np.square(Y - dot(A, X)).sum() + gamma * np.abs(X).sum()
            # print 'CURRENT OBJECTIVE:', objective
            L2_partials_working = 2 * (dot(ATA, X_new) - A_T_Y[:, working_rows])
            f_partials = L2_partials_working + gamma * theta[:, working_rows]
            # only look at max of nonzero coefficients.
            f_partials[zero_coeffs] = 0
            row_highest_nz_partial = np.abs(f_partials).max(axis=0)
            print('highest nonzero partial:', row_highest_nz_partial.max())
            if max_sign_changes == 0 or row_highest_nz_partial.max() < 1e-7:
                break
            working_rows = working_rows[row_highest_nz_partial >= 1e-7]
        np.save('fss_inter.npy', X.T)
        objective = np.square(Y - dot(A, X)).sum() + gamma * np.abs(X).sum()
        print('CURRENT OBJECTIVE:', objective)
        assert objective < 1e11
        zero_coeffs = np.abs(X) < 1e-7
        L2_partials[:, working_rows] = L2_partials_working
        L2_partials_abs[:, working_rows] = np.abs(L2_partials_working)
        highest_zero_partial = L2_partials_abs[zero_coeffs].max()
        print('highest zero partial:', highest_zero_partial)
        if highest_zero_partial <= gamma * 1.01:
            break
    # Write the optimized codes back into the caller's array. The original
    # statement `X_T.T = X[:]` raised AttributeError (ndarray.T is read-only).
    X_T[...] = X.T
@lib.timeit
def feature_sign_search_alternating(X_T, Z_T, D_T, lam):
    """One alternating-minimization round: optimize the sparse codes with
    feature-sign search, then the dictionary via the Lagrange dual.
    Checkpoints both results to disk after each half-step."""
    global Lam_last
    feature_sign_search_vec(X_T, Z_T, D_T, lam)
    np.save('fss.npy', Z_T)
    print('optimizing dict.')
    # Warm-start the dual variables from the previous round.
    Lam_last = optimize_dictionary(X_T, Z_T, D_T, Lam_0=Lam_last)
    np.save('dict.npy', D_T)
def blockwise_coord_descent_mapping(X_T, S_T, B_T, lam):
    """One sweep of blockwise coordinate descent over the sparse codes.

    Updates S_T in place, one dictionary atom at a time, using the
    soft-thresholding (shrinkage) update with threshold lam / 2.
    """
    threshold = lam / 2.
    gram = B_T.dot(B_T.T)
    np.fill_diagonal(gram, 0)  # exclude each atom's own contribution
    targets = B_T.dot(X_T.T)
    codes = S_T.T  # view: writing rows below updates S_T in place
    for k in range(B_T.shape[0]):
        if k % 100 == 0:
            print(k)
        residual = targets[k] - gram[k].dot(codes)
        # Soft-threshold: shrink the residual toward zero by `threshold`.
        codes[k] = np.maximum(residual, threshold) + np.minimum(residual, -threshold)
def blockwise_coord_descent_dict(X_T, S_T, B_T, lam):
    """One sweep of blockwise coordinate descent over the dictionary atoms.

    Updates B_T in place: each atom is re-solved against the residual left
    by the other atoms and renormalized to unit length. `lam` is unused here;
    it is kept so the signature mirrors the mapping step.
    """
    gram = S_T.T.dot(S_T)
    np.fill_diagonal(gram, 0)  # exclude atom k from its own update
    correlations = X_T.T.dot(S_T)
    for k in range(B_T.shape[0]):
        atom = correlations[:, k] - B_T.T.dot(gram[:, k])
        B_T[k] = atom / norm(atom)
@lib.timeit
def blockwise_coord_descent(X_T, S_T, B_T, lam):
    """Run one full alternating update (codes, then dictionary),
    checkpointing each half-step to disk."""
    # Update the sparse codes and checkpoint them.
    blockwise_coord_descent_mapping(X_T, S_T, B_T, lam)
    np.save('fss.npy', S_T)
    # Update the dictionary atoms and checkpoint them.
    blockwise_coord_descent_dict(X_T, S_T, B_T, lam)
    np.save('dict.npy', B_T)
def test_train():
    """Train a joint low/high-resolution patch dictionary on one font.

    Alternates blockwise coordinate descent on codes and dictionary until
    the relative decrease of the objective falls below 1e-4. State is
    checkpointed to training.npy / fss.npy / dict.npy and resumed if those
    files already exist.
    """
    W_l = 5  # low-res window size
    W_h = 2 * W_l  # high-res window size
    font_size = 56
    K = 512  # Dictionary size
    lam = 0.1  # weight of sparsity
    # Load checkpoints where available; otherwise build fresh state.
    if os.path.isfile('training.npy'):
        X_T = np.load('training.npy')
    else:
        X_T = training_data("/Library/Fonts/Microsoft/Constantia.ttf",
                            font_size, W_l, W_h)
        np.save('training.npy', X_T)
    t = X_T.shape[0]
    if os.path.isfile('fss.npy'):
        Z_T = np.load('fss.npy')
    else:
        Z_T = np.zeros((t, K), dtype=np.float64)
    if os.path.isfile('dict.npy'):
        D_T = np.load('dict.npy')
    else:
        # Random unit-norm atoms as the initial dictionary.
        D_T = np.random.normal(size=(K, W_l * W_l + W_h * W_h)).astype(np.float64)
        # D_T = X_T[np.random.choice(X_T.shape[0], size=K, replace=False)]
        D_T /= norm(D_T, axis=1)[:, newaxis]
        np.save('dict.npy', D_T)
    print('shapes:', X_T.shape, Z_T.shape, D_T.shape)
    global Lam_last
    Lam_last = None
    last_objective = None
    for i in range(100000):
        print('\n==== ITERATION', i, '====')
        # feature_sign_search_alternating(X_T, Z_T, D_T, lam)
        blockwise_coord_descent(X_T, Z_T, D_T, lam)
        # Fixed output names. (The original called str.format(i) on
        # placeholder-free names, which was a no-op.)
        print_dict('lo_dict.png', D_T[:, :W_l * W_l])
        print_dict('hi_dict.png', D_T[:, W_l * W_l:])
        # Inspect the strongest single coefficient as a sanity check.
        highest = Z_T.argmax()
        weight = Z_T.flat[highest]
        patch_X, patch_D = np.unravel_index(highest, Z_T.shape)
        print('highest weight:', weight)
        print(weight * D_T[patch_D, :W_l * W_l].reshape(W_l, W_l))
        print(weight * D_T[patch_D, W_l * W_l:].reshape(W_h, W_h))
        print(X_T[patch_X, :W_l * W_l].reshape(W_l, W_l))
        print(X_T[patch_X, W_l * W_l:].reshape(W_h, W_h))
        print(dot(Z_T[patch_X], D_T)[:W_l * W_l])
        # Full objective: squared reconstruction error + L1 penalty.
        diff = (X_T - dot(Z_T, D_T)).reshape(-1)
        objective = dot(diff, diff).sum() + lam * abs(Z_T).sum()
        print('\nTOTAL OBJECTIVE VALUE:', objective)
        if last_objective is not None:
            relative_err = abs(last_objective - objective) / last_objective
            print('relative error:', relative_err)
            if relative_err < 1e-4:
                break
        last_objective = objective
def train(dest, font_path, sizes):
for size in sizes:
W_l = int(size / 3) | 1
W_h = 2 * W_l
K = 64 # Dictionary size
lam = 0.2 # weight of sparsity
dest_dir = os.path.join(dest, str(size))
if not os.path.isdir(dest_dir):
print('making directory', dest_dir)
os.makedirs(dest_dir)
training_file = os.path.join(dest_dir, 'training.npy')
dict_file = os.path.join(dest_dir, 'dict.npy')
mapping_file = os.path.join(dest_dir, 'mapping.npy')
if os.path.isfile(training_file):
X_T = np.load(training_file)
else:
X_T = training_data(font_path, size * 2, W_l, W_h)
np.save(training_file, X_T)
t = X_T.shape[0]
if os.path.isfile(mapping_file):
Z_T = np.load(mapping_file)
else:
Z_T = np.zeros((t, K), dtype=np.float64)
if os.path.isfile(dict_file):
D_T = np.load(dict_file)
else:
D_T = np.random.normal(size=(K, W_l * W_l + W_h * W_h)).astype(np.float64)
D_T /= norm(D_T, axis=1)[:, newaxis]
| np.save(dict_file, D_T) | numpy.save |
"""
Module containing classes for ray tracing through the ice.
Ray tracer classes correspond to ray trace path classes, where the ray
tracer is responsible for calculating the existence and launch angle of
paths between points, and the ray tracer path objects are responsible for
returning information about propagation along their respective path.
"""
import logging
import numpy as np
import scipy.constants
import scipy.fft
import scipy.optimize
from pyrex.internal_functions import normalize, LazyMutableClass, lazy_property
from pyrex.ice_model import AntarcticIce, UniformIce, ice
logger = logging.getLogger(__name__)
class BasicRayTracePath(LazyMutableClass):
    """
    Class for representing a single ray-trace solution between points.
    Stores parameters of the ray path with calculations performed by
    integrating z-steps of size ``dz``. Most properties are lazily evaluated
    to save on computation time. If any attributes of the class instance are
    changed, the lazily-evaluated properties will be cleared.
    Parameters
    ----------
    parent_tracer : BasicRayTracer
        Ray tracer for which this path is a solution.
    launch_angle : float
        Launch angle (radians) of the ray path.
    direct : boolean
        Whether the ray path is direct. If ``True`` this means the path does
        not "turn over". If ``False`` then the path does "turn over" by either
        reflection or refraction after reaching some maximum depth.
    Attributes
    ----------
    from_point : ndarray
        The starting point of the ray path.
    to_point : ndarray
        The ending point of the ray path.
    theta0 : float
        The launch angle of the ray path at `from_point`.
    ice
        The ice model used for the ray tracer.
    dz : float
        The z-step (m) to be used for integration of the ray path attributes.
    direct : boolean
        Whether the ray path is direct. If ``True`` this means the path does
        not "turn over". If ``False`` then the path does "turn over" by either
        reflection or refraction after reaching some maximum depth.
    emitted_direction
    received_direction
    path_length
    tof
    coordinates
    See Also
    --------
    pyrex.internal_functions.LazyMutableClass : Class with lazy properties
                                                which may depend on other class
                                                attributes.
    BasicRayTracer : Class for calculating the ray-trace solutions between
                     points.
    Notes
    -----
    Even more attributes than those listed are available for the class, but
    are mainly for internal use. These attributes can be found by exploring
    the source code.
    """
    def __init__(self, parent_tracer, launch_angle, direct):
        # Copy endpoint geometry and integration settings from the parent
        # tracer so the path object is self-contained.
        self.from_point = parent_tracer.from_point
        self.to_point = parent_tracer.to_point
        self.theta0 = launch_angle
        self.ice = parent_tracer.ice
        self.dz = parent_tracer.dz
        self.direct = direct
        # LazyMutableClass.__init__ sets up clearing of lazy properties when
        # instance attributes change.
        super().__init__()
    @property
    def _metadata(self):
        """Metadata dictionary for writing `BasicRayTracePath` information."""
        return {
            "n0": self.n0,
            "dz": self.dz,
            "emitted_x": self.emitted_direction[0],
            "emitted_y": self.emitted_direction[1],
            "emitted_z": self.emitted_direction[2],
            "received_x": self.received_direction[0],
            "received_y": self.received_direction[1],
            "received_z": self.received_direction[2],
            "launch_angle": np.arccos(self.emitted_direction[2]),
            "receiving_angle": np.pi-np.arccos(self.received_direction[2]),
            "path_length": self.path_length,
            "tof": self.tof
        }
    @property
    def z_turn_proximity(self):
        """
        Parameter for how closely path approaches z_turn.
        Necessary to avoid diverging integrals which occur at z_turn.
        """
        # Best value of dz/10 determined empirically by checking errors
        return self.dz/10
    @property
    def z0(self):
        """Depth (m) of the launching point."""
        return self.from_point[2]
    @property
    def z1(self):
        """Depth (m) of the receiving point."""
        return self.to_point[2]
    @lazy_property
    def n0(self):
        """Index of refraction of the ice at the launching point."""
        return self.ice.index(self.z0)
    @lazy_property
    def rho(self):
        """Radial distance (m) between the endpoints."""
        u = self.to_point - self.from_point
        return np.sqrt(u[0]**2 + u[1]**2)
    @lazy_property
    def phi(self):
        """Azimuthal angle (radians) between the endpoints."""
        u = self.to_point - self.from_point
        return np.arctan2(u[1], u[0])
    @lazy_property
    def beta(self):
        """Launching beta parameter (n(z0) * sin(theta0))."""
        # Snell invariant: constant along the whole ray path.
        return self.n0 * np.sin(self.theta0)
    @lazy_property
    def z_turn(self):
        """Turning depth (m) of the path."""
        return self.ice.depth_with_index(self.beta)
    # @property
    # def exists(self):
    #     """Boolean of whether the path between the points with the
    #     given launch angle exists."""
    #     return True
    @lazy_property
    def emitted_direction(self):
        """Direction in which ray is emitted."""
        return np.array([np.sin(self.theta0) * np.cos(self.phi),
                         np.sin(self.theta0) * np.sin(self.phi),
                         np.cos(self.theta0)])
    @lazy_property
    def received_direction(self):
        """Direction ray is travelling when it is received."""
        # Direct paths keep the vertical sign of the launch direction;
        # turned-over paths arrive travelling downward (negative z).
        if self.direct:
            sign = np.sign(np.cos(self.theta0))
            return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
                             np.sin(self.theta(self.z1)) * np.sin(self.phi),
                             sign*np.cos(self.theta(self.z1))])
        else:
            return np.array([np.sin(self.theta(self.z1)) * np.cos(self.phi),
                             np.sin(self.theta(self.z1)) * np.sin(self.phi),
                             -np.cos(self.theta(self.z1))])
    def theta(self, z):
        """
        Polar angle of the ray at the given depths.
        Calculates the polar angle of the ray's direction at the given depth
        in the ice. Note that the ray could be travelling upward or downward
        at this polar angle.
        Parameters
        ----------
        z : array_like
            (Negative-valued) depths (m) in the ice.
        Returns
        -------
        array_like
            Polar angle at the given values of `z`.
        """
        # Snell's law: n(z) * sin(theta(z)) is conserved along the path.
        return np.arcsin(np.sin(self.theta0) * self.n0/self.ice.index(z))
    # Log-scaled zs (commented out below and in z_integral method) seemed
    # like a good idea for reducing dimensionality, but didn't work out.
    # Kept here in case it works out better in the future
    # @lazy_property
    # def dn(self):
    #     return np.abs(self.ice.gradient(-10)[2])*self.dz
    # def _log_scale_zs(self, z0, z1):
    #     # Base dn on dz at 10 meter depth
    #     n0 = self.ice.index(z0)
    #     n1 = self.ice.index(z1)
    #     n_steps = int(np.abs(n1-n0)/self.dn)
    #     ns = np.linspace(n0, n1, n_steps+2)
    #     return self.ice.depth_with_index(ns)
    def z_integral(self, integrand):
        """
        Calculate the numerical integral of the given integrand.
        For the integrand as a function of z, the numerical integral is
        calculated along the ray path.
        Parameters
        ----------
        integrand : function
            Function returning the values of the integrand at a given array of
            values for the depth z.
        Returns
        -------
        float
            The value of the numerical integral along the ray path.
        """
        if self.direct:
            n_zs = int(np.abs(self.z1-self.z0)/self.dz)
            zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
            return np.trapz(integrand(zs), dx=np.abs(dz), axis=0)
            # zs = self._log_scale_zs(self.z0, self.z1)
            # return np.trapz(integrand(zs), x=zs, axis=0)
        else:
            # Indirect paths: integrate down to just above the turning point
            # and back up, since the integrand diverges at z_turn itself.
            n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0)/self.dz)
            zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
                                     n_zs_1+1, retstep=True)
            n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1)/self.dz)
            zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
                                     n_zs_2+1, retstep=True)
            return (np.trapz(integrand(zs_1), dx=np.abs(dz_1), axis=0) +
                    np.trapz(integrand(zs_2), dx=np.abs(dz_2), axis=0))
            # zs_1 = self._log_scale_zs(self.z0, self.z_turn-self.z_turn_proximity)
            # zs_2 = self._log_scale_zs(self.z1, self.z_turn-self.z_turn_proximity)
            # return (np.trapz(integrand(zs_1), x=zs_1, axis=0) +
            #         np.trapz(integrand(zs_2), x=zs_2, axis=0))
    @lazy_property
    def path_length(self):
        """Length (m) of the ray path."""
        # Path element per unit depth is sec(theta).
        return self.z_integral(lambda z: 1/np.cos(self.theta(z)))
    @lazy_property
    def tof(self):
        """Time of flight (s) along the ray path."""
        # Local light speed is c/n(z); integrate travel time per unit depth.
        return self.z_integral(lambda z: self.ice.index(z) / scipy.constants.c
                               / np.cos(self.theta(z)))
    @lazy_property
    def fresnel(self):
        """
        Fresnel factors for reflection off the ice surface.
        The fresnel reflectance calculated is the square root (ratio of
        amplitudes, not powers) for reflection off ice surface (1 if doesn't
        reach surface). Stores the s and p polarized reflectances, respectively.
        """
        if self.direct or self.z_turn<self.ice.valid_range[1]:
            return 1, 1
        else:
            n_1 = self.ice.index(self.ice.valid_range[1])
            n_2 = self.ice.index_above
            theta_1 = self.theta(self.ice.valid_range[1])
            cos_1 = np.cos(theta_1)
            sin_2 = n_1/n_2*np.sin(theta_1)
            if sin_2<=1:
                # Plain reflection with real coefficients
                cos_2 = np.sqrt(1 - (sin_2)**2)
            else:
                # Total internal reflection off the surface, results in complex
                # fresnel factors encoding the phase data
                cos_2 = np.sqrt((sin_2)**2 - 1)*1j
            # TODO: Confirm sign convention here
            r_s = (n_1*cos_1 - n_2*cos_2) / (n_1*cos_1 + n_2*cos_2)
            r_p = (n_2*cos_1 - n_1*cos_2) / (n_2*cos_1 + n_1*cos_2)
            return r_s, r_p
    def attenuation(self, f):
        """
        Calculate the attenuation factor for signal frequencies.
        Calculates the attenuation factor to be multiplied by the signal
        amplitude at the given frequencies.
        Parameters
        ----------
        f : array_like
            Frequencies (Hz) at which to calculate signal attenuation.
        Returns
        -------
        array_like
            Attenuation factors for the signal at the frequencies `f`.
        """
        fa = np.abs(f)
        def integrand(z):
            # Path-length element per unit depth (sec(theta)) divided by the
            # frequency-dependent attenuation length at this depth.
            partial_integrand = 1 / np.cos(self.theta(z))
            alen = self.ice.attenuation_length(z, fa)
            return (partial_integrand / alen.T).T
        return np.exp(-np.abs(self.z_integral(integrand)))
    def propagate(self, signal=None, polarization=None,
                  attenuation_interpolation=None):
        """
        Propagate the signal with optional polarization along the ray path.
        Applies the frequency-dependent signal attenuation along the ray path
        and shifts the times according to the ray time of flight. Additionally
        provides the s and p polarization directions.
        Parameters
        ----------
        signal : Signal, optional
            ``Signal`` object to propagate.
        polarization : array_like, optional
            Vector representing the linear polarization of the `signal`.
        attenuation_interpolation: float, optional
            Logarithmic (base 10) interpolation step to be used for
            interpolating attenuation along the ray path. If `None`, no
            interpolation is applied and the attenuation is pre-calculated at
            the expected signal frequencies.
        Returns
        -------
        tuple of Signal
            Tuple of ``Signal`` objects representing the s and p polarizations
            of the original `signal` attenuated along the ray path. Only
            returned if `signal` was not ``None``.
        tuple of ndarray
            Tuple of polarization vectors representing the s and p polarization
            directions of the `signal` at the end of the ray path. Only
            returned if `polarization` was not ``None``.
        See Also
        --------
        pyrex.Signal : Base class for time-domain signals.
        """
        if polarization is None:
            if signal is None:
                return
            else:
                new_signal = signal.copy()
                new_signal.shift(self.tof)
                # Pre-calculate attenuation at the designated frequencies to
                # save on heavy computation time of the attenuation method
                # NOTE(review): frequency count is doubled (2*len(times)),
                # presumably to match zero-padding inside filter_frequencies
                # -- confirm against the Signal implementation.
                freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
                if attenuation_interpolation is None:
                    freqs.sort()
                else:
                    # Sample attenuation on a logarithmic frequency grid and
                    # interpolate between samples.
                    logf_min = np.log10(np.min(freqs[freqs>0]))
                    logf_max = np.log10(np.max(freqs))
                    n_steps = int((logf_max - logf_min)
                                  / attenuation_interpolation)
                    if (logf_max-logf_min)%attenuation_interpolation:
                        n_steps += 1
                    logf = np.logspace(logf_min, logf_max, n_steps+1)
                    freqs = np.concatenate((-np.flipud(logf), [0], logf))
                atten_vals = self.attenuation(freqs)
                attenuation = lambda f: np.interp(f, freqs, atten_vals)
                new_signal.filter_frequencies(attenuation)
                return new_signal
        else:
            # Unit vectors perpendicular and parallel to plane of incidence
            # at the launching point
            u_s0 = normalize(np.cross(self.emitted_direction, [0, 0, 1]))
            u_p0 = normalize(np.cross(u_s0, self.emitted_direction))
            # Unit vector parallel to plane of incidence at the receiving point
            # (perpendicular vector stays the same)
            u_p1 = normalize(np.cross(u_s0, self.received_direction))
            if signal is None:
                return (u_s0, u_p1)
            else:
                # Amplitudes of s and p components
                pol_s = np.dot(polarization, u_s0)
                pol_p = np.dot(polarization, u_p0)
                # Fresnel reflectances of s and p components
                r_s, r_p = self.fresnel
                # Pre-calculate attenuation at the designated frequencies to
                # save on heavy computation time of the attenuation method
                freqs = scipy.fft.fftfreq(2*len(signal.times), d=signal.dt)
                if attenuation_interpolation is None:
                    freqs.sort()
                else:
                    logf_min = np.log10(np.min(freqs[freqs>0]))
                    logf_max = np.log10(np.max(freqs))
                    n_steps = int((logf_max - logf_min)
                                  / attenuation_interpolation)
                    if (logf_max-logf_min)%attenuation_interpolation:
                        n_steps += 1
                    logf = np.logspace(logf_min, logf_max, n_steps+1)
                    freqs = np.concatenate((-np.flipud(logf), [0], logf))
                atten_vals = self.attenuation(freqs)
                # Apply fresnel s and p coefficients in addition to attenuation
                attenuation_s = lambda f: np.interp(f, freqs, atten_vals) * r_s
                attenuation_p = lambda f: np.interp(f, freqs, atten_vals) * r_p
                signal_s = signal * pol_s
                signal_p = signal * pol_p
                signal_s.shift(self.tof)
                signal_p.shift(self.tof)
                signal_s.filter_frequencies(attenuation_s, force_real=True)
                signal_p.filter_frequencies(attenuation_p, force_real=True)
                return (signal_s, signal_p), (u_s0, u_p1)
    @lazy_property
    def coordinates(self):
        """
        x, y, and z-coordinates along the path (using dz step).
        Coordinates are provided for plotting purposes only, and are not vetted
        for use in calculations.
        """
        # Radial distance is accumulated via cumulative trapezoid integration
        # of tan(theta(z)) over the depth steps.
        if self.direct:
            n_zs = int(np.abs(self.z1-self.z0)/self.dz)
            zs, dz = np.linspace(self.z0, self.z1, n_zs+1, retstep=True)
            integrand = np.tan(self.theta(zs))
            rs = np.zeros(len(integrand))
            trap_areas = (integrand[:-1] + np.diff(integrand)/2) * dz
            rs[1:] += np.abs(np.cumsum(trap_areas))
        else:
            # Two legs: down to just above the turning point, then back up.
            n_zs_1 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z0) /
                         self.dz)
            zs_1, dz_1 = np.linspace(self.z0, self.z_turn-self.z_turn_proximity,
                                     n_zs_1+1, retstep=True)
            integrand_1 = np.tan(self.theta(zs_1))
            n_zs_2 = int(np.abs(self.z_turn-self.z_turn_proximity-self.z1) /
                         self.dz)
            zs_2, dz_2 = np.linspace(self.z_turn-self.z_turn_proximity, self.z1,
                                     n_zs_2+1, retstep=True)
            integrand_2 = np.tan(self.theta(zs_2))
            rs_1 = np.zeros(len(integrand_1))
            trap_areas = ((integrand_1[:-1] + np.diff(integrand_1)/2) *
                          np.abs(dz_1))
            rs_1[1:] += np.cumsum(trap_areas)
            rs_2 = np.zeros(len(integrand_2)) + rs_1[-1]
            trap_areas = ((integrand_2[:-1] + np.diff(integrand_2)/2) *
                          np.abs(dz_2))
            rs_2[1:] += np.cumsum(trap_areas)
            rs = np.concatenate((rs_1, rs_2[1:]))
            zs = np.concatenate((zs_1, zs_2[1:]))
        xs = self.from_point[0] + rs*np.cos(self.phi)
        ys = self.from_point[1] + rs*np.sin(self.phi)
        return xs, ys, zs
class SpecializedRayTracePath(BasicRayTracePath):
"""
Class for representing a single ray-trace solution between points.
Stores parameters of the ray path with calculations performed analytically
(with the exception of attenuation). These calculations require the index
of refraction of the ice to be of the form n(z)=n0-k*exp(a*z). However this
restriction allows for most of the integrations to be performed
analytically. The attenuation is the only attribute which is still
calculated by numerical integration with z-steps of size ``dz``. Most
properties are lazily evaluated to save on computation time. If any
attributes of the class instance are changed, the lazily-evaluated
properties will be cleared.
Parameters
----------
parent_tracer : SpecializedRayTracer
Ray tracer for which this path is a solution.
launch_angle : float
Launch angle (radians) of the ray path.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
theta0 : float
The launch angle of the ray path at `from_point`.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
direct : boolean
Whether the ray path is direct. If ``True`` this means the path does
not "turn over". If ``False`` then the path does "turn over" by either
reflection or refraction after reaching some maximum depth.
uniformity_factor : float
Factor (<1) of the base index of refraction (n0 in the ice model)
beyond which calculations start to break down numerically.
beta_tolerance : float
``beta`` value (near 0) below which calculations start to break down
numerically.
emitted_direction
received_direction
path_length
tof
coordinates
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
SpecializedRayTracer : Class for calculating the ray-trace solutions
between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
The requirement that the ice model go as n(z)=n0-k*exp(a*z) is implemented
by requiring the ice model to inherit from `AntarcticIce`. Obviously this
is not fool-proof, but likely the ray tracing will obviously fail if the
index follows a very different functional form.
"""
# Factor of index of refraction at which calculations may break down
uniformity_factor = 0.99999
# Beta value below which calculations may break down
beta_tolerance = 0.005
@lazy_property
def valid_ice_model(self):
"""Whether the ice model being used supports this specialization."""
return ((isinstance(self.ice, type) and
issubclass(self.ice, AntarcticIce))
or isinstance(self.ice, AntarcticIce))
@lazy_property
def z_uniform(self):
"""
Depth (m) beyond which the ice should be treated as uniform.
Calculated based on the ``uniformity_factor``. Necessary due to
numerical rounding issues at indices close to the index limit.
"""
return self.ice.depth_with_index(self.ice.n0 * self.uniformity_factor)
    @staticmethod
    def _z_int_uniform_correction(z0, z1, z_uniform, beta, ice, integrand,
                                  integrand_kwargs={}, numerical=False, dz=None,
                                  derivative_special_case=False):
        """
        Function to perform a z-integration with a uniform ice correction.
        Can be an analytic or numerical integration. Takes into account the
        effect of treating the ice as uniform beyond some depth.
        Parameters
        ----------
        z0 : float
            (Negative-valued) depth (m) of the left limit of the integral.
        z1 : float
            (Negative-valued) depth (m) of the right limit of the integral.
        z_uniform : float
            (Negative-valued) depth (m) below which the ice is assumed to have
            a uniform index.
        beta : float
            ``beta`` value of the ray path.
        ice
            Ice model to be used for ray tracing.
        integrand : function
            Function returning the values of the integrand at a given array of
            values for the depth z.
        integrand_kwargs : dict, optional
            A dictionary of keyword arguments to be passed into the `integrand`
            function.
        numerical : boolean, optional
            Whether to use the numerical integral instead of an analytic one.
            If ``False`` the analytic integral is calculated. If ``True`` the
            numerical integral is calculated.
        dz : float, optional
            The z-step to use for numerical integration. Only needed when
            `numerical` is ``True``.
        derivative_special_case : boolean, optional
            Boolean controlling whether the special case of doing the distance
            integral beta derivative should be used.
        Returns
        -------
        Integral of the given `integrand` along the path from `z0` to `z1`.
        """
        # NOTE: the shared mutable default for `integrand_kwargs` is safe here
        # since the dict is only read (unpacked as **kwargs), never mutated.
        # Suppress numpy RuntimeWarnings
        with np.errstate(divide='ignore', invalid='ignore'):
            if numerical:
                if dz is None:
                    raise ValueError("Argument dz must be specified for "+
                                     "numerical integrals")
                if (z0<z_uniform)==(z1<z_uniform):
                    # z0 and z1 on same side of z_uniform
                    n_zs = int(np.abs(z1-z0)/dz)
                    # Use at least 10 steps so short segments stay accurate.
                    if n_zs<10:
                        n_zs = 10
                    zs = np.linspace(z0, z1, n_zs+1)
                    return integrand(zs, beta=beta, ice=ice, deep=z0<z_uniform,
                                     **integrand_kwargs)
                else:
                    # Split the integral at z_uniform so each piece is
                    # evaluated with the appropriate `deep` flag.
                    n_zs_1 = int(np.abs(z_uniform-z0)/dz)
                    if n_zs_1<10:
                        n_zs_1 = 10
                    zs_1 = np.linspace(z0, z_uniform, n_zs_1+1)
                    n_zs_2 = int(np.abs(z1-z_uniform)/dz)
                    if n_zs_2<10:
                        n_zs_2 = 10
                    zs_2 = np.linspace(z_uniform, z1, n_zs_2+1)
                    return (integrand(zs_1, beta=beta, ice=ice,
                                      deep=z0<z_uniform,
                                      **integrand_kwargs) +
                            integrand(zs_2, beta=beta, ice=ice,
                                      deep=z1<z_uniform,
                                      **integrand_kwargs))
            # Analytic integrals
            int_z0 = integrand(z0, beta, ice, deep=z0<z_uniform,
                               **integrand_kwargs)
            int_z1 = integrand(z1, beta, ice, deep=z1<z_uniform,
                               **integrand_kwargs)
            if not derivative_special_case:
                if (z0<z_uniform)==(z1<z_uniform):
                    # z0 and z1 on same side of z_uniform
                    return int_z1 - int_z0
                else:
                    # Limits straddle z_uniform: correct for the offset
                    # between the deep and shallow indefinite integrals.
                    int_diff = (
                        integrand(z_uniform, beta, ice, deep=True,
                                  **integrand_kwargs) -
                        integrand(z_uniform, beta, ice, deep=False,
                                  **integrand_kwargs)
                    )
                    if z0<z1:
                        # z0 below z_uniform, z1 above z_uniform
                        return int_z1 - int_z0 + int_diff
                    else:
                        # z0 above z_uniform, z1 below z_uniform
                        return int_z1 - int_z0 - int_diff
            else:
                # Deal with special case of doing distance integral beta derivative
                # which includes two bounds instead of just giving indef. integral
                # FIXME: Somewhat inaccurate, should probably be done differently
                z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
                if (z0<z_uniform)==(z1<z_uniform)==(z_turn<z_uniform):
                    # All on same side of z_uniform
                    return int_z0 + int_z1
                else:
                    int_diff = (
                        integrand(z_uniform, beta, ice, deep=True,
                                  **integrand_kwargs) -
                        integrand(z_uniform, beta, ice, deep=False,
                                  **integrand_kwargs)
                    )
                    if (z0<z_uniform)==(z1<z_uniform):
                        # z0 and z1 below z_uniform, but z_turn above
                        return int_z0 + int_z1 - 2*int_diff
                    else:
                        # z0 or z1 below z_uniform, others above
                        return int_z0 + int_z1 - int_diff
def z_integral(self, integrand, integrand_kwargs={}, numerical=False):
"""
Calculate the integral of the given integrand.
For the integrand as a function of z, the analytic or numerical
integral is calculated along the ray path.
Parameters
----------
integrand : function
Function returning the values of the integrand at a given array of
values for the depth z.
integrand_kwargs : dict, optional
A dictionary of keyword arguments to be passed into the `integrand`
function.
numerical : boolean, optional
Whether to use the numerical integral instead of an analytic one.
If ``False`` the analytic integral is calculated. If ``True`` the
numerical integral is calculated.
Returns
-------
float
The value of the integral along the ray path.
Raises
------
TypeError
If the ice model is not valid for the specialized analytic
integrations.
"""
if not self.valid_ice_model:
raise TypeError("Ice model must inherit methods from "+
"pyrex.AntarcticIce")
if self.direct:
return self._z_int_uniform_correction(self.z0, self.z1,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
else:
int_1 = self._z_int_uniform_correction(self.z0, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
int_2 = self._z_int_uniform_correction(self.z1, self.z_turn,
self.z_uniform,
self.beta, self.ice,
integrand, integrand_kwargs,
numerical, self.dz)
return int_1 + int_2
@staticmethod
def _int_terms(z, beta, ice):
"""
Useful pre-calculated substitutions for integrations.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
Returns
-------
alpha : float
``n0``^2 - `beta`^2
n_z : float
Index at depth `z`.
gamma : float
`n_z`^2 - `beta`^2
log_term_1 : float
``n0``*`n_z` - `beta`^2 - sqrt(`alpha`*`gamma`)
log_term_2 : float
`n_z` + sqrt(`gamma`)
"""
alpha = ice.n0**2 - beta**2
n_z = ice.n0 - ice.k*np.exp(ice.a*z)
gamma = n_z**2 - beta**2
# Prevent errors when gamma is a very small negative number due to
# numerical rounding errors. This could cause other problems for cases
# where a not-tiny negative gamma would have meant nans but now leads to
# non-nan values. It appears this only occurs when the launch angle
# is greater than the maximum value allowed in the ray tracer however,
# so it's likely alright. If problems arise, replace with gamma<0 and
# np.isclose(gamma, 0) or similar
gamma = np.where(gamma<0, 0, gamma)
log_term_1 = ice.n0*n_z - beta**2 - np.sqrt(alpha*gamma)
log_term_2 = -n_z - np.sqrt(gamma)
return alpha, n_z, gamma, log_term_1, -log_term_2
@classmethod
def _distance_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating radial distance.
Calculates the indefinite z-integral of tan(arcsin(beta/n(z))), which
between two z values gives the radial distance of the direct path
between the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return beta * z / np.sqrt(alpha)
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
0,
beta / np.sqrt(alpha) * (-z + np.log(log_1)/ice.a))
    @classmethod
    def _distance_integral_derivative(cls, z, beta, ice, deep=False):
        """
        Beta derivative of indefinite z-integral for radial distance.
        Calculates the beta derivative of the indefinite z-integral of
        tan(arcsin(beta/n(z))), which is used for finding the maximum distance
        integral value as a function of launch angle. This function actually
        gives the integral from z to the turning point ``z_turn``, since that
        is what's needed for finding the peak angle.
        Parameters
        ----------
        z : array_like
            (Negative-valued) depth (m) in the ice.
        beta : float
            ``beta`` value of the ray path.
        ice
            Ice model to be used for ray tracing.
        deep : boolean, optional
            Whether or not the integral is calculated in deep (uniform) ice.
        Returns
        -------
        array_like
            The value of the indefinite integral derivative at `z`.
        """
        alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
        # Depth at which the ray turns over, i.e. where n(z_turn) == beta.
        z_turn = np.log((ice.n0-beta)/ice.k)/ice.a
        if deep:
            if z_turn<ice.valid_range[1]:
                return ((np.log((ice.n0-beta)/ice.k)/ice.a - z -
                         beta/(ice.a*(ice.n0-beta))) / np.sqrt(alpha))
            else:
                # Turning point lies above the valid ice range, so the ray
                # reaches the upper boundary instead of turning over.
                return -z / np.sqrt(alpha)
        else:
            if z_turn<ice.valid_range[1]:
                term_1 = ((1+beta**2/alpha)/np.sqrt(alpha) *
                          (z + np.log(beta*ice.k/log_1) / ice.a))
                term_2 = -(beta**2+ice.n0*n_z) / (ice.a*alpha*np.sqrt(gamma))
            else:
                term_1 = -(1+beta**2/alpha)/np.sqrt(alpha)*(-z + np.log(log_1) /
                                                            ice.a)
                term_2 = -((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
                           (ice.a*alpha*np.sqrt(gamma)*log_1))
                # Re-evaluate the substitution terms at the top of the valid
                # ice range and add the boundary contributions there.
                alpha, n_z, gamma, log_1, log_2 = cls._int_terms(ice.valid_range[1], beta, ice)
                term_1 += (1+beta**2/alpha)/np.sqrt(alpha)*(np.log(log_1) /
                                                            ice.a)
                term_2 += ((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
                           (ice.a*alpha*np.sqrt(gamma)*log_1))
            # The derivative diverges for a straight-down ray (beta ~ 0).
            return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
                            np.inf,
                            term_1+term_2)
        # If the value of the integral just at z is needed (e.g. you want the
        # correct values when reflecting off the surface of the ice),
        # then use the terms below instead
        # Be warned, however, that this gives the wrong value when turning over
        # below the surface of the ice. The values get closer if only term_1
        # is returned in cases where gamma==0 (turning over in ice),
        # though the values are still slightly off
        # if deep:
        #     return z / np.sqrt(alpha)
        # term_1 = (1+beta**2/alpha)/np.sqrt(alpha)*(-z + np.log(log_1) / ice.a)
        # term_2 = ((beta*(np.sqrt(alpha)-np.sqrt(gamma)))**2 /
        #           (ice.a*alpha*np.sqrt(gamma)*log_1))
        # return np.where(gamma==0, term_1, term_1+term_2)
@classmethod
def _pathlen_integral(cls, z, beta, ice, deep=False):
"""
Indefinite z-integral for calculating path length.
Calculates the indefinite z-integral of sec(arcsin(beta/n(z))), which
between two z values gives the path length of the direct path between
the z values.
Parameters
----------
z : array_like
(Negative-valued) depth (m) in the ice.
beta : float
``beta`` value of the ray path.
ice
Ice model to be used for ray tracing.
deep : boolean, optional
Whether or not the integral is calculated in deep (uniform) ice.
Returns
-------
array_like
The value of the indefinite integral at `z`.
"""
alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
if deep:
return ice.n0 * z / np.sqrt(alpha)
else:
return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
z,
(ice.n0/np.sqrt(alpha) * (-z + np.log(log_1)/ice.a)
+ np.log(log_2) / ice.a))
    @classmethod
    def _tof_integral(cls, z, beta, ice, deep=False):
        """
        Indefinite z-integral for calculating time of flight.
        Calculates the indefinite z-integral of n(z)/c*sec(arcsin(beta/n(z))),
        which between two z values gives the time of flight of the direct path
        between the z values.
        Parameters
        ----------
        z : array_like
            (Negative-valued) depth (m) in the ice.
        beta : float
            ``beta`` value of the ray path.
        ice
            Ice model to be used for ray tracing.
        deep : boolean, optional
            Whether or not the integral is calculated in deep (uniform) ice.
        Returns
        -------
        array_like
            The value of the indefinite integral at `z`.
        """
        alpha, n_z, gamma, log_1, log_2 = cls._int_terms(z, beta, ice)
        if deep:
            return (ice.n0*(n_z+ice.n0*(ice.a*z-1))
                    / (ice.a*np.sqrt(alpha)*scipy.constants.c))
        else:
            # For a straight-down ray (beta ~ 0) use the degenerate closed
            # form; otherwise use the full indefinite integral. Division by
            # scipy.constants.c converts optical path length to time.
            return np.where(np.isclose(beta, 0, atol=cls.beta_tolerance),
                            ((n_z-ice.n0)/ice.a + ice.n0*z) / scipy.constants.c,
                            (((np.sqrt(gamma) + ice.n0*np.log(log_2) +
                               ice.n0**2*np.log(log_1)/np.sqrt(alpha))/ice.a) -
                             z*ice.n0**2/np.sqrt(alpha)) / scipy.constants.c)
    @classmethod
    def _attenuation_integral_def(cls, zs, f, beta, ice, deep=False):
        """
        Definite z-integral for calculating attenuation.
        Calculates the definite z-integral of sec(arcsin(beta/n(z)))/A(z,f),
        which between two z values gives the path length over attenuation length
        of the direct path between the z values.
        Parameters
        ----------
        zs : array_like
            (Negative-valued) depths (m) in the ice.
        f : array_like
            Frequencies (Hz) at which to calculate signal attenuation.
        beta : float
            ``beta`` value of the ray path.
        ice
            Ice model to be used for ray tracing.
        deep : boolean, optional
            Whether or not the integral is calculated in deep (uniform) ice.
        Returns
        -------
        array_like
            The value of the definite integral along `zs`.
        """
        fa = np.abs(f)
        if deep or np.isclose(beta, 0, atol=cls.beta_tolerance):
            int_var = zs
            # sec(theta(z)) path-length factor; finite away from z_turn.
            partial_integrand = 1 / np.cos(np.arcsin(beta/ice.index(zs)))
        else:
            # When approaching z_turn, the usual integrand approaches infinity.
            # In that case make the change of variables below to fix it.
            # The assumption now is that z_turn is always above z_uniform,
            # which is valid for most realistic detector configurations.
            int_var = np.sqrt(1 - (beta/ice.index(zs))**2)
            partial_integrand = (ice.index(zs)**3 / beta**2 /
                                 (-ice.k*ice.a*np.exp(ice.a*zs)))
        alen = ice.attenuation_length(zs, fa)
        # The transposes let the per-depth factor broadcast against the
        # per-depth, per-frequency attenuation lengths.
        integrand = (partial_integrand / alen.T).T
        return np.trapz(integrand, x=int_var, axis=0)
@lazy_property
def path_length(self):
"""Length (m) of the ray path."""
return np.abs(self.z_integral(self._pathlen_integral))
@lazy_property
def tof(self):
"""Time of flight (s) along the ray path."""
return np.abs(self.z_integral(self._tof_integral))
def attenuation(self, f):
"""
Calculate the attenuation factor for signal frequencies.
Calculates the attenuation factor to be multiplied by the signal
amplitude at the given frequencies. Uses numerical integration since
frequency dependence causes there to be no analytic form.
Parameters
----------
f : array_like
Frequencies (Hz) at which to calculate signal attenuation.
Returns
-------
array_like
Attenuation factors for the signal at the frequencies `f`.
"""
return np.exp(-np.abs(self.z_integral(
self._attenuation_integral_def,
integrand_kwargs={'f': f},
numerical=True
)))
    @lazy_property
    def coordinates(self):
        """
        x, y, and z-coordinates along the path (using dz step).
        Coordinates are provided for plotting purposes only, and are not vetted
        for use in calculations.
        """
        def r_int(z0, z1s):
            # Radial distance from z0 to each depth in z1s along the path.
            return np.array([self._z_int_uniform_correction(
                                z0, z, self.z_uniform, self.beta, self.ice,
                                self._distance_integral
                             )
                             for z in z1s])
        if self.direct:
            n_zs = int(np.abs(self.z1-self.z0)/self.dz)
            zs = np.linspace(self.z0, self.z1, n_zs+1)
            rs = r_int(self.z0, zs)
            # Flip the radial direction for downward-launched paths.
            rs *= np.sign(np.cos(self.theta0))
        else:
            # Indirect path: trace up to the turning point, then continue
            # from the turning point down/up to the receiving depth.
            n_zs_1 = int(np.abs(self.z_turn-self.z0)/self.dz)
            zs_1 = np.linspace(self.z0, self.z_turn, n_zs_1, endpoint=False)
            rs_1 = r_int(self.z0, zs_1)
            r_turn = r_int(self.z0, np.array([self.z_turn]))[0]
            n_zs_2 = int(np.abs(self.z_turn-self.z1)/self.dz)
            zs_2 = np.linspace(self.z_turn, self.z1, n_zs_2+1)
            rs_2 = r_turn - r_int(self.z_turn, zs_2)
            rs = np.concatenate((rs_1, rs_2))
            zs = np.concatenate((zs_1, zs_2))
        # Convert radial distances to x/y using the fixed azimuthal angle.
        xs = self.from_point[0] + rs*np.cos(self.phi)
        ys = self.from_point[1] + rs*np.sin(self.phi)
        return xs, ys, zs
class BasicRayTracer(LazyMutableClass):
"""
Class for calculating the ray-trace solutions between points.
Calculations performed by integrating z-steps of size ``dz``. Most
properties are lazily evaluated to save on computation time. If any
attributes of the class instance are changed, the lazily-evaluated
properties will be cleared.
Parameters
----------
from_point : array_like
Vector starting point of the ray path.
to_point : array_like
Vector ending point of the ray path.
ice_model : optional
The ice model used for the ray tracer.
dz : float, optional
The z-step (m) to be used for integration of the ray path attributes.
Attributes
----------
from_point : ndarray
The starting point of the ray path.
to_point : ndarray
The ending point of the ray path.
ice
The ice model used for the ray tracer.
dz : float
The z-step (m) to be used for integration of the ray path attributes.
solution_class
Class to be used for each ray-trace solution path.
exists
expected_solutions
solutions
See Also
--------
pyrex.internal_functions.LazyMutableClass : Class with lazy properties
which may depend on other class
attributes.
BasicRayTracePath : Class for representing a single ray-trace solution
between points.
Notes
-----
Even more attributes than those listed are available for the class, but
are mainly for internal use. These attributes can be found by exploring
the source code.
"""
solution_class = BasicRayTracePath
    def __init__(self, from_point, to_point, ice_model=ice, dz=1):
        # Endpoints are stored as arrays so vector arithmetic works on them.
        self.from_point = np.array(from_point)
        self.to_point = np.array(to_point)
        self.ice = ice_model
        self.dz = dz
        # Initialize LazyMutableClass so lazy properties are cleared whenever
        # any of the attributes above are changed later.
        super().__init__()
    @property
    def z_turn_proximity(self):
        """
        Parameter for how closely path approaches z_turn.
        Necessary to avoid diverging integrals which occur at z_turn.
        """
        # Stopping this far short of z_turn keeps the integrands finite.
        # Best value of dz/10 determined empirically by checking errors
        return self.dz/10
# Calculations performed as if launching from low to high
@property
def z0(self):
"""
Depth (m) of the lower endpoint.
Ray tracing performed as if launching from lower point to higher point,
since the only difference in the paths produced is a time reversal.
This is the depth of the assumed launching point.
"""
return min([self.from_point[2], self.to_point[2]])
@property
def z1(self):
"""
Depth (m) of the higher endpoint.
Ray tracing performed as if launching from lower point to higher point,
since the only difference in the paths produced is a time reversal.
This is the depth of the assumed receiving point.
"""
return max([self.from_point[2], self.to_point[2]])
    @lazy_property
    def n0(self):
        """Index of refraction of the ice at the lower endpoint."""
        # Reference index at the assumed launch depth z0; used by max_angle
        # and by the _direct_r/_indirect_r integrands.
        return self.ice.index(self.z0)
@lazy_property
def rho(self):
"""Radial distance between the endpoints."""
u = self.to_point - self.from_point
return np.sqrt(u[0]**2 + u[1]**2)
    @lazy_property
    def max_angle(self):
        """Maximum possible launch angle that could connect the endpoints."""
        # Angle at which n0*sin(angle) equals the index at z1, i.e. the
        # arcsin in the _direct_r integrand reaches 90 degrees at z1.
        return np.arcsin(self.ice.index(self.z1)/self.n0)
    @lazy_property
    def peak_angle(self):
        """
        Angle at which the indirect solutions curve (in r vs angle) peaks.
        This angle separates the angle intervals to be used for indirect
        solution root-finding.
        """
        # Try progressively looser tolerances and coarser derivative steps
        # until the search for a zero of the r-vs-angle derivative
        # (via _indirect_r_prime) converges.
        for tolerance in np.logspace(-12, -4, num=3):
            for angle_step in np.logspace(-3, 0, num=4):
                r_func = (lambda angle, brent_arg:
                          self._indirect_r_prime(angle, brent_arg,
                                                 d_angle=angle_step))
                try:
                    peak_angle = self.angle_search(0, r_func,
                                                   angle_step, self.max_angle,
                                                   tolerance=tolerance)
                except (RuntimeError, ValueError):
                    # Failed to converge
                    continue
                else:
                    # Mirror angles past 90 degrees back into [0, pi/2].
                    if peak_angle>np.pi/2:
                        peak_angle = np.pi - peak_angle
                    return peak_angle
        # If all else fails, just use the max_angle
        return self.max_angle
@lazy_property
def direct_r_max(self):
"""Maximum r value of direct ray solutions."""
z_turn = self.ice.depth_with_index(self.n0 * np.sin(self.max_angle))
return self._direct_r(self.max_angle,
force_z1=z_turn-self.z_turn_proximity)
    @lazy_property
    def indirect_r_max(self):
        """Maximum r value of indirect ray solutions."""
        # The r vs launch-angle curve for indirect rays peaks at peak_angle.
        return self._indirect_r(self.peak_angle)
@lazy_property
def exists(self):
"""Boolean of whether any paths exist between the endpoints."""
return True in self.expected_solutions
@lazy_property
def expected_solutions(self):
"""
List of which types of solutions are expected to exist.
The first element of the list represents the direct path, the second
element represents the indirect path with a launch angle greater than
the peak angle, and the third element represents the indirect path with
a launch angle less than the peak angle.
"""
if not(self.ice.contains(self.from_point) and
self.ice.contains(self.to_point)):
return [False, False, False]
if self.rho<self.direct_r_max:
return [True, False, True]
elif self.rho<self.indirect_r_max:
return [False, True, True]
else:
return [False, False, False]
    @lazy_property
    def solutions(self):
        """
        List of existing rays between the two points.
        This list should have zero elements if there are no possible paths
        between the endpoints or two elements otherwise, representing the
        more direct and the less direct paths, respectively.
        """
        # Candidate launch angles in the same order as expected_solutions:
        # direct path, then the two indirect paths.
        angles = [
            self.direct_angle,
            self.indirect_angle_1,
            self.indirect_angle_2
        ]
        # Keep only the paths that are expected to exist and whose angle
        # search actually produced an angle.
        return [self.solution_class(self, angle, direct=(i==0))
                for i, angle, exists in zip(range(3), angles,
                                            self.expected_solutions)
                if exists and angle is not None]
    def _direct_r(self, angle, brent_arg=0, force_z1=None):
        """
        Calculate the r distance of the direct ray for a given launch angle.
        Parameters
        ----------
        angle : float
            Launch angle (radians) of a direct ray.
        brent_arg : float, optional
            Argument to subtract from the return value. Used for the brentq
            root finder to find a value other than zero.
        force_z1 : float or None, optional
            Value to use for the ``z1`` receiving depth. If ``None``, the
            ``z1`` property of the class will be used. Useful for changing the
            integration limits to integrate to the turning point instead.
        Returns
        -------
        float
            Value of the radial distance integral minus the `brent_arg`.
        """
        if force_z1 is not None:
            z1 = force_z1
        else:
            z1 = self.z1
        # Numerically integrate tan(theta(z)) over depth, where theta(z)
        # satisfies sin(angle) * n0 == n(z) * sin(theta(z)).
        n_zs = int(np.abs((z1-self.z0)/self.dz))
        zs, dz = np.linspace(self.z0, z1, n_zs+1, retstep=True)
        integrand = np.tan(np.arcsin(np.sin(angle) *
                                     self.n0/self.ice.index(zs)))
        return np.trapz(integrand, dx=dz) - brent_arg
def _indirect_r(self, angle, brent_arg=0):
"""
Calculate the r distance of the indirect ray for a given launch angle.
Parameters
----------
angle : float
Launch angle (radians) of an indirect ray.
brent_arg : float, optional
Argument to subtract from the return value. Used for the brentq
root finder to find a value other than zero.
Returns
-------
float
Value of the radial distance integral minus the `brent_arg`.
"""
        z_turn = self.ice.depth_with_index(self.n0 * np.sin(angle))
# %%
'''
## Advanced Lane Finding Project
The goals / steps of this project are the following:
* Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
* Apply a distortion correction to raw images.
* Use color transforms, gradients, etc., to create a thresholded binary image.
* Apply a perspective transform to rectify binary image ("birds-eye view").
* Detect lane pixels and fit to find the lane boundary.
* Determine the curvature of the lane and vehicle position with respect to center.
* Warp the detected lane boundaries back onto the original image.
* Output visual display of the lane boundaries and numerical estimation of lane curvature and vehicle position.
---
## First, I'll compute the camera calibration using chessboard images
'''
import glob
import os
import cv2
import matplotlib
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
matplotlib.style.use('ggplot')
def read_image(file):
    """Read an image file into an array via matplotlib's imread."""
    return mpimg.imread(file)
def grayscale(img):
    """Convert an image array to single-channel grayscale.
    NOTE(review): this uses COLOR_BGR2GRAY, but images loaded through
    `read_image` (matplotlib) are in RGB order, for which COLOR_RGB2GRAY
    would weight the red/blue channels correctly — confirm the intended
    image source before changing.
    """
    return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Get the undistorted grid of points. This is used as reference by the calibrate camera function to
# create a calibration matrix from the detected corners on the chessboard image.
def un_distorted_grid(nx, ny):
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
    objp = np.zeros((nx * ny, 3), np.float32)
"""
This file is uses slightly modified code from pyDRMetrics [1]_, see:
- https://doi.org/10.1016/j.heliyon.2021.e06199 - the article.
- https://data.mendeley.com/datasets/jbjd5fmggh/1 - the supplementary files.
The following changes have been made:
- :mod:`numba` JIT for performance reasons
- use broadcasting instead of a 3rd loop in :func:`_ranking_matrix`
[1] <NAME> (2021),
“Source code, sample data, and case study report for pyDRMetrics”,
Mendeley Data, V1, doi: 10.17632/jbjd5fmggh.1
"""
from ....tools.decorators import metric
from ....tools.normalize import log_cpm_hvg
from anndata import AnnData
from numba import njit
from scipy.sparse import issparse
from sklearn.metrics import pairwise_distances
from typing import Tuple
import numpy as np
__original_author__ = "<NAME>"
__original_author_email__ = "<EMAIL>"
__license__ = "CC BY 4.0"
__license_link__ = (
"https://data.mendeley.com/datasets/"
"jbjd5fmggh/1/files/da1bca42-c4da-4376-9177-bd2d9a308108"
)
_K = 30
@njit(cache=True, fastmath=True)
def _ranking_matrix(D: np.ndarray) -> np.ndarray:  # pragma: no cover
    """Compute the ranking matrix R of a square distance matrix D.
    R[i, j] is the rank of point j when all points are ordered by their
    distance from point i (smaller distance = smaller rank).
    """
    assert D.shape[0] == D.shape[1]
    R = np.zeros(D.shape)
    m = len(R)
    ks = np.arange(m)
    for i in range(m):
        for j in range(m):
            # Count entries strictly closer than D[i, j], plus earlier-index
            # entries at (numerically) the same distance — the tolerance term
            # breaks ties by column index. Broadcasting over D[i, :] replaces
            # the third explicit loop of the original pyDRMetrics code.
            R[i, j] = np.sum(
                (D[i, :] < D[i, j]) | ((ks < j) & (np.abs(D[i, :] - D[i, j]) <= 1e-12))
            )
    return R
@njit(cache=True, fastmath=True)
def _coranking_matrix(R1: np.ndarray, R2: np.ndarray) -> np.ndarray:  # pragma: no cover
    """Build the co-ranking matrix Q from two ranking matrices.
    Q[k, l] counts the pairs (i, j) whose rank is k in R1 and l in R2
    (typically the original and embedded spaces, respectively).
    """
    assert R1.shape == R2.shape
    Q = np.zeros(R1.shape, dtype=np.int32)
    m = len(Q)
    for i in range(m):
        for j in range(m):
            # Ranks are float-valued entries; use them as integer indices.
            k = int(R1[i, j])
            l = int(R2[i, j])  # noqa: E741
            Q[k, l] += 1
    return Q
@njit(cache=True, fastmath=True)
def _metrics(
Q: np.ndarray,
) -> Tuple[
np.ndarray, np.ndarray, np.ndarray, float, np.ndarray, int, float, float
]: # pragma: no cover
Q = Q[1:, 1:]
m = len(Q)
T = np.zeros(m - 1) # trustworthiness
C = np.zeros(m - 1) # continuity
    QNN = np.zeros(m)
import numpy as np
from collections import OrderedDict
import math
import random
import gym
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.distributions import Normal
# Expects tuples of (state, next_state, action, reward, done)
class ReplayBuffer(object):
    def __init__(self, max_size=1e6):
        # NOTE(review): max_size defaults to a float (1e6); the equality and
        # modulo below still work, but self.ptr becomes a float after the
        # first wrap-around — int(max_size) would keep indices integral.
        self.storage = []
        self.max_size = max_size
        # Write pointer for the ring-buffer overwrite once the buffer is full.
        self.ptr = 0
    def add(self, data):
        """Append a transition; once full, overwrite the oldest entry (ring buffer)."""
        if len(self.storage) == self.max_size:
            # Buffer full: overwrite at the write pointer and advance it.
            self.storage[int(self.ptr)] = data
            self.ptr = (self.ptr + 1) % self.max_size
        else:
            self.storage.append(data)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
x, y, u, r, d = [], [], [], [], []
for i in ind:
X, Y, U, R, D = self.storage[i]
x.append(np.array(X, copy=False))
y.append(np.array(Y, copy=False))
u.append(np.array(U, copy=False))
r.append(np.array(R, copy=False))
d.append(np.array(D, copy=False))
        return np.array(x), np.array(y), np.array(u), np.array(r).reshape(-1, 1), np.array(d).reshape(-1, 1)
#!/usr/bin/env python
# coding: utf-8
# # <center>Lab 1</center>
# ## <center> Optical Digit Recognition </center>
# 
# ### Description:
# The scope of this exercise is the implementation of __an optical digit recognition system__. Our dataset comes from __US Postal Service__, written by hand (scanned from postal envelopes), and contains digits from 0 to 9 separated in train and test set.
# ### Data:
# We are given two text files (train.txt and text.txt). Each line corresponds to a sample-digit and each collumn corresponds to a features of the digit. For example, the value (i, j) is the j-th feature of the i-th digit. Every digit is described from 257 values. The first value is the class (if it is 0, 1 etc) and the rest 256 values are the pixels that describe it in grayscale.
# ### Implementation:
# First, we import all the necessary libraries and suppress some unnecessary warnings.
# In[1]:
# various
import numpy as np
from matplotlib import pyplot as plt
import random
import scipy.stats
# sklearn
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.model_selection import KFold, learning_curve, ShuffleSplit, cross_val_score, train_test_split
from sklearn.svm import SVC
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier
# pytorch
from torch.utils.data import Dataset, DataLoader
import torch
from torch import nn
from torch import optim
# In[2]:
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# #### The first 13 steps were implemented as a part of the PrepareLab located in prepare_lab folder.
# __Step 1:__ Read input data from given text files.
# In[3]:
# Define useful variables (USPS digits dataset dimensions)
data_path = "./pr_lab1_2016-17_data_0/pr_lab1_2016-17_data"
train_size = 7291
test_size = 2007
n_features = 256
# Initialize X_train, X_test, y_train, y_test
X_train = np.zeros((train_size, n_features), dtype=np.float64)
X_test = np.zeros((test_size, n_features), dtype=np.float64)
y_train = np.zeros(train_size, dtype='int64')
y_test = np.zeros(test_size, dtype='int64')
# Read train data
with open(data_path + "/train.txt") as f:
    for i, line in enumerate(f):
        # Split i-th line
        line = line.split()
        # Keep the first column as the class of the i-th digit
        y_train[i] = int(float(line[0]))
        # Keep the rest 256 values as the pixels of the i-th digit.
        for j, pixel in enumerate(line[1:]):
            X_train[i][j] = pixel
print("Finished reading training data.")
# Read test data
with open(data_path + "/test.txt") as f:
    for i, line in enumerate(f):
        # Split i-th line
        line = line.split()
        # Keep the first column as the class of the i-th digit
        y_test[i] = int(float(line[0]))
        # Keep the rest 256 values as the pixels of the i-th digit.
        for j, pixel in enumerate(line[1:]):
            X_test[i][j] = pixel
print("Finished reading test data.")
# __Step 2:__ Display a certain sample (index 131) as an 16x16 image.
# In[4]:
# Reshape the 256 vector in a 16x16 matrix.
img_131 = np.reshape(X_train[131], (16, 16))
# Turn the axis off and display the image.
plt.axis('off')
plt.imshow(img_131)
# __Step 3:__ Display one random image from each digit.
# In[5]:
# Define a figure with 10 plots.
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
# Pick all images of current digit
curr_data = []
for j, y in enumerate(y_train):
if y == digit:
curr_data.append(X_train[j])
# Select randomly an image
sample = random.choice(curr_data)
# Display the randomly selected image in a subplot
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(sample, (16, 16)))
plt.show()
# __Step 4:__ Compute the mean value of pixel (10,10) of all 0's in the train set.
# In[6]:
# Get indexes of 0's in the train set
idx_0 = [i for i in range(train_size) if y_train[i] == 0]
# Get pixel (10,10) of all 0's
X_train_0_10 = np.take(X_train[:, 10*16+10], idx_0)
# Compute mean
mean_0_10 = np.mean(X_train_0_10)
print("Mean value of pixel (10, 10) of all 0's in the train set is: " + str(mean_0_10))
# __Step 5:__ Compute variance of (10,10) pixel of all 0's in the train set
# In[7]:
var_0_10 = np.var(X_train_0_10)
print("Variance of pixel (10, 10) of all 0's in the train set is: " + str(var_0_10))
# __Step 6:__ Compute mean value and variance of every pixel of 0's in the train set
# In[8]:
# Get pixels of all 0's
X_train_0 = np.take(X_train, idx_0, axis=0)
# Compute mean value along each pixel
mean_0 = np.mean(X_train_0, axis=0, keepdims=True)
# Compute variance along each pixel
var_0 = np.var(X_train_0, axis=0, keepdims=True)
# Verify their shape
print("Shape of mean values: " + str(mean_0.shape))
print("Shape of variances: " + str(var_0.shape))
# __Step 7:__ Display digit '0' using the mean value of each pixel.
# In[9]:
plt.axis("off")
plt.imshow(np.reshape(mean_0, (16, 16)))
# __Step 8:__ Display '0' using the variance of each pixel.
# In[10]:
plt.axis("off")
plt.imshow(np.reshape(var_0, (16, 16)))
# We observe that the digit in the mean-image contains less noise than in the variance-image. However, in both images the digit can be distinguished.
# __Step 9:__
#
# __(a)__ Compute the mean value and the variance for all digits (0-9).
# In[11]:
mean = np.zeros((10, 256))
var = np.zeros((10, 256))
for digit in range(10):
idx_i = [i for i in range(train_size) if y_train[i] == digit]
X_train_i = np.take(X_train, idx_i, axis=0)
mean[digit, :] = np.mean(X_train_i, axis=0, keepdims=True)
var[digit, :] = np.var(X_train_i, axis=0, keepdims=True)
# __(b)__ Display all digits using their computed mean value.
# In[12]:
fig = plt.figure(figsize=(15,6))
columns = 5
rows = 2
for digit in range(10):
fig.add_subplot(rows, columns, digit+1)
plt.axis('off')
plt.imshow(np.reshape(mean[digit, :], (16, 16)))
plt.show()
# __Step 10:__ Classify X_test[101], using Euclidean distance.
# In[13]:
# Define a function that classifies a sample based on the
# euclidean distance.
def predict_eucl(x):
    """Return the digit (0-9) whose class-mean vector is closest to x
    in Euclidean distance (uses the module-level `mean` array)."""
    distances = [np.linalg.norm(x - mean[digit, :]) for digit in range(10)]
    # np.argmin keeps the first minimum, matching the strict-< scan it replaces.
    return int(np.argmin(distances))
print("Prediction: " + str(predict_eucl(X_test[101])))
print("Ground truth: " + str(y_test[101]))
# In[14]:
plt.axis('off')
plt.imshow(np.reshape(X_test[101], (16, 16)))
# We observe that the classification is wrong, since X_test[101] is the digit 6.
# __Step 11:__
#
# __(a)__ Classify test set using Euclidean distance
# In[15]:
# Compute predictions for each test sample
y_pred = np.zeros(test_size)
for i, x in enumerate(X_test):
y_pred[i] = predict_eucl(x)
# __(b)__ Compute accuracy
# In[16]:
# Count number of correct predictions and output the total accuracy.
corr = 0
for i in range(len(y_test)):
if y_test[i] == y_pred[i]:
corr += 1
acc = corr / len(y_test) * 100
print("Accuracy of Euclidean classifier in test set: " + str(acc))
# __Step 12:__ Create a scikit-learn euclidean estimator
# In[17]:
class EuclideanClassifier(BaseEstimator, ClassifierMixin):
    """Classify samples based on the distance from the mean feature value.

    A minimum-distance (nearest-centroid) classifier: ``fit`` stores the mean
    feature vector of each class, ``predict`` assigns each sample the label of
    the closest class mean.
    """

    def __init__(self):
        # (n_classes, n_features) array of per-class feature means, set by fit()
        self.X_mean_ = None
        # sorted array of class labels seen in fit()
        self.classes_ = None

    def fit(self, X, y):
        """
        Fit the classifier.

        Calculates self.X_mean_ based on the mean feature values in X for
        each class; row k of self.X_mean_ corresponds to self.classes_[k].

        Unlike the previous version, labels are matched by value rather than
        assumed to be the contiguous integers 0..n_classes-1.

        fit always returns self (sklearn convention).
        """
        X = np.asarray(X)
        y = np.asarray(y)
        self.classes_ = np.unique(y)
        # Boolean-mask mean per class: one vectorised pass per label.
        self.X_mean_ = np.stack([X[y == c].mean(axis=0) for c in self.classes_])
        return self

    def predict(self, X):
        """
        Predict labels for X based on the Euclidean distance to self.X_mean_.

        Returns values from self.classes_ (identical to the old index-based
        return whenever the labels are 0..n_classes-1).
        """
        X = np.asarray(X)
        # Pairwise distances (n_samples, n_classes) via broadcasting; avoids
        # the sklearn euclidean_distances dependency.
        dists = np.linalg.norm(X[:, None, :] - self.X_mean_[None, :, :], axis=2)
        return self.classes_[np.argmin(dists, axis=1)]

    def score(self, X, y):
        """
        Return accuracy of the predictions for X against ground truth y.
        """
        return float(np.mean(self.predict(X) == np.asarray(y)))
# __Step 13:__
#
# __(a)__ Score above euclidean classifier using 5-fold cross-validation
# In[18]:
# Define a custom scorer
def my_scorer(clf, X, y_true):
    """Custom scorer for cross_val_score: defer to the estimator's own score()."""
    return clf.score(X, y_true)
# Create the classifier
clf = EuclideanClassifier()
scores = cross_val_score(clf, X_train, y_train,
cv=KFold(n_splits=5, random_state=42),
scoring=my_scorer)
print("Euclidean Classifier score from 5-fold cross-validation = %f +-%f" % (np.mean(scores), np.std(scores)))
# __(b)__ Plot the decision surface of the euclidean classifier
# In[19]:
# Define a function that plots the decision surface of 2-dimensional data
def plot_clf(clf, X, y, labels):
    """Plot the decision surface of a fitted classifier over 2-D data.

    :param clf: fitted estimator with a predict() method
    :param X: (n_samples, 2) array of 2-D points to scatter
    :param y: class labels (integers 0-9) for each row of X
    :param labels: legend label for each of the ten classes
    """
    fig, ax = plt.subplots()
    # title for the plots
    title = ('Decision surface of Classifier')
    # Set-up grid for plotting.
    X0, X1 = X[:, 0], X[:, 1]
    x_min, x_max = X0.min() - 1, X0.max() + 1
    y_min, y_max = X1.min() - 1, X1.max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, .05),
                         np.arange(y_min, y_max, .05))
    # Predict on every grid point to colour the decision regions.
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    ax.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
    # One scatter call per class; replaces ten copy-pasted blocks.
    colors = ['blue', 'red', 'purple', 'green', 'gray',
              'orange', 'black', 'pink', 'white', 'yellow']
    for cls, color in enumerate(colors):
        ax.scatter(
            X0[y == cls], X1[y == cls],
            c=color, label=labels[cls],
            s=60, alpha=0.9, edgecolors='k')
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title(title)
    ax.legend()
    plt.show()
# Since our data is 256-dimensional, we should apply a dimensionality reduction technique in order to plot them in 3D space. We choose to use PCA.
# In[20]:
# Define PCA
pca = PCA(n_components=2)
pca.fit(X_train)
# Apply PCA on train and test set
X_train_2d = pca.transform(X_train)
X_test_2d = pca.transform(X_test)
# In[21]:
# Train a classifier on the 2D data and plot the decision boundary.
clf = EuclideanClassifier()
clf.fit(X_train_2d, y_train)
plot_clf(clf, X_test_2d, y_test, [i for i in range(10)])
# The plot is a bit complex, since we have 10 classes instead of 2.
# __(c)__ Plot the learning curve of the euclidean classifier.
# In[22]:
# Function from https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - :term:`CV splitter`,
          - An iterable yielding (train, test) splits as arrays of indices.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : int or None, optional (default=None)
        Number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See :term:`Glossary <n_jobs>`
        for more details.
    train_sizes : array-like, shape (n_ticks,), dtype float or int
        Relative or absolute numbers of training examples that will be used to
        generate the learning curve. If the dtype is float, it is regarded as a
        fraction of the maximum size of the training set (that is determined
        by the selected validation method), i.e. it has to be within (0, 1].
        Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually have to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 5))
    """
    # Taken from the scikit-learn example gallery (see link in the comment
    # preceding this function in the file).
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve re-fits the estimator for every (CV fold, train size) pair.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation across CV folds.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
# In[23]:
title = "Learning Curve of Euclidean Classifier"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
estimator = EuclideanClassifier()
plot_learning_curve(estimator, title, X_train, y_train, (0.8, 1.01), cv=cv, n_jobs=8)
plt.show()
# #### The next steps are implemented as part of the main lab
# __Step 14:__ Compute the a-priori probabilities of each class, using the above formula:
#
# \begin{align*}
# prior(c_i) = \frac{N_i}{N}
# \end{align*}
#
# where $N_i$ is the number of the training samples that represent digit i and $N$ is the training size.
# In[24]:
prior = np.bincount(y_train.astype(int)) / train_size
for i in range(10):
print("Digit " + str(i) + ": " + str(prior[i]))
# For testing purposes
print("Sum is equal to: " + str(sum(prior)))
# __Step 15:__
#
# __(a)__ Creation of a Gaussian Naive Bayes classifier using NumPy.
# The Naive Bayes classifier is based on the above equation:
#
# \begin{align*}
# posterior = \frac{likelihood * prior}{evidence}
# \end{align*}
#
# or more formally,
#
# \begin{align*}
# P(c_i | x) = \frac{P(x | c_i) * P(c_i)}{P(x)}
# \end{align*}
#
# In practice, there is interest only in the numerator of that fraction, because the denominator does not depend on C and the values of the features $x_{i}$ are given, so that the denominator is effectively constant. The prior probabilities $P(c_i)$ can be computed as above and the likelihood $P(x | c_i)$ is taken from a normal distribution with the mean value and the variance of the corresponding pixel. After computing the above fraction, the class with the maximum posterior probability is taken. This is known as the maximum a posteriori or MAP decision rule.
#
# \begin{align*}
# y = argmax_{k \in \{0, .., 9\}} P(c_k) \prod_{i=1}^{n} P(x_i | c_k)
# \end{align*}
#
# In[25]:
class GaussianNB_np(BaseEstimator, ClassifierMixin):
"""Classify samples based on the Gaussian Naive Bayes"""
def __init__(self):
self.X_mean_ = None
self.X_var_ = None
self.prior = None
self.n_classes = None
def fit(self, X, y):
"""
This should fit classifier. All the "work" should be done here.
Calculates self.X_mean_ and self.X_var_ based on the mean
feature values in X for each class. Also, calculates self.prior
that contains the prior probability of each class.
self.X_mean_ becomes a numpy.ndarray of shape
(n_classes, n_features)
self.X_var_ becomes a numpy.ndarray of shape
(n_classes, n_features)
self.prior becomes a numpy.array of shape
(n_classes)
fit always returns self.
"""
# Initialize useful variables
train_size, n_features = X.shape
self.n_classes = len(np.unique(y))
self.X_mean_ = np.zeros((self.n_classes, n_features))
self.X_var_ = np.zeros((self.n_classes, n_features))
# Compute mean and variance values for each class
for k in range(self.n_classes):
idx_i = [i for i in range(train_size) if y[i] == k]
X_k = np.take(X, idx_i, axis=0)
self.X_mean_[k, :] = | np.mean(X_k, axis=0, keepdims=True) | numpy.mean |
"""
Utility functions for the validation scripts.
"""
from htof.parse import DataParser, HipparcosOriginalData, HipparcosRereductionDVDBook, HipparcosRereductionJavaTool
from htof.fit import AstrometricFitter
from htof.sky_path import parallactic_motion, earth_ephemeris
from astropy import time
from astropy.coordinates import Angle
from astropy.table import Table
from glob import glob
import os
import warnings
import numpy as np
def refit_hip_fromdata(data: DataParser, fit_degree, cntr_RA=Angle(0, unit='degree'), cntr_Dec=Angle(0, unit='degree'),
use_parallax=False):
data.calculate_inverse_covariance_matrices()
# generate parallax motion
jyear_epoch = time.Time(data.julian_day_epoch(), format='jd', scale='tcb').jyear
# note that ra_motion and dec_motion are in degrees here.
# generate sky path
year_epochs = jyear_epoch - time.Time(1991.25, format='decimalyear', scale='tcb').jyear
ra_motion, dec_motion = parallactic_motion(jyear_epoch, cntr_RA.degree, cntr_Dec.degree, 'degree',
time.Time(1991.25, format='decimalyear', scale='tcb').jyear,
ephemeris=earth_ephemeris) # Hipparcos was in a geostationary orbit.
ra_resid = Angle(data.residuals.values * np.sin(data.scan_angle.values), unit='mas')
dec_resid = Angle(data.residuals.values * np.cos(data.scan_angle.values), unit='mas')
# instantiate fitter
fitter = AstrometricFitter(data.inverse_covariance_matrix, year_epochs,
use_parallax=use_parallax, fit_degree=fit_degree,
parallactic_pertubations={'ra_plx': Angle(ra_motion, 'degree').mas,
'dec_plx': Angle(dec_motion, 'degree').mas})
fit_coeffs, errors, chisq = fitter.fit_line(ra_resid.mas, dec_resid.mas, return_all=True)
parallax_factors = ra_motion * np.sin(data.scan_angle.values) + dec_motion * | np.cos(data.scan_angle.values) | numpy.cos |
"""
3d vascular growth sim
just the commands
"""
import io
import numpy as np
from scipy import spatial as spspat
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import integrate as spint
import time
def sphere_init_config(fovea_radius=0.3, lens_depth=0.3, num_pts=100,
                       inner_rad=0.8, outer_rad=1.2, prune_into_eye=True):
    """
    Rejection-sample `num_pts` auxin points on a spherical shell.

    Each sample is a pair [unit_direction (3-vector), radius] with the
    direction uniform on the unit sphere and the radius uniform in
    [inner_rad, outer_rad].  With prune_into_eye=True, points whose
    Cartesian position lies in the lens cap (z > 1 - lens_depth) or within
    fovea_radius of the fovea at (0, 0, -1) are rejected and re-drawn.

    :return: object ndarray of shape (num_pts, 2)

    Fixes two defects of the previous version: with prune_into_eye=False it
    never appended anything and looped forever, and the ragged
    np.array(sample) construction requires an explicit dtype=object on
    modern NumPy.
    """
    sample = []
    while len(sample) < num_pts:
        direction = np.random.normal(size=3)
        direction /= np.linalg.norm(direction)
        radius = np.random.rand() * (outer_rad - inner_rad) + inner_rad
        if prune_into_eye:
            cartesian = direction * radius
            # Reject points inside the lens region or too close to the fovea.
            if cartesian[-1] > 1 - lens_depth or \
                    np.linalg.norm(cartesian - np.array([0., 0., -1.])) < fovea_radius:
                continue
        sample.append([direction, radius])
    # dtype=object: each row mixes a 3-vector with a scalar (ragged), which
    # newer NumPy refuses to stack implicitly.
    return np.array(sample, dtype=object)
def geodesic_dist(p1, p2):
    """Distance between two (unit-direction, radius) points: great-circle
    angle between the directions plus the absolute radius difference."""
    cos_angle = np.dot(p1[0], p2[0])
    if np.abs(cos_angle) > 1.:
        # Guard against round-off pushing the cosine outside [-1, 1].
        cos_angle = np.sign(cos_angle)
    return np.arccos(cos_angle) + np.abs(p1[1] - p2[1])
def tangent_vector(p1, p2, normalized=True):
    """
    Tangent at p1 pointing toward p2 on the (unit sphere x radius) manifold.

    :param p1, p2: pairs [unit_direction (3-vector), radius]
    :param normalized: if True, return a unit sphere-tangent and the sign of
        the radial difference; if False, scale the tangent by the arc length
        and return the signed radial difference itself.
    :return: object ndarray [tangent (3-vector), radial component]

    Fix: the ragged np.array([vector, scalar]) return requires an explicit
    dtype=object on modern NumPy.
    NOTE(review): normalized=True divides by |r2 - r1| and so raises a
    ZeroDivisionError when both radii are equal, as the original did.
    """
    cos_angle = np.dot(p1[0], p2[0])
    if np.abs(cos_angle) > 1.:
        # Guard against round-off pushing the cosine outside [-1, 1].
        cos_angle = np.sign(cos_angle)
    # Component of p2's direction orthogonal to p1's, normalised.
    direction = p2[0] - cos_angle * np.array(p1[0])
    direction /= np.linalg.norm(direction)
    if normalized:
        radial = (p2[1] - p1[1]) / np.abs(p2[1] - p1[1])
        return np.array([direction, radial], dtype=object)
    return np.array([np.arccos(cos_angle) * direction, p2[1] - p1[1]],
                    dtype=object)
def exp_map(pt, direction):
    """
    Exponential map on the (unit sphere x radius) product manifold.

    Moves from `pt` = [unit_direction, radius] along the tangent
    `direction` = [sphere_tangent, radial_step]: the spherical part follows
    the great circle by an angle equal to the tangent's norm, the radial
    part is advanced additively.

    Fix: the ragged np.array([vector, scalar]) return requires an explicit
    dtype=object on modern NumPy; dead commented-out code removed.
    """
    dirnorm = np.linalg.norm(direction[0])
    new_direction = (np.cos(dirnorm) * np.array(pt[0])
                     + np.sin(dirnorm) * np.array(direction[0]) / dirnorm)
    return np.array([new_direction, pt[1] + direction[1]], dtype=object)
#exp_map([0.,0.,1.2],tangent_vector([0.,0.,1.2],[0.,1,0.]))
"""
p1 = [[0.,0.,1.],1.1]
p2 = [[0.0,1.1,0.],0.9]
print(geodesic_dist(p1,p2))
print(tangent_vector(p1,p2))
"""
"""
X = sphere_init_config(num_pts = 1000)
fig = plt.figure()
ax = fig.add_subplot(111,projection="3d")
ax.scatter(X[:,0],X[:,1],X[:,2])
plt.show()
"""
def prune_dist_chart(dist_chart,min_dist_pointers,death_dist = 0.1):
    # Unimplemented stub: always returns None and is never called in this
    # module (auxin pruning is done inline in vascular_growth_sim).
    return
def vascular_growth_sim(num_iterations = 3,fovea_radius = 0.3,lens_depth = 0.5,noisy = True,max_iter = 10,init_num_pts = 1000,inner_rad = 0.7,outer_rad = 1.2, growth_type = "average",weighted_stepsizes = True,D_step = 0.05,death_dist = 0.05,save_time_data = False):
    """
    Space-colonisation style simulation of vascular growth on a spherical
    shell (a model eye).

    Auxin (attractor) points are sampled on the shell; vein tips step toward
    their attractors in increments of D_step, and an auxin point is removed
    once some vein node comes within death_dist of it.  Each outer iteration
    re-samples twice as many auxin points and shrinks D_step/death_dist to
    refine the network.

    :param num_iterations: number of refinement rounds (auxin re-sampling)
    :param fovea_radius: exclusion radius around the fovea at (0, 0, -1)
    :param lens_depth: depth of the excluded lens cap near z = +1
    :param noisy: if True, print progress every growth step
    :param max_iter: maximum growth steps per refinement round
    :param init_num_pts: auxin points sampled in the first round
    :param inner_rad: inner radius of the sampling shell
    :param outer_rad: outer radius of the sampling shell
    :param growth_type: "average" steps toward the mean of a tip's
        attractors; "nearest" steps toward the single closest one
    :param weighted_stepsizes: if True use unit tangents (fixed step
        length); otherwise scale each step by the geodesic distance
    :param D_step: growth step size
    :param death_dist: auxin kill radius
    :param save_time_data: if True, also return per-step snapshots
    :return: (points, branches, branch_membership, init_sample[, time_data])
    """
    #set up data structure
    # Each point is [unit direction (3-vector), radius]; the seed tip sits on
    # the outer shell.
    pt_list = [[[0.5,0.,-0.5*np.sqrt(3)],outer_rad]]
    # 1 marks an active growth tip, 0 an interior (already-grown) node.
    to_grow_indicator = np.array([1])
    branches = [[0]]
    branch_membership = [[0]]
    if save_time_data:
        time_data = [[pt_list,list(branches),list(branch_membership)]]
    #start the iteration
    for iter_count in range(num_iterations):
        #sample auxin
        if iter_count == 0:
            sample_auxin = sphere_init_config(fovea_radius = fovea_radius,lens_depth = lens_depth,num_pts = init_num_pts,inner_rad = inner_rad,outer_rad = outer_rad)
            init_sample = np.array(sample_auxin)
        else:
            # Later rounds: double the auxin density, shrink the scales.
            sample_auxin = sphere_init_config(fovea_radius = fovea_radius,lens_depth = lens_depth,num_pts = 2**iter_count*init_num_pts,inner_rad = inner_rad,outer_rad = outer_rad)
            D_step = D_step/(2**iter_count);death_dist = death_dist/(2**iter_count)
            init_sample = np.vstack([init_sample,sample_auxin])
        #print("sampled points are: \n");print(sample_auxin)
        #set up auxin-vein node distance chart
        if iter_count == 0:
            auxin_vein_dists = [geodesic_dist(pt_list[0],s) for s in sample_auxin]
            auxin_min_dists = [[0,d] for d in auxin_vein_dists ]
        else:
            auxin_vein_dists = np.array([[geodesic_dist(pt,s) for s in sample_auxin] for pt in pt_list])
            auxin_min_dists = []
            for s_idx in range(len(sample_auxin)):
                argmin_idx = np.argmin(auxin_vein_dists[:,s_idx])
                auxin_min_dists.append([argmin_idx,auxin_vein_dists[argmin_idx,s_idx]])
        # auxin_min_dists[j] = [index of nearest vein node, distance to it]
        auxin_min_dists = np.array(auxin_min_dists)
        #print("sampled point dists are: \n");print(auxin_vein_dists)
        #print("sampled point dists are: \n");print(auxin_min_dists)
        count = 0
        #"while there are auxin nodes"
        while((count < max_iter) and (len(sample_auxin)>0)):
            if noisy:
                print("at step {}".format(count))
            count += 1
            #manually find the nearest neighbor
            # nns[i] collects the auxin indices whose nearest vein node is i.
            nns = [[] for pt in pt_list]
            #print("getting nearest neighbors for {} auxin".format(len(sample_auxin)))
            for i in range(len(sample_auxin)):
                #match the nearest neighbor of an auxin node to the index of said auxin node
                nns[int(auxin_min_dists[i][0])].append(i)
            #now compute the step vectors
            #print("the to grow indicators are {}".format(to_grow_indicator))
            for i in range(len(pt_list))[::-1]:
                #print("the nearest neighbors for {} are {}".format(i,nns[i]))
                #print("pt {} s nearest neighbors are: {}".format(i,nns[i]))
                if len(nns[i])>0:
                    #check if the given point is a head or not
                    #if not, generate a new branch
                    if to_grow_indicator[i] == 0:
                        branches.append([i])
                        branch_membership[i].append(len(branches)-1)
                    #get the step vector for the grown point
                    #geometry_type = "average" means
                    if growth_type == "average":
                        if weighted_stepsizes:
                            step_vec = sum([(1./len(nns[i]))*tangent_vector(pt_list[i],sample_auxin[k],normalized = True) for k in nns[i]])
                            vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
                        else:
                            step_vec = sum([(1./len(nns[i]))*tangent_vector(pt_list[i],sample_auxin[k],normalized = False) for k in nns[i]])
                            vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
                    elif growth_type == "nearest":
                        #print(auxin_vein_dists)
                        #print(auxin_vein_dists[i])
                        if len(pt_list) == 1:
                            nearest_auxin = 0
                        else:
                            #print(auxin_vein_dists.shape)
                            #print(np.array(auxin_min_dists).shape)
                            #print(auxin_min_dists)
                            #print(nns[i])
                            #print(len(sample_auxin))
                            nearest_auxin = np.argmin([auxin_vein_dists[i][k] for k in nns[i]])
                        #now construct the step vector
                        if weighted_stepsizes:
                            step_vec = tangent_vector(pt_list[i],sample_auxin[nns[i][nearest_auxin]],normalized = True)
                            vprime = exp_map(pt_list[i],[D_step*step_vec[0],D_step*step_vec[1]])
                        else:
                            step_vec = tangent_vector(pt_list[i],sample_auxin[nns[i][nearest_auxin]],normalized = False)
                            vprime = exp_map(pt_list[i], [D_step*step_vec[0],D_step*step_vec[1]])
                    #if the new point is far enough away from the fovea:
                    if np.linalg.norm(vprime[1]*vprime[0] - np.array([0.,0.,-1.])) > fovea_radius:
                        #print("growing from {} to {}".format(pt_list[i],vprime))
                        #add the new point to the list of points
                        # NOTE(review): vstack of [direction, radius] pairs relies on
                        # object arrays; newer NumPy may need dtype=object -- verify.
                        pt_list = np.vstack([pt_list,vprime])
                        #change the old grow indicator to 0
                        to_grow_indicator[i] = 0
                        #change the new grow indicator to 1
                        to_grow_indicator = np.append(to_grow_indicator,1)
                        #add branch information for this new branch
                        branch_membership.append([branch_membership[i][-1]])
                        branches[branch_membership[i][-1]].append(len(to_grow_indicator)-1)
                        #update distance array
                        dists = np.array([geodesic_dist(vprime,s) for s in sample_auxin])
                        #print("distances to auxin for vprime are: {}".format(dists))
                        #set up auxin-vein node distance chart
                        auxin_vein_dists = np.vstack([auxin_vein_dists,dists])
                        #update min distances
                        for j in range(len(sample_auxin))[::-1]:
                            if dists[j] < auxin_min_dists[j][1]:
                                #update the min distance array
                                #sample_auxin = np.delete(sample_auxin,j,0)
                                auxin_min_dists[j][1] = dists[j]
                                auxin_min_dists[j][0] = len(to_grow_indicator)-1
            #prune auxin nodes
            for j in range(len(sample_auxin))[::-1]:
                #first check whether or not the new point got close enough to an auxin node
                #print(dists)
                if auxin_min_dists[j][1] < death_dist:
                    #delete auxin
                    sample_auxin = np.delete(sample_auxin,j,0)
                    auxin_vein_dists = np.delete(auxin_vein_dists,j,1)
                    auxin_min_dists = np.delete(auxin_min_dists,j,0)
            #print("to grow indicator is: \n"); print(to_grow_indicator)
            #print("new point dists are: \n");print(auxin_vein_dists)
            #print("new point dists are: \n");print(auxin_min_dists)
            if save_time_data:
                time_data.append([pt_list,list(branches),list(branch_membership)])
        #while there are auxin nodes left or max_counts has been exceeded
    if save_time_data:
        return np.array(pt_list), branches, branch_membership, init_sample,time_data
    else:
        return np.array(pt_list), branches, branch_membership, init_sample
def convert_from_product(pt_list):
    """Convert (unit-direction, radius) product-manifold points to
    Cartesian 3-vectors by scaling each direction by its radius."""
    return np.array([radius * np.array(direction)
                     for direction, radius in pt_list])
def get_vein_radii(num_pts, branches,init_radii = 0.05,branch_power = 3.):
vein_radii = | np.zeros(num_pts) | numpy.zeros |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import matplotlib.pyplot as plt
import math
import sys
def rgb2hsi(rgb):
    """Convert an 8-bit RGB image (OpenCV layout) to HSI.

    Output channels are scaled to OpenCV-style ranges: H in [0, 179],
    S and I in [0, 255].
    NOTE(review): for pure-black pixels (divisor == 0) the chromaticity
    arrays r, g, b keep their np.empty garbage and may feed into S below --
    verify intended behaviour.
    """
    # split channels
    R,G,B= cv2.split(rgb)
    # normalise to [0, 1]
    R =R/255
    G =G/255
    B =B/255
    # image dimensions
    x=R.shape[0]
    y=R.shape[1]
    # allocate output arrays
    r=np.empty([x,y])
    g=np.empty([x,y])
    b=np.empty([x,y])
    H=np.empty([x,y])
    S=np.empty([x,y])
    I=np.empty([x,y])
    # per-pixel conversion
    for i in range(0, x):
        for j in range(0,y):
            # chromaticity coordinates r + g + b = 1
            divisor=R[i,j]+G[i,j]+B[i,j]
            I[i,j]=divisor/3.0
            if (divisor != 0.0):
                r[i,j]=R[i,j]/divisor
                g[i,j]=G[i,j]/divisor
                b[i,j]=B[i,j]/divisor
            # compute hue and saturation
            if (R[i,j]==G[i,j]) and (G[i,j]==B[i,j]):
                # achromatic pixel: hue/saturation undefined, use 0
                H[i,j]=0
                S[i,j]=0
            else:
                argum=(R[i,j]-G[i,j])*(R[i,j]-G[i,j])+(R[i,j]-B[i,j])*(G[i,j]-B[i,j])
                num=0.5*((R[i,j]-G[i,j]) + (R[i,j]-B[i,j]))
                w=num/math.sqrt(argum)
                # clamp the cosine argument against round-off
                if (w>1): w=1
                if (w<-1): w=-1
                H[i,j]=math.acos(w)
                # acos is always >= 0, so this branch should be unreachable
                if H[i,j] < 0:
                    print('b')
                    break
                if B[i,j] > G[i,j]:
                    H[i,j]=2*math.pi-H[i,j]
                # S = 1 - 3 * min(r, g, b)
                if (r[i,j] <= g[i,j]) & (r[i,j] <= b[i,j]):
                    S[i,j]=1-3*r[i,j]
                if (g[i,j] <= r[i,j]) & (g[i,j] <= b[i,j]):
                    S[i,j]=1-3*g[i,j]
                if (b[i,j] <= r[i,j]) & (b[i,j] <= g[i,j]):
                    S[i,j]=1-3*b[i,j]
    # rescale to OpenCV ranges
    H*=179
    S*=255
    I*=255
    hsi=cv2.merge([H,S,I])
    return hsi
def hsi2rgb(hsi):
H,S,I = cv2.split(hsi)
H=H/179
S=S/255
I=I/255
x=H.shape[0]
y=H.shape[1]
R=np.empty([x,y])
G=np.empty([x,y])
B=np.empty([x,y])
r=np.empty([x,y])
g=np.empty([x,y])
b=np.empty([x,y])
for i in range(0, x):
for j in range(0,y):
if (S[i,j] >1): S[i,j]=1
if (I[i,j] >1): I[i,j]=1
if (S[i,j] ==0):
R[i,j]=I[i,j]
G[i,j]=I[i,j]
B[i,j]=I[i,j]
else:
ums=(1-S[i,j])/3
if (H[i,j]>=0) and (H[i,j]<np.radians(120)):
b[i,j]=ums
r[i,j]= 1/3*(1+(S[i,j]*np.cos(H[i,j])/np.cos(np.radians(60)-H[i,j])))
g[i,j]=1-r[i,j]-b[i,j]
elif (H[i,j]>=np.radians(120)) and (H[i,j]<np.radians(240)):
H[i,j]-=np.radians(120)
r[i,j]=ums
g[i,j]=1/3*(1+(S[i,j]*np.cos(H[i,j])/np.cos(np.radians(60)-H[i,j])))
b[i,j]=1-r[i,j]-g[i,j]
elif (H[i,j]>=np.radians(240)) and (H[i,j]< | np.radians(360) | numpy.radians |
#!/usr/bin/env python
# work with MPU6050_kalman.ino
from PyQt5 import QtCore, QtWidgets, uic, QtGui
from pyqtgraph import PlotWidget
from PyQt5.QtWidgets import QApplication, QVBoxLayout
import pyqtgraph as pg
import numpy as np
import datetime
import serial
import sys
import os
import time
from time import sleep
from colorama import Fore, Back, Style
import csv
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
import matplotlib.pyplot as plt
import random
import struct
# Single-byte command codes for the companion firmware; their exact
# semantics are defined in MPU6050_kalman.ino (see header comment).
start_cmd = 0x11
interval_cmd = 0x22
sleep_cmd = 0x33
aq_cmd = 0x44
nbins = 20      # histogram bin count (appears unused in this view)
data_len = 500  # rolling-window length plotted per gyro axis
class bcolors:
    """ANSI terminal escape codes for coloured/styled console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def read_current_time():
    """Return the current UTC time formatted as 'UTC:<day-of-year>/<HH:MM:SS>'."""
    utc_now = datetime.datetime.now(datetime.timezone.utc)
    return utc_now.strftime("%Z:%j/%H:%M:%S")
class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, *args, **kwargs):
        """Build the main window: load the Qt Designer UI, style the three
        gyro plots, wire the buttons, and prepare the rolling data buffers."""
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setFixedSize(851, 630)
        #Load the UI Page
        uic.loadUi('drift.ui', self)
        # White backgrounds for the three pyqtgraph plot widgets.
        self.gyrox.setBackground('w')
        self.gyroy.setBackground('w')
        self.gyroz.setBackground('w')
        self.serial_ports_list = []
        self.serial_speed = [1000000]
        # Ref: https://stackoverflow.com/questions/59898215/break-an-infinit-loop-when-button-is-pressed
        # 5 ms poll timer; each tick calls read_port() to drain the serial buffer.
        self.timer = QtCore.QTimer(self, interval=5, timeout=self.read_port)
        self.ser=serial.Serial()
        # Button wiring.
        self.scan_btn.clicked.connect(self.scan)
        self.open_btn.clicked.connect(self.open_port)
        self.close_btn.clicked.connect(self.close)
        self.start_btn.clicked.connect(self.start_read_port)
        self.stop_btn.clicked.connect(self.stop_read_port)
        self.calc_btn.clicked.connect(self.calc_results)
        # Rolling windows of the last data_len samples per axis.
        self.gyrox_data = [0] * data_len
        self.gyroy_data = [0] * data_len
        self.gyroz_data = [0] * data_len
        self.time_index=list(range(1, data_len+1))
        for x in self.serial_speed:
            self.speed_comboBox.addItem(str(x))
    def scan(self):
        """Populate the serial-port combo box with the ports currently present."""
        # Pick the platform-specific comports() implementation.
        if os.name == 'nt': # sys.platform == 'win32':
            from serial.tools.list_ports_windows import comports
        elif os.name == 'posix':
            from serial.tools.list_ports_posix import comports
        # NOTE(review): this loop only unpacks each tuple and has no effect.
        for info in comports(False):
            port, desc, hwid = info
        iterator = sorted(comports(False))
        self.serial_ports_list = [] # clear the list first
        for n, (port, desc, hwid) in enumerate(iterator, 1):
            # Fixed-width port name plus a trailing newline (stripped in open_port).
            self.serial_ports_list.append("{:20}\n".format(port))
        ports_num = len(self.serial_ports_list)  # NOTE(review): unused here
        self.serial_comboBox.clear() # clear the list first
        for x in self.serial_ports_list:
            self.serial_comboBox.addItem(x)
        # Reset protocol state ids; their consumers are not visible in this file.
        self.start_id = 0
        self.interval_id = 0
        self.sleep_id = 0
def open_port(self):
index = self.serial_comboBox.currentIndex()
serial_ports_port = self.serial_ports_list[index][:-1] # delete the \n at the end
index = self.speed_comboBox.currentIndex()
self.ser = serial.Serial(serial_ports_port, self.serial_speed[index])
current_time = read_current_time()
print(current_time, self.ser.name + " Opened @ " + str(self.serial_speed[index]) + "bps")
def start_read_port(self):
self.gyrox_data = [0] * data_len
self.gyroy_data = [0] * data_len
self.gyroz_data = [0] * data_len
self.data_num = 0
self.timer.start() # Start the timer
    def stop_read_port(self):
        """Halt periodic polling; read_port() is no longer invoked until restarted."""
        self.timer.stop() # Stop the timer
def read_port(self):
if (self.ser.inWaiting()):
current_time = read_current_time()
gyro = self.ser.read(24) # 3 double value: gyrox, gyroy, gyroz
gyrox_i = gyro[0:8]
gyroy_i = gyro[8:16]
gyroz_i = gyro[16:24]
gyrox_d=struct.unpack('d', gyrox_i)[0]
gyroy_d=struct.unpack('d', gyroy_i)[0]
gyroz_d=struct.unpack('d', gyroz_i)[0]
# print(current_time, " ---> ", gyrox_d, gyroy_d, gyroz_d)
self.gyrox_data.pop(0)
self.gyrox_data.append(gyrox_d)
self.gyrox.clear()
self.gyrox.plot(self.time_index, self.gyrox_data, pen=pg.mkPen('b', width=2))
self.gyroy_data.pop(0)
self.gyroy_data.append(gyroy_d)
self.gyroy.clear()
self.gyroy.plot(self.time_index, self.gyroy_data, pen=pg.mkPen('r', width=2))
self.gyroz_data.pop(0)
self.gyroz_data.append(gyroz_d)
self.gyroz.clear()
self.gyroz.plot(self.time_index, self.gyroz_data, pen=pg.mkPen('g', width=2))
def calc_results(self):
print(np.std(self.gyrox_data))
print(np.mean(self.gyrox_data))
print( | np.std(self.gyroy_data) | numpy.std |
from __future__ import division, print_function
import numpy as np
from scipy.signal import flattop
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import scipy.linalg
import scipy.ndimage as ndi
"""
This is for operations on arrays, which will be extended to a class of image data.
"""
#Functions for manipulating images
def enhance_contrast(array, p_low, p_high):
    """Saturate p_low percent of the darkest and p_high percent of the
    brightest pixels (half of each percentage at either end) by clipping to
    the matching order statistics, spreading the histogram over a larger
    display range."""
    n_total = np.prod(np.shape(array))
    n_low = int(np.floor(n_total * p_low / 200))
    n_high = int(np.floor(n_total * p_high / 200))
    sorted_vals = np.sort(array, axis=None)
    return np.clip(array, sorted_vals[n_low], sorted_vals[-n_high - 1])
def fft(im):
    """2-D discrete Fourier transform with the DC component shifted to the centre."""
    spectrum = np.fft.fft2(im)
    return np.fft.fftshift(spectrum)
def pfft(im):
    """Centred FFT of the periodic component of `im`.

    Appears to implement a periodic-plus-smooth decomposition (Moisan-style):
    the "smooth" part solves a Poisson equation driven by the wrap-around
    boundary jumps, which suppresses the cross-shaped spectral artefacts a
    plain FFT produces on non-periodic images -- TODO confirm reference.
    """
    [rows,cols] = np.shape(im)
    #Compute boundary conditions
    # s holds the intensity jumps across the image's periodic seams.
    s = np.zeros( np.shape(im) )
    s[0,:] = im[0,:] - im[-1,:]
    s[-1,:] = -s[0,:]
    s[:,0] = s[:,0] + im[:,0] - im[:,-1]
    s[:,-1] = s[:,-1] - im[:,0] + im[:,-1]
    #Create grid for computing Poisson solution
    [cx, cy] = np.meshgrid(2*np.pi*np.arange(0,cols)/cols, 2*np.pi*np.arange(0,rows)/rows)
    #Generate smooth component from Poisson Eq with boundary condition
    # D is the spectrum of the discrete Laplacian.
    D = (2*(2 - np.cos(cx) - np.cos(cy)))
    D[0,0] = np.inf # Enforce 0 mean & handle div by zero
    S = np.fft.fft2(s)/D
    P = np.fft.fft2(im) - S # FFT of periodic component
    return np.fft.fftshift(P)
def gauss2D(data, sigma):
    """Smooth `data` with an isotropic Gaussian of standard deviation `sigma`."""
    return ndi.gaussian_filter(data, sigma)
def zoom(array, amt, **kwargs):
    """
    Nearest-neighbour rescale of a 2-D image or a 3-D (H, W, channels) stack.

    Non-3-D input is forwarded directly to scipy.ndimage.zoom; for 3-D input
    each channel (last axis) is zoomed independently in-plane.

    Fixes two defects of the previous version: it used the long-deprecated
    ``scipy.ndimage.interpolation`` namespace, and it preallocated the 3-D
    output as zeros of shape (int(x*amt), int(y*amt)) while ndi.zoom rounds
    its output shape, which broke for fractional zoom factors.
    """
    if array.ndim != 3:
        return ndi.zoom(array, amt, order=0, **kwargs)
    channels = [ndi.zoom(array[:, :, i], amt, order=0, **kwargs)
                for i in range(array.shape[2])]
    # astype(float) keeps the old behaviour, where channels were written
    # into a float64 zeros buffer.
    return np.stack(channels, axis=-1).astype(float)
def remove_brights(array, threshold=False):
inthreshold=threshold
if inthreshold==False:
inthreshold=10
threshold = np.mean(array) + inthreshold*np.std(array)
brights = array>threshold
print("Found {} bright pixels".format(np.count_nonzero(brights)))
while np.count_nonzero(brights)>0.0001*np.prod(np.shape(array)):
#print("num std devs above mean is {}".format(inthreshold))
print('too many brights')
inthreshold+=1
threshold = | np.mean(array) | numpy.mean |
# coding=utf-8
# pylint:disable=too-many-locals,too-many-branches
"""
Module segmented volume class, to be used for
simulation of 2D segmented maps of a binary volume
"""
import json
import os
import numpy as np
import matplotlib.pyplot as plt
import pycuda.driver as drv
import pycuda.gpuarray as gpua
from pycuda.compiler import SourceModule
from scipy.ndimage.morphology import binary_fill_holes as fill
from scipy.ndimage.morphology import binary_erosion as erode
from scipy.ndimage.morphology import binary_dilation as dilate
import fanslicer.pycuda_simulation.mesh as mesh
import fanslicer.pycuda_simulation.cuda_reslicing as cres
class SegmentedVolume:
"""
Class that holds a segmented volume, with both
meshes and 3D binary volumes
"""
    def __init__(self,
                 mesh_dir,
                 config_dir,
                 image_num=1,
                 downsampling=1,
                 voxel_size=1.0):
        """
        Create segmented volume object
        :param mesh_dir: directory with vtk models used in slicing
        :param config_dir: json file with reslicing parameters and
        model names to be used
        :param voxel_size: isotropic voxel size considered to
        generate the binary volumes for each vtk model
        :param image_num: number of images to consider for preallocation
        :param downsampling: downsampling factor on image dimensions
        :raises ValueError: if voxel_size is not positive or config_dir
        is not an existing file
        """
        self.binary_volumes = dict()
        if voxel_size > 0:
            self.voxel_size = voxel_size
        else:
            raise ValueError("Voxel size must be positive!")
        # Load meshes if a directory is given
        self.config = None
        self.meshes = dict()
        if os.path.isfile(config_dir):
            # NOTE(review): file handle is never closed -- consider a with-block.
            config_file = open(config_dir)
            self.config = json.load(config_file)
        else:
            raise ValueError("No valid config file!")
        # First, load meshes to constructor
        self.load_vtk_from_dir(mesh_dir)
        # Then, load or generate simulation binary volumes
        self.load_binary_volumes(mesh_dir)
        # Now, preallocate variables to speed up reslicing
        # Call function to preallocate relevant variables
        # to existing lists, first the GPU ones
        self.g_variables = []
        # Image dimensioning parameters
        self.image_variables = []
        self.blockdim = np.array([1, 1])
        # Initialise image num and downsample
        self.image_num = None
        self.downsampling = None
        # Now run allocation to set these vars
        self.preallocate_bin_gpu_var(image_num=image_num,
                                     downsampling=downsampling)
        # Read kernel source code in C++
        self.kernel_code = cres.RESLICING_KERNELS
def load_vtk_from_dir(self,
mesh_dir):
"""
Loads vtk files into mesh3D objects, according
to self.config
:param mesh_dir: directory with vtk files
"""
if self.config is None:
raise ValueError("SegmentedVolume object has no config")
if not os.path.isdir(mesh_dir):
raise ValueError("No valid mesh directory")
# Get relevant files from the config
meshes_to_load = self.config["models"]["files"]
mesh_dict = {}
for file in meshes_to_load:
mesh_file = os.path.join(mesh_dir, file + '.vtk')
# Allocate mesh to mesh list if it exists
if os.path.isfile(mesh_file):
mesh_dict[file.replace(" ", "_")] =\
mesh.load_mesh_from_vtk(mesh_file)
else:
raise ValueError(file + '.vtk not found')
self.meshes = mesh_dict
return 0
def load_binary_volumes(self,
data_dir):
"""
Load or generate binary models from relevant meshes
If binary volumes do not exist in data dir, a binary volume
is generated for every relevant mesh defined in config
:param data_dir: directory from where binary volumes
is loaded/saved
"""
if not os.path.isdir(data_dir):
raise ValueError("No valid data directory")
# Prepare dictionary that contains models
volume_dict = dict()
for model in range(len(self.config['simulation']
['simulation_models'])):
# Check if model is intended for simulation
if self.config['simulation']['simulation_models'][model]:
model_name = self.config['models']['files'][model]
model_name = model_name.replace(" ", "_")
# Get a bounding box and define volume margin
margin = np.array([20, 20, 20])
bound_box = self.meshes[model_name].get_bounding_box()
bound_box[0, :] = np.floor(bound_box[0, :]) - margin
bound_box[1, :] = np.floor(bound_box[1, :]) + margin
# Check if a binary map already exists
binary_name = 'binary_' + model_name + '.npy'
if os.path.isfile(data_dir + binary_name):
# Load a pre-saved model
volume = np.load(data_dir + binary_name)
print('Loaded ' + binary_name)
else:
# Generate a model
volume = voxelise_mesh(self.meshes[model_name],
self.voxel_size,
margin,
save_dir=data_dir,
file_name=binary_name)
# Allocate to dictionary with bounding box
volume_dict[model_name] = [volume, bound_box]
# Allocate final results
self.binary_volumes = volume_dict
return 0
def preallocate_bin_gpu_var(self,
image_num,
downsampling):
"""
Function to generate local gpu variables that will
be used for simulation from binary volumes. Variable
sizes depend on the config parameters.
g_ prefix indicates gpu variables
:param image_num: maximum number of images to be simulated
:param downsampling: downsampling value on image dimensions
per call
"""
# First check if current image variables are empty or not,
# (if they have been set before). If they are not, reset
if self.g_variables:
self.g_variables = []
if self.image_variables:
self.image_variables = []
# Check if downsampling is at least 1
if downsampling < 1:
raise ValueError("Downsampling must be greater than 1")
# Check if maximum number of images is valid
if not isinstance(image_num, int) or image_num <= 0:
raise ValueError('image_num must be positive integer')
# Now, choose between curvilinear and linear array
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
# For the curvilinear case, get
# geometrical parameters of fan shape as a float:
# 0-Angular ray resolution, 1-ray depth resolution, 2-angle aperture
# 3-ray depth, 4-ray offset to origin, 5-ray offset to image top
fan_parameters = np.array(self.config["simulation"]["fan_geometry"])
fan_parameters[0] = np.deg2rad(fan_parameters[0])
fan_parameters[2] = np.deg2rad(fan_parameters[2])
fan_parameters[3:6] = fan_parameters[3:6] * fan_parameters[1]
fan_parameters = fan_parameters.astype(np.float32)
# Append them to image variables (becomes index 0)
self.image_variables.append(fan_parameters)
# Get point cloud dimensions from fan parameters, necessary to
# know how many points will be sampled and used for intersection
coord_w = len(np.arange((-fan_parameters[2] / 2).astype(np.float32),
(fan_parameters[2] / 2).astype(np.float32),
fan_parameters[0]))
coord_h = len(np.arange(fan_parameters[4],
fan_parameters[4] + fan_parameters[3],
fan_parameters[1]))
# Append to image variables (becomes index 1)
slice_dim = np.array([coord_w, coord_h, image_num]).astype(np.int32)
self.image_variables.append(slice_dim)
# Through downsampling, obtain the output image dimensions
# and append (becomes index 2)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 3)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Knowing these dimensions, now append preallocate all
# GPU variables. First, 2D and 3D positions of the fans
# (become index 0 and 1, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The 3D positions, with the same size (becomes index 1)
self.g_variables.\
append(gpua.GPUArray((1, np.prod(slice_dim) * 3),
dtype=np.float32))
# The fan intersection with the volume (becomes index 2)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(slice_dim)),
dtype=np.int32))
# Now, the outputs, with image_dim as dimension, both images
# and fan shape outline used for interpolation (become
# index 3 and 4, respectively)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=bool))
# Finally, determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(coord_w, coord_h)
self.blockdim = np.array([blockdim_x, blockdim_y])
elif transducer_type == "linear":
# For the linear case, variable definition is simpler
# Get rectangular plane dimensions first, and append
# to image variables (becomes index 0)
image_dim_2d = np.array(self.config["simulation"]
["image_dimensions"])
image_dim = np.append(image_dim_2d / downsampling, image_num) \
.astype(np.int32)
self.image_variables.append(image_dim)
# Do the same for the image pixel size (becomes index 1)
pixel_size = np.array(self.config["simulation"]["pixel_size"])
pixel_size = (downsampling * pixel_size).astype(np.float32)
self.image_variables.append(pixel_size)
# Now preallocate gpu variables, first the positions
# (becomes index 0)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim) * 3),
dtype=np.float32))
# Secondly, volume intersections that do not
# need to be warped in this case (becomes index 1)
self.g_variables. \
append(gpua.GPUArray((1, np.prod(image_dim)),
dtype=np.int32))
# Finally, determine optimal blocksize for kernels
blockdim_x, blockdim_y = cres.get_block_size(image_dim[0],
image_dim[1])
self.blockdim = np.array([blockdim_x, blockdim_y])
else:
# In case the transducer is another option
raise ValueError("No valid transducer type!")
# To avoid repeating allocation code, allocate volumes now
# The volumes to be sliced, in a 1D array. These are added
# at the end, as their indexes start from 5 in curvilinear case,
# and 2 linear case
for model in range(len(self.config["simulation"]["simulation_models"])):
# Check if model index m is to be considered
if self.config["simulation"]["simulation_models"][model]:
# Define its dictionary key
model_name = self.config["models"]["files"][model]
model_name = model_name.replace(" ", "_")
# Reshape it, and append it as a variable
volume = self.binary_volumes[model_name][0].copy()
volume_dim = volume.shape
volume = np.swapaxes(volume, 0, 1)
volume = volume.reshape([1, np.prod(volume.shape)], order="F")
self.g_variables.append(gpua.to_gpu(volume.astype(bool)))
# Also, append their bound box, shape and display color
# to image variables becomes a variable index starting
# from 4 in curvilinear, and 2 in linear (a tuple of 3 arrays)
model_color = self.config["simulation"]["colors"][model]
self.image_variables.append([self.binary_volumes[model_name][1],
volume_dim, model_color])
self.image_num = image_num
self.downsampling = downsampling
def simulate_image(self,
poses=np.eye(4),
image_num=1,
out_points=False):
"""
Function that generates a set of images from multiple
segmented models stored in self.config. Uses the function
slice_volume or linear_slice_volume
:param poses: array with probe poses
:param image_num: number of images to simulate
:param out_points: bool to get sampling positions or not
:return: positions in 3D, stack of resulting images with
multiple labels, and stack with colored images for
visualisation
"""
# Check if number of images matches number of poses
if poses.shape[1] / 4 != image_num:
raise ValueError("Input poses do not match image number!")
# In order to not fix the number of images to be used, check
# if image num is the same as the one considered by the object
# If they differ, preallocate again
current_image_num = self.image_num
if image_num != current_image_num:
self.preallocate_bin_gpu_var(image_num=image_num,
downsampling=self.downsampling)
print("Number of images was changed from " +
str(current_image_num) + " to " + str(image_num))
# Get config parameters for the simulation
transducer_type = self.config["simulation"]["transducer"]
if transducer_type == "curvilinear":
image_dim = self.image_variables[2]
aux_index = 4
else:
# Linear case
image_dim = self.image_variables[0]
aux_index = 2
voxel_size = np.array([self.voxel_size,
self.voxel_size,
self.voxel_size])
# Prepare outputs
visual_images = np.zeros((image_dim[1], image_dim[0], 3, image_num))
simulation_images = np.zeros((image_dim[1], image_dim[0], image_num))
# Go through the models that should be intersected
for model in range(len(self.binary_volumes)):
# Go through each stored model
if transducer_type == "curvilinear":
points, images, mask = slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
model,
voxel_size,
poses,
out_points)
else:
points, images = linear_slice_volume(
self.kernel_code,
self.image_variables,
self.g_variables,
self.blockdim,
model,
voxel_size,
poses,
out_points)
# Add images to output
simulation_images = simulation_images\
+ images.astype(int)*(model + 1)
# Create colored images, just for visualisation
model_color = self.image_variables[aux_index + model][2]
visual_images[:, :, 0, :] = visual_images[:, :, 0, :] + \
images * model_color[0] / 255
visual_images[:, :, 1, :] = visual_images[:, :, 1, :] + \
images * model_color[1] / 255
visual_images[:, :, 2, :] = visual_images[:, :, 2, :] + \
images * model_color[2] / 255
# Add grey outline, in case the array is curvilinear
if transducer_type == "curvilinear":
outline = np.repeat(1 - mask[:, :, np.newaxis], 3, axis=2).\
astype(int)*210/255
outline = np.repeat(outline[:, :, :, np.newaxis],
image_num, axis=3)
visual_images = visual_images + outline
return points, simulation_images, visual_images
def show_plane(self,
image_array,
image_index,
point_array):
"""
Show intersection and plane geometry in 3D model
No suitable way of showing meshes, so this method
needs improvements
:param image_array: stack of images to show
:param image_index: stack index of image to be shown
:param point_array: point cloud with stack of plane points
"""
# Get number of points per plane
points_per_plane = int(point_array.shape[0]/image_array.shape[3])
# First, prepare figure
fig = plt.figure()
# Add 3D visualisation subplot
ax_3d = fig.add_subplot(121, projection='3d')
# Get the meshes to be plotted
for m_i in range(len(self.meshes.keys())):
# Add mesh to plot
if self.config["simulation"]["simulation_models"][m_i]:
model_name = self.config["models"]["files"][m_i]\
.replace(" ", "_")
model = self.meshes[model_name]
# Get color and opacity of models
model_color = np.array([self.config["simulation"]
["colors"][m_i]])/255
# model_opacity = np.array([self.config["simulation"]
# ["opacity"][model]])
ax_3d.scatter(model.vertices[0:-1:1, 0],
model.vertices[0:-1:1, 1],
model.vertices[0:-1:1, 2],
color=model_color,
alpha=0.5)
# Add plane point cloud
ax_3d.scatter(point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 0],
point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 1],
point_array[image_index*points_per_plane:
points_per_plane*(image_index + 1):10, 2],
color=[0, 0, 0])
# Add 2D visualisation subplot
ax_2d = fig.add_subplot(122)
ax_2d.imshow(image_array[:, :, :, image_index])
plt.show()
return 0
def voxelise_mesh(input_mesh,
voxel_size,
margin=None,
save_dir=None,
file_name=None):
"""
Method that generates binary volume from an input mesh
:param input_mesh: triangular mesh to be voxelised
:param voxel_size: 3D voxel size
:param margin: 3D vector with additional voxel margin
around the bounding box of the input mesh
:param save_dir: directory to save file
:param file_name: name of file to save
:return: 3D binary volume
"""
if margin is None:
margin = np.array([0, 0, 0])
bound_box = input_mesh.get_bounding_box()
# Add margins
bound_box[0, :] = bound_box[0, :] - margin
bound_box[1, :] = bound_box[1, :] + margin
# Define output size (x, y, z)
dimensions = (np.ceil(bound_box[1, :])
- np.floor(bound_box[0, :]))/voxel_size
# Round and convert to integer
bin_dimensions = np.ceil(dimensions).astype(int)
# Create empty volume
bin_volume = np.zeros(bin_dimensions, dtype=bool)
# Get point coordinates and faces
v_x = input_mesh.vertices[:, 0]
v_y = input_mesh.vertices[:, 1]
v_z = input_mesh.vertices[:, 2]
t_x = v_x[input_mesh.faces]
t_y = v_y[input_mesh.faces]
t_z = v_z[input_mesh.faces]
# Get face/triangles bounding box
tx_min = np.amin(t_x, axis=1)
ty_min = np.amin(t_y, axis=1)
tz_min = np.amin(t_z, axis=1)
tx_max = np.amax(t_x, axis=1)
ty_max = np.amax(t_y, axis=1)
tz_max = np.amax(t_z, axis=1)
# 1-Intersecting XY plane
xyplane_x = np.arange(np.floor(bound_box[0, 0]),
np.ceil(bound_box[1, 0]), voxel_size)
xyplane_y = np.arange(np.floor(bound_box[0, 1]),
np.ceil(bound_box[1, 1]), voxel_size)
# Loop through points with perpendicular ray and store them
inter_xy = np.empty((0, 3), dtype=float)
for x_ind in xyplane_x:
for y_ind in xyplane_y:
# Get intersectable triangles
inter_t = np.asarray(np.where((tx_min <= x_ind)
& (tx_max >= x_ind)
& (ty_min <= y_ind)
& (ty_max >= y_ind)))
# Test each of these triangles for intersection
for t_ind in inter_t[0, :]:
# Define the ray
origin = | np.array([x_ind, y_ind, 0]) | numpy.array |
# -*- coding: utf-8 -*-
import torch
import numpy as np
import torch.nn as nn
from warnings import warn
from copy import deepcopy
from dliplib.reconstructors.base import BaseLearnedReconstructor
from dliplib.utils.models import get_iradonmap_model
class IRadonMapReconstructor(BaseLearnedReconstructor):
HYPER_PARAMS = deepcopy(BaseLearnedReconstructor.HYPER_PARAMS)
HYPER_PARAMS.update({
'scales': {
'default': 5,
'retrain': True
},
'epochs': {
'default': 20,
'retrain': True
},
'lr': {
'default': 0.01,
'retrain': True
},
'skip_channels': {
'default': 4,
'retrain': True
},
'batch_size': {
'default': 64,
'retrain': True
},
'fully_learned': {
'default': False,
'retrain': True
},
'use_sigmoid': {
'default': False,
'retrain': True
},
})
"""
CT Reconstructor that learns a fully connected layer for filtering along
the axis of the detector pixels s, followed by the backprojection
(segment 1). After that, a residual CNN acts as a post-processing net
(segment 2). We use the U-Net from the FBPUnet model.
In the original paper [1], a learned version of the back-
projection layer (sinusoidal layer) is used. This layer introduces a lot
more parameters. Therefore, we added an option to directly use the operator
in our implementation. Additionally, we drop the tanh activation after
    the first fully connected layer, due to bad performance.
In any configuration, the iRadonMap has less parameters than an
Automap network [2].
References
----------
.. [1] <NAME> and <NAME>, 2018,
"Radon Inversion via Deep Learning".
arXiv preprint.
`arXiv:1808.03015v1
<https://arxiv.org/abs/1808.03015>`_
.. [2] <NAME>, <NAME>, <NAME> et al., 2018,
"Image Reconstruction by Domain-Transform Manifold Learning".
Nature 555, 487--492.
`doi:10.1038/nature25988
<https://doi.org/10.1038/nature25988>`_
"""
    def __init__(self, ray_trafo, fully_learned=None, scales=None,
                 epochs=None, batch_size=None, lr=None, skip_channels=None,
                 num_data_loader_workers=8, use_cuda=True, show_pbar=True,
                 fbp_impl='astra_cuda', hyper_params=None, coord_mat=None,
                 **kwargs):
        """
        Parameters
        ----------
        ray_trafo : :class:`odl.tomo.RayTransform`
            Ray transform from which the FBP operator is constructed.
        fully_learned : bool, optional
            Learn the backprojection operator or take the fixed one from astra.
        scales : int, optional
            Number of scales of the post-processing U-Net (a hyper parameter).
        epochs : int, optional
            Number of epochs to train (a hyper parameter).
        batch_size : int, optional
            Batch size (a hyper parameter).
        lr : float, optional
            Learning rate (a hyper parameter).
        skip_channels : int, optional
            Channels in the U-Net skip connections (a hyper parameter).
        num_data_loader_workers : int, optional
            Number of parallel workers to use for loading data.
        use_cuda : bool, optional
            Whether to use cuda for the model.
        show_pbar : bool, optional
            Whether to show tqdm progress bars during the epochs.
        fbp_impl : str, optional
            The backend implementation passed to
            :class:`odl.tomo.RayTransform` in case no `ray_trafo` is specified.
            Then ``dataset.get_ray_trafo(impl=fbp_impl)`` is used to get the
            ray transform and FBP operator.
        coord_mat : array, optional
            Precomputed coordinate matrix for the `LearnedBackprojection`.
            This option is provided for performance optimization.
            If `None` is passed, the matrix is computed in :meth:`init_model`.
        """
        super().__init__(ray_trafo, epochs=epochs, batch_size=batch_size,
                         lr=lr,
                         num_data_loader_workers=num_data_loader_workers,
                         use_cuda=use_cuda, show_pbar=show_pbar,
                         fbp_impl=fbp_impl, hyper_params=hyper_params,
                         **kwargs)
        # NOTE(review): the overrides below mutate the caller-supplied
        # `hyper_params` dict; if `hyper_params` is None and one of these
        # arguments is given this raises TypeError — presumably the base
        # class is expected to supply a dict, confirm against
        # BaseLearnedReconstructor.
        if scales is not None:
            hyper_params['scales'] = scales
            # NOTE(review): `hyper_params` is an explicit parameter, so it
            # never lands in **kwargs — this warning looks unreachable.
            if kwargs.get('hyper_params', {}).get('scales') is not None:
                warn("hyper parameter 'scales' overridden by constructor " +
                     "argument")
        if skip_channels is not None:
            hyper_params['skip_channels'] = skip_channels
            if kwargs.get('hyper_params', {}).get('skip_channels') is not None:
                warn("hyper parameter 'skip_channels' overridden by " +
                     "constructor argument")
        if fully_learned is not None:
            hyper_params['fully_learned'] = fully_learned
            if kwargs.get('hyper_params', {}).get('fully_learned') is not None:
                warn("hyper parameter 'fully_learned' overridden by " +
                     "constructor argument")

        self.coord_mat = coord_mat
    # Convenience properties exposing individual hyper parameters as
    # plain attributes backed by self.hyper_params.

    def get_skip_channels(self):
        # channels in the U-Net skip connections
        return self.hyper_params['skip_channels']

    def set_skip_channels(self, skip_channels):
        self.hyper_params['skip_channels'] = skip_channels

    skip_channels = property(get_skip_channels, set_skip_channels)

    def get_scales(self):
        # number of scales (depth) of the post-processing U-Net
        return self.hyper_params['scales']

    def set_scales(self, scales):
        self.hyper_params['scales'] = scales

    scales = property(get_scales, set_scales)

    def get_fully_learned(self):
        # whether the backprojection operator is learned
        return self.hyper_params['fully_learned']

    def set_fully_learned(self, fully_learned):
        self.hyper_params['fully_learned'] = fully_learned

    fully_learned = property(get_fully_learned, set_fully_learned)
    def init_model(self):
        """Construct the iRadonMap network from the current hyper
        parameters and move it to the configured device."""
        self.model = get_iradonmap_model(
            ray_trafo=self.ray_trafo, fully_learned=self.fully_learned,
            scales=self.scales, skip=self.skip_channels,
            use_sigmoid=self.hyper_params['use_sigmoid'],
            coord_mat=self.coord_mat)
        if self.use_cuda:
            # DataParallel spreads each batch over all visible GPUs
            self.model = nn.DataParallel(self.model).to(self.device)
# def init_optimizer(self, dataset_train):
# self.optimizer = torch.optim.RMSprop(self.model.parameters(),
# lr=self.lr, momentum=0.9)
def _reconstruct(self, observation):
self.model.eval()
obs_tensor = torch.from_numpy(
| np.asarray(observation) | numpy.asarray |
"""
################################################################################
Parallel-and-asynchronous Stitching Script - Parallel version
Author: <NAME>
This script pulls the data generated through TissueCyte (or another microscope system) and
can perfom image averaging correction on the images if requested, before calling ImageJ
from the command line to perform the stitching. You will need to have the plugin script
OverlapY.ijm installed in ImageJ in order for the difference in the X and Y overlap to be
registered. Otherwise the X overlap will be used for both.
The pipeline has been sped up in some areas by parallelising some functions.
Installation:
1) Navigate to the folder containing the parasyncstitchGM.py
2) Run 'pip install -r requirements.txt'
Instructions:
1) Run the script in a Python IDE (e.g. for Python 3 > exec(open('parasyncstitchicGM_v2.py').read()))
2) Fill in the parameters that you are asked for
Note: You can drag and drop folder paths (works on MacOS) or copy and paste the paths
Note: The temporary directory is required to speed up ImageJ loading of the files
Important updates:
06.03.19 - Updated the overlap and crop parameters to improve the image average result and
tiling artefacts.
11.03.19 - Included default values and parameter search from Mosaic file.
02.05.19 - Python 3 compatible
14.05.19 - Added slack integration
31.01.20 - Changed option for 16-bit output with improved scaling
05.02.20 - Removed 16-bit
05.02.20 - Added feathering to tile edge and stitching with max intensity
10.02.20 - Added average tile from flatfield imaging and uses that for correction
################################################################################
"""
import cv2, os, sys, warnings, time, glob, errno, subprocess, shutil, readline, re, tempfile, random
import numpy as np
import tifffile
from PIL import Image
from multiprocessing import Pool, cpu_count, Array, Manager
from functools import partial
from datetime import date
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
warnings.simplefilter('ignore', Image.DecompressionBombWarning)
Image.MAX_IMAGE_PIXELS = 1000000000
#=============================================================================================
# Slack notification
#=============================================================================================
#def slack_message(text, channel, username):
# from urllib import request, parse
# import json
#
# post = {
# "text": "{0}".format(text),
# "channel": "{0}".format(channel),
# "username": "{0}".format(username),
# "icon_url": "https://github.com/gm515/gm515.github.io/blob/master/Images/imperialstplogo.png?raw=true"
# }
#
# try:
# json_data = json.dumps(post)
# req = request.Request('https://hooks.slack.com/services/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',
# data=json_data.encode('ascii'),
# headers={'Content-Type': 'application/json'})
# resp = request.urlopen(req)
# except Exception as em:
# print("EXCEPTION: " + str(em))
#=============================================================================================
# Function to load images in parallel
#=============================================================================================
def load_tile(file, cropstart, cropend):
if '_00' in file:
try:
tileimage_ch1 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_01'))).crop((cropstart, cropstart+65, cropend, cropend+65)).rotate(90))
tileimage_ch2 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_01'))).crop((cropstart, cropstart+65, cropend, cropend+65)).rotate(90))
tileimage_ch3 = np.array(Image.fromarray(tifffile.imread(file.replace('_00', '_01'))).crop((cropstart, cropstart+65, cropend, cropend+65)).rotate(90))
tileimage = np.maximum(tileimage_ch1, tileimage_ch2)
tileimage = np.maximum(tileimage, tileimage_ch3)
except (ValueError, IOError, OSError):
tileimage = np.zeros((cropend-cropstart, cropend-cropstart))
else:
try:
tileimage = np.array(Image.fromarray(tifffile.imread(file)).crop((cropstart, cropstart+65, cropend, cropend+65)).rotate(90))
except (ValueError, IOError, OSError):
tileimage = | np.zeros((cropend-cropstart, cropend-cropstart)) | numpy.zeros |
# -*- coding: utf-8 -*-
# pylint: disable=C0103
# pylint: disable=C0111
# ignore snakecase warning, missing docstring
"""Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from math import cos, sin
import matplotlib.pyplot as plt
import numpy.random as random
from numpy.random import randn
from numpy import asarray
import numpy as np
from pytest import approx
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
from filterpy.kalman import UnscentedKalmanFilter
from filterpy.kalman import (unscented_transform, MerweScaledSigmaPoints,
JulierSigmaPoints, SimplexSigmaPoints,
KalmanFilter)
from filterpy.common import Q_discrete_white_noise, Saver
import filterpy.stats as stats
DO_PLOT = False
def test_sigma_plot():
    """ Test to make sure sigmas correctly mirror the shape and orientation
    of the covariance array."""

    x = np.array([[1, 2]])
    P = np.array([[2, 1.2],
                  [1.2, 2]])
    kappa = .1

    # a larger kappa spreads the Julier sigma points further from the mean
    # (verified by the max() assertions below)
    sp0 = JulierSigmaPoints(n=2, kappa=kappa)
    sp1 = JulierSigmaPoints(n=2, kappa=kappa*1000)
    sp2 = MerweScaledSigmaPoints(n=2, kappa=0, beta=2, alpha=1e-3)
    sp3 = SimplexSigmaPoints(n=2)

    # test __repr__ doesn't crash
    str(sp0)
    str(sp1)
    str(sp2)
    str(sp3)

    w0 = sp0.Wm
    w1 = sp1.Wm
    w2 = sp2.Wm
    w3 = sp3.Wm

    Xi0 = sp0.sigma_points(x, P)
    Xi1 = sp1.sigma_points(x, P)
    Xi2 = sp2.sigma_points(x, P)
    Xi3 = sp3.sigma_points(x, P)

    # larger kappa must produce a wider spread of points
    assert max(Xi1[:, 0]) > max(Xi0[:, 0])
    assert max(Xi1[:, 1]) > max(Xi0[:, 1])

    if DO_PLOT:
        plt.figure()
        for i in range(Xi0.shape[0]):
            plt.scatter((Xi0[i, 0]-x[0, 0])*w0[i] + x[0, 0],
                        (Xi0[i, 1]-x[0, 1])*w0[i] + x[0, 1],
                        color='blue', label='Julier low $\kappa$')

        for i in range(Xi1.shape[0]):
            plt.scatter((Xi1[i, 0]-x[0, 0]) * w1[i] + x[0, 0],
                        (Xi1[i, 1]-x[0, 1]) * w1[i] + x[0, 1],
                        color='green', label='Julier high $\kappa$')

        for i in range(Xi2.shape[0]):
            plt.scatter((Xi2[i, 0] - x[0, 0]) * w2[i] + x[0, 0],
                        (Xi2[i, 1] - x[0, 1]) * w2[i] + x[0, 1],
                        color='red')

        for i in range(Xi3.shape[0]):
            plt.scatter((Xi3[i, 0] - x[0, 0]) * w3[i] + x[0, 0],
                        (Xi3[i, 1] - x[0, 1]) * w3[i] + x[0, 1],
                        color='black', label='Simplex')

        stats.plot_covariance_ellipse([1, 2], P)
def test_scaled_weights():
    """Mean and covariance weights of Merwe scaled sigma points should
    each sum to approximately 1."""
    for n in range(1, 5):
        for alpha in np.linspace(0.99, 1.01, 100):
            for beta in range(2):
                for kappa in range(2):
                    # NOTE(review): the `beta` and `kappa` loop variables
                    # are not passed to the constructor (fixed 0 and 3-n
                    # are used). This looks deliberate: sum(Wc) is
                    # 2 - alpha**2 + beta, which fails the tolerance for
                    # beta > 0 — confirm before "fixing".
                    sp = MerweScaledSigmaPoints(n, alpha, 0, 3-n)
                    assert abs(sum(sp.Wm) - 1) < 1.e-1
                    assert abs(sum(sp.Wc) - 1) < 1.e-1
def test_julier_sigma_points_1D():
    """A scalar mean/covariance must round-trip through Julier sigma
    points and the unscented transform."""
    points = JulierSigmaPoints(1, 0.)
    weights_m, weights_c = points.Wm, points.Wc

    # with kappa == 0 the mean and covariance weights coincide
    assert np.allclose(weights_m, weights_c, 1e-12)
    assert len(weights_m) == 3

    mean, cov = 5, 9
    sigmas = points.sigma_points(mean, cov)
    ut_mean, ut_cov = unscented_transform(sigmas, weights_m, weights_c, 0)

    # the weighted sum of the sigma points must reproduce the input mean
    recovered = sum(s * w for s, w in zip(sigmas, weights_m))
    assert abs(recovered - mean) < 1.e-12

    # and the transform must recover both mean and covariance
    assert abs(ut_mean[0] - mean) < 1.e-12
    assert abs(ut_cov[0, 0] - cov) < 1.e-12

    assert sigmas.shape == (3, 1)
def test_simplex_sigma_points_1D():
    """A scalar mean/covariance must round-trip through simplex sigma
    points and the unscented transform."""
    points = SimplexSigmaPoints(1)
    weights_m, weights_c = points.Wm, points.Wc

    # simplex weights are uniform, so mean and covariance weights coincide
    assert np.allclose(weights_m, weights_c, 1e-12)
    assert len(weights_m) == 2

    mean, cov = 5, 9
    sigmas = points.sigma_points(mean, cov)
    ut_mean, ut_cov = unscented_transform(sigmas, weights_m, weights_c, 0)

    # the weighted sum of the sigma points must reproduce the input mean
    recovered = sum(s * w for s, w in zip(sigmas, weights_m))
    assert abs(recovered - mean) < 1.e-12

    # and the transform must recover both mean and covariance
    assert abs(ut_mean[0] - mean) < 1.e-12
    assert abs(ut_cov[0, 0] - cov) < 1.e-12

    assert sigmas.shape == (2, 1)
class RadarSim(object):
def __init__(self, dt):
self.x = 0
self.dt = dt
def get_range(self):
vel = 100 + 5*randn()
alt = 1000 + 10*randn()
self.x += vel*self.dt
v = self.x * 0.05*randn()
rng = (self.x**2 + alt**2)**.5 + v
return rng
def test_radar():
    """Track a simulated aircraft with a UKF observing only slant range."""
    def fx(x, dt):
        # constant-velocity transition for [position, velocity, altitude]
        A = np.eye(3) + dt * np.array([[0, 1, 0],
                                       [0, 0, 0],
                                       [0, 0, 0]])
        return A.dot(x)

    def hx(x):
        # slant range from position and altitude
        return [np.sqrt(x[0]**2 + x[2]**2)]

    dt = 0.05

    sp = JulierSigmaPoints(n=3, kappa=0.)
    kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)
    # before any predict/update the priors equal the initial state
    assert np.allclose(kf.x, kf.x_prior)
    assert np.allclose(kf.P, kf.P_prior)

    # test __repr__ doesn't crash
    str(kf)

    kf.Q *= 0.01
    kf.R = 10
    kf.x = np.array([0., 90., 1100.])
    kf.P *= 100.

    radar = RadarSim(dt)

    t = np.arange(0, 20+dt, dt)
    n = len(t)
    xs = np.zeros((n, 3))

    # seed for reproducible measurement noise
    random.seed(200)

    rs = []
    for i in range(len(t)):
        r = radar.get_range()
        kf.predict()
        kf.update(z=[r])

        xs[i, :] = kf.x
        rs.append(r)

        # test mahalanobis: distance of a zero residual equals the
        # filter's own mahalanobis of the innovation
        a = np.zeros(kf.y.shape)
        maha = scipy_mahalanobis(a, kf.y, kf.SI)
        assert kf.mahalanobis == approx(maha)

    if DO_PLOT:
        print(xs[:, 0].shape)
        plt.figure()
        plt.subplot(311)
        plt.plot(t, xs[:, 0])
        plt.subplot(312)
        plt.plot(t, xs[:, 1])
        plt.subplot(313)
        plt.plot(t, xs[:, 2])
def test_linear_2d_merwe():
    """ should work like a linear KF if problem is linear """

    def fx(x, dt):
        # constant-velocity transition for [x, vx, y, vy]
        F = np.array([[1, dt, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, dt],
                      [0, 0, 0, 1]], dtype=float)

        return np.dot(F, x)

    def hx(x):
        # observe the two positions only
        return np.array([x[0], x[2]])

    dt = 0.1
    points = MerweScaledSigmaPoints(4, .1, 2., -1)
    kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
                               fx=fx, hx=hx, points=points)

    kf.x = np.array([-1., 1., -1., 1])
    kf.P *= 1.1

    # test __repr__ doesn't crash
    str(kf)

    # noisy measurements along the diagonal trajectory (i, i)
    zs = [[i+randn()*0.1, i+randn()*0.1] for i in range(20)]

    Ms, Ps = kf.batch_filter(zs)
    smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dts=dt)

    if DO_PLOT:
        plt.figure()
        zs = np.asarray(zs)
        plt.plot(zs[:, 0], marker='+')
        plt.plot(Ms[:, 0], c='b')
        plt.plot(smooth_x[:, 0], smooth_x[:, 2], c='r')
        print(smooth_x)
def test_linear_2d_simplex():
    """ should work like a linear KF if problem is linear """

    def fx(x, dt):
        # constant-velocity transition for [x, vx, y, vy]
        F = np.array([[1, dt, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, dt],
                      [0, 0, 0, 1]], dtype=float)

        return np.dot(F, x)

    def hx(x):
        # observe the two positions only
        return np.array([x[0], x[2]])

    dt = 0.1
    points = SimplexSigmaPoints(n=4)
    kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
                               fx=fx, hx=hx, points=points)

    kf.x = np.array([-1., 1., -1., 1])
    kf.P *= 0.0001

    # noisy measurements along the diagonal trajectory (i, i)
    zs = []
    for i in range(20):
        z = np.array([i+randn()*0.1, i+randn()*0.1])
        zs.append(z)

    Ms, Ps = kf.batch_filter(zs)
    smooth_x, _, _ = kf.rts_smoother(Ms, Ps, dts=dt)

    if DO_PLOT:
        zs = np.asarray(zs)
        plt.plot(Ms[:, 0])
        plt.plot(smooth_x[:, 0], smooth_x[:, 2])
        print(smooth_x)
def test_linear_1d():
    """A UKF applied to a linear 1D tracking problem should behave like a
    linear Kalman filter."""

    def fx(x, dt):
        # constant-velocity transition
        return np.dot(np.array([[1., dt],
                                [0, 1]]), x)

    def hx(x):
        # observe position only
        return np.array([x[0]])

    dt = 0.1
    points = MerweScaledSigmaPoints(2, .1, 2., -1)
    kf = UnscentedKalmanFilter(dim_x=2, dim_z=1, dt=dt,
                               fx=fx, hx=hx, points=points)

    kf.x = np.array([1, 2])
    kf.P = np.array([[1, 1.1],
                     [1.1, 3]])
    kf.R *= 0.05
    kf.Q = np.array([[0., 0], [0., .001]])

    # a single predict/update cycle with a fixed measurement
    kf.predict()
    kf.update(np.array([2.]))

    # then track a noisy ramp for 50 steps
    for step in range(50):
        measurement = np.array([step + randn() * 0.1])
        kf.predict()
        kf.update(measurement)

    print('K', kf.K.T)
    print('x', kf.x)
def test_batch_missing_data():
    """ batch filter should accept missing data with None in the measurements """

    def fx(x, dt):
        # constant-velocity transition for [x, vx, y, vy]
        F = np.array([[1, dt, 0, 0],
                      [0, 1, 0, 0],
                      [0, 0, 1, dt],
                      [0, 0, 0, 1]], dtype=float)

        return np.dot(F, x)

    def hx(x):
        # observe the two positions only
        return np.array([x[0], x[2]])

    dt = 0.1
    points = MerweScaledSigmaPoints(4, .1, 2., -1)
    kf = UnscentedKalmanFilter(dim_x=4, dim_z=2, dt=dt,
                               fx=fx, hx=hx, points=points)

    kf.x = np.array([-1., 1., -1., 1])
    kf.P *= 0.0001

    zs = []
    for i in range(20):
        z = np.array([i + randn()*0.1, i + randn()*0.1])
        zs.append(z)

    # drop one measurement to exercise the None path
    zs[2] = None
    Rs = [1]*len(zs)
    Rs[2] = None
    # NOTE(review): `Rs` mirrors the missing measurement but is never
    # passed to batch_filter — presumably this was meant to be
    # `kf.batch_filter(zs, Rs=Rs)`; confirm against the UKF API.
    Ms, Ps = kf.batch_filter(zs)
def test_rts():
    """Batch filtering must reproduce the sequential result, and the RTS
    smoother must run on its output."""
    def fx(x, dt):
        # constant-velocity transition for [position, velocity, altitude]
        A = np.eye(3) + dt * np.array([[0, 1, 0],
                                       [0, 0, 0],
                                       [0, 0, 0]])
        f = np.dot(A, x)
        return f

    def hx(x):
        # slant range from position and altitude
        return [np.sqrt(x[0]**2 + x[2]**2)]

    dt = 0.05

    sp = JulierSigmaPoints(n=3, kappa=1.)
    kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)

    kf.Q *= 0.01
    kf.R = 10
    kf.x = np.array([0., 90., 1100.])
    kf.P *= 100.

    radar = RadarSim(dt)

    t = np.arange(0, 20 + dt, dt)

    n = len(t)

    xs = np.zeros((n, 3))

    # seed for reproducible measurement noise
    random.seed(200)
    rs = []

    for i in range(len(t)):
        r = radar.get_range()
        kf.predict()
        kf.update(z=[r])

        xs[i, :] = kf.x
        rs.append(r)

    # reset the filter and replay the same measurements as a batch;
    # the result must match the sequential run exactly
    kf.x = np.array([0., 90., 1100.])
    kf.P = np.eye(3) * 100
    M, P = kf.batch_filter(rs)
    assert np.array_equal(M, xs), "Batch filter generated different output"

    Qs = [kf.Q] * len(t)
    M2, P2, K = kf.rts_smoother(Xs=M, Ps=P, Qs=Qs)

    if DO_PLOT:
        print(xs[:, 0].shape)
        plt.figure()
        plt.subplot(311)
        plt.plot(t, xs[:, 0])
        plt.plot(t, M2[:, 0], c='g')
        plt.subplot(312)
        plt.plot(t, xs[:, 1])
        plt.plot(t, M2[:, 1], c='g')
        plt.subplot(313)
        plt.plot(t, xs[:, 2])
        plt.plot(t, M2[:, 2], c='g')
def test_fixed_lag():
    """Fixed-lag smoothing with the UKF: once during filtering, smooth the
    last N estimates with rts_smoother, then compare against a full batch
    filter + RTS smoothing pass.

    Bug fix: the bare ``except:`` around the fixed-lag smoothing step would
    also swallow SystemExit/KeyboardInterrupt; narrowed to ``except
    Exception:``.
    """
    def fx(x, dt):
        # linear transition: x[0] integrates x[1]; x[1], x[2] constant
        A = np.eye(3) + dt * np.array([[0, 1, 0],
                                       [0, 0, 0],
                                       [0, 0, 0]])
        f = np.dot(A, x)
        return f

    def hx(x):
        # measurement is the slant range sqrt(pos^2 + alt^2)
        return [np.sqrt(x[0]**2 + x[2]**2)]

    dt = 0.05
    sp = JulierSigmaPoints(n=3, kappa=0)
    kf = UnscentedKalmanFilter(3, 1, dt, fx=fx, hx=hx, points=sp)
    kf.Q *= 0.01
    kf.R = 10
    kf.x = np.array([0., 90., 1100.])
    kf.P *= 1.
    radar = RadarSim(dt)
    t = np.arange(0, 20 + dt, dt)
    n = len(t)
    xs = np.zeros((n, 3))
    # fixed seed so the simulated radar returns are reproducible
    random.seed(200)
    rs = []
    M = []
    P = []
    N = 10  # fixed-lag window length
    flxs = []
    for i in range(len(t)):
        r = radar.get_range()
        kf.predict()
        kf.update(z=[r])
        xs[i, :] = kf.x
        flxs.append(kf.x)
        rs.append(r)
        M.append(kf.x)
        P.append(kf.P)
        print(i)
        # NOTE(review): smoothing only fires once, at step 20 — presumably a
        # spot check rather than true continuous fixed-lag smoothing; confirm.
        if i == 20 and len(M) >= N:
            try:
                M2, P2, K = kf.rts_smoother(Xs=np.asarray(M)[-N:],
                                            Ps=np.asarray(P)[-N:])
                flxs[-N:] = M2
            except Exception:
                print('except', i)
    # full batch filter + RTS smoother over all measurements for comparison
    kf.x = np.array([0., 90., 1100.])
    kf.P = np.eye(3) * 100
    M, P = kf.batch_filter(rs)
    Qs = [kf.Q]*len(t)
    M2, P2, K = kf.rts_smoother(Xs=M, Ps=P, Qs=Qs)
    flxs = np.asarray(flxs)
    print(xs[:, 0].shape)
    plt.figure()
    plt.subplot(311)
    plt.plot(t, xs[:, 0])
    plt.plot(t, flxs[:, 0], c='r')
    plt.plot(t, M2[:, 0], c='g')
    plt.subplot(312)
    plt.plot(t, xs[:, 1])
    plt.plot(t, flxs[:, 1], c='r')
    plt.plot(t, M2[:, 1], c='g')
    plt.subplot(313)
    plt.plot(t, xs[:, 2])
    plt.plot(t, flxs[:, 2], c='r')
    plt.plot(t, M2[:, 2], c='g')
def test_circle():
from filterpy.kalman import KalmanFilter
from math import radians
def hx(x):
radius = x[0]
angle = x[1]
x = cos(radians(angle)) * radius
y = sin(radians(angle)) * radius
return np.array([x, y])
def fx(x, dt):
return np.array([x[0], x[1] + x[2], x[2]])
std_noise = .1
sp = JulierSigmaPoints(n=3, kappa=0.)
f = UnscentedKalmanFilter(dim_x=3, dim_z=2, dt=.01,
hx=hx, fx=fx, points=sp)
f.x = np.array([50., 90., 0])
f.P *= 100
f.R = np.eye(2)*(std_noise**2)
f.Q = np.eye(3)*.001
f.Q[0, 0] = 0
f.Q[2, 2] = 0
kf = KalmanFilter(dim_x=6, dim_z=2)
kf.x = np.array([50., 0., 0, 0, .0, 0.])
F = np.array([[1., 1., .5, 0., 0., 0.],
[0., 1., 1., 0., 0., 0.],
[0., 0., 1., 0., 0., 0.],
[0., 0., 0., 1., 1., .5],
[0., 0., 0., 0., 1., 1.],
[0., 0., 0., 0., 0., 1.]])
kf.F = F
kf.P *= 100
kf.H = np.array([[1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0]])
kf.R = f.R
kf.Q[0:3, 0:3] = Q_discrete_white_noise(3, 1., .00001)
kf.Q[3:6, 3:6] = Q_discrete_white_noise(3, 1., .00001)
results = []
zs = []
kfxs = []
for t in range(12000):
a = t / 30 + 90
x = cos(radians(a)) * 50. + randn() * std_noise
y = sin(radians(a)) * 50. + randn() * std_noise
# create measurement = t plus white noise
z = np.array([x, y])
zs.append(z)
f.predict()
f.update(z)
kf.predict()
kf.update(z)
# save data
results.append(hx(f.x))
kfxs.append(kf.x)
results = np.asarray(results)
zs = | np.asarray(zs) | numpy.asarray |
import torch
import matplotlib.pyplot as plt
import numpy as np
import time
import floris.tools as wfct
from superposition import super_position
from optimisation import FLORIS_wake_steering, CNNwake_wake_steering
from superposition import FLORIS_farm_power, CNNWake_farm_power
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, CNNwake"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__version__ = "1.0"
__email__ = "<EMAIL>"
__status__ = "Development"
def visualize_turbine(plane, domain_size, nr_points, title="", ax=None):
    """
    Draw the flow field around a single turbine onto a given axis.

    Args:
        plane (2d numpy array): Flow field around the turbine.
        domain_size (list or numpy array): Domain limits
            [x_min, x_max, y_min, y_max].
        nr_points (list or numpy array): Nr. of grid points in x and y.
        title (str, optional): Title of the graph. Defaults to "".
        ax (pyplot axis, optional): Axis the plot is drawn onto.

    Returns:
        ax.pcolormesh: Image of the flow field.
    """
    # build the plotting mesh from the domain limits and resolution
    grid_x = np.linspace(domain_size[0], domain_size[1], nr_points[0])
    grid_y = np.linspace(domain_size[2], domain_size[3], nr_points[1])
    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    # render the cut-through and label it
    image = ax.pcolormesh(mesh_x, mesh_y, plane, shading='auto',
                          cmap="coolwarm")
    ax.set_title(title)
    # equal axis scaling so the farm geometry is not distorted
    ax.set_aspect("equal")
    return image
def visualize_farm(
        plane, nr_points, size_x, size_y, title="", ax=None, vmax=False):
    """
    Draw the flow field of a whole wind farm onto a given axis.

    Args:
        plane (2d numpy array): Flow field of the wind farm.
        nr_points (list or np array): Nr. of grid points in x and y.
        size_x (int): Size of domain in x direction (km).
        size_y (int): Size of domain in y direction (km).
        title (str, optional): Title of the plot. Defaults to "".
        ax (pyplot axis, optional): Axis the plot is drawn onto.
        vmax (bool, optional): Upper colour limit; if False the maximum
            of `plane` is used.

    Returns:
        ax.pcolormesh: Image of the flow field around the wind farm.
    """
    grid_x = np.linspace(0, size_x, nr_points[0])
    grid_y = np.linspace(0, size_y, nr_points[1])
    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    # default colour scale limit: the largest value in the plane
    if vmax is False:
        vmax = np.max(plane)
    image = ax.pcolormesh(mesh_x, mesh_y, plane,
                          shading='auto', cmap="coolwarm", vmax=vmax)
    ax.set_title(title)
    # equal axis scaling so the farm geometry is not distorted
    ax.set_aspect("equal")
    return image
def Compare_CNN_FLORIS(
        x_position, y_position, yawn_angles, wind_velocity, turbulent_int,
        CNN_generator, Power_model, TI_model, device,
        florisjason_path='', plot=False):
    """
    Generates the wind field around a wind park using the neural networks.
    The individual wakes of the turbines are calculated using thee CNN and
    superimposed onto the wind farm flow field using a super-position model.
    The energy produced by the turbines are calcuated using another fully
    connected network from the flow data just upstream the turbine.
    The functions generates the same wind park flow field using FLORIS so that
    the two solutions can be compared when plot = True is set.
    Args:
        x_position (list): 1d array of x locations of the wind turbines in m.
        y_position (list): 1d array of y locations of the wind turbines in m.
        yawn_angles (list): 1d array of yaw angles of every wind turbine.
        wind_velocity (float): Free stream wind velocity in m/s.
        turbulent_int (float): Turbulent intensity in percent.
        device (torch.device): Device to store and run the neural network on,
            cpu or cuda
        florisjason_path (string): Location of the FLORIS jason file
        plot (bool, optional): If True, the FLORIS and CNN solution will
            be plotted and compared.
    Returns:
        numpy array: Final 2d array of flow field around the wind park.
    """
    # Define the x and y length of a single cell in the array
    # This is set by the standard value used in FLORIS wakes
    dx = 18.4049079755
    dy = 2.45398773006
    # Set the maximum length of the array to be 3000m and 400m
    # more than the maximum x and y position of the wind park
    # If a larger physical domain was used change adapt the values
    x_max = np.max(x_position) + 3000
    y_max = np.max(y_position) + 300
    # Number of cells in x and y needed to create a 2d array of
    # that is x_max x y_max using dx, dy values
    Nx = int(x_max / dx)
    Ny = int(y_max / dy)
    # Initialise a 2d array of the wind park with the
    # inlet wind speed
    farm_array = np.ones((Ny, Nx)) * wind_velocity
    # set up FLORIS model and place the turbines with their yaw angles
    floris_model = wfct.floris_interface.FlorisInterface(
        florisjason_path + "FLORIS_input_gauss.json")
    floris_model.reinitialize_flow_field(
        layout_array=[x_position, np.array(y_position)])
    for _ in range(0, len(x_position)):
        floris_model.change_turbine([_], {'yaw_angle': yawn_angles[_],
                                          "blade_pitch": 0.0})
    floris_model.reinitialize_flow_field(wind_speed=wind_velocity,
                                         turbulence_intensity=turbulent_int)
    start_t = time.time()
    # Calculate using FLORIS and extract the 2d horizontal flow field
    floris_model.calculate_wake()
    print(f"Time taken for FLORIS to generate"
          f" wind park: {time.time() - start_t:.3f}")
    floris_plane = floris_model.get_hor_plane(
        height=90, x_resolution=Nx, y_resolution=Ny, x_bounds=[0, x_max],
        y_bounds=[0, y_max]).df.u.values.reshape(Ny, Nx)
    # FLORIS reference values: per-turbine power and local TI
    floris_power = floris_model.get_turbine_power()
    floris_ti = floris_model.get_turbine_ti()
    # print(floris_power, floris_ti)
    power_CNN = []
    ti_CNN = []
    t = time.time()
    with torch.no_grad():
        # CNNwake evaluation: for every turbine predict local TI, power and
        # the individual wake, then superimpose the wake onto the farm array
        for i in range(len(x_position)):
            # determine the x and y cells that the turbine center is at
            # NOTE(review): the y origin is offset by 200 m — presumably the
            # CNN patch is centred on the turbine; confirm against training.
            turbine_cell = [int((x_position[i]) / dx),
                            int((y_position[i] - 200) / dy)]
            t1 = time.time()
            # extract wind speeds along the rotor, 60 meters upstream
            u_upstream_hub = farm_array[
                turbine_cell[1] + 45: turbine_cell[1] + 110,
                turbine_cell[0] - 3]
            # Do an running average, this is done because CNNwake has slight
            # variations in the u predictions, also normalise the u values
            # (divide by 12, the max wind speed used during training)
            u_power = [
                ((u_upstream_hub[i - 1] + u_upstream_hub[i] +
                  u_upstream_hub[i + 1]) / 3) / 12 for
                i in np.linspace(5, 55, 40, dtype=int)]
            # append normalised yaw angle and the inlet TI as extra features
            u_power = np.append(u_power, yawn_angles[i] / 30)
            u_power = np.append(u_power, turbulent_int)
            # The local TI does not change from inlet TI if the turbine
            # is not covered by a wake, therefore check if if all values
            # in u_list_hub are the same -> means no wake coverage
            # Local TI also depends on yaw, if yaw is less than 12° and
            # turbine is not in wake -> use inlet TI for local TI
            if np.allclose(u_power[0], u_power[0:-3],
                           rtol=1e-02, atol=1e-02) and abs(u_power[-2]) < 0.4:
                # print("Turbine in free stream, set ti to normal")
                ti = turbulent_int
            else:
                # NOTE(review): 0.30000001192092896 is presumably the max TI
                # used to normalise the training targets — verify.
                ti = TI_model((torch.tensor(u_power).float().to(device))).detach().cpu().numpy() * 0.30000001192092896
            # regulate TI to ensure it is not to different from free stream
            if ti < turbulent_int * 0.7:
                # print(f"TI REGULATED 1 AT {i}")
                ti = turbulent_int * 1.5
            # clip ti values to max and min trained
            ti = np.clip(ti, 0.015, 0.25).item(0)
            ti_CNN.append(ti)
            # replace the inlet TI feature with the predicted local TI
            u_power[-1] = ti
            # NOTE(review): 4834506 is presumably the max turbine power (W)
            # used to normalise the training targets — verify.
            energy = Power_model(torch.tensor(u_power).float().to(device)).detach().cpu().numpy() * 4834506
            power_CNN.append(energy[0])
            # mean upstream speed at hub height drives the wake generator
            hub_speed = np.round(np.mean(u_upstream_hub), 2)
            turbine_condition = [[hub_speed, ti, yawn_angles[i]]]
            turbine_field = CNN_generator(torch.tensor(turbine_condition).float().to(device))
            # Use CNN to calculate wake of individual turbine
            # Since CNN output is normalised,
            # multiply by 12 and create a numpy array
            turbine_field = turbine_field[0][0].detach().cpu().numpy() * 12
            # Place wake of individual turbine in the farm_array
            farm_array = super_position(
                farm_array, turbine_field, turbine_cell, hub_speed,
                wind_velocity, sp_model="SOS")
    # print timing and error information (errors relative to FLORIS)
    print(f"Time taken for CNNwake to generate wind park: {time.time() - t:.3f}")
    print(f"CNNwake power prediction error: "
          f"{100 * np.mean(abs(np.array(floris_power) - np.array(power_CNN)) / np.array(floris_power)):.2f} %")
    print(f"CNNwake TI prediction error: {100 * np.mean(abs(np.array(floris_ti) - np.array(ti_CNN)) / np.array(floris_ti)):.2f} %")
    print(f"APWP error: {100 * np.mean(abs(floris_plane - farm_array) / np.max(floris_plane)):.2f}")
    if plot:
        plt.rcParams.update({'font.size': 16})
        # Plot wake fields of both wind farms and error field
        fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(20, 49))
        im1 = visualize_farm(farm_array, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="CNNwake", ax=axarr[0])
        im2 = visualize_farm(floris_plane, nr_points=[Nx, Ny], size_x=x_max,
                             size_y=y_max, title="FLORIS", ax=axarr[1])
        im3 = visualize_farm(
            (100 * abs(floris_plane - farm_array) / np.max(floris_plane)),
            nr_points=[Nx, Ny], size_x=x_max, size_y=y_max,
            title="Pixel wise percentage error ", ax=axarr[2], vmax=20)
        col1 = fig.colorbar(im1, ax=axarr[0])
        col1.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col2 = fig.colorbar(im2, ax=axarr[1])
        col2.set_label('m/s', labelpad=15, y=1.06, rotation=0)
        col3 = fig.colorbar(im3, ax=axarr[2])
        col3.set_label('%', labelpad=11, y=0.9, rotation=0)
        axarr[2].set_xlabel('m', fontsize=15)
        axarr[0].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[1].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        axarr[2].set_ylabel('m', labelpad=9, rotation=0, y=0.4, fontsize=15)
        # Plot TI and Power of every turbine for FLORIS and CNNwake
        fig, axarr = plt.subplots(2, figsize=(9, 9))
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(power_CNN)/1.e06, 'o--', label="CNNwake")
        axarr[0].plot(range(1, len(x_position) + 1),
                      np.array(floris_power)/1.e06, 'o--', label="FLORIS")
        axarr[1].plot(range(1, len(x_position) + 1),
                      np.array(ti_CNN), 'o--', label="CNNwake")
        axarr[1].plot(range(1, len(x_position) + 1),
                      floris_ti, 'o--', label="FLORIS")
        axarr[0].set_ylabel('Power output [MW]', fontsize=15)
        axarr[1].set_ylabel('Local TI [%]', fontsize=15)
        axarr[1].set_xlabel('Turbine Nr.', rotation=0, fontsize=15)
        axarr[1].legend()
        axarr[0].legend()
        plt.show()
    return farm_array, floris_plane
if __name__ == '__main__':
    # To run individual CNNWake files, the imports are not allowed to be
    # relative. Instead of: from .superposition import super_position
    # it needs to be: from superposition import super_position, for all CNNWake imports
    # also import all NNs
    from CNN_model import Generator
    from FCC_model import FCNN
    from superposition import super_position
    # Set up/load all NNs: wake generator CNN plus the two FCNNs for
    # power and local-TI prediction, all in eval mode.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    CNN_generator = Generator(3, 30).to(device)
    CNN_generator.load_model('./trained_models/CNN_FLOW.pt', device=device)
    # NOTE(review): `.to()` with no arguments is a no-op move — presumably
    # leftover; confirm whether a device argument was intended here.
    CNN_generator = CNN_generator.to()
    CNN_generator.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    example_out = CNN_generator(torch.tensor([[4, 0.1, 20]]).float().to(device))
    assert example_out.size() == torch.Size([1, 1, 163, 163])
    Power_model = FCNN(42, 300, 1).to(device)
    Power_model.load_state_dict(torch.load('./trained_models/FCNN_POWER.pt', map_location=device))
    Power_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    energy = Power_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert energy.size() == torch.Size([1])
    TI_model = FCNN(42, 300, 1).to(device)
    TI_model.load_state_dict(torch.load('./trained_models/FCNN_TI.pt', map_location=device))
    TI_model.eval()
    # the first forward pass is super slow so do it outside loop and use the
    # output for a simple assert test
    TI = TI_model(torch.tensor([i for i in range(0, 42)]).float().to(device))
    assert TI.size() == torch.Size([1])
    # Compare a single wind farm, this will show the wake, energy and local TI
    # for every turbine and compare it to FLORIS
    '''farm, a = Compare_CNN_FLORIS([100, 100, 700, 700, 1200, 1200],
                                 [300, 800, 1300, 550, 1050, 300],
                                 [0, 0, 0, 0, 0, 0, 0], 11.6, 0.06,
                                 CNN_generator, Power_model,
                                 TI_model, device, plot=True)'''
def heatmap(xs, ys, res=10):
"""
Assess the performance of the DNN vs FLORIS on
parametric optimiser calls for a wide range of
inlet speed and turbulence intensity for a
specific array configuration.
Args:
xs (numpy array of floats) Turbine x coordinates.
ys (numpy array of floats) Turbine y coordinates.
res (int, optional) Resolution of heatmap.
farm_opt (boolean, optional) Calls either farm or yaw optimisers.
"""
# Wind speeds and turbulence intensities examined
x_ws = np.linspace(3, 12, res)
y_ti = np.linspace(0.05, 0.25, res)
# Initialisation of power and timing heatmaps
g0 = np.zeros((res, res))
g1 = np.zeros((res, res))
g2 = | np.zeros((res, res)) | numpy.zeros |
import os
import sys
import glob
import h5py
import random
import numpy as np
import cv2
import trimesh
import math
from torch.utils.data import Dataset
from graphics import Voxelgrid
from utils.mapping import *
class ScanNet(Dataset):
    def __init__(self, config_data):
        """Configure the dataset and collect all per-frame file paths.

        Args:
            config_data: configuration namespace providing paths, resolution,
                loading strategy, augmentation/normalisation switches, etc.
        """
        self.root_dir = config_data.root_dir
        self.resolution = (config_data.resy, config_data.resx) # numpy format [rows, columns]
        self.pad = config_data.pad
        self.augmentations = config_data.augmentations
        self.normalize = config_data.normalize
        self.transform = config_data.transform
        # keep only every frame_ratio-th frame of each trajectory
        self.frame_ratio = config_data.frame_ratio
        self.scene_list = config_data.scene_list
        self.input = config_data.input
        self.target = config_data.target
        self.semantics = config_data.semantics
        self.mode = config_data.mode
        self.intensity_gradient = config_data.intensity_grad
        self.truncation_strategy = config_data.truncation_strategy
        self.fusion_strategy = config_data.fusion_strategy
        # scene identifiers (e.g. 'scene0000_00'); filled by the loaders
        self._scenes = []
        if config_data.data_load_strategy == 'hybrid':
            # The loading strategy 'hybrid' will always only load from
            # at most 1 trajectory of a scene at a time
            self._get_scene_load_order(config_data.load_scenes_at_once)
        else:
            # The loading strategy 'max_depth_diversity' loads all
            # trajectories from all scenes at the same time.
            self.scenedir = None
        self._load_color()
        self._load_cameras()
        # note: method name is misspelled ('intriniscs') but kept for
        # compatibility with existing callers
        self._load_intriniscs()
        if self.input != 'image':
            self._load_depth()
        if self.target == 'depth_gt':
            self._load_depth_gt()
        # semantic label mapping: full NYU40 palette or the reduced NYU20 set
        if self.semantics == 'nyu40':
            self.rgb_map = scannet_color_palette()
            self.label_map = ids_to_nyu40()
            self.names_map = scannet_nyu40_names()
            self._load_semantic_gt()
        elif self.semantics == 'nyu20':
            self.rgb_map = [scannet_color_palette()[i] for i in scannet_main_ids()]
            self.label_map = ids_to_nyu20()
            self.names_map = scannet_nyu20_names()
            self.main_ids = np.array(scannet_main_ids())
            self._load_semantic_gt()
    def _get_scene_load_order(self, nbr_load_scenes):
        """Build `self.scenedir`, an interleaving plan that draws frames from
        at most `nbr_load_scenes` scenes at a time, one trajectory per scene.

        Raises:
            ValueError: if nbr_load_scenes exceeds the number of scenes.
        """
        # create list of training scenes
        # format: scans/scene0000_00 = [scans_dir]/scene[id]_[traj]
        scenes_list = list()
        with open(self.scene_list, 'r') as file:
            for line in file:
                if line.split(' ')[0].split('/')[1] not in scenes_list:
                    scenes_list.append(line.split(' ')[0].split('/')[1])
        self._scenes = scenes_list
        # map scene id -> list of its trajectory suffixes
        scenes_dict = {k: [] for k in {scene.split('_')[0] for scene in scenes_list}}
        for scene in self._scenes:
            scenes_dict[scene.split('_')[0]].append(scene.split('_')[1])
        # make sure nbr_load_scenes <= len(trajectory_list)
        if nbr_load_scenes > len(scenes_dict):
            raise ValueError('nbr_load_scenes variable must be lower than the number of scenes')
        # one slot per concurrently-loaded scene
        listdir = {k: [] for k in range(nbr_load_scenes)}
        while scenes_dict:
            # randomly pick up to nbr_load_scenes remaining scenes and assign
            # all their trajectories to the slots, then drop them
            scene_indices = random.sample(range(0, len(scenes_dict)), min(len(scenes_dict), nbr_load_scenes))
            for key, scene_idx in enumerate(scene_indices):
                scene = list(scenes_dict.keys())[scene_idx]
                for traj in scenes_dict[scene]:
                    listdir[key].append(scene + '_' + traj)
            scenes_dict = {k: v for idx, (k, v) in enumerate(scenes_dict.items()) if idx not in scene_indices}
        # shuffle the trajectory order within each slot
        self.scenedir = {k: random.sample(v, len(v)) for k, v in listdir.items()}
    def _hybrid_load(self, modality):
        """Collect file paths of `modality` (e.g. 'depth', 'color', 'pose')
        following the hybrid plan in `self.scenedir`, interleaving frames
        round-robin across the scene slots and subsampling by frame_ratio.

        Returns:
            list[str]: interleaved, subsampled file paths.
        """
        # create the full lists for each key in scenedir
        img_paths = []
        tmp_dict = dict()
        for key in self.scenedir:
            tmp_list = list()
            for scene in self.scenedir[key]:
                files = glob.glob(os.path.join(self.root_dir, scene, modality, '*'))
                # sort numerically by frame index encoded in the file name
                files = sorted(files, key=lambda x: int(os.path.splitext(x.split('/')[-1])[0]))
                for file in files:
                    tmp_list.append(file)
            # replace the short list with the long list including all file paths
            tmp_dict[key] = tmp_list
        # fuse the lists into one, round-robin over the slots
        # find key with longest list
        max_key = max(tmp_dict, key=lambda x: len(tmp_dict[x]))
        idx = 0
        while idx < len(tmp_dict[max_key]):
            for key in tmp_dict:
                if idx < len(tmp_dict[key]):
                    img_paths.append(tmp_dict[key][idx])
            idx += 1
        # temporal subsampling
        img_paths = img_paths[::self.frame_ratio]
        return img_paths
    def _load_from_list(self, position, debug=False):
        """Collect file paths from column `position` of the scene-list file
        (one directory per line/column), subsample by frame_ratio, and return
        them sorted by frame index.

        Args:
            position (int): which whitespace-separated column of the scene
                list holds the directory for the wanted modality.
            debug (bool): print every collected file path.
        """
        # reading files from list
        img_paths = []
        with open(self.scene_list, 'r') as fp:
            for line in fp:
                line = line.rstrip('\n').split(' ')
                # remember every scene identifier encountered
                if line[0].split('/')[1] not in self._scenes:
                    self._scenes.append(line[0].split('/')[1])
                files = glob.glob(os.path.join(self.root_dir, line[position], '*'))
                # sort numerically by frame index encoded in the file name
                files = sorted(files, key=lambda x: int(os.path.splitext(x.split('/')[-1])[0]))
                for file in files:
                    img_paths.append(file)
                    if debug: print(file)
        img_paths = img_paths[::self.frame_ratio]
        # perhaps it will be important to order the frames for testing and training the fusion network.
        img_paths.sort(key=lambda x: int(os.path.splitext(x.split('/')[-1])[0]))
        return img_paths
def _load_depth(self):
# loads the paths of the noisy depth images to a list
self.depth_images = self._hybrid_load('depth') if self.scenedir else self._load_from_list(0)
def _load_depth_gt(self):
# loads the paths of the ground truth depth images to a list
self.depth_images_gt = self._hybrid_load('depth') if self.scenedir else self._load_from_list(0)
def _load_color(self):
# loads the paths of the RGB images to a list
self.color_images = self._hybrid_load('color') if self.scenedir else self._load_from_list(1)
def _load_semantic_gt(self):
# loads the paths of the ground truth semantic images to a list
self.semantic_images_gt = self._hybrid_load('label-filt') if self.scenedir else self._load_from_list(2)
def _load_cameras(self):
# loads the paths of the camera extrinsic matrices to a list
self.cameras = self._hybrid_load('pose') if self.scenedir else self._load_from_list(3)
    def _load_intriniscs(self):
        """Load and rescale the depth-camera intrinsics for every scene into
        `self.intrinsics` (scene id -> 3x3 float32 matrix).

        The intrinsics are rescaled from the native 640x480 depth resolution
        to `self.resolution`.
        """
        # loads the paths of the camera intrinsic matrices to a dict
        self.intrinsics = {}
        with open(self.scene_list, 'r') as file:
            for line in file:
                line = line.rstrip('\n').split(' ')
                scene = line[0].split('/')[1]
                intrinsics = np.loadtxt(os.path.join(self.root_dir, line[-1], 'intrinsic_depth.txt'))
                # scale factors from native depth resolution to target size
                kx = self.resolution[1] / 640
                ky = self.resolution[0] / 480
                # NOTE(review): the off-diagonal entries k[0,2]=kx, k[1,2]=ky
                # add kx/ky to the scaled principal point; a plain rescale
                # would use zeros there — confirm this offset is intended.
                k = np.array([[kx, 0, kx], [0, ky, ky], [0, 0, 1]]).astype(np.float32)
                intrinsics = np.matmul(k, intrinsics[0:3, 0:3])
                self.intrinsics.update({scene: intrinsics})
    @property
    def scenes(self):
        # list of scene identifiers (e.g. 'scene0000_00') found in the scene list
        return self._scenes
    def __len__(self):
        # dataset length = number of RGB frames (one sample per frame)
        return len(self.color_images)
    def __getitem__(self, item):
        """Assemble one training sample: RGB image plus (depending on config)
        semantics, input depth, ground-truth depth, camera extrinsics and
        intrinsics. Returns a dict of numpy arrays / metadata.
        """
        sample = dict()
        sample['item_id'] = item
        # load rgb image
        file = self.color_images[item]
        pathsplit = file.split('/')
        scene = pathsplit[-3]
        frame = os.path.splitext(pathsplit[-1])[0]
        frame_id = '{}/{}'.format(scene, frame)
        sample['frame_id'] = frame_id
        image = cv2.imread(file)
        image = cv2.resize(image, self.resolution[::-1], interpolation=cv2.INTER_NEAREST) # expects shape as columnsXrows
        if self.semantics:
            # load ground truth semantics and remap raw ids to NYU labels
            file = self.semantic_images_gt[item]
            semantic = cv2.imread(file, -1)
            semantic = cv2.resize(semantic, self.resolution[::-1], interpolation=cv2.INTER_NEAREST)
            # joint augmentation so image and labels stay aligned
            if self.augmentations is not None:
                image, semantic = self.augmentations(image, semantic)
            semantic = np.array([self.label_map[s] for s in semantic.flatten()])
            semantic = semantic.reshape(self.resolution)
            sample['semantic_gt'] = semantic.astype(np.uint8)
        if self.normalize:
            # NOTE(review): presumably per-channel (BGR) mean/std of the
            # training set — verify against the preprocessing pipeline.
            mean = [99.09, 113.94, 126.81]
            std = [69.64, 71.31, 73.16]
            image = (image - mean) / std
        sample['image'] = image.astype(np.float32)
        if self.input == 'depth_gt':
            # load input depth map (stored in millimetres; convert to metres)
            file = self.depth_images[item]
            depth = cv2.imread(file, -1)
            depth = cv2.resize(depth, self.resolution[::-1], interpolation=cv2.INTER_NEAREST) / 1000.
            if np.any(np.isnan(depth)):
                print("NaN in depth input")
            sample[self.input] = depth.astype(np.float32)
            # define mask of valid (non-zero) depth readings
            mask = (depth > 0.01)
            sample['mask'] = mask
        if self.target == 'depth_gt':
            # load ground truth depth map (millimetres -> metres)
            file = self.depth_images_gt[item]
            depth = cv2.imread(file, -1)
            depth = cv2.resize(depth, self.resolution[::-1], interpolation=cv2.INTER_NEAREST) / 1000.
            sample[self.target] = depth.astype(np.float32)
        # load extrinsics
        # the fusion code expects that the camera coordinate system is such that z is in the
        # camera viewing direction, y is down and x is to the right.
        file = self.cameras[item]
        sample['extrinsics'] = np.loadtxt(file).astype(np.float32)
        sample['intrinsics'] = self.intrinsics[scene]
        if self.transform:
            sample = self.transform(sample)
        return sample
def get_grid(self, scene, truncation, semantic_grid):
file = os.path.join(self.root_dir, 'scans', scene, scene + '_sdf.hdf')
# read from hdf file
try:
f = h5py.File(file, 'r')
except:
f = h5py.File(file.replace('scans', 'scans_test'), 'r')
voxels = | np.array(f['sdf'][0]) | numpy.array |
"""
pysteps.timeseries.autoregression
=================================
Methods related to autoregressive AR(p) models.
.. autosummary::
:toctree: ../generated/
adjust_lag2_corrcoef1
adjust_lag2_corrcoef2
ar_acf
estimate_ar_params_ols
estimate_ar_params_ols_localized
estimate_ar_params_yw
estimate_ar_params_yw_localized
estimate_var_params_ols
estimate_var_params_ols_localized
estimate_var_params_yw
iterate_ar_model
iterate_var_model
"""
import numpy as np
from scipy.special import binom
from scipy import linalg as la
from scipy import ndimage
def adjust_lag2_corrcoef1(gamma_1, gamma_2):
    """A simple adjustment of lag-2 temporal autocorrelation coefficient to
    ensure that the resulting AR(2) process is stationary when the parameters
    are estimated from the Yule-Walker equations.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # AR(2) stationarity requires gamma_2 > 2*gamma_1^2 - 1; clip gamma_2
    # into the open interval (2*gamma_1^2 - 1, 1) with a tiny margin.
    lower_bound = 2 * gamma_1 * gamma_1 - 1 + 1e-10
    upper_bound = 1 - 1e-10
    return np.minimum(np.maximum(gamma_2, lower_bound), upper_bound)
def adjust_lag2_corrcoef2(gamma_1, gamma_2):
    """A more advanced adjustment of lag-2 temporal autocorrelation coefficient
    to ensure that the resulting AR(2) process is stationary when
    the parameters are estimated from the Yule-Walker equations.

    Parameters
    ----------
    gamma_1 : float
        Lag-1 temporal autocorrelation coeffient.
    gamma_2 : float
        Lag-2 temporal autocorrelation coeffient.

    Returns
    -------
    out : float
        The adjusted lag-2 correlation coefficient.
    """
    # AR(2) stationarity requires gamma_2 >= 2*gamma_1^2 - 1.
    # Bug fix: this bound previously read `2 * gamma_1 * gamma_2 - 1`,
    # inconsistent with the identical bound in adjust_lag2_corrcoef1.
    gamma_2 = np.maximum(gamma_2, 2 * gamma_1 * gamma_1 - 1)
    # Tighter bound keeping the Yule-Walker solution well-behaved; it
    # dominates the first bound for all |gamma_1| < 1.
    gamma_2 = np.maximum(
        gamma_2, (3 * gamma_1 ** 2 - 2 + 2 * (1 - gamma_1 ** 2) ** 1.5) / gamma_1 ** 2
    )
    return gamma_2
def ar_acf(gamma, n=None):
    """Compute theoretical autocorrelation function (ACF) from the AR(p) model
    with lag-l, l=1,2,...,p temporal autocorrelation coefficients.

    Parameters
    ----------
    gamma : array-like
        Array of length p containing the lag-l, l=1,2,...p, temporal
        autocorrelation coefficients.
        The correlation coefficients are assumed to be in ascending
        order with respect to time lag.
    n : int
        Desired length of ACF array. Must be greater than len(gamma).

    Returns
    -------
    out : array-like
        Array containing the ACF values.
    """
    ar_order = len(gamma)
    # nothing to extrapolate: requested length equals the model order
    if n is None or n == ar_order:
        return gamma
    if n < ar_order:
        raise ValueError(
            "n=%i, but must be larger than the order of the AR process %i"
            % (n, ar_order)
        )
    # AR coefficients (drop the innovation-term parameter)
    coeffs = estimate_ar_params_yw(gamma)[:-1]
    acf = gamma.copy()
    # recursively extend the ACF one lag at a time:
    # gamma_k = sum_i phi_i * gamma_{k-i}
    # NOTE(review): assumes estimate_ar_params_yw returns a numpy array so
    # the elementwise product with the reversed slice is valid — verify.
    for lag in range(n - ar_order):
        window = acf[lag:lag + ar_order][::-1]
        acf.append(np.sum(window * coeffs))
    return acf
def estimate_ar_params_ols(
    x, p, d=0, check_stationarity=True, include_constant_term=False, h=0, lam=0.0
):
    r"""Estimate the parameters of an autoregressive AR(p) model

    :math:`x_{k+1}=c+\phi_1 x_k+\phi_2 x_{k-1}+\dots+\phi_p x_{k-p}+\phi_{p+1}\epsilon`

    by using ordinary least squares (OLS). If :math:`d\geq 1`, the parameters
    are estimated for a d times differenced time series that is integrated back
    to the original one by summation of the differences.

    Parameters
    ----------
    x : array_like
        Array of shape (n,...) containing a time series of length n=p+d+h+1.
        The remaining dimensions are flattened. The rows and columns of x
        represent time steps and samples, respectively.
    p : int
        The order of the model.
    d : {0,1}
        The order of differencing to apply to the time series.
    check_stationarity : bool
        Check the stationarity of the estimated model.
    include_constant_term : bool
        Include the constant term :math:`c` to the model.
    h : int
        If h>0, the fitting is done by using a history of length h in addition
        to the minimal required number of time steps n=p+d+1.
    lam : float
        If lam>0, the regression is regularized by adding a penalty term
        (i.e. ridge regression).

    Returns
    -------
    out : list
        The estimated parameter matrices :math:`\mathbf{\Phi}_1,\mathbf{\Phi}_2,
        \dots,\mathbf{\Phi}_{p+1}`. If include_constant_term is True, the
        constant term :math:`c` is added to the beginning of the list.

    Notes
    -----
    Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
    implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to zero.
    """
    n = x.shape[0]
    # need exactly p lags + d differences + h history + 1 target time step
    if n != p + d + h + 1:
        raise ValueError(
            "n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
            % (n, p, d, h, p + d + h + 1)
        )
    # flatten all trailing (sample) dimensions into columns
    if len(x.shape) > 1:
        x = x.reshape((n, np.prod(x.shape[1:])))
    if d not in [0, 1]:
        raise ValueError("d = %d, but 0 or 1 required" % d)
    if d == 1:
        x = np.diff(x, axis=0)
        n -= d
    x_lhs = x[p:, :]
    # design matrix: one column per (sample, time step) pair, one row per lag
    # (plus a leading row of ones when the constant term is included)
    Z = []
    for i in range(x.shape[1]):
        for j in range(p - 1, n - 1 - h):
            z_ = np.hstack([x[j - k, i] for k in range(p)])
            if include_constant_term:
                z_ = np.hstack([[1], z_])
            Z.append(z_)
    Z = np.column_stack(Z)
    # ridge-regularized normal equations: b = X Z^T (Z Z^T + lam*I)^-1
    b = np.dot(
        np.dot(x_lhs, Z.T), np.linalg.inv(np.dot(Z, Z.T) + lam * np.eye(Z.shape[0]))
    )
    b = b.flatten()
    if include_constant_term:
        c = b[0]
        phi = list(b[1:])
    else:
        phi = list(b)
    # innovation-term parameter: closed form known for p <= 2
    if p == 1:
        phi_pert = np.sqrt(1.0 - phi[0] * phi[0])
    elif p == 2:
        phi_pert = np.sqrt(
            (1.0 + phi[1]) * ((1.0 - phi[1]) ** 2.0 - phi[0] ** 2.0) / (1.0 - phi[1])
        )
    else:
        phi_pert = 0.0
    if check_stationarity:
        if not test_ar_stationarity(phi):
            # bug fix: the message previously named estimate_ar_params_yw
            raise RuntimeError(
                "Error in estimate_ar_params_ols: " "nonstationary AR(p) process"
            )
    if d == 1:
        # undo the differencing in parameter space
        phi_out = _compute_differenced_model_params(phi, p, 1, 1)
    else:
        phi_out = phi
    phi_out.append(phi_pert)
    if include_constant_term:
        phi_out.insert(0, c)
    return phi_out
def estimate_ar_params_ols_localized(
x,
p,
window_radius,
d=0,
include_constant_term=False,
h=0,
lam=0.0,
window="gaussian",
):
r"""Estimate the parameters of a localized AR(p) model
:math:`x_{k+1,i}=c_i+\phi_{1,i}x_{k,i}+\phi_{2,i}x_{k-1,i}+\dots+\phi_{p,i}x_{k-p,i}+\phi_{p+1,i}\epsilon`
by using ordinary least squares (OLS), where :math:`i` denote spatial
coordinates with arbitrary dimension. If :math:`d\geq 1`, the parameters
are estimated for a d times differenced time series that is integrated back
to the original one by summation of the differences.
Parameters
----------
x : array_like
Array of shape (n,...) containing a time series of length n=p+d+h+1.
The remaining dimensions are flattened. The rows and columns of x
represent time steps and samples, respectively.
p : int
The order of the model.
window_radius : float
Radius of the moving window. If window is 'gaussian', window_radius is
the standard deviation of the Gaussian filter. If window is 'uniform',
the size of the window is 2*window_radius+1.
d : {0,1}
The order of differencing to apply to the time series.
include_constant_term : bool
Include the constant term :math:`c_i` to the model.
h : int
If h>0, the fitting is done by using a history of length h in addition
to the minimal required number of time steps n=p+d+1.
lam : float
If lam>0, the regression is regularized by adding a penalty term
(i.e. ridge regression).
window : {"gaussian", "uniform"}
The weight function to use for the moving window. Applicable if
window_radius < np.inf. Defaults to 'gaussian'.
Returns
-------
out : list
List of length p+1 containing the AR(p) parameter fields for for the
lag-p terms and the innovation term. The parameter fields have the same
shape as the elements of gamma. Nan values are assigned, where the
sample size for estimating the parameters is too small. If
include_constant_term is True, the constant term :math:`c_i` is added
to the beginning of the list.
Notes
-----
Estimation of the innovation term parameter :math:`\phi_{p+1}` is currently
implemented for p<=2. If p > 2, :math:`\phi_{p+1}` is set to a zero array.
"""
n = x.shape[0]
if n != p + d + h + 1:
raise ValueError(
"n = %d, p = %d, d = %d, h = %d, but n = p+d+h+1 = %d required"
% (n, p, d, h, p + d + h + 1)
)
if d == 1:
x = np.diff(x, axis=0)
n -= d
if window == "gaussian":
convol_filter = ndimage.gaussian_filter
else:
convol_filter = ndimage.uniform_filter
if window == "uniform":
window_size = 2 * window_radius + 1
else:
window_size = window_radius
XZ = np.zeros(np.hstack([[p], x.shape[1:]]))
for i in range(p):
for j in range(h + 1):
tmp = convol_filter(
x[p + j, :] * x[p - 1 - i + j, :], window_size, mode="constant"
)
XZ[i, :] += tmp
if include_constant_term:
v = 0.0
for i in range(h + 1):
v += convol_filter(x[p + i, :], window_size, mode="constant")
XZ = | np.vstack([v[np.newaxis, :], XZ]) | numpy.vstack |
##################################################
# MIT License
#
# Copyright (c) 2019 Learning Equality
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
##################################################
from config import BULLET_THRESHOLD
from functools import reduce
from PIL import Image, ImageDraw
import cv2
import numpy as np
import re
class BoundingBox(object):
    """An axis-aligned rectangle given by its top-left corner (x1, y1) and
    bottom-right corner (x2, y2), with x1 < x2 and y1 < y2."""

    def __init__(self, x1, y1, x2, y2):
        # fold the old debug print into the assertion messages
        assert x1 < x2, "x1 must be < x2: ({}, {}, {}, {})".format(x1, y1, x2, y2)
        assert y1 < y2, "y1 must be < y2: ({}, {}, {}, {})".format(x1, y1, x2, y2)
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2

    def center(self):
        """Return the (x, y) center point of the box."""
        return (self.x1 + self.x2) / 2, (self.y1 + self.y2) / 2

    def area(self):
        return self.width() * self.height()

    def width(self):
        return self.x2 - self.x1

    def height(self):
        return self.y2 - self.y1

    def expanded(self, factor, axis="both"):
        """Return a new box grown by ``factor`` * size on each side of ``axis``.

        Bug fix: the original used ``elif`` between the two axis branches, so
        ``axis="both"`` expanded only the x axis. Both axes are now handled
        independently. Coordinates are truncated to int as before.
        """
        assert axis in ["both", "x", "y"], "`axis` must be one of 'both', 'x', or 'y'"
        x1, y1, x2, y2 = self.x1, self.y1, self.x2, self.y2
        if axis in ["x", "both"]:
            width = self.width()
            x1 -= factor * width
            x2 += factor * width
        if axis in ["y", "both"]:
            height = self.height()
            y1 -= factor * height
            y2 += factor * height
        return BoundingBox(int(x1), int(y1), int(x2), int(y2))

    def shrunk(self, factor, axis="both"):
        """Inverse of `expanded`: shrink the box by `factor` per side."""
        return self.expanded(-factor, axis=axis)

    def shifted(self, x, y):
        """Return a copy of the box translated by (x, y)."""
        return BoundingBox(self.x1 + x, self.y1 + y, self.x2 + x, self.y2 + y)

    def __and__(self, other):
        # returns the intersection of the bounding boxes, or None if disjoint
        x1 = max(self.x1, other.x1)
        y1 = max(self.y1, other.y1)
        x2 = min(self.x2, other.x2)
        y2 = min(self.y2, other.y2)
        if (x2 <= x1) or (y2 <= y1):
            return None
        return BoundingBox(x1, y1, x2, y2)

    def __or__(self, other):
        # returns the union of the bounding boxes (smallest box containing both)
        x1 = min(self.x1, other.x1)
        y1 = min(self.y1, other.y1)
        x2 = max(self.x2, other.x2)
        y2 = max(self.y2, other.y2)
        return BoundingBox(x1, y1, x2, y2)

    def overlap(self, other, axis="both"):
        """
        Calculate the Intersection over Union (IoU) of two bounding boxes.

        With axis="x" or "y" only that dimension is compared (the other is
        collapsed to a unit interval).
        Adapted from: https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation
        """
        assert axis in ["both", "x", "y"], "`axis` must be one of 'both', 'x', or 'y'"
        if axis == "x":
            self = BoundingBox(self.x1, 0, self.x2, 1)
            other = BoundingBox(other.x1, 0, other.x2, 1)
        elif axis == "y":
            self = BoundingBox(0, self.y1, 1, self.y2)
            other = BoundingBox(0, other.y1, 1, other.y2)
        intersection = self & other
        if intersection is None:
            return 0.0
        intersection_area = intersection.area()
        # compute the intersection over union by taking the intersection area and dividing it
        # by the sum of the two areas minus the intersection area
        return intersection_area / float(self.area() + other.area() - intersection_area)

    def get_subimage(self, img):
        """Return a copy of the region of `img` (H, W, C array) inside the box."""
        return img[self.y1 : self.y2, self.x1 : self.x2, :].copy()

    def __contains__(self, item):
        # `item in self` iff item's IoU with its clipped-to-self region > 0.8
        intersection = self & item
        if intersection is None:
            return False
        return item.overlap(intersection) > 0.8

    def __str__(self):
        return "({}, {})/({}, {})".format(self.x1, self.y1, self.x2, self.y2)

    def __repr__(self):
        return "<BoundingBox: {}>".format(str(self))
class BoundingBoxSet(list):
    """A list of BoundingBoxes with fuzzy set semantics: two boxes count as
    "the same element" when their IoU exceeds ``overlap_threshold``."""

    def __init__(self, *args, overlap_threshold=0.4, **kwargs):
        self.overlap_threshold = overlap_threshold
        super().__init__(*args, **kwargs)

    def get_outer_box(self):
        """Return the single box that contains every box in the set."""
        return reduce(lambda a, b: a | b, self)

    def __and__(self, other):
        # intersection: for every sufficiently-overlapping pair, keep the
        # overlap region of the two boxes
        results = BoundingBoxSet(overlap_threshold=self.overlap_threshold)
        for box_a in self:
            for box_b in other:
                if box_a.overlap(box_b) > self.overlap_threshold:
                    results.append(box_a & box_b)
        return results

    def __or__(self, other):
        # union: every box from self, plus the boxes from other that have no
        # close match in self.
        # Bug fix: this method used to be a verbatim copy of __and__ and
        # returned the intersection, contradicting its documented purpose.
        results = BoundingBoxSet(self, overlap_threshold=self.overlap_threshold)
        for box_b in other:
            matched = any(
                box_b.overlap(box_a) > self.overlap_threshold for box_a in self
            )
            if not matched:
                results.append(box_b)
        return results

    def __add__(self, other):
        return self | other

    def __sub__(self, other):
        # difference: the boxes in self with no close match in other
        results = BoundingBoxSet(overlap_threshold=self.overlap_threshold)
        for box_a in self:
            found = False
            for box_b in other:
                if box_a.overlap(box_b) > self.overlap_threshold:
                    found = True
                    break
            if not found:
                results.append(box_a)
        return results

    def __contains__(self, item):
        # membership requires mutual containment (see BoundingBox.__contains__)
        for box in self:
            if box in item and item in box:
                return True
        return False

    def deduplicate(self):
        """Return a copy with near-duplicate (mutually containing) boxes removed."""
        unique = BoundingBoxSet([], overlap_threshold=self.overlap_threshold)
        for box in self:
            if box not in unique:
                unique.append(box)
        return unique
class Word(object):
    """A single OCR token: its recognized text plus the page region it occupies.

    Attributes:
        text: string of characters
        bounding_box: BoundingBox object with coordinates for word
    """

    bounding_box = None
    text = ""

    def __init__(self, text, bounding_box):
        self.text = text
        self.bounding_box = bounding_box

    def __repr__(self):
        return '<Word: "{}" @ {}>'.format(self.text, str(self.bounding_box))
class Line(object):
    """
    A single visual line of text inside a column.

    Attributes:
        words: a list of Words
        fontweight: float representing how bold the line of text is
        column_box: BoundingBox of the enclosing column; required for
            indentation computations
    """
    words = None
    fontweight = None
    column_box = None
    def __init__(self, words, fontweight=None, column_box=None):
        self.fontweight = fontweight
        self.words = words or []
        self.column_box = column_box
    def add_word(self, word):
        """Append a Word to the end of the line."""
        self.words.append(word)
    def get_box(self):
        """Return the tightest BoundingBox around all words, or None if empty."""
        if not self.words:
            return None
        return BoundingBox(
            min(word.bounding_box.x1 for word in self.words),
            min(word.bounding_box.y1 for word in self.words),
            max(word.bounding_box.x2 for word in self.words),
            max(word.bounding_box.y2 for word in self.words),
        )
    def get_text(self):
        """Return the line's text with words joined by single spaces."""
        return " ".join([word.text for word in self.words])
    def get_indentation(self, word=None, units="col_width"):
        """Distance of `word` from the column's left edge.

        `word` may be a Word, an int index into self.words, or None for the
        first word. `units` selects the scale: fraction of the column width
        ("col_width"), multiples of the mean word height ("line_height"),
        or raw "pixels".
        """
        assert self.column_box, "line must have column_box set to calculate indentation"
        assert units in ["col_width", "pixels", "line_height"]
        if word is None:
            word = self.words[0]
        elif isinstance(word, int):
            word = self.words[word]
        indent = word.bounding_box.x1 - self.column_box.x1
        if units == "col_width":
            return indent / self.column_box.width()
        elif units == "line_height":
            # mean word height approximates the line height
            return indent / np.mean([word.bounding_box.height() for word in self.words])
        elif units == "pixels":
            return indent
    def __repr__(self):
        return "<Line: {} @ {}>".format(
            [word.text for word in self.words], str(self.get_box())
        )
    def extract_bullet_by_space(self):
        """Split off a leading bullet/label detected via an unusually wide gap.

        Words up to the first inter-word gap wider than the average character
        width times BULLET_THRESHOLD (from config) are removed from the line
        and returned as one combined Word. Returns None (implicitly) when no
        sufficiently wide gap exists; mutates self.words on success.
        """
        # Get average character width and use that to detect wider spaces
        character_sizes = []
        for word in self.words:
            width = word.bounding_box.x2 - word.bounding_box.x1
            char_size = width / len(word.text)
            # weight each word's char size by its number of characters
            character_sizes.extend([char_size for _ in range(len(word.text))])
        threshold = np.mean(character_sizes) * BULLET_THRESHOLD
        for index, word in enumerate(self.words[:-1]):
            space = self.words[index + 1].bounding_box.x1 - word.bounding_box.x2
            if space > threshold:
                # everything before the gap is the bullet; keep the rest
                bullet_words = self.words[: index + 1]
                self.words = self.words[index + 1 :]
                return Word(
                    " ".join([w.text for w in bullet_words]),
                    BoundingBox(
                        min(word.bounding_box.x1 for word in bullet_words),
                        min(word.bounding_box.y1 for word in bullet_words),
                        max(word.bounding_box.x2 for word in bullet_words),
                        max(word.bounding_box.y2 for word in bullet_words),
                    ),
                )
    def extract_bullet_by_pattern(
        self, bullet_patterns=[r"•", r"-", r"\d+\.\d+\.\d+", r"[a-zA-Z0-9]{1,3}\)"]
    ):
        """Split off a leading bullet whose accumulated prefix text matches
        one of `bullet_patterns` (e.g. "•", "-", "1.2.3", "a)").

        Mutates self.words and returns the bullet as a Word, or None
        (implicitly) when no prefix matches.
        """
        text = ""
        for index, word in enumerate(self.words):
            text += word.text.strip()
            for pattern in bullet_patterns:
                if re.match(pattern, text):
                    bullet_words = self.words[: index + 1]
                    self.words = self.words[index + 1 :]
                    return Word(
                        text.replace(" ", ""),
                        BoundingBoxSet(
                            word.bounding_box for word in bullet_words
                        ).get_outer_box(),
                    )
class Item(object):
    """A logical block of text: one or more Lines, optionally led by a bullet.

    Attributes:
        lines: list of Line objects making up the item
        bullet: optional Word holding the bullet/label text
        tabs: unused placeholder (kept for compatibility)
    """

    lines = None
    bullet = None
    tabs = None

    def __init__(self, lines, bullet=None):
        self.lines = lines or []
        self.bullet = bullet

    def set_bullet(self, bullet):
        self.bullet = bullet

    def add_lines(self, lines):
        self.lines.extend(lines)

    def get_box(self, include_bullet=False):
        """Return the outer BoundingBox around all lines (and the bullet)."""
        boxes = [line.get_box() for line in self.lines]
        if include_bullet and self.bullet:
            boxes.insert(0, self.bullet.bounding_box)
        return BoundingBoxSet(box for box in boxes if box).get_outer_box()

    def get_indentation(self, include_bullet=False, units="col_width"):
        """Indentation of the item's first word (or bullet) in the column."""
        assert len(self.lines) > 0, "can't get indentation for item with no lines"
        if self.bullet and include_bullet:
            anchor = self.bullet
        else:
            anchor = self.lines[0].words[0]
        return self.lines[0].get_indentation(word=anchor, units=units)

    def average_fontweight(self):
        """Mean fontweight across lines, or None if no line has one."""
        weights = [line.fontweight for line in self.lines if line.fontweight is not None]
        if not weights:
            return None
        return np.mean(weights)

    def get_text(self, separator=" "):
        """Concatenate the lines' text and tidy up OCR spacing artifacts."""
        joined = separator.join(line.get_text().strip() for line in self.lines)
        # remove stray spaces around punctuation and parentheses
        for old, new in (
            (" ,", ","), (" .", "."), (" :", ":"),
            (" ;", ";"), ("( ", "("), (" )", ")"),
        ):
            joined = joined.replace(old, new)
        joined = joined.lstrip(".").strip()
        # re-glue section numbers such as "1. 2. 3" -> "1.2.3"
        for chunk in re.findall(r"\d+\. \d+\. \d+", joined):
            joined = joined.replace(chunk, chunk.replace(" ", ""))
        return joined

    def __str__(self):
        text = "'{}'".format(self.get_text(separator=" "))
        if self.bullet:
            text = "[{}] {}".format(self.bullet.text, text)
        return text

    def __repr__(self):
        return "<Item: {} @ {}>".format(str(self), self.get_box())
class ItemList(list):
    """A list of Items with helpers for merging OCR lines into logical items."""
    def get_box(self, include_bullet=False):
        """Return the outer BoundingBox around all items in the list."""
        items = BoundingBoxSet(
            [item.get_box(include_bullet=include_bullet) for item in self]
        )
        return items.get_outer_box()
    def add_item(self, item):
        # silently drop items with no lines
        if item.lines:
            self.append(item)
    def combine_lines(
        self,
        header_text=[
            "Content",
            "Specific Objectives",
            "Suggested Resources",
            "Suggested Further Assessment",
            "Notes",
        ],
        factor_in_fontweight=False,
    ):
        """Merge consecutive items into multi-line items, then split on headers.

        A new item starts whenever a line carries a bullet or (when
        factor_in_fontweight is set) the boldness of the text changes.
        Afterwards, any item containing one of the known `header_text`
        strings is split so that each header becomes its own item.
        Returns a new ItemList; self is not modified.
        """
        new_items = ItemList([])
        current_item = Item([])
        prev_bold = False
        for item in self:
            # check whether there's a bullet, or the boldness of the text changed
            bold = (
                (item.lines[0].fontweight > 1)
                if factor_in_fontweight and item.lines[0].fontweight
                else False
            )
            has_bullet = bool(item.bullet)
            boldness_changed = bold != prev_bold
            if has_bullet or boldness_changed:
                # close off the current item and start a new one
                new_items.add_item(current_item)
                current_item = Item([], bullet=item.bullet)
            current_item.add_lines(item.lines)
            prev_bold = bold
        new_items.add_item(current_item)
        # keep only items with non-empty text
        itemlist = ItemList([item for item in new_items if item.get_text().strip()])
        # split on header text (iterate in reverse so in-place slice
        # replacement does not shift the indices still to be visited)
        for i, item in list(reversed(list(enumerate(itemlist)))):
            if len(item.lines) == 1:
                continue
            found = False
            for header in header_text:
                if header in item.get_text():
                    # collect split points: before and after each run of
                    # lines whose concatenated text equals the header
                    section_starts = set([0, len(item.lines)])
                    found = ""
                    started_at = None
                    for j, line in enumerate(item.lines):
                        # the remainder of the header still to be matched
                        seeking = header[len(found) :].strip()
                        if seeking.startswith(line.get_text()):
                            if started_at is None:  # found start
                                started_at = j
                            found += " " + line.get_text()
                            if found.strip() == header:  # found end
                                section_starts.add(started_at)
                                section_starts.add(j + 1)
                                found = ""
                                started_at = None
                    section_starts = list(sorted(section_starts))
                    new_items = []
                    for ind in range(len(section_starts) - 1):
                        lines = item.lines[
                            section_starts[ind] : section_starts[ind + 1]
                        ]
                        if ind == 0:
                            # the first chunk inherits the original bullet
                            new_items.append(Item(lines, bullet=item.bullet))
                        else:
                            new_items.append(Item(lines))
                    # replace the original item with its split parts in place
                    # NOTE(review): the header loop keeps scanning further
                    # headers against the stale pre-split `item` — confirm
                    # this is intended for items containing multiple headers
                    itemlist[i : i + 1] = new_items
        return itemlist
class PageImage(np.ndarray):
_annotated_array = None
box = None
def __new__(
subtype,
source,
dtype=float,
buffer=None,
offset=0,
strides=None,
order=None,
box=None,
):
if isinstance(source, str):
source = cv2.imread(source)
elif isinstance(source, Image.Image):
source = np.array(source)
shape = source.shape
dtype = source.dtype
buffer = source.copy().data
obj = super(PageImage, subtype).__new__(
subtype, shape, dtype, buffer, offset, strides, order
)
obj.box = box
return obj
def __array_finalize__(self, obj):
if obj is None:
return
# Note that it is here, rather than in the __new__ method, that we set default values,
# because this method sees all creation of default objects - with the __new__ constructor,
# but also with arr.view(PageImage).
self.box = getattr(obj, "box", None)
def _initialize_annotated_array(self):
if not self._annotated_array is not None:
self._annotated_array = self.copy()
def _repr_png_(self):
self._initialize_annotated_array()
return self.as_pil_image(annotated=True)._repr_png_()
def clear(self):
self._initialize_annotated_array()
self._annotated_array.data = self.copy().data
def draw_box(self, box, color=(255, 0, 0), width=2):
self._initialize_annotated_array()
if isinstance(color[0], float):
color = tuple(map(int, | np.array(color[:3]) | numpy.array |
import numpy
from srxraylib.plot.gol import plot, plot_image, set_qt
set_qt()
def W(x1, x2):
    """Gaussian Schell-model cross spectral density W(x1, x2).

    NOTE(review): relies on the module-level globals ``sigma_x`` (beam size)
    and ``sigma_xi`` (coherence length) being defined before the call —
    they are set in the ``__main__`` block.
    """
    delta = x2 - x1
    coherence = numpy.exp(-delta ** 2 / 2 / sigma_xi ** 2)
    intensity_1 = numpy.exp(-x1 ** 2 / 2 / sigma_x ** 2)
    intensity_2 = numpy.exp(-x2 ** 2 / 2 / sigma_x ** 2)
    return coherence * numpy.sqrt(intensity_1) * numpy.sqrt(intensity_2)
def get_coherent_fraction_exact(beta):
    """Coherent fraction CF = 1 - q of a Gaussian Schell-model beam.

    ``beta`` is the ratio of coherence length to beam size; q is the
    occupation of the lowest mode: q = 1 / (1 + beta^2/2 + beta*sqrt((beta/2)^2 + 1)).
    """
    denominator = 1 + 0.5 * beta ** 2 + beta * numpy.sqrt((beta / 2) ** 2 + 1)
    return 1 - 1.0 / denominator
if __name__ == "__main__":
beta = 0.0922395 #1.151 #0.02 #
sigma_x = 3.03783e-05
sigma_xi = beta * sigma_x
x1 = numpy.linspace(-0.00012, 0.00012, 400)
# plot(x1, numpy.exp(-x1**2/(2*sigma_x**2)))
X1 = numpy.outer(x1, numpy.ones_like(x1))
X2 = numpy.outer(numpy.ones_like(x1), x1 )
cross_spectral_density = W(X1,X2)
indices = numpy.arange(x1.size)
plot(x1, W(x1,x1),
x1, cross_spectral_density[indices, indices],
x1, numpy.exp(-x1**2/2/sigma_x**2),
title="Spectral density", legend=["SD function", "SD array", "Gaussian with sigma_x"])
plot_image(cross_spectral_density, x1, x1)
#
# diagonalize the CSD
#
w, v = | numpy.linalg.eig(cross_spectral_density) | numpy.linalg.eig |
###############################################################################################################################
# This script implements an adaptation of the optimization method proposed by Paria et al.: https://arxiv.org/abs/1805.12168. #
# Our adaptations to the original are: #
# A different tchebyshev scalarization function #
# A RF model instead of GP #
# A multi-start local search to optimize the acquisition functions instead of the DIRECT algorithm #
# a contrained optimization implementation as proposed by Gardner et al. http://proceedings.mlr.press/v32/gardner14.pdf #
###############################################################################################################################
import sys
import os
import space
import random
import models
from sklearn.ensemble import ExtraTreesRegressor
import operator
import numpy as np
import csv
import json
import copy
import datetime
from jsonschema import Draft4Validator, validators, exceptions
from utility_functions import *
from collections import defaultdict
from scipy import stats
from local_search import get_min_configurations, get_neighbors, local_search
def run_acquisition_function(acquisition_function,
                             configurations,
                             objective_weights,
                             regression_models,
                             param_space,
                             scalarization_method,
                             objective_limits,
                             iteration_number,
                             data_array,
                             model_type,
                             classification_model=None,
                             number_of_cpus=0):
    """
    Score a list of configurations with the requested acquisition function.
    :param acquisition_function: which acquisition function to apply ("TS", "UCB" or "EI").
    :param configurations: a list of dictionaries containing the configurations.
    :param objective_weights: a list containing the weights for each objective.
    :param regression_models: the surrogate models used to evaluate points.
    :param param_space: a space object containing the search space.
    :param scalarization_method: a string indicating which scalarization method to use.
    :param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
    :param iteration_number: an integer for the current iteration number, used to compute the beta on ucb.
    :param data_array: dictionary with the points explored so far (used by EI).
    :param model_type: the type of surrogate model being used.
    :param classification_model: the surrogate model used to evaluate feasibility constraints.
    :param number_of_cpus: an integer for the number of cpus to be used in parallel.
    :return: a tuple (scalarized values, feasibility indicators) for the configurations.
    """
    flattened = concatenate_list_of_dictionaries(configurations)
    points = data_dictionary_to_tuple(flattened, param_space.get_input_parameters())
    if acquisition_function == "TS":
        scalarized_values, _ = thompson_sampling(
            points,
            objective_weights,
            regression_models,
            param_space,
            scalarization_method,
            objective_limits,
            model_type,
            classification_model,
            number_of_cpus)
    elif acquisition_function == "UCB":
        scalarized_values, _ = ucb(
            points,
            objective_weights,
            regression_models,
            param_space,
            scalarization_method,
            objective_limits,
            iteration_number,
            model_type,
            classification_model,
            number_of_cpus)
    elif acquisition_function == "EI":
        scalarized_values, _ = EI(
            points,
            data_array,
            objective_weights,
            regression_models,
            param_space,
            scalarization_method,
            objective_limits,
            iteration_number,
            model_type,
            classification_model,
            number_of_cpus)
    else:
        print("Unrecognized acquisition function:", acquisition_function)
        raise SystemExit
    scalarized_values = list(scalarized_values)
    # we want the local search to consider all points feasible; feasibility
    # was already accounted for in the scalarized values above
    feasibility_indicators = [1] * len(scalarized_values)
    return scalarized_values, feasibility_indicators
def ucb(bufferx,
objective_weights,
regression_models,
param_space,
scalarization_method,
objective_limits,
iteration_number,
model_type,
classification_model=None,
number_of_cpus=0):
"""
Multi-objective ucb acquisition function as detailed in https://arxiv.org/abs/1805.12168.
The mean and variance of the predictions are computed as defined by Hutter et al.: https://arxiv.org/pdf/1211.0906.pdf
:param bufferx: a list of tuples containing the points to predict and scalarize.
:param objective_weights: a list containing the weights for each objective.
:param regression_models: the surrogate models used to evaluate points.
:param param_space: a space object containing the search space.
:param scalarization_method: a string indicating which scalarization method to use.
:param evaluations_per_optimization_iteration: how many configurations to return.
:param objective_limits: a dictionary with estimated minimum and maximum values for each objective.
:param iteration_number: an integer for the current iteration number, used to compute the beta
:param classification_model: the surrogate model used to evaluate feasibility constraints
:param number_of_cpus: an integer for the number of cpus to be used in parallel.
:return: a list of scalarized values for each point in bufferx.
"""
beta = np.sqrt(0.125*np.log(2*iteration_number + 1))
augmentation_constant = 0.05
prediction_means = {}
prediction_variances = {}
number_of_predictions = len(bufferx)
tmp_objective_limits = copy.deepcopy(objective_limits)
prediction_means, prediction_variances = models.compute_model_mean_and_uncertainty(bufferx, regression_models, model_type, param_space, var=True)
if classification_model != None:
classification_prediction_results = models.model_probabilities(bufferx, classification_model, param_space)
feasible_parameter = param_space.get_feasible_parameter()[0]
true_value_index = classification_model[feasible_parameter].classes_.tolist().index(True)
feasibility_indicator = classification_prediction_results[feasible_parameter][:,true_value_index]
else:
feasibility_indicator = [1]*number_of_predictions # if no classification model is used, then all points are feasible
# Compute scalarization
if (scalarization_method == "linear"):
scalarized_predictions = np.zeros(number_of_predictions)
beta_factor = 0
for objective in regression_models:
scalarized_predictions += objective_weights[objective]*prediction_means[objective]
beta_factor += objective_weights[objective]*prediction_variances[objective]
scalarized_predictions -= beta*np.sqrt(beta_factor)
scalarized_predictions = scalarized_predictions*feasibility_indicator
# The paper does not propose this, I applied their methodology to the original tchebyshev to get the approach below
# Important: since this was not proposed in the paper, their proofs and bounds for the modified_tchebyshev may not be valid here.
elif(scalarization_method == "tchebyshev"):
scalarized_predictions = | np.zeros(number_of_predictions) | numpy.zeros |
import os
import shutil
import six
import pytest
import numpy as np
from pyshac.config import hyperparameters as hp, data
# compatible with both Python 2 and 3
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
def deterministic_test(func):
    """Decorator: run `func` under a fixed NumPy seed (0), then re-randomize."""
    @six.wraps(func)
    def inner(*args, **kwargs):
        np.random.seed(0)
        result = func(*args, **kwargs)
        np.random.seed(None)
        return result
    return inner
# wrapper function to clean up saved files
def cleanup_dirs(func):
    """Decorator: delete the 'shac/' and 'custom/' output dirs after `func` runs."""
    @six.wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        # remove temporary files produced by the wrapped test
        for dirname in ('shac/', 'custom/'):
            if os.path.exists(dirname):
                shutil.rmtree(dirname)
        return result
    return inner
def get_hyperparameter_list():
    """Fixture: four mixed discrete/continuous hyperparameters."""
    return [
        hp.DiscreteHyperParameter('h1', [0, 1, 2]),
        hp.DiscreteHyperParameter('h2', [3, 4, 5, 6]),
        hp.UniformContinuousHyperParameter('h3', 7, 10),
        hp.DiscreteHyperParameter('h4', ['v1', 'v2']),
    ]
def get_multi_parameter_list():
    """Fixture: four multi-sample hyperparameters (2+3+5+4 = 14 samples total)."""
    return [
        hp.MultiDiscreteHyperParameter('h1', [0, 1, 2], sample_count=2),
        hp.MultiDiscreteHyperParameter('h2', [3, 4, 5, 6], sample_count=3),
        hp.MultiUniformContinuousHyperParameter('h3', 7, 10, sample_count=5),
        hp.MultiDiscreteHyperParameter('h4', ['v1', 'v2'], sample_count=4),
    ]
@cleanup_dirs
def test_dataset_param_list():
    """Dataset normalizes params (plain list or HyperParameterList) to a HyperParameterList."""
    params = get_hyperparameter_list()
    dataset = data.Dataset(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(hp.HyperParameterList(params))
    assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_multi_param_list():
    """Dataset normalizes multi-parameters the same way as single ones."""
    params = get_multi_parameter_list()
    dataset = data.Dataset(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(params)
    assert isinstance(dataset._parameters, hp.HyperParameterList)
    dataset.set_parameters(hp.HyperParameterList(params))
    assert isinstance(dataset._parameters, hp.HyperParameterList)
@cleanup_dirs
def test_dataset_basedir():
    """The default base directory is created on construction."""
    h = hp.HyperParameterList(get_hyperparameter_list())
    dataset = data.Dataset(h)
    assert os.path.exists(dataset.basedir)
@cleanup_dirs
def test_dataset_basedir_custom():
    """A custom basedir is created and the default 'shac' dir is not."""
    h = hp.HyperParameterList(get_hyperparameter_list())
    dataset = data.Dataset(h, basedir='custom')
    assert os.path.exists(dataset.basedir)
    assert not os.path.exists('shac')
@cleanup_dirs
def test_dataset_add_sample():
    """Samples added one by one end up in the encoded dataset arrays."""
    h = hp.HyperParameterList(get_hyperparameter_list())
    dataset = data.Dataset(h)
    for _ in range(5):
        dataset.add_sample(h.sample(), np.random.uniform())
    x, y = dataset.get_dataset()
    assert len(dataset) == 5
    assert x.shape == (5, 4)
    assert y.shape == (5,)
@cleanup_dirs
def test_dataset_multi_add_sample():
    """Multi-parameters expand each sample to 14 encoded columns."""
    h = hp.HyperParameterList(get_multi_parameter_list())
    dataset = data.Dataset(h)
    for _ in range(5):
        dataset.add_sample(h.sample(), np.random.uniform())
    x, y = dataset.get_dataset()
    assert len(dataset) == 5
    assert x.shape == (5, 14)
    assert y.shape == (5,)
@cleanup_dirs
def test_set_dataset():
    """set_dataset accepts numpy arrays and python sequences; rejects None."""
    h = hp.HyperParameterList(get_hyperparameter_list())
    dataset = data.Dataset(h)
    # numpy arrays
    xs, ys = [], []
    for _ in range(5):
        xs.append(np.array(h.sample()))
        ys.append(np.random.uniform())
    dataset.set_dataset(np.array(xs), np.array(ys))
    assert len(dataset) == 5
    dataset.clear()
    # plain python sequences
    xs, ys = [], []
    for _ in range(5):
        xs.append(h.sample())
        ys.append(float(np.random.uniform()))
    dataset.set_dataset(tuple(xs), tuple(ys))
    assert len(dataset) == 5
    # None data is rejected
    with pytest.raises(TypeError):
        dataset.set_dataset(None, int(6))
    with pytest.raises(TypeError):
        dataset.set_dataset([1, 2, 3], None)
    with pytest.raises(TypeError):
        dataset.set_dataset(None, None)
@cleanup_dirs
def test_multi_set_dataset():
    """set_dataset works with multi-parameters for arrays and sequences; rejects None."""
    h = hp.HyperParameterList(get_multi_parameter_list())
    dataset = data.Dataset(h)
    # numpy arrays
    xs, ys = [], []
    for _ in range(5):
        xs.append(np.array(h.sample()))
        ys.append(np.random.uniform())
    dataset.set_dataset(np.array(xs), np.array(ys))
    assert len(dataset) == 5
    dataset.clear()
    # plain python sequences
    xs, ys = [], []
    for _ in range(5):
        xs.append(h.sample())
        ys.append(float(np.random.uniform()))
    dataset.set_dataset(tuple(xs), tuple(ys))
    assert len(dataset) == 5
    # None data is rejected
    with pytest.raises(TypeError):
        dataset.set_dataset(None, int(6))
    with pytest.raises(TypeError):
        dataset.set_dataset([1, 2, 3], None)
    with pytest.raises(TypeError):
        dataset.set_dataset(None, None)
@cleanup_dirs
@deterministic_test
def test_dataset_get_best_parameters():
    """get_best_parameters returns the sample with min/max objective (None when empty)."""
    h = hp.HyperParameterList(get_hyperparameter_list())
    dataset = data.Dataset(h)
    with pytest.raises(ValueError):
        dataset.get_best_parameters(None)
    # empty dataset has no best sample
    assert dataset.get_best_parameters() is None
    samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
    for params, value in samples:
        dataset.add_sample(params, value)
    objectives = [value for _, value in samples]
    best_max = samples[int(np.argmax(objectives))][0]
    best_min = samples[int(np.argmin(objectives))][0]
    assert list(dataset.get_best_parameters(objective='max').values()) == best_max
    assert list(dataset.get_best_parameters(objective='min').values()) == best_min
@cleanup_dirs
@deterministic_test
def test_dataset_multi_get_best_parameters():
params = get_multi_parameter_list()
h = hp.HyperParameterList(params)
dataset = data.Dataset(h)
with pytest.raises(ValueError):
dataset.get_best_parameters(None)
# Test with empty dataset
assert dataset.get_best_parameters() is None
samples = [(h.sample(), np.random.uniform()) for _ in range(5)]
for sample in samples:
dataset.add_sample(*sample)
objective_values = [v for h, v in samples]
min_index = np.argmin(objective_values)
max_index = | np.argmax(objective_values) | numpy.argmax |
#%%
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches
# Hack to allow loading the Core package
if __name__ == "__main__" and __package__ is None:
from sys import path, argv
from os.path import dirname, abspath, join
path.insert(0, abspath(join(dirname(argv[0]), "..")))
path.insert(0, abspath(join(dirname(argv[0]), "..", 'Core')))
del path, argv, dirname, abspath, join
import FreqTrans
#%%
pCrit = -1+0j
T = np.array([-0.5 - 0.5j])
TUnc = np.array([0.5 + 0.25j])
rCritNom, rCritUnc, rCrit, pCont = FreqTrans.DistCritEllipse(T, TUnc, pCrit = pCrit)
#rCritNomCirc, rCritUncCirc, rCritCirc = FreqTrans.DistCritCirc(T, TUnc, pCrit = pCrit, typeNorm = 'RMS')
rCirc = np.sqrt(0.5) * np.abs(TUnc) # RMS
#rCirc = np.max([TUnc.real, TUnc.imag]) # Max
#rCirc = np.mean([TUnc.real, TUnc.imag]) # Mean
#rCirc = np.abs(TUnc) # RSS
TUncCirc = | np.array([rCirc+1j*rCirc]) | numpy.array |
"""
Augmenters that somehow change the size of the images.
List of augmenters:
* :class:`Resize`
* :class:`CropAndPad`
* :class:`Crop`
* :class:`Pad`
* :class:`PadToFixedSize`
* :class:`CenterPadToFixedSize`
* :class:`CropToFixedSize`
* :class:`CenterCropToFixedSize`
* :class:`CropToMultiplesOf`
* :class:`CenterCropToMultiplesOf`
* :class:`PadToMultiplesOf`
* :class:`CenterPadToMultiplesOf`
* :class:`CropToPowersOf`
* :class:`CenterCropToPowersOf`
* :class:`PadToPowersOf`
* :class:`CenterPadToPowersOf`
* :class:`CropToAspectRatio`
* :class:`CenterCropToAspectRatio`
* :class:`PadToAspectRatio`
* :class:`CenterPadToAspectRatio`
* :class:`CropToSquare`
* :class:`CenterCropToSquare`
* :class:`PadToSquare`
* :class:`CenterPadToSquare`
* :class:`KeepSizeByResize`
"""
from __future__ import print_function, division, absolute_import
import re
import functools
import numpy as np
import cv2
import imgaug as ia
from imgaug.imgaug import _normalize_cv2_input_arr_
from . import meta
from .. import parameters as iap
def _crop_trbl_to_xyxy(shape, top, right, bottom, left, prevent_zero_size=True):
if prevent_zero_size:
top, right, bottom, left = _crop_prevent_zero_size(
shape[0], shape[1], top, right, bottom, left)
height, width = shape[0:2]
x1 = left
x2 = width - right
y1 = top
y2 = height - bottom
# these steps prevent negative sizes
# if x2==x1 or y2==y1 then the output arr has size 0 for the respective axis
# note that if height/width of arr is zero, then y2==y1 or x2==x1, which
# is still valid, even if height/width is zero and results in a zero-sized
# axis
x2 = max(x2, x1)
y2 = max(y2, y1)
return x1, y1, x2, y2
def _crop_arr_(arr, top, right, bottom, left, prevent_zero_size=True):
    # Crop a (H, W, ...) array by the given per-side amounts and return the
    # resulting slice (a view, not a copy).
    x1, y1, x2, y2 = _crop_trbl_to_xyxy(arr.shape, top, right, bottom, left,
                                        prevent_zero_size=prevent_zero_size)
    return arr[y1:y2, x1:x2, ...]
def _crop_and_pad_arr(arr, croppings, paddings, pad_mode="constant",
                      pad_cval=0, keep_size=False):
    # Crop `arr` by (top, right, bottom, left) amounts, then pad it by the
    # corresponding per-side amounts; optionally resize back to the input's
    # original spatial size.
    height, width = arr.shape[0:2]
    image_cr = _crop_arr_(arr, *croppings)
    image_cr_pa = pad(
        image_cr,
        top=paddings[0], right=paddings[1],
        bottom=paddings[2], left=paddings[3],
        mode=pad_mode, cval=pad_cval)
    if keep_size:
        # restore the input's (height, width) after crop+pad
        image_cr_pa = ia.imresize_single_image(image_cr_pa, (height, width))
    return image_cr_pa
def _crop_and_pad_heatmap_(heatmap, croppings_img, paddings_img,
                           pad_mode="constant", pad_cval=0.0, keep_size=False):
    # Thin wrapper: crop+pad a HeatmapsOnImage via the shared hms/segmaps impl.
    return _crop_and_pad_hms_or_segmaps_(heatmap, croppings_img,
                                         paddings_img, pad_mode, pad_cval,
                                         keep_size)
def _crop_and_pad_segmap_(segmap, croppings_img, paddings_img,
                          pad_mode="constant", pad_cval=0, keep_size=False):
    # Thin wrapper: crop+pad a SegmentationMapsOnImage via the shared impl.
    return _crop_and_pad_hms_or_segmaps_(segmap, croppings_img,
                                         paddings_img, pad_mode, pad_cval,
                                         keep_size)
def _crop_and_pad_hms_or_segmaps_(augmentable, croppings_img,
                                  paddings_img, pad_mode="constant",
                                  pad_cval=None, keep_size=False):
    # Shared in-place implementation for heatmaps and segmentation maps.
    # Crop/pad amounts are given in *image* coordinates and are projected
    # onto the (possibly differently-sized) map array.
    if isinstance(augmentable, ia.HeatmapsOnImage):
        arr_attr_name = "arr_0to1"
        pad_cval = pad_cval if pad_cval is not None else 0.0
    else:
        assert isinstance(augmentable, ia.SegmentationMapsOnImage), (
            "Expected HeatmapsOnImage or SegmentationMapsOnImage, got %s." % (
                type(augmentable)))
        arr_attr_name = "arr"
        pad_cval = pad_cval if pad_cval is not None else 0
    arr = getattr(augmentable, arr_attr_name)
    arr_shape_orig = arr.shape
    augm_shape = augmentable.shape
    # project image-space amounts to the map's array space
    croppings_proj = _project_size_changes(croppings_img, augm_shape, arr.shape)
    paddings_proj = _project_size_changes(paddings_img, augm_shape, arr.shape)
    # ensure the projected crop cannot produce a zero-sized axis
    croppings_proj = _crop_prevent_zero_size(arr.shape[0], arr.shape[1],
                                             *croppings_proj)
    arr_cr = _crop_arr_(arr,
                        croppings_proj[0], croppings_proj[1],
                        croppings_proj[2], croppings_proj[3])
    arr_cr_pa = pad(
        arr_cr,
        top=paddings_proj[0], right=paddings_proj[1],
        bottom=paddings_proj[2], left=paddings_proj[3],
        mode=pad_mode,
        cval=pad_cval)
    setattr(augmentable, arr_attr_name, arr_cr_pa)
    if keep_size:
        # resize the map back to its original array size
        augmentable = augmentable.resize(arr_shape_orig[0:2])
    else:
        # update the associated image shape to reflect the crop+pad
        augmentable.shape = _compute_shape_after_crop_and_pad(
            augmentable.shape, croppings_img, paddings_img)
    return augmentable
def _crop_and_pad_kpsoi_(kpsoi, croppings_img, paddings_img, keep_size):
    # Crop+pad a KeypointsOnImage in-place by shifting the keypoints and
    # updating the associated image shape.
    # using the trbl function instead of croppings_img has the advantage
    # of incorporating prevent_zero_size, dealing with zero-sized input image
    # axis and dealing the negative crop amounts
    x1, y1, _x2, _y2 = _crop_trbl_to_xyxy(kpsoi.shape, *croppings_img)
    crop_left = x1
    crop_top = y1
    shape_orig = kpsoi.shape
    # cropping moves keypoints towards the origin, padding (left/top) away
    shifted = kpsoi.shift_(
        x=-crop_left+paddings_img[3],
        y=-crop_top+paddings_img[0])
    shifted.shape = _compute_shape_after_crop_and_pad(
        shape_orig, croppings_img, paddings_img)
    if keep_size:
        # project keypoints back onto the original image size
        shifted = shifted.on_(shape_orig)
    return shifted
def _compute_shape_after_crop_and_pad(old_shape, croppings, paddings):
    """Return the shape an array of `old_shape` has after crop then pad."""
    x1, y1, x2, y2 = _crop_trbl_to_xyxy(old_shape, *croppings)
    height = (y2 - y1) + paddings[0] + paddings[2]
    width = (x2 - x1) + paddings[1] + paddings[3]
    return (height, width) + tuple(old_shape[2:])
def _crop_prevent_zero_size(height, width, crop_top, crop_right, crop_bottom,
crop_left):
remaining_height = height - (crop_top + crop_bottom)
remaining_width = width - (crop_left + crop_right)
if remaining_height < 1:
regain = abs(remaining_height) + 1
regain_top = regain // 2
regain_bottom = regain // 2
if regain_top + regain_bottom < regain:
regain_top += 1
if regain_top > crop_top:
diff = regain_top - crop_top
regain_top = crop_top
regain_bottom += diff
elif regain_bottom > crop_bottom:
diff = regain_bottom - crop_bottom
regain_bottom = crop_bottom
regain_top += diff
crop_top = crop_top - regain_top
crop_bottom = crop_bottom - regain_bottom
if remaining_width < 1:
regain = abs(remaining_width) + 1
regain_right = regain // 2
regain_left = regain // 2
if regain_right + regain_left < regain:
regain_right += 1
if regain_right > crop_right:
diff = regain_right - crop_right
regain_right = crop_right
regain_left += diff
elif regain_left > crop_left:
diff = regain_left - crop_left
regain_left = crop_left
regain_right += diff
crop_right = crop_right - regain_right
crop_left = crop_left - regain_left
return (
max(crop_top, 0), max(crop_right, 0), max(crop_bottom, 0),
max(crop_left, 0))
def _project_size_changes(trbl, from_shape, to_shape):
    """Project crop/pad amounts from one image shape onto another.

    ``trbl`` is a ``(top, right, bottom, left)`` tuple measured on an image
    of ``from_shape``; the returned tuple expresses the same relative size
    change on an image of ``to_shape``.
    """
    if from_shape[0:2] == to_shape[0:2]:
        return trbl

    height_from, width_from = from_shape[0], from_shape[1]
    height_to, width_to = to_shape[0], to_shape[1]
    top, right, bottom, left = trbl

    # Adding/subtracting 1e-4 here helps for the case where a heatmap/segmap
    # is exactly half the size of an image and the size change on an axis is
    # an odd value. Then the projected value would end up being <something>.5
    # and the rounding would always round up to the next integer. If both
    # sides then have the same change, they are both rounded up, resulting
    # in more change than expected.
    # E.g. image height is 8, map height is 4, change is 3 at the top and 3 at
    # the bottom. The changes are projected to 4*(3/8) = 1.5 and both rounded
    # up to 2.0. Hence, the maps are changed by 4 (100% of the map height,
    # vs. 6 for images, which is 75% of the image height).
    return (
        _int_r(height_to * (top / height_from) - 1e-4),
        _int_r(width_to * (right / width_from) + 1e-4),
        _int_r(height_to * (bottom / height_from) + 1e-4),
        _int_r(width_to * (left / width_from) - 1e-4),
    )
def _int_r(value):
return int(np.round(value))
# TODO somehow integrate this with pad()
def _handle_pad_mode_param(pad_mode):
    """Normalize a pad-mode argument to a StochasticParameter."""
    valid_modes = {
        "constant", "edge", "linear_ramp", "maximum", "mean", "median",
        "minimum", "reflect", "symmetric", "wrap"}
    if pad_mode == ia.ALL:
        return iap.Choice(list(valid_modes))
    if ia.is_string(pad_mode):
        assert pad_mode in valid_modes, (
            "Value '%s' is not a valid pad mode. Valid pad modes are: %s." % (
                pad_mode, ", ".join(valid_modes)))
        return iap.Deterministic(pad_mode)
    if isinstance(pad_mode, list):
        invalid = [v for v in pad_mode if v not in valid_modes]
        assert not invalid, (
            "At least one in list %s is not a valid pad mode. Valid pad "
            "modes are: %s." % (str(pad_mode), ", ".join(valid_modes)))
        return iap.Choice(pad_mode)
    if isinstance(pad_mode, iap.StochasticParameter):
        return pad_mode
    raise Exception(
        "Expected pad_mode to be ia.ALL or string or list of strings or "
        "StochasticParameter, got %s." % (type(pad_mode),))
def _handle_position_parameter(position):
    """Normalize a position argument to a ``(x, y)`` parameter pair.

    Accepts the strings ``uniform``, ``normal``, ``center``, a string of the
    form ``<x>-<y>`` (e.g. ``left-top``), a single StochasticParameter or a
    tuple of two numbers/StochasticParameters.
    """
    if position == "uniform":
        return iap.Uniform(0.0, 1.0), iap.Uniform(0.0, 1.0)
    if position == "normal":
        x_param, y_param = [
            iap.Clip(iap.Normal(loc=0.5, scale=0.35 / 2),
                     minval=0.0, maxval=1.0)
            for _ in range(2)]
        return (x_param, y_param)
    if position == "center":
        return iap.Deterministic(0.5), iap.Deterministic(0.5)
    is_coord_string = (
        ia.is_string(position)
        and re.match(r"^(left|center|right)-(top|center|bottom)$", position))
    if is_coord_string:
        coord_of = {"top": 0.0, "center": 0.5, "bottom": 1.0, "left": 0.0,
                    "right": 1.0}
        x_name, y_name = position.split("-")
        return (
            iap.Deterministic(coord_of[x_name]),
            iap.Deterministic(coord_of[y_name])
        )
    if isinstance(position, iap.StochasticParameter):
        return position
    if isinstance(position, tuple):
        assert len(position) == 2, (
            "Expected tuple with two entries as position parameter. "
            "Got %d entries with types %s.." % (
                len(position), str([type(item) for item in position])))
        for item in position:
            if ia.is_single_number(item) and (item < 0 or item > 1.0):
                raise Exception(
                    "Both position values must be within the value range "
                    "[0.0, 1.0]. Got type %s with value %.8f." % (
                        type(item), item,))
        # Wrap plain numbers so both entries are StochasticParameters.
        position = [iap.Deterministic(item)
                    if ia.is_single_number(item)
                    else item for item in position]
        only_sparams = all(isinstance(item, iap.StochasticParameter)
                           for item in position)
        assert only_sparams, (
            "Expected tuple with two entries that are both either "
            "StochasticParameter or float/int. Got types %s." % (
                str([type(item) for item in position])
            ))
        return tuple(position)
    raise Exception(
        "Expected one of the following as position parameter: string "
        "'uniform', string 'normal', string 'center', a string matching "
        "regex ^(left|center|right)-(top|center|bottom)$, a single "
        "StochasticParameter or a tuple of two entries, both being either "
        "StochasticParameter or floats or int. Got instead type %s with "
        "content '%s'." % (
            type(position),
            (str(position)
             if len(str(position)) < 20
             else str(position)[0:20] + "...")
        )
    )
# TODO this is the same as in imgaug.py, make DRY
# Added in 0.4.0.
def _assert_two_or_three_dims(shape):
if hasattr(shape, "shape"):
shape = shape.shape
assert len(shape) in [2, 3], (
"Expected image with two or three dimensions, but got %d dimensions "
"and shape %s." % (len(shape), shape))
def pad(arr, top=0, right=0, bottom=0, left=0, mode="constant", cval=0):
    """Pad an image-like array on its top/right/bottom/left side.

    This function is a wrapper around :func:`numpy.pad`. For a subset of
    dtypes/modes it dispatches to the faster ``cv2.copyMakeBorder``.

    Added in 0.4.0. (Previously named ``imgaug.imgaug.pad()``.)

    **Supported dtypes**:

    * ``uint8``: yes; fully tested (1)
    * ``uint16``: yes; fully tested (1)
    * ``uint32``: yes; fully tested (2) (3)
    * ``uint64``: yes; fully tested (2) (3)
    * ``int8``: yes; fully tested (1)
    * ``int16``: yes; fully tested (1)
    * ``int32``: yes; fully tested (1)
    * ``int64``: yes; fully tested (2) (3)
    * ``float16``: yes; fully tested (2) (3)
    * ``float32``: yes; fully tested (1)
    * ``float64``: yes; fully tested (1)
    * ``float128``: yes; fully tested (2) (3)
    * ``bool``: yes; tested (2) (3)

    - (1) Uses ``cv2`` if `mode` is one of: ``"constant"``, ``"edge"``,
          ``"reflect"``, ``"symmetric"``. Otherwise uses ``numpy``.
    - (2) Uses ``numpy``.
    - (3) Rejected by ``cv2``.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    top : int, optional
        Amount of pixels to add to the top side of the image.
        Must be ``0`` or greater.

    right : int, optional
        Amount of pixels to add to the right side of the image.
        Must be ``0`` or greater.

    bottom : int, optional
        Amount of pixels to add to the bottom side of the image.
        Must be ``0`` or greater.

    left : int, optional
        Amount of pixels to add to the left side of the image.
        Must be ``0`` or greater.

    mode : str, optional
        Padding mode to use. See :func:`numpy.pad` for details.
        In case of mode ``constant``, the parameter `cval` will be used as
        the ``constant_values`` parameter to :func:`numpy.pad`.
        In case of mode ``linear_ramp``, the parameter `cval` will be used as
        the ``end_values`` parameter to :func:`numpy.pad`.

    cval : number or iterable of number, optional
        Value to use for padding if `mode` is ``constant``.
        See :func:`numpy.pad` for details. The cval is expected to match the
        input array's dtype and value range. If an iterable is used, it is
        expected to contain one value per channel. The number of values
        and number of channels are expected to match.

    Returns
    -------
    (H',W') ndarray or (H',W',C) ndarray
        Padded array with height ``H'=H+top+bottom`` and width
        ``W'=W+left+right``.

    """
    import imgaug.dtypes as iadt
    _assert_two_or_three_dims(arr)
    assert all([v >= 0 for v in [top, right, bottom, left]]), (
        "Expected padding amounts that are >=0, but got %d, %d, %d, %d "
        "(top, right, bottom, left)" % (top, right, bottom, left))
    is_multi_cval = ia.is_iterable(cval)
    if top > 0 or right > 0 or bottom > 0 or left > 0:
        # Clip cval into the value range of the input dtype.
        min_value, _, max_value = iadt.get_value_range_of_dtype(arr.dtype)
        # without the if here there are crashes for float128, e.g. if
        # cval is an int (just using float(cval) seems to not be accurate
        # enough)
        if arr.dtype.name == "float128":
            cval = np.float128(cval)  # pylint: disable=no-member
        if is_multi_cval:
            cval = np.clip(cval, min_value, max_value)
        else:
            cval = max(min(cval, max_value), min_value)
        # Note that copyMakeBorder() hangs/runs endlessly if arr has an
        # axis of size 0 and mode is "reflect".
        # Numpy also complains in these cases if mode is not "constant".
        has_zero_sized_axis = any([axis == 0 for axis in arr.shape])
        if has_zero_sized_axis:
            mode = "constant"
        # Maps numpy pad mode names (and already-resolved cv2 border flags)
        # to cv2 border flags; None marks modes cv2 cannot handle, which
        # fall back to numpy below.
        mapping_mode_np_to_cv2 = {
            "constant": cv2.BORDER_CONSTANT,
            "edge": cv2.BORDER_REPLICATE,
            "linear_ramp": None,
            "maximum": None,
            "mean": None,
            "median": None,
            "minimum": None,
            "reflect": cv2.BORDER_REFLECT_101,
            "symmetric": cv2.BORDER_REFLECT,
            "wrap": None,
            cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
            cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
            cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
            cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
        }
        bad_mode_cv2 = mapping_mode_np_to_cv2.get(mode, None) is None
        # these datatypes all simply generate a "TypeError: src data type = X
        # is not supported" error
        bad_datatype_cv2 = (
            arr.dtype.name
            in ["uint32", "uint64", "int64", "float16", "float128", "bool"]
        )
        # OpenCV turns the channel axis for arrays with 0 channels to 512
        # TODO add direct test for this. indirectly tested via Pad
        bad_shape_cv2 = (arr.ndim == 3 and arr.shape[-1] == 0)
        if not bad_datatype_cv2 and not bad_mode_cv2 and not bad_shape_cv2:
            # cv2 fast path.
            # convert cval to expected type, as otherwise we get TypeError
            # for np inputs
            kind = arr.dtype.kind
            if is_multi_cval:
                cval = [float(cval_c) if kind == "f" else int(cval_c)
                        for cval_c in cval]
            else:
                cval = float(cval) if kind == "f" else int(cval)
            if arr.ndim == 2 or arr.shape[2] <= 4:
                # without this, only the first channel is padded with the cval,
                # all following channels with 0
                if arr.ndim == 3 and not is_multi_cval:
                    cval = tuple([cval] * arr.shape[2])
                arr_pad = cv2.copyMakeBorder(
                    _normalize_cv2_input_arr_(arr),
                    top=top, bottom=bottom, left=left, right=right,
                    borderType=mapping_mode_np_to_cv2[mode], value=cval)
                if arr.ndim == 3 and arr_pad.ndim == 2:
                    # cv2 drops the channel axis for 1-channel inputs;
                    # restore it.
                    arr_pad = arr_pad[..., np.newaxis]
            else:
                # cv2 supports at most 4 channels per call; pad in chunks
                # of 4 channels and reassemble afterwards.
                result = []
                channel_start_idx = 0
                cval = cval if is_multi_cval else tuple([cval] * arr.shape[2])
                while channel_start_idx < arr.shape[2]:
                    arr_c = arr[..., channel_start_idx:channel_start_idx+4]
                    cval_c = cval[channel_start_idx:channel_start_idx+4]
                    arr_pad_c = cv2.copyMakeBorder(
                        _normalize_cv2_input_arr_(arr_c),
                        top=top, bottom=bottom, left=left, right=right,
                        borderType=mapping_mode_np_to_cv2[mode], value=cval_c)
                    arr_pad_c = np.atleast_3d(arr_pad_c)
                    result.append(arr_pad_c)
                    channel_start_idx += 4
                arr_pad = np.concatenate(result, axis=2)
        else:
            # numpy fallback path.
            # paddings for 2d case
            paddings_np = [(top, bottom), (left, right)]
            # add paddings for 3d case
            if arr.ndim == 3:
                paddings_np.append((0, 0))
            if mode == "constant":
                if arr.ndim > 2 and is_multi_cval:
                    # Per-channel constant values require one np.pad call
                    # per channel.
                    arr_pad_chans = [
                        np.pad(arr[..., c], paddings_np[0:2], mode=mode,
                               constant_values=cval[c])
                        for c in np.arange(arr.shape[2])]
                    arr_pad = np.stack(arr_pad_chans, axis=-1)
                else:
                    arr_pad = np.pad(arr, paddings_np, mode=mode,
                                     constant_values=cval)
            elif mode == "linear_ramp":
                if arr.ndim > 2 and is_multi_cval:
                    arr_pad_chans = [
                        np.pad(arr[..., c], paddings_np[0:2], mode=mode,
                               end_values=cval[c])
                        for c in np.arange(arr.shape[2])]
                    arr_pad = np.stack(arr_pad_chans, axis=-1)
                else:
                    arr_pad = np.pad(arr, paddings_np, mode=mode,
                                     end_values=cval)
            else:
                arr_pad = np.pad(arr, paddings_np, mode=mode)
        return arr_pad
    # Nothing to pad: return an unmodified copy.
    return np.copy(arr)
def pad_to_aspect_ratio(arr, aspect_ratio, mode="constant", cval=0,
                        return_pad_amounts=False):
    """Pad an image array on its sides so it matches a target aspect ratio.

    The required pad amounts are computed via
    :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio`, which also
    explains how the amounts are distributed per image axis.

    Added in 0.4.0. (Previously named ``imgaug.imgaug.pad_to_aspect_ratio()``.)

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.size.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    aspect_ratio : float
        Target aspect ratio as ``width/height``. E.g. ``2.0`` denotes the
        image having twice as much width as height.

    mode : str, optional
        Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``.
        See :func:`numpy.pad` for details.

    return_pad_amounts : bool, optional
        If ``True``, additionally return the per-side pad amounts as a
        ``(top, right, bottom, left)`` tuple of ``int`` s; if ``False``,
        only the padded image is returned.

    Returns
    -------
    (H',W') ndarray or (H',W',C) ndarray
        Padded image fulfilling the given `aspect_ratio`.

    tuple of int
        Amounts by which the image was padded on each side, given as
        ``(top, right, bottom, left)``. Only returned if
        `return_pad_amounts` was set to ``True``.

    """
    amounts = compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio)
    pad_top, pad_right, pad_bottom, pad_left = amounts
    arr_padded = pad(arr, top=pad_top, right=pad_right, bottom=pad_bottom,
                     left=pad_left, mode=mode, cval=cval)
    if not return_pad_amounts:
        return arr_padded
    return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
def pad_to_multiples_of(arr, height_multiple, width_multiple, mode="constant",
                        cval=0, return_pad_amounts=False):
    """Pad an image array until its side lengths are multiples of given values.

    The required pad amounts are computed via
    :func:`compute_paddings_to_reach_multiples_of`; see
    :func:`~imgaug.imgaug.compute_paddings_for_aspect_ratio` for how the
    amounts are distributed per image axis.

    Added in 0.4.0. (Previously named ``imgaug.imgaug.pad_to_multiples_of()``.)

    **Supported dtypes**:

    See :func:`~imgaug.augmenters.size.pad`.

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray
        Image-like array to pad.

    height_multiple : None or int
        The desired multiple of the height. The computed padding amount will
        reflect a padding that increases the y axis size until it is a
        multiple of this value.

    width_multiple : None or int
        The desired multiple of the width. The computed padding amount will
        reflect a padding that increases the x axis size until it is a
        multiple of this value.

    mode : str, optional
        Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.

    cval : number, optional
        Value to use for padding if `mode` is ``constant``.
        See :func:`numpy.pad` for details.

    return_pad_amounts : bool, optional
        If ``True``, additionally return the per-side pad amounts as a
        ``(top, right, bottom, left)`` tuple of ``int`` s; if ``False``,
        only the padded image is returned.

    Returns
    -------
    (H',W') ndarray or (H',W',C) ndarray
        Padded image as ``(H',W')`` or ``(H',W',C)`` ndarray.

    tuple of int
        Amounts by which the image was padded on each side, given as
        ``(top, right, bottom, left)``. Only returned if
        `return_pad_amounts` was set to ``True``.

    """
    amounts = compute_paddings_to_reach_multiples_of(
        arr, height_multiple, width_multiple)
    pad_top, pad_right, pad_bottom, pad_left = amounts
    arr_padded = pad(arr, top=pad_top, right=pad_right, bottom=pad_bottom,
                     left=pad_left, mode=mode, cval=cval)
    if not return_pad_amounts:
        return arr_padded
    return arr_padded, (pad_top, pad_right, pad_bottom, pad_left)
def compute_paddings_to_reach_aspect_ratio(arr, aspect_ratio):
    """Compute pad amounts required to fulfill an aspect ratio.

    "Pad amounts" here denotes the number of pixels that have to be added to
    each side to fulfill the desired constraint.

    The aspect ratio is given as ``ratio = width / height``.
    Depending on which dimension is smaller (height or width), only the
    corresponding sides (top/bottom or left/right) will be padded.

    The axis-wise padding amounts are always distributed equally over the
    sides of the respective axis (i.e. left and right, top and bottom). For
    odd pixel amounts, one pixel will be left over after the equal
    distribution and could be added to either side of the axis. This function
    will always add such a left over pixel to the bottom (y-axis) or
    right (x-axis) side.

    Added in 0.4.0. (Previously named
    ``imgaug.imgaug.compute_paddings_to_reach_aspect_ratio()``.)

    Parameters
    ----------
    arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
        Image-like array or shape tuple for which to compute pad amounts.

    aspect_ratio : float
        Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the
        image having twice as much width as height.

    Returns
    -------
    tuple of int
        Required padding amounts to reach the target aspect ratio, given as
        a ``tuple`` of the form ``(top, right, bottom, left)``.

    """
    _assert_two_or_three_dims(arr)
    assert aspect_ratio > 0, (
        "Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
    shape = arr.shape if hasattr(arr, "shape") else arr
    height, width = shape[0:2]
    pad_top = pad_right = pad_bottom = pad_left = 0
    # A zero-sized axis is treated as size 1 and receives one padding pixel
    # (bottom resp. right) to make it non-degenerate.
    if height == 0:
        height = 1
        pad_bottom += 1
    if width == 0:
        width = 1
        pad_right += 1
    current_ratio = width / height
    if current_ratio < aspect_ratio:
        # image is more vertical than desired, width needs to be increased
        diff = (aspect_ratio * height) - width
        pad_right += int(np.ceil(diff / 2))
        pad_left += int(np.floor(diff / 2))
    elif current_ratio > aspect_ratio:
        # image is more horizontal than desired, height needs to be increased
        diff = ((1/aspect_ratio) * width) - height
        pad_top += int(np.floor(diff / 2))
        pad_bottom += int(np.ceil(diff / 2))
    return pad_top, pad_right, pad_bottom, pad_left
def compute_croppings_to_reach_aspect_ratio(arr, aspect_ratio):
"""Compute crop amounts required to fulfill an aspect ratio.
"Crop amounts" here denotes the number of pixels that have to be removed
from each side to fulfill the desired constraint.
The aspect ratio is given as ``ratio = width / height``.
Depending on which dimension is smaller (height or width), only the
corresponding sides (top/bottom or left/right) will be cropped.
The axis-wise padding amounts are always distributed equally over the
sides of the respective axis (i.e. left and right, top and bottom). For
odd pixel amounts, one pixel will be left over after the equal
distribution and could be added to either side of the axis. This function
will always add such a left over pixel to the bottom (y-axis) or
right (x-axis) side.
If an aspect ratio cannot be reached exactly, this function will return
rather one pixel too few than one pixel too many.
Added in 0.4.0.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray or tuple of int
Image-like array or shape tuple for which to compute crop amounts.
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes the
image having twice as much width as height.
Returns
-------
tuple of int
Required cropping amounts to reach the target aspect ratio, given as a
``tuple`` of the form ``(top, right, bottom, left)``.
"""
_assert_two_or_three_dims(arr)
assert aspect_ratio > 0, (
"Expected to get an aspect ratio >0, got %.4f." % (aspect_ratio,))
shape = arr.shape if hasattr(arr, "shape") else arr
assert shape[0] > 0, (
"Expected to get an array with height >0, got shape %s." % (shape,))
height, width = shape[0:2]
aspect_ratio_current = width / height
top = 0
right = 0
bottom = 0
left = 0
if aspect_ratio_current < aspect_ratio:
# image is more vertical than desired, height needs to be reduced
# c = H - W/r
crop_amount = height - (width / aspect_ratio)
crop_amount = min(crop_amount, height - 1)
top = int( | np.floor(crop_amount / 2) | numpy.floor |
# ***************************************************************
# Copyright (c) 2020 Jittor. All Rights Reserved.
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.txt', which is part of this source code package.
# ***************************************************************
import jittor as jt
import numpy as np
import unittest
try:
import autograd.numpy as anp
from autograd import jacobian
has_autograd = True
except:
has_autograd = False
@unittest.skipIf(not has_autograd, "No autograd found.")
class TestCodeOp(unittest.TestCase):
def test_svd(self):
def check_svd(a):
u,s,v = anp.linalg.svd(a, full_matrices=0)
return u,s,v
def check_u(a):
u,s,v = anp.linalg.svd(a, full_matrices=0)
return u
def check_s(a):
u,s,v = anp.linalg.svd(a, full_matrices=0)
return s
def check_v(a):
u,s,v = anp.linalg.svd(a, full_matrices=0)
return v
for i in range(50):
#not for full-matrices!
a = jt.random((2,2,5,4))
c_a = anp.array(a.data)
u,s,v = jt.linalg.svd(a)
tu,ts,tv = check_svd(c_a)
assert np.allclose(tu,u.data)
assert np.allclose(ts,s.data)
assert np.allclose(tv,v.data)
ju = jt.grad(u,a)
js = jt.grad(s,a)
jv = jt.grad(v,a)
grad_u = jacobian(check_u)
gu = grad_u(c_a)
gu = np.sum(gu, 4)
gu = np.sum(gu, 4)
gu = np.sum(gu, 2)
gu = np.sum(gu, 2)
grad_s = jacobian(check_s)
gs = grad_s(c_a)
gs = np.sum(gs, 4)
gs = np.sum(gs, 2)
gs = np.sum(gs, 2)
grad_v = jacobian(check_v)
gv = grad_v(c_a)
gv = np.sum(gv, 4)
gv = np.sum(gv, 4)
gv = np.sum(gv, 2)
gv = np.sum(gv, 2)
try:
assert np.allclose(ju.data,gu,atol=1e-5)
except AssertionError:
print(ju.data)
print(gu)
try:
assert np.allclose(js.data,gs,atol=1e-5)
except AssertionError:
print(js.data)
print(gs)
try:
assert np.allclose(jv.data,gv,atol=1e-5)
except AssertionError:
print(jv.data)
print(gv)
def test_eigh(self):
def check_eigh(a,UPLO='L'):
w, v = anp.linalg.eigh(a,UPLO)
return w, v
def check_w(a,UPLO='L'):
w, v = anp.linalg.eigh(a,UPLO)
return w
def check_v(a,UPLO='L'):
w, v = anp.linalg.eigh(a,UPLO)
return v
for i in range(50):
a = jt.random((2,2,3,3))
c_a = a.data
w, v = jt.linalg.eigh(a)
tw, tv = check_eigh(c_a)
assert np.allclose(w.data,tw)
assert np.allclose(v.data,tv)
jw = jt.grad(w, a)
jv = jt.grad(v, a)
check_gw = jacobian(check_w)
check_gv = jacobian(check_v)
gw = check_gw(c_a)
gw = np.sum(gw,4)
gw = np.sum(gw,2)
gw = np.sum(gw,2)
assert np.allclose(gw,jw.data,rtol = 1,atol = 5e-8)
gv = check_gv(c_a)
gv = np.sum(gv,4)
gv = np.sum(gv,4)
gv = np.sum(gv,2)
gv = np.sum(gv,2)
assert np.allclose(gv,jv.data,rtol = 1,atol = 5e-8)
def test_pinv(self):
def check_pinv(a):
w = anp.linalg.pinv(a)
return w
for i in range(50):
x = jt.random((2,2,4,4))
c_a = x.data
mx = jt.linalg.pinv(x)
tx = check_pinv(c_a)
np.allclose(mx.data,tx)
jx = jt.grad(mx,x)
check_grad = jacobian(check_pinv)
gx = check_grad(c_a)
np.allclose(gx,jx.data)
def test_inv(self):
def check_inv(a):
w = anp.linalg.inv(a)
return w
for i in range(50):
tn = np.random.randn(4,4).astype('float32')*5
while np.allclose(np.linalg.det(tn),0):
tn = np.random.randn((4,4)).astype('float32')*5
x = jt.array(tn)
x = x.reindex([2,2,x.shape[0],x.shape[1]],["i2","i3"])
c_a = x.data
mx = jt.linalg.inv(x)
tx = check_inv(c_a)
np.allclose(mx.data,tx)
jx = jt.grad(mx,x)
check_grad = jacobian(check_inv)
gx = check_grad(c_a)
np.allclose(gx,jx.data)
def test_slogdet(self):
def check_ans(a):
s, w = anp.linalg.slogdet(a)
return s, w
def check_slogdet(a):
s, w = anp.linalg.slogdet(a)
return w
for i in range(50):
tn = np.random.randn(4,4).astype('float32')*10
while np.allclose(np.linalg.det(tn),0):
tn = np.random.randn((4,4)).astype('float32')*10
x = jt.array(tn)
x = x.reindex([2,2,x.shape[0],x.shape[1]],["i2","i3"])
s = list(x.shape)
det_s = s[:-2]
if len(det_s) == 0:
det_s.append(1)
sign, mx = jt.linalg.slogdet(x)
ts, ta = check_ans(x.data)
assert np.allclose(sign.data, ts)
assert np.allclose(mx.data, ta)
jx = jt.grad(mx,x)
check_sgrad = jacobian(check_slogdet)
gx = check_sgrad(x.data)
gx = np.sum(gx,2)
gx = np.sum(gx,2)
assert np.allclose(gx,jx.data)
def test_cholesky(self):
def check_cholesky(a):
L = anp.linalg.cholesky(a)
return L
for i in range(50):
x = jt.array(np.diag((np.random.rand(3) + 1) * 2))
x = x.reindex([2,2,x.shape[0],x.shape[1]],["i2","i3"])
tx = x.data
L = jt.linalg.cholesky(x)
tL = check_cholesky(tx)
assert np.allclose(tL,L.data)
jx = jt.grad(L,x)
check_grad = jacobian(check_cholesky)
gx = check_grad(tx)
gx = np.sum(gx, 0)
gx = np.sum(gx, 0)
gx = np.sum(gx, 0)
gx = np.sum(gx, 0)
assert np.allclose(jx.data,gx)
def test_solve(self):
def check_solve(a,b):
ans = anp.linalg.solve(a,b)
return ans
for i in range(50):
a = jt.random((2,2,3,3))
b = jt.random((2,2,3))
ans = jt.linalg.solve(a,b)
ta = check_solve(a.data,b.data)
assert np.allclose(ans.data, ta)
jx = jt.grad(ans, a)
check_sgrad = jacobian(check_solve)
gx = check_sgrad(a.data,b.data)
gx = np.sum(gx,0)
gx = np.sum(gx,0)
gx = np.sum(gx,0)
try:
assert np.allclose(gx, jx.data,rtol=1)
except AssertionError:
print(gx)
print(jx.data)
def test_det(self):
def check_det(a):
de = anp.linalg.det(a)
return de
for i in range(50):
tn = | np.random.randn(3, 3) | numpy.random.randn |
"""
Metrics for evaluating the performance.
"""
import pickle
import numpy as np
from preprocessor import Data
def type_confirm(data: Data) -> Data:
    """Return *data* unchanged; exists purely as a type-annotation helper."""
    confirmed = data
    return confirmed
def dcg_at_k(obj: Data, score, eval=True):
"""
Calculate the DCG score
:param obj: A instance of Data.
:param score: The predicted CVR score for evaluation.
:return: A list of DCG scores: [@2, @4, @6]
"""
score = np.array(score)
user, item, click, convert = obj.get_test_data()
user_num = | np.max(user) | numpy.max |
'''
This script is intended to compare results with Yanovich's paper
It is ran on the NCSLGR corpus, with FLS, FS and DS
'''
from models.data_utils import *
from models.model_utils import *
from models.train_model import *
from models.perf_utils import *
import math
import numpy as np
#from matplotlib import pyplot as plt
#plt.switch_backend('agg')
import pickle
import argparse
import time
import os.path
from os import path
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'  # silence TF info/warning log spam
import tensorflow as tf
# Keras moved into tensorflow with TF2; pick the matching import path based
# on the installed major version.
v0 = tf.__version__[0]
if v0 == '2':
    # For tensorflow 2, keras is included in tf
    from tensorflow.keras.models import *
elif v0 == '1':
    #For tensorflow 1.2.0
    from keras.models import *
# Command-line interface: all run configuration is passed via argparse.
parser = argparse.ArgumentParser(description='Trains a Keras-TF model for the recognition of a unique type of annotation, on the DictaSign-LSF-v2 corpus')
#group = parser.add_mutually_exclusive_group()
#group.add_argument("-v", "--verbose", action="store_true")
#group.add_argument("-q", "--quiet", action="store_true")
# Output type
parser.add_argument('--outputsList',
                    type=str,
                    default=['fls', 'DS', 'PT', 'FBUOY'],
                    help='The outputs that the model is trained to recognize',
                    nargs='+')
parser.add_argument('--outputsWeightList',
                    type=int,
                    default=[1, 1, 1, 1],
                    help='The output weights in the loss',
                    nargs='+')
#parser.add_argument('--outputName', type=str, default='PT', help='The output type that the model is trained to recognize')
#parser.add_argument('--flsBinary', type=int, default=1, help='If the output is FLS, if seen as binary', choices=[0, 1])
#parser.add_argument('--flsKeep', type=int, default=[], help='If the output is FLS, list of FLS indices to consider', nargs='*')
parser.add_argument('--comment',
                    type=str,
                    default='',
                    help='A comment to describe this run')
# Training global setting
# (train/valid/test split strategy, either automatic fractions or manually
# specified task/signer/video indices)
parser.add_argument('--videoSplitMode',
                    type=str,
                    default='auto',
                    choices=['manual', 'auto'],
                    help='Split mode for videos (auto or manually specified)')
parser.add_argument('--fractionValid',
                    type=float,
                    default=0.10,
                    help='Fraction of valid data wrt total (if auto split mode)')
parser.add_argument('--fractionTest',
                    type=float,
                    default=0.10,
                    help='Fraction of test data wrt total (if auto split mode)')
parser.add_argument('--signerIndependent',
                    type=int,
                    default=0,
                    choices=[0, 1],
                    help='Signer independent train/valid/test random shuffle')
parser.add_argument('--taskIndependent',
                    type=int,
                    default=0,
                    choices=[0, 1],
                    help='Task independent train/valid/test random shuffle')
parser.add_argument('--excludeTask9',
                    type=int,
                    default=0,
                    choices=[0, 1],
                    help='Whether to exclude task 9')
parser.add_argument('--tasksTrain',
                    type=int,
                    default=[],
                    choices=range(1,10),
                    help='Training task indices',
                    nargs='*')
parser.add_argument('--tasksValid',
                    type=int,
                    default=[],
                    choices=range(1,10),
                    help='Validation task indices',
                    nargs='*')
parser.add_argument('--tasksTest',
                    type=int,
                    default=[],
                    choices=range(1,10),
                    help='Test task indices',
                    nargs='*')
parser.add_argument('--signersTrain',
                    type=int,
                    default=[],
                    choices=range(0,16),
                    help='Training signer indices',
                    nargs='*')
parser.add_argument('--signersValid',
                    type=int,
                    default=[],
                    choices=range(0,16),
                    help='Validation signer indices',
                    nargs='*')
parser.add_argument('--signersTest',
                    type=int,
                    default=[],
                    choices=range(0,16),
                    help='Test signer indices',
                    nargs='*')
parser.add_argument('--idxTrainBypass',
                    type=int,
                    default=[],
                    choices=range(0,94),
                    help='If you really want to set video indices directly',
                    nargs='*')
parser.add_argument('--idxValidBypass',
                    type=int,
                    default=[],
                    choices=range(0,94),
                    help='If you really want to set video indices directly',
                    nargs='*')
parser.add_argument('--idxTestBypass',
                    type=int,
                    default=[],
                    choices=range(0,94),
                    help='If you really want to set video indices directly',
                    nargs='*')
parser.add_argument('--randSeed',
                    type=int,
                    default=17,
                    help='Random seed (numpy)')
parser.add_argument('--weightCorrection',
                    type=float,
                    default=0,
                    help='Correction for data imbalance (from 0 (no correction) to 1)')
# Fine parameters
# (model architecture and optimization hyper-parameters)
parser.add_argument('--seqLength',
                    type=int,
                    default=100,
                    help='Length of sequences')
parser.add_argument('--batchSize',
                    type=int,
                    default=200,
                    help='Batch size')
parser.add_argument('--epochs',
                    type=int,
                    default=100,
                    help='Number of epochs')
parser.add_argument('--separation',
                    type=int,
                    default=0,
                    help='Separation between videos')
parser.add_argument('--dropout',
                    type=float,
                    default=0.5,
                    help='Dropout (0 to 1)')
parser.add_argument('--rnnNumber',
                    type=int,
                    default=1,
                    help='Number of RNN layers')
parser.add_argument('--rnnHiddenUnits',
                    type=int,
                    default=50,
                    help='Number of hidden units in RNN')
parser.add_argument('--mlpLayersNumber',
                    type=int,
                    default=0,
                    help='Number MLP layers after RNN')
parser.add_argument('--convolution',
                    type=int,
                    default=1,
                    help='Whether to use a conv. layer',
                    choices=[0, 1])
parser.add_argument('--convFilt',
                    type=int,
                    default=200,
                    help='Number of convolution kernels')
parser.add_argument('--convFiltSize',
                    type=int,
                    default=3,
                    help='Size of convolution kernels')
parser.add_argument('--learningRate',
                    type=float,
                    default=0.001,
                    help='Learning rate')
parser.add_argument('--optimizer',
                    type=str,
                    default='rms',
                    help='Training optimizer',
                    choices=['rms', 'ada', 'sgd'])
parser.add_argument('--earlyStopping',
                    type=int,
                    default=0,
                    help='Early stopping',
                    choices=[0, 1])
parser.add_argument('--redLrOnPlat',
                    type=int,
                    default=0,
                    help='Reduce l_rate on plateau',
                    choices=[0, 1])
parser.add_argument('--redLrMonitor',
                    type=str,
                    default='val_f1K',
                    help='Metric for l_rate reduction')
parser.add_argument('--redLrMonitorMode',
                    type=str,
                    default='max',
                    help='Mode for l_rate reduction',
                    choices=['min', 'max'])
parser.add_argument('--redLrPatience',
                    type=int,
                    default=10,
                    help='Patience before l_rate reduc')
parser.add_argument('--redLrFactor',
                    type=float,
                    default=0.5,
                    help='Factor for each l_rate reduc')
# save data and monitor best
# (checkpointing policy and output locations for results/predictions)
parser.add_argument('--saveModel',
                    type=str,
                    default='all',
                    help='Whether to save only best model, or all, or none',
                    choices=['no', 'best', 'all'])
parser.add_argument('--saveBestMonitor',
                    type=str,
                    default='val_f1K',
                    help='What metric to decide best model')
parser.add_argument('--saveBestMonMode',
                    type=str,
                    default='max',
                    help='Mode to define best',
                    choices=['min', 'max'])
parser.add_argument('--saveGlobalresults',
                    type=str,
                    default='reports/corpora/DictaSign/recognitionMulti/global/globalMulti.dat',
                    help='Where to save global results')
parser.add_argument('--savePredictions',
                    type=str,
                    default='reports/corpora/DictaSign/recognitionMulti/predictions/',
                    help='Where to save predictions')
# Metrics
# BUG FIX: --stepWolf previously declared choices=['rms', 'ada', 'sgd']
# (copy-pasted from --optimizer), which made every explicitly passed float
# value invalid. stepWolf is a float step size and takes no choices.
parser.add_argument('--stepWolf',
                    type=float,
                    default=0.1,
                    help='Step between Wolf metric eval points')
args = parser.parse_args()
# Random initialization
np.random.seed(args.randSeed)
## PARAMETERS
# Unpack the parsed CLI arguments into module-level variables; the trailing
# comments show typical/example values.
corpus = 'DictaSign'
# Output type
#outputName = args.outputName#'PT'
#flsBinary  = bool(args.flsBinary)#True
#flsKeep    = args.flsKeep#[]
outputsList = args.outputsList
outputsWeightList = args.outputsWeightList
comment     = args.comment#[]
# Training global setting
videoSplitMode    = args.videoSplitMode
fractionValid     = args.fractionValid
fractionTest      = args.fractionTest
signerIndependent = bool(args.signerIndependent)#False
taskIndependent   = bool(args.taskIndependent)
excludeTask9      = bool(args.excludeTask9)
tasksTrain        = args.tasksTrain#[2,3,4,5,6,7,8]
tasksValid        = args.tasksValid#[9]
tasksTest         = args.tasksTest#[7] # session 7
signersTrain      = args.signersTrain#[2,3,4,5,6,7,8]
signersValid      = args.signersValid#[9]
signersTest       = args.signersTest#[7] # session 7
idxTrainBypass    = args.idxTrainBypass
idxValidBypass    = args.idxValidBypass
idxTestBypass     = args.idxTestBypass
weightCorrection  = args.weightCorrection
# Fine parameters
seq_length        = args.seqLength
batch_size        = args.batchSize
epochs            = args.epochs
separation        = args.separation
dropout           = args.dropout
rnn_number        = args.rnnNumber
rnn_hidden_units  = args.rnnHiddenUnits
mlp_layers_number = args.mlpLayersNumber
convolution       = bool(args.convolution)
convFilt          = args.convFilt
convFiltSize      = args.convFiltSize
learning_rate     = args.learningRate
optimizer         = args.optimizer
earlyStopping     = bool(args.earlyStopping)
reduceLrOnPlateau = bool(args.redLrOnPlat)#False
reduceLrMonitor   = args.redLrMonitor
reduceLrMonitorMode = args.redLrMonitorMode
reduceLrPatience  = args.redLrPatience
reduceLrFactor    = args.redLrFactor
# save data and monitor best
save              = args.saveModel
saveMonitor       = args.saveBestMonitor
saveMonitorMode   = args.saveBestMonMode
saveGlobalresults = args.saveGlobalresults
savePredictions   = args.savePredictions
# Metrics
stepWolf          = args.stepWolf#0.1
# Keras metric callables (imported from the project's model utilities).
metrics           = ['acc',  f1K,   precisionK,   recallK]
metricsNames      = ['acc', 'f1K', 'precisionK', 'recallK']
# Accumulators for the per-output model heads, filled by the loop below.
nOutputs = len(outputsList)
outputName = ''
outputNbList = []
outputCategories = []
outputAssembleList = []
for i in range(nOutputs-1):
outputName += outputsList[i]
outputName += '_'
outputCategories.append([1])
outputAssembleList.append([outputsList[i]])
outputNbList.append(2)
outputName += outputsList[nOutputs-1]
outputCategories.append([1])
outputAssembleList.append([outputsList[nOutputs-1]])
outputNbList.append(2)
timeString = str(round(time.time()/10))
saveBestName='recognitionMultiDictaSign_'+outputName+'_'+timeString
if path.exists(saveGlobalresults):
dataGlobal = pickle.load(open(saveGlobalresults, 'rb'))
else:
dataGlobal = {}
if outputName not in dataGlobal:
dataGlobal[outputName] = {}
dataGlobal[outputName][timeString] = {}
dataGlobal[outputName][timeString]['comment'] = comment
dataGlobal[outputName][timeString]['params'] = {}
#dataGlobal[outputName][timeString]['params']['flsBinary'] = flsBinary
#dataGlobal[outputName][timeString]['params']['flsKeep'] = flsKeep
dataGlobal[outputName][timeString]['params']['videoSplitMode'] = videoSplitMode
dataGlobal[outputName][timeString]['params']['fractionValid'] = fractionValid
dataGlobal[outputName][timeString]['params']['fractionTest'] = fractionTest
dataGlobal[outputName][timeString]['params']['signerIndependent'] = signerIndependent
dataGlobal[outputName][timeString]['params']['taskIndependent'] = taskIndependent
dataGlobal[outputName][timeString]['params']['excludeTask9'] = excludeTask9
dataGlobal[outputName][timeString]['params']['tasksTrain'] = tasksTrain
dataGlobal[outputName][timeString]['params']['tasksValid'] = tasksValid
dataGlobal[outputName][timeString]['params']['tasksTest'] = tasksTest
dataGlobal[outputName][timeString]['params']['signersTrain'] = signersTrain
dataGlobal[outputName][timeString]['params']['signersValid'] = signersValid
dataGlobal[outputName][timeString]['params']['signersTest'] = signersTest
dataGlobal[outputName][timeString]['params']['idxTrainBypass'] = idxTrainBypass
dataGlobal[outputName][timeString]['params']['idxValidBypass'] = idxValidBypass
dataGlobal[outputName][timeString]['params']['idxTestBypass'] = idxTestBypass
dataGlobal[outputName][timeString]['params']['weightCorrection'] = weightCorrection
dataGlobal[outputName][timeString]['params']['seq_length'] = seq_length
dataGlobal[outputName][timeString]['params']['batch_size'] = batch_size
dataGlobal[outputName][timeString]['params']['epochs'] = epochs
dataGlobal[outputName][timeString]['params']['separation'] = separation
dataGlobal[outputName][timeString]['params']['dropout'] = dropout
dataGlobal[outputName][timeString]['params']['rnn_number'] = rnn_number
dataGlobal[outputName][timeString]['params']['rnn_hidden_units'] = rnn_hidden_units
dataGlobal[outputName][timeString]['params']['mlp_layers_number'] = mlp_layers_number
dataGlobal[outputName][timeString]['params']['convolution'] = convolution
dataGlobal[outputName][timeString]['params']['convFilt'] = convFilt
dataGlobal[outputName][timeString]['params']['convFiltSize'] = convFiltSize
dataGlobal[outputName][timeString]['params']['learning_rate'] = learning_rate
dataGlobal[outputName][timeString]['params']['optimizer'] = optimizer
dataGlobal[outputName][timeString]['params']['earlyStopping'] = earlyStopping
dataGlobal[outputName][timeString]['params']['reduceLrOnPlateau'] = reduceLrOnPlateau
dataGlobal[outputName][timeString]['params']['reduceLrMonitor'] = reduceLrMonitor
dataGlobal[outputName][timeString]['params']['reduceLrMonitorMode'] = reduceLrMonitorMode
dataGlobal[outputName][timeString]['params']['reduceLrPatience'] = reduceLrPatience
dataGlobal[outputName][timeString]['params']['reduceLrFactor'] = reduceLrFactor
dataGlobal[outputName][timeString]['params']['save'] = save
dataGlobal[outputName][timeString]['params']['saveMonitor'] = saveMonitor
dataGlobal[outputName][timeString]['params']['saveMonitorMode'] = saveMonitorMode
dataGlobal[outputName][timeString]['params']['saveGlobalresults'] = saveGlobalresults
dataGlobal[outputName][timeString]['params']['savePredictions'] = savePredictions
dataGlobal[outputName][timeString]['params']['stepWolf'] = stepWolf
## GET VIDEO INDICES
if len(idxTrainBypass) + len(idxValidBypass) + len(idxTestBypass) > 0:
idxTrain = np.array(idxTrainBypass)
idxValid = np.array(idxValidBypass)
idxTest = np.array(idxTestBypass)
else:
idxTrain, idxValid, idxTest = getVideoIndicesSplitDictaSign(tasksTrain,
tasksValid,
tasksTest,
signersTrain,
signersValid,
signersTest,
signerIndependent,
taskIndependent,
excludeTask9,
videoSplitMode,
fractionValid,
fractionTest,
checkSplits=True,
checkSets=True)
#get_annotations_videos_categories('DictaSign',['PT', 'DS', 'fls'], [[1], [1], [1]], output_assemble=[['PT'], [ 'DS'], ['fls']], video_indices=np.arange(2),from_notebook=True)
features_train, annot_train = get_data_concatenated(corpus=corpus,
output_form='mixed',
output_names_final=outputsList,
output_categories_or_names_original=outputCategories,
output_assemble=outputAssembleList,
video_indices=idxTrain,
separation=separation)
features_valid, annot_valid = get_data_concatenated(corpus=corpus,
output_form='mixed',
output_names_final=outputsList,
output_categories_or_names_original=outputCategories,
output_assemble=outputAssembleList,
video_indices=idxValid,
separation=separation)
features_test, annot_test = get_data_concatenated(corpus=corpus,
output_form='mixed',
output_names_final=outputsList,
output_categories_or_names_original=outputCategories,
output_assemble=outputAssembleList,
video_indices=idxTest,
separation=separation)
classWeightFinal = []
#for i in range(nOutputs):
# nClasses = outputNbList[i]#annot_train.shape[2]
# classWeightsCorrected, _ = weightVectorImbalancedDataOneHot(annot_train[i][0, :, :])
# classWeightsNotCorrected = np.ones(nClasses)
# classWeightFinal.append(weightCorrection*classWeightsCorrected + (1-weightCorrection)*classWeightsNotCorrected)
model = get_model(outputsList,outputNbList,outputsWeightList,
dropout=dropout,
rnn_number=rnn_number,
rnn_hidden_units=rnn_hidden_units,
mlp_layers_number=mlp_layers_number,
conv=convolution,
conv_filt=convFilt,
conv_ker=convFiltSize,
time_steps=seq_length,
learning_rate=learning_rate,
optimizer=optimizer,
metrics=metrics)
history = train_model(model,
features_train,
annot_train,
features_valid,
annot_valid,
output_class_weights=classWeightFinal,
batch_size=batch_size,
epochs=epochs,
seq_length=seq_length,
save=save,
saveMonitor=saveMonitor,
saveMonitorMode=saveMonitorMode,
saveBestName=saveBestName,
reduceLrOnPlateau=reduceLrOnPlateau,
reduceLrMonitor=reduceLrMonitor,
reduceLrMonitorMode=reduceLrMonitorMode,
reduceLrPatience=reduceLrPatience,
reduceLrFactor=reduceLrFactor)
time_distributed_1_f1K = np.array(history['time_distributed_1_f1K'])
time_distributed_2_f1K = np.array(history['time_distributed_2_f1K'])
time_distributed_3_f1K = np.array(history['time_distributed_3_f1K'])
time_distributed_4_f1K = np.array(history['time_distributed_4_f1K'])
time_distributed_sum_f1K = time_distributed_1_f1K + time_distributed_2_f1K + time_distributed_3_f1K + time_distributed_4_f1K
print(time_distributed_sum_f1K.shape)
bestf1K_idx = | np.argmax(time_distributed_sum_f1K) | numpy.argmax |
# version adapted for rye grass april 4, 2006, <NAME> for fst modelling;
# model is based on lingra model in cgms
# for forage growth and production simulation which was written fortran
# (grsim.pfo) and later in c; application of model is the
# simulation of perennial ryegrass (l. perenne) growth under both
# potential and water-limited growth conditions.
# model is different from lingra model in cgms with respect to:
# 1) evaporation, transpiration, water balance, root depth growth
# and growth reduction by drought stress (tranrf) are derived from lingra
# model for thimothee (e.g. subroutine penman, evaptr and drunir)
# 2) running average to calculate soil temperature (soitmp) is derived from
# approach in lingra model for thimothee
# variables:
# biomass in leaves, reserves, etc. in kg dm / ha
# terms of water balance in mm/day
def lingrars(latitude, meteolist, plot):
"""
Function for Lingra_RS
:param latitude: required for daylength calculation
:param meteolist: input for weather and RS params
:param plot: False (process mode) or True (debug only)
:return:[tiller[-1], yielD[-1], wlvg[-1], wlvd1[-1], wa[-1], grass[-1], tracu[-1], evacu[-1]]
"""
import numpy as np
# GPU offload
# from numba import jit, cuda
# function optimized to run on gpu
# @jit(target="cuda")
# def func2(a):
# for i in range(10000000):
# a[i] += 1
# latitude (used to calculate daylength;
year = meteolist[0] # year in weather file
doy = meteolist[1] # doy of the year
rdd = meteolist[2] # solar radiation (kj m-2 day-1)
tmmn = meteolist[3] # minimum temperature (degrees celsius)
tmmx = meteolist[4] # maximum temperature (degrees celsius)
vp = meteolist[5] # water vapour pressure (kpa)
wn = meteolist[6] # average wind speed (m s-1)
rain = meteolist[7] # daily rainfall (mm day-1)
RSevp = meteolist[8] # daily RS Evaporation actual (mm day-1)
RStrn = meteolist[9] # daily RS Transpiration actual (mm day-1)
RSlai = meteolist[10] # daily RS LAI (-)
RScut = meteolist[11] # daily RS cutting event (0/1)
RSsms = meteolist[11] # daily RS soil moisture (cm3/cm3)
# print(year[0], doy[0], rdd[0], tmmn[0], tmmx[0], vp[0], wn[0], rain[0], RSevp[0], RStrn[0], RSlai[0], RScut[0])
# print(year[1], doy[1], rdd[1], tmmn[1], tmmx[1], vp[1], wn[1], rain[1], RSevp[1], RStrn[1], RSlai[1], RScut[1])
# print(year[2], doy[2], rdd[2], tmmn[2], tmmx[2], vp[2], wn[2], rain[2], RSevp[2], RStrn[2], RSlai[2], RScut[2])
# number of days in simulation (days)
fintim = len(year)
# initial
rootdi = 0.4
LAIi = 0.1
tilli = 7000.
wrei = 200.
wrti = 4.
# functions and parameters for grass
# parameters
co2a = 360. # atmospheric co2 concentration (ppm)
kdif = 0.60 #
LAIcr = 4. # critical LAI
luemax = 3.0 # light use efficiency (g dm mj-1 par intercepted)
sla = 0.0025 # specific leaf area (m2 g-1 dm)
cLAI = 0.8 # LAI after cutting (-)
nitmax = 3.34 # maximum nitrogen content (%)
nitr = 3.34 # actual nitrogen content (%)
rdrd = 0.01 # base death rate (fraction)
tmbas1 = 3. # base temperature perennial ryegrass (degrees celsius)
# parameters for water relations from lingra for thimothee
drate = 50. # drainage rate (mm day-1)
irrigf = 0. # irrigation (0 = no irrigation till 1 = full irrigation)
rootdm = 0.4 # maximum root depth (m)
rrdmax = 0.012 # maximum root growth (m day-1)
wcad = 0.005 # air dry water content (fraction)
wcwp = 0.12 # wilting point water content (fraction)
wcfc = 0.29 # field capacity water content (fraction)
wci = 0.29 # initial water content (fraction)
wcwet = 0.37 # minimum water content at water logging (fraction)
wcst = 0.41 # saturation water content (fraction)
pi = 3.1415927
rad = pi / 180.
# initial available water (mm)
wai = 1000. * rootdi * wci
# initial leaf weight is initialized as initial
# leaf area divided by initial specific leaf area, kg ha-1
wlvgi = LAIi / sla
# remaining leaf weight after cutting is initialized at remaining
# leaf area after cutting divided by initial specific leaf area, kg ha-1
cwlvg = cLAI / sla
# maximum site filling new buds (fsmax) decreases due
# to low nitrogen contents, van loo and schapendonk (1992)
# theoretical maximum tillering size = 0.693
fsmax = nitr / nitmax * 0.693
# 11. data
# specifying variables:
davtmp = np.mean(np.array([tmmn, tmmx]), axis=0)
dec = | np.zeros(fintim) | numpy.zeros |
# Copyright 2020 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coverage: ignore
from typing import Callable, List, Optional, Tuple
from itertools import product
import time
import numpy as np
import scipy as sp
from scipy.optimize import OptimizeResult
from openfermion.transforms import get_interaction_operator
from openfermion.ops import FermionOperator, InteractionRDM
from joblib import Parallel, delayed # type: ignore
from recirq.hfvqe.objective import \
RestrictedHartreeFockObjective
from recirq.hfvqe.circuits import rhf_params_to_matrix
def get_one_body_fermion_operator(coeff_matrix):  # testpragma: no cover
    # coverage: ignore
    """Build the one-body FermionOperator sum_{ij} c[i, j] a_i^dag a_j."""
    dim = coeff_matrix.shape[0]
    operator = FermionOperator()
    for row in range(dim):
        for col in range(dim):
            operator += coeff_matrix[row, col] * FermionOperator(
                ((row, 1), (col, 0)))
    return operator
def kdelta(i: int, j: int) -> float:  # testpragma: no cover
    # coverage: ignore
    """Kronecker delta: 1.0 when the indices match, 0.0 otherwise."""
    return float(i == j)
def group_action(old_unitary: np.ndarray, new_parameters: np.ndarray,
                 occ: List[int],
                 virt: List[int]) -> np.ndarray:  # testpragma: no cover
    # coverage: ignore
    """U(e^{kappa}) * U(e^{kappa'}) = U(e^{kappa} * e^{kappa'})

    Builds the kappa matrix from ``new_parameters`` and left-multiplies
    ``old_unitary`` by its matrix exponential.

    Args:
        old_unitary: unitary that we update--left multiply
        new_parameters: parameters for new unitary
        occ: list of occupied indices
        virt: list of virtual indices

    Returns:
        Updated unitary
    """
    norbs = len(occ) + len(virt)
    kappa_new = rhf_params_to_matrix(new_parameters, norbs, occ, virt)
    assert kappa_new.shape == (norbs, norbs)
    return sp.linalg.expm(kappa_new) @ old_unitary
def non_redundant_rotation_generators(
        rhf_objective: RestrictedHartreeFockObjective
) -> List[FermionOperator]:  # testpragma: no cover
    # coverage: ignore
    """Produce rotation generators for restricted Hartree-Fock.

    Each generator comes from setting exactly one occupied-virtual rotation
    parameter to 1, expanding it to a spatial kappa matrix, lifting it to
    spin orbitals via a Kronecker product, and converting the result to a
    one-body fermion operator.

    Args:
        rhf_objective: recirq.hfvqe.RestrictedHartreeFock object

    Returns:
        List of fermionic generators.
    """
    num_params = rhf_objective.nocc * rhf_objective.nvirt
    norbs = len(rhf_objective.occ) + len(rhf_objective.virt)
    # 2x2 identity used to expand each spatial orbital into two spin orbitals.
    spin_expander = np.array([[1, 0], [0, 1]])
    generators = []
    for param_idx in range(num_params):
        unit_params = np.zeros(num_params)
        unit_params[param_idx] = 1
        kappa_spatial = rhf_params_to_matrix(unit_params, norbs,
                                             rhf_objective.occ,
                                             rhf_objective.virt)
        kappa_spin = np.kron(kappa_spatial, spin_expander)
        generators.append(get_one_body_fermion_operator(kappa_spin))
    return generators
def get_dvec_hmat(rotation_generators: List[FermionOperator],
rhf_objective: RestrictedHartreeFockObjective,
rdms: InteractionRDM,
diagonal_hessian=False
) -> (np.ndarray, np.ndarray): # testpragma: no cover
# coverage: ignore
"""Generate first and second terms of the BCH expansion.
Args:
rotation_generators: List FermionOperators corresponding to
non-redundant rotation generators
rhf_objective: recirq.hfvqe.RestrictedHartreeFockObject
rdms: openfermion.InteractionRDMs where the 2-RDM is generated
from the 1-RDM as of.wedge(opdm, opdm)
diagonal_hessian: Boolean indicator for what type of Hessian
construction should be used.
"""
dvec = np.zeros(len(rotation_generators), dtype=np.complex128)
hmat = np.zeros((len(rotation_generators), len(rotation_generators)),
dtype=np.complex128)
num_qubits = rhf_objective.num_qubits
kdelta_mat = | np.eye(rhf_objective.hamiltonian.one_body_tensor.shape[0]) | numpy.eye |
from copy import deepcopy
from itertools import product
import numpy as np
from PlanningCore.robot import Robot
from PlanningCore.core import (
coordinate_transformation,
constants as c,
get_common_tangent_angles,
get_line_formula,
get_vector_angles,
State,
)
def init_table(cue_ball_pos, balls_pos, robot_pos=(0, -2)):
    """Construct a Table with a cue ball, object balls, six pockets and a robot.

    Ball 0 is the white cue ball; object balls are numbered from 1 in the
    order of ``balls_pos``. Pockets sit at the cross product of the table's
    left/right edges with its bottom/middle/top heights.
    """
    cue = Ball(
        no=0,
        color='white',
        pos=coordinate_transformation(cue_ball_pos),
        radius=c.ball_radius, is_cue=True,
    )
    balls = [cue]
    for idx, pos in enumerate(balls_pos, start=1):
        balls.append(Ball(
            no=idx,
            color='yellow',
            pos=coordinate_transformation(pos),
            radius=c.ball_radius,
        ))
    pockets = []
    corner_xs = (0, c.table_width)
    pocket_ys = (0, c.table_height / 2, c.table_height)
    for no, (x, y) in enumerate(product(corner_xs, pocket_ys)):
        pockets.append(Pocket(no=no, pos=(x, y), radius=c.pocket_radius))
    robot = Robot(pos=coordinate_transformation(robot_pos))
    return Table(width=c.table_width, height=c.table_height,
                 balls=balls, pockets=pockets, robot=robot)
class Ball(object):
    """A billiard ball: identity, geometry and current kinematic state.

    Velocity and angular velocity are stored as 3-component tuples; the
    position keeps only the 2-D (x, y) table coordinates.
    """
    def __init__(self, no, color, pos=(0, 0), radius=10, is_cue=False):
        self.no = no  # ball number; the cue ball is created with no=0 elsewhere
        self.color = color
        self.pos = pos  # (x, y) table coordinates
        self.radius = radius
        self.is_cue = is_cue  # True only for the cue (white) ball
        self.velocity = (0, 0, 0)  # starts at rest
        self.angular_velocity = (0, 0, 0)  # no initial spin
        self.force_angle = 0  # angle of the applied force, set externally
        self.state = State.stationary
    def __repr__(self):
        return (f'Ball(no={self.no}, pos=(x: {self.pos[0]:.2f}, y: {self.pos[1]:.2f}), '
                f'state={State.all[self.state]}{", cue_ball" if self.is_cue else ""})')
    def set_state(self, state):
        """Set the motion state (one of PlanningCore.core.State values)."""
        self.state = state
    def set_pos(self, pos):
        # Only x/y are kept: a trailing z component (as in an rvw row) is dropped.
        self.pos = pos[:2]
    def set_velocity(self, velocity):
        self.velocity = velocity
    def set_angular_velocity(self, angular_velocity):
        self.angular_velocity = angular_velocity
    def set_rvw(self, rvw):
        """Set position (r), velocity (v) and angular velocity (w) from one triple."""
        r, v, w = rvw
        self.set_pos(r)
        self.set_velocity(v)
        self.set_angular_velocity(w)
    @property
    def rvw(self):
        # 3x3 array: row 0 = (x, y, 0) position, row 1 = velocity, row 2 = spin.
        return np.array([[*self.pos, 0], self.velocity, self.angular_velocity])
"""
Authors: <NAME>
Code for comparing GP-UCB and Elimination algorithm on lenient and standard regret
"""
import os
import argparse
import numpy as np
from good_action.GPBO import GPBO
from good_action.utils import FUNC, ALGO
from good_action.functions import Syn_1
parser = argparse.ArgumentParser(description='Arguments of good action identification.')
parser.add_argument("--eps", type=float, default=0.9, help='good action threshold')
args = parser.parse_args()
def cal_regret(beta_func, epsilon):
    """Run GP-UCB and the Elimination algorithm side by side on Syn_1.

    For each of N_EXP repetitions, both algorithms run LEN iterations from
    the same initial design, and four cumulative regrets are tracked per
    iteration: standard (f_max - y), indicator (1 if y below threshold),
    gap (standard regret only when y is below threshold), and hinge
    (epsilon - y when below threshold).

    Args:
        beta_func: exploration schedule, callable t -> beta_t.
        epsilon: good-action threshold; y >= epsilon counts as a success.

    Returns:
        Two 4-element lists of (N_EXP, LEN) cumulative-regret arrays:
        [ucb_standard, ucb_indicator, ucb_gap, ucb_hinge] and the
        corresponding list for the elimination algorithm.
    """
    func = Syn_1(noisy=True)
    noiseless_func = Syn_1(noisy=False)
    func_bounds=func.bounds
    # Cumulative-regret accumulators: one row per repetition, one column per iteration.
    ucb_standard_cumu_arr = np.zeros((N_EXP, LEN))
    ucb_indicator_cumu_arr = np.zeros((N_EXP, LEN))
    ucb_gap_cumu_arr = np.zeros((N_EXP, LEN))
    ucb_hinge_cumu_arr = np.zeros((N_EXP, LEN))
    elim_standard_cumu_arr = np.zeros((N_EXP, LEN))
    elim_indicator_cumu_arr = np.zeros((N_EXP, LEN))
    elim_gap_cumu_arr = np.zeros((N_EXP, LEN))
    elim_hinge_cumu_arr = np.zeros((N_EXP, LEN))
    elim_s_list = []
    ucb_s_list = []
    # 60x60 evaluation grid over [-3, 3]^2, flattened to (3600, 2) points.
    meshgrid = np.array(np.meshgrid(np.linspace(-3, 3, 60), np.linspace(-3, 3, 60)))
    Mt = meshgrid.reshape(2,-1).T
    # Grid maximum of the noiseless function, used as the standard-regret baseline.
    f_max = noiseless_func(Mt).max()
    # Shared random initial design so both algorithms start identically.
    X_init = np.random.uniform(func_bounds[:, 0], func_bounds[:, 1], size=(N_INITS, func_bounds.shape[0]))
    for i in range(N_EXP):
        Bo_ucb=GPBO(func, func_bounds, 'gpucb', epsilon)
        Bo_ucb.gp.noise_delta=0.000001
        Bo_ucb.initiate(X_init)
        Bo_ucb.Mt = np.vstack([Mt, Bo_ucb.X_S])
        Bo_ucb.set_ls(0.1, 1)
        Bo_ucb.beta_func = beta_func
        Bo_elim=GPBO(func, func_bounds, 'elim', epsilon)
        Bo_elim.gp.noise_delta=0.000001
        Bo_elim.initiate(X_init)
        Bo_elim.Mt = np.vstack([Mt, Bo_elim.X_S])
        Bo_elim.set_ls(0.1, 1)
        Bo_elim.beta_func = beta_func
        for j in range(LEN):
            # Each algorithm proposes a point; regrets use noiseless evaluations.
            x_ucb = Bo_ucb.sample_new_value_ucb()
            y_ucb = np.array(noiseless_func(x_ucb.squeeze())).squeeze()
            x_elim = Bo_elim.sample_new_value_elimination()
            y_elim = np.array(noiseless_func(x_elim.squeeze())).squeeze()
            ucb_standard_regret = f_max - y_ucb
            ucb_indicator_regret = 1 if y_ucb < epsilon else 0
            ucb_gap_regret = ucb_standard_regret if y_ucb < epsilon else 0
            ucb_hinge_regret = (epsilon - y_ucb) if y_ucb < epsilon else 0
            # NOTE: at j == 0 the [i, j-1] index wraps to the last column,
            # which is still zero at that point, so the recurrence starts at 0.
            ucb_standard_cumu_arr[i, j] = ucb_standard_cumu_arr[i, j-1] + ucb_standard_regret
            ucb_indicator_cumu_arr[i, j] = ucb_indicator_cumu_arr[i, j-1] + ucb_indicator_regret
            ucb_gap_cumu_arr[i, j] = ucb_gap_cumu_arr[i, j-1] + ucb_gap_regret
            ucb_hinge_cumu_arr[i, j] = ucb_hinge_cumu_arr[i, j-1] + ucb_hinge_regret
            elim_standard_regret = f_max - y_elim
            elim_indicator_regret = 1 if y_elim < epsilon else 0
            elim_gap_regret = elim_standard_regret if y_elim < epsilon else 0
            elim_hinge_regret = (epsilon - y_elim) if y_elim < epsilon else 0
            elim_standard_cumu_arr[i, j] = elim_standard_cumu_arr[i, j-1] + elim_standard_regret
            elim_indicator_cumu_arr[i, j] = elim_indicator_cumu_arr[i, j-1] + elim_indicator_regret
            elim_gap_cumu_arr[i, j] = elim_gap_cumu_arr[i, j-1] + elim_gap_regret
            elim_hinge_cumu_arr[i, j] = elim_hinge_cumu_arr[i, j-1] + elim_hinge_regret
            print("Experiment %i iter %i : elim num %i, y_ucb %f, y_elim %f " % (i,j,len(Bo_elim.Mt),y_ucb,y_elim))
        # NOTE(review): these checks sit OUTSIDE the j loop, so "success" is
        # judged only on the FINAL iteration's proposal -- confirm intended.
        if elim_indicator_regret == 0:
            elim_s_list.append(i)
        if ucb_indicator_regret == 0:
            ucb_s_list.append(i)
        del Bo_ucb
        del Bo_elim
    print('elim success', elim_s_list)
    print('ucb success', ucb_s_list)
    return [ucb_standard_cumu_arr, ucb_indicator_cumu_arr, ucb_gap_cumu_arr, ucb_hinge_cumu_arr],\
           [elim_standard_cumu_arr, elim_indicator_cumu_arr, elim_gap_cumu_arr, elim_hinge_cumu_arr]
if __name__ == '__main__':
MAXSTEP = 800
N_INITS = 2
N_EXP = 5
LEN = MAXSTEP - N_INITS
regrets_ucb, regrets_elim = cal_regret(lambda x : np.log(2*x)**1.5, float(args.eps))
regrets = {'ucb' : regrets_ucb,
'elim' : regrets_elim,
}
np.save('./syn1_lenient.npy', | np.asarray(regrets) | numpy.asarray |
import numpy as np
import py.test
import random
from weldnumpy import weldarray, erf as welderf
import scipy.special as ss
'''
TODO0: Decompose heavily repeated stuff, like the assert blocks and so on.
TODO: New tests:
- reduce ufuncs: at least the supported ones.
- use np.add.reduce syntax for the reduce ufuncs.
- getitem: lists and ndarrays + ints.
- error based tests: nan; underflow/overflow; unsupported types [true] * [...] etc;
- long computational graphs - that segfault or take too long; will require implicit evaluation
when the nested ops get too many.
- edge/failing cases: out = ndarray for op involving weldarrays.
- update elements of an array in a loop etc. --> setitem test.
- setitem + views tests.
'''
UNARY_OPS = [np.exp, np.log, np.sqrt]
# TODO: Add wa.erf - doesn't use the ufunc functionality of numpy so not doing it for
# now.
BINARY_OPS = [np.add, np.subtract, np.multiply, np.divide]
REDUCE_UFUNCS = [np.add.reduce, np.multiply.reduce]
# FIXME: weld mergers dont support non-commutative ops --> need to find a workaround for this.
# REDUCE_UFUNCS = [np.add.reduce, np.subtract.reduce, np.multiply.reduce, np.divide.reduce]
TYPES = ['float32', 'float64', 'int32', 'int64']
NUM_ELS = 10
# TODO: Create test with all other ufuncs.
def random_arrays(num, dtype):
    '''
    Builds a (numpy array, weldarray) pair of `num` random elements of `dtype`.
    '''
    # np.random has no dtype argument, so draw gaussian floats into a typed
    # buffer -- this supports both float and int dtypes.
    base = np.zeros((num), dtype=dtype)
    base[:] = np.random.randn(*base.shape)
    base = np.abs(base)
    # shift up by at least 1 so no element is 0 (otherwise divide errors)
    offset = np.random.randint(1, high=10, size=base.shape)
    base = (base + offset).astype(dtype)
    np_copy = np.copy(base)
    return np_copy, weldarray(base, verbose=False)
def given_arrays(l, dtype):
    '''
    @l: list.
    Builds a numpy array and a weldarray holding the given values.
    '''
    arr = np.array(l, dtype=dtype)
    return np.copy(arr), weldarray(arr)
def test_unary_elemwise():
    '''
    Checks every op in UNARY_OPS against numpy for all float dtypes.
    FIXME: Weld only supports unary ops on floats, so int dtypes are skipped.
    '''
    for unary_op in UNARY_OPS:
        for dtype in TYPES:
            if "int" in dtype:
                continue
            np_arr, weld_arr = random_arrays(NUM_ELS, dtype)
            weld_out = unary_op(weld_arr)
            np_out = unary_op(np_arr)
            evaluated = weld_out.evaluate()
            assert np.allclose(weld_out, np_out)
            assert np.array_equal(evaluated, np_out)
def test_binary_elemwise():
    '''
    Checks every op in BINARY_OPS against numpy for every dtype.
    '''
    for binary_op in BINARY_OPS:
        for dtype in TYPES:
            np_a, weld_a = random_arrays(NUM_ELS, dtype)
            np_b, weld_b = random_arrays(NUM_ELS, dtype)
            weld_out = binary_op(weld_a, weld_b).evaluate()
            np_out = binary_op(np_a, np_b)
            # array_equal keeps the weldarray's dtype in play; allclose would
            # try to subtract floats from ints.
            assert np.array_equal(weld_out, np_out)
def test_multiple_array_creation():
    '''
    Wrapping an existing weldarray in weldarray() again must still work.
    Minor edge case -- should become moot once loop fusion moves out of the
    numpy level.
    '''
    np_arr, weld_arr = random_arrays(NUM_ELS, 'float32')
    rewrapped = weldarray(weld_arr)  # creating array again
    weld_out = np.exp(rewrapped).evaluate()
    np_out = np.exp(np_arr)
    assert np.allclose(weld_out, np_out)
def test_array_indexing():
    '''
    Need to decide: If a weldarray item is accessed - should we evaluate the
    whole array (for expected behaviour to match numpy) or not?
    '''
    # TODO(review): unimplemented placeholder -- decide on getitem semantics
    # (eager full evaluation vs staying lazy) before writing assertions here.
    pass
def test_numpy_operations():
    '''
    Ops without a Weld implementation should fall back to numpy's
    implementation while still returning weldarrays.
    '''
    np_arr, weld_arr = random_arrays(NUM_ELS, 'float32')
    np_out = np.sin(np_arr)
    weld_out = np.sin(weld_arr).evaluate()
    assert np.allclose(weld_out, np_out)
def test_type_conversion():
    '''
    The dtype of an evaluated result must equal the input dtype, for every
    dtype in TYPES.
    '''
    for dtype in TYPES:
        _, weld_a = random_arrays(NUM_ELS, dtype)
        _, weld_b = random_arrays(NUM_ELS, dtype)
        summed = np.add(weld_a, weld_b).evaluate()
        assert summed.dtype == dtype
def test_concat():
    '''
    Test concatenation of arrays - either Weld - Weld, or Weld - Numpy etc.
    '''
    # TODO(review): unimplemented placeholder -- concatenation of weldarrays
    # is not exercised anywhere in this suite yet.
    pass
def test_views_basic():
    '''
    Taking views into a 1d weldarray should return a weldarray view of the
    correct data without any copying.
    '''
    n, w = random_arrays(NUM_ELS, 'float32')
    w2 = w[2:5]
    n2 = n[2:5]
    assert isinstance(w2, weldarray)
    # The docstring promises "the correct data", but previously only the type
    # was checked; also compare the view's contents against numpy's slice
    # (same pattern as test_views_update_child below).
    assert np.allclose(w2.evaluate(), n2)
def test_views_update_child():
    '''
    Updates both parents and child to put more strain.

    In-place ops on a view (unary, binary, and augmented assignment) must be
    reflected in the parent weldarray and must match numpy's view semantics.
    '''
    def asserts(w, n, w2, n2):
        # view matches the parent slice, the numpy twin, and parent matches numpy
        assert np.allclose(w[2:5], w2.evaluate())
        assert np.allclose(w2.evaluate(), n2)
        assert np.allclose(w, n)
    NUM_ELS = 10  # shadows the module-level constant on purpose, locally
    n, w = random_arrays(NUM_ELS, 'float32')
    w2 = w[2:5]
    n2 = n[2:5]
    # unary part
    w2 = np.exp(w2, out=w2)
    n2 = np.exp(n2, out=n2)
    asserts(w, n, w2, n2)
    # binary part
    n3, w3 = random_arrays(3, 'float32')
    n2 = np.add(n2, n3, out=n2)
    w2 = np.add(w2, w3, out=w2)
    w2.evaluate()
    asserts(w, n, w2, n2)
    w2 += 5.0
    n2 += 5.0
    w2.evaluate()
    asserts(w, n, w2, n2)
def test_views_update_parent():
'''
Create a view, then update the parent in place. The change should be
effected in the view-child as well.
'''
def asserts(w, n, w2, n2):
assert np.allclose(w[2:4], w2.evaluate())
assert np.allclose(w2.evaluate(), n2)
assert np.allclose(w, n)
n, w = random_arrays(NUM_ELS, 'float32')
w2 = w[2:4]
n2 = n[2:4]
w = np.exp(w, out=w)
n = np.exp(n, out=n)
w2.evaluate()
print(w2)
print(w[2:4])
# w2 should have been updated too.
asserts(w, n, w2, n2)
n3, w3 = random_arrays(NUM_ELS, 'float32')
w = np.add(w, w3, out=w)
n = | np.add(n, n3, out=n) | numpy.add |
from basic.types import TP_BAYES, Bow, Configuration, Tuple, Union, elemental
from basic.decorators import type_checker, document
import basic.docfunc as doc
from typing import Sequence
import numpy as np
from numpy import ndarray
import copy
def _rect_inter_inner(x1: ndarray, x2: ndarray) -> Tuple[ndarray]:
n1 = x1.shape[0]-1
n2 = x2.shape[0]-1
X1 = np.c_[x1[:-1], x1[1:]]
X2 = np.c_[x2[:-1], x2[1:]]
S1 = np.tile(X1.min(axis=1), (n2, 1)).T
S2 = np.tile(X2.max(axis=1), (n1, 1))
S3 = np.tile(X1.max(axis=1), (n2, 1)).T
S4 = np.tile(X2.min(axis=1), (n1, 1))
return S1, S2, S3, S4
def _rectangle_intersection_(x1: ndarray, y1: ndarray, x2: ndarray, y2: ndarray) -> Tuple[ndarray]:
    """Indices (ii, jj) of segment pairs whose bounding boxes overlap in x and y."""
    S1, S2, S3, S4 = _rect_inter_inner(x1, x2)
    S5, S6, S7, S8 = _rect_inter_inner(y1, y2)
    # a pair overlaps on an axis when lo_self <= hi_other and hi_self >= lo_other
    overlap_x = np.less_equal(S1, S2) & np.greater_equal(S3, S4)
    overlap_y = np.less_equal(S5, S6) & np.greater_equal(S7, S8)
    ii, jj = np.nonzero(overlap_x & overlap_y)
    return ii, jj
def intersection(x1: ndarray, y1: ndarray, x2: ndarray, y2: ndarray) -> Tuple[float]:
"""function to obtain interaction point between two curves"""
x1 = np.asarray(x1)
x2 = | np.asarray(x2) | numpy.asarray |
# ******************************************************************************
# Copyright (c) 2020, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ******************************************************************************
import os
import pytest
import numpy as np
from numpy.testing import (assert_allclose, assert_array_equal)
from skipp.morphology import (erosion, dilation)
def test_dilate_erode_symmetry():
    """Erosion of an image equals 255 minus dilation of its inverse, for
    several eccentric structuring elements."""
    black_pixel = 255 * np.ones((4, 4), dtype=np.uint8)
    black_pixel[1, 1] = 0
    white_pixel = 255 - black_pixel
    # analogs of skimage.morphology.selem.square(2) / rectangle(1, 2) / rectangle(2, 1)
    selems = [
        np.array([[1, 1],
                  [1, 1]], dtype=np.uint8),
        np.array([[1, 1]], dtype=np.uint8),
        np.array([[1],
                  [1]], dtype=np.uint8),
    ]
    for selem in selems:
        eroded = erosion(black_pixel, selem)
        dilated = dilation(white_pixel, selem)
        assert np.all(eroded == (255 - dilated))
@pytest.mark.parametrize("function", [pytest.param(dilation, id="dilation"),
                                      pytest.param(erosion, id="erosion")])
def test_default_selem(function):
    """Calling without a structuring element defaults to a radius-1 diamond."""
    # Same footprint as skimage.morphology.selem.diamond(radius=1).
    diamond = np.array([[0, 1, 0],
                       [1, 1, 1],
                       [0, 1, 0]], dtype=np.uint8)
    image = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
                      [0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
                      [0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8)
    with_explicit_selem = function(image, diamond)
    with_default_selem = function(image)
    assert_allclose(with_explicit_selem, with_default_selem)
def test_float():
    """Erosion/dilation of a float32 image match precomputed references."""
    image = np.array([[0.55, 0.72, 0.60, 0.54, 0.42],
                      [0.65, 0.44, 0.89, 0.96, 0.38],
                      [0.79, 0.53, 0.57, 0.93, 0.07],
                      [0.09, 0.02, 0.83, 0.78, 0.87],
                      [0.98, 0.80, 0.46, 0.78, 0.12]],
                     dtype=np.float32)
    expected_eroded = np.array([[0.55, 0.44, 0.54, 0.42, 0.38],
                                [0.44, 0.44, 0.44, 0.38, 0.07],
                                [0.09, 0.02, 0.53, 0.07, 0.07],
                                [0.02, 0.02, 0.02, 0.78, 0.07],
                                [0.09, 0.02, 0.46, 0.12, 0.12]],
                               dtype=np.float32)
    expected_dilated = np.array([[0.72, 0.72, 0.89, 0.96, 0.54],
                                 [0.79, 0.89, 0.96, 0.96, 0.96],
                                 [0.79, 0.79, 0.93, 0.96, 0.93],
                                 [0.98, 0.83, 0.83, 0.93, 0.87],
                                 [0.98, 0.98, 0.83, 0.78, 0.87]],
                                dtype=np.float32)
    assert_allclose(erosion(image), expected_eroded)
    assert_allclose(dilation(image), expected_dilated)
def test_uint16():
    """Erosion/dilation of a uint16 image match precomputed references.

    The input is the `test_float` image converted with skimage.img_as_uint,
    e.g. im16 = skimage.img_as_uint(im).
    """
    image16 = np.array([[36044, 47185, 39321, 35389, 27525],
                        [42598, 28835, 58326, 62914, 24903],
                        [51773, 34734, 37355, 60948,  4587],
                        [ 5898,  1311, 54394, 51117, 57015],
                        [64224, 52428, 30146, 51117,  7864]],
                       dtype=np.uint16)
    expected_eroded = np.array([[36044, 28835, 35389, 27525, 24903],
                                [28835, 28835, 28835, 24903,  4587],
                                [ 5898,  1311, 34734,  4587,  4587],
                                [ 1311,  1311,  1311, 51117,  4587],
                                [ 5898,  1311, 30146,  7864,  7864]],
                               dtype=np.uint16)
    expected_dilated = np.array([[47185, 47185, 58326, 62914, 35389],
                                 [51773, 58326, 62914, 62914, 62914],
                                 [51773, 51773, 60948, 62914, 60948],
                                 [64224, 54394, 54394, 60948, 57015],
                                 [64224, 64224, 54394, 51117, 57015]],
                                dtype=np.uint16)
    assert_allclose(erosion(image16), expected_eroded)
    assert_allclose(dilation(image16), expected_dilated)
@pytest.mark.skip(reason="needs __get_output implementation")
def test_discontiguous_out_array():
    """Writing results into a strided (non-contiguous) output view must work."""
    image = np.array([[5, 6, 2],
                      [7, 2, 2],
                      [3, 5, 1]], np.uint8)
    backing = np.zeros((5, 5), np.uint8)
    # Every-other-element view: the zeros between results must stay untouched.
    strided_view = backing[::2, ::2]
    expected_dilation = np.array([[7, 0, 6, 0, 6],
                                  [0, 0, 0, 0, 0],
                                  [7, 0, 7, 0, 2],
                                  [0, 0, 0, 0, 0],
                                  [7, 0, 5, 0, 5]], np.uint8)
    expected_erosion = np.array([[5, 0, 2, 0, 2],
                                 [0, 0, 0, 0, 0],
                                 [2, 0, 2, 0, 1],
                                 [0, 0, 0, 0, 0],
                                 [3, 0, 1, 0, 1]], np.uint8)
    dilation(image, out=strided_view)
    assert_array_equal(backing, expected_dilation)
    erosion(image, out=strided_view)
    assert_array_equal(backing, expected_erosion)
def test_1d_erosion():
image = np.array([1, 2, 3, 2, 1], dtype=np.uint8)
expected = | np.array([1, 1, 2, 1, 1], dtype=np.uint8) | numpy.array |
#!/usr/bin/env python3
import click
import os
import random
import numpy as np
import torch
from torch import nn
from emtgan.common import *
from emtgan.datasets import *
from emtgan.models import *
from emtgan.utils import *
# Fix RNG seeds so training runs are reproducible.
random.seed(1234)
np.random.seed(1234)
# set hyperparameters
discriminator_lr = 0.001
generator_lr = 0.001
num_epochs = 200
ensembles = 10  # number of independently trained models (see train_model)
weight_decay = 0
# Adam moment coefficients (beta1, beta2).
betas = (
    0.5,
    0.999
)
# Relative weights of the CycleGAN loss terms.
lambda_adv = 1      # adversarial loss
lambda_cycle = 10   # cycle-consistency loss
lambda_ident = 5    # identity loss
lambda_comp = 1e-4  # pairwise-distance comparison loss
# "CC0" variant: disable cycle consistency entirely.
CC0 = False
variant = ''
if CC0:
    lambda_cycle = 0
    variant = 'CC0'
# When True, attach LR decay schedules to the optimizers (DecayLambda usage
# in train_model; presumably a linear decay starting at num_epochs // 2).
enable_scheduling = True
def model_error(G, x, y):
    """Signed error between generated and ground-truth pairwise distances.

    Splits the paired batch ``x`` into its two measurement branches, runs
    generator ``G`` on each, and compares the distance between the two
    generated points with the distance between the two ground-truth points
    in ``y``.

    Returns a 1-D array of (predicted - true) distances, one per batch row.
    """
    branch_a, branch_b = np.split(x, 2, 1)
    tensor_a = np2torch(branch_a)
    tensor_b = np2torch(branch_b)
    # The first two input columns pass through unchanged; G predicts the rest.
    generated_a = torch2np(torch.cat([tensor_a[:, :2], G(tensor_a)], 1))
    generated_b = torch2np(torch.cat([tensor_b[:, :2], G(tensor_b)], 1))
    truth_a, truth_b = np.split(y, 2, 1)
    true_distances = np.linalg.norm(truth_a - truth_b, axis=1)
    difference = unnormalize(generated_a) - unnormalize(generated_b)
    predicted_distances = np.linalg.norm(difference[:, :3], axis=1)
    return predicted_distances - true_distances
def model_MSE(G, x, y):
    """Mean squared pairwise-distance error of generator ``G`` over the batch."""
    distance_errors = model_error(G, x, y)
    return np.sum(np.square(distance_errors)) / x.shape[0]
def train_iteration(epoch, iteration, D_cl, opt_D_cl, D_lc, opt_D_lc, G_cl, G_lc, opt_G, Xlab, Xcarm, ycarm):
    """Run one CycleGAN training step on a lab-batch / c-arm-batch pair.

    First updates both generators jointly against hard real/fake labels,
    then updates each discriminator against softened labels.

    Args:
        epoch, iteration: current training position (unused here; kept for
            interface compatibility with callers).
        D_cl, opt_D_cl: discriminator for c-arm -> lab outputs, its optimizer.
        D_lc, opt_D_lc: discriminator for lab -> c-arm outputs, its optimizer.
        G_cl, G_lc: the two generators (c-arm -> lab and lab -> c-arm).
        opt_G: joint optimizer over both generators' parameters.
        Xlab, Xcarm: batches holding two concatenated measurement branches.
        ycarm: ground-truth positions for the two c-arm branches, concatenated.

    Returns:
        dict mapping loss-term names to their (weighted) loss tensors.
    """
    real, fake = make_labels_hard(Xlab.size(0))
    lab_1, lab_2 = torch.split(Xlab, len(input_features), 1)
    carm_1, carm_2 = torch.split(Xcarm, len(input_features), 1)
    ### train generators ###
    opt_G.zero_grad()
    # The first two input columns pass through unchanged; each generator
    # predicts only the remaining features.
    fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
    fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
    fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
    fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
    ## adversarial loss ##
    # how well can G fool D?
    loss_D_cl_adv = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), real)
    loss_D_lc_adv = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), real)
    loss_adv = (loss_D_cl_adv + loss_D_lc_adv) / 2
    ## cycle loss ##
    # enforce cycle consistency: mapping there and back recovers the input
    recov_lab = torch.cat([fake_carm_1[:,:2], G_cl(fake_carm_1)], 1)
    recov_carm = torch.cat([fake_lab_1[:,:2], G_lc(fake_lab_1)], 1)
    loss_recov_lab = mse(recov_lab, lab_1)
    loss_recov_carm = mse(recov_carm, carm_1)
    loss_cycle = (loss_recov_lab + loss_recov_carm) / 2
    ## identity loss ##
    # feeding a sample from the target domain should leave it unchanged
    loss_ident_lab = mse(lab_1, torch.cat([lab_1[:,:2], G_cl(lab_1)], 1))
    loss_ident_carm = mse(carm_1, torch.cat([carm_1[:,:2], G_lc(carm_1)], 1))
    loss_ident = (loss_ident_lab + loss_ident_carm) / 2
    ## distance-comparison loss ##
    # generated pair distance should match the ground-truth pair distance
    d_fake = torch.norm(tensor_unnormalize(fake_lab_1)[:,:3] - tensor_unnormalize(fake_lab_2)[:,:3], 2, 1)
    y_1, y_2 = torch.split(ycarm, 3, 1)
    d_real = torch.norm(y_1 - y_2, 2, 1)
    loss_comp = mse(d_fake, d_real)
    ## total loss for both generators ##
    loss_G = lambda_adv * loss_adv + lambda_cycle * loss_cycle + lambda_ident * loss_ident + lambda_comp * loss_comp
    loss_G.backward()
    # BUG FIX: gradients must be clipped *after* backward() has populated
    # them (and before step()). Previously clipping ran before backward(),
    # operating on the freshly zeroed gradients — a no-op.
    torch.nn.utils.clip_grad_norm_(G_lc.parameters(), 1.0)
    torch.nn.utils.clip_grad_norm_(G_cl.parameters(), 1.0)
    opt_G.step()
    # Softened labels (label smoothing) for the discriminator updates.
    real, fake = make_labels_soft(Xlab.size(0))
    ### train discriminators
    ## D_cl
    opt_D_cl.zero_grad()
    # Regenerate the fakes with the just-updated generators (also avoids
    # reusing the generator graph freed by the earlier backward()).
    fake_lab_1 = torch.cat([carm_1[:,:2], G_cl(carm_1)], 1)
    fake_lab_2 = torch.cat([carm_2[:,:2], G_cl(carm_2)], 1)
    loss_real = bceloss(D_cl(Xlab), real) + bceloss(D_cl(Xcarm), fake)
    loss_fake = bceloss(D_cl(torch.cat([fake_lab_1, fake_lab_2], 1)), fake)
    loss_D_cl = (loss_real + loss_fake) / 3
    loss_D_cl.backward()
    # BUG FIX: clip after backward(), as above.
    torch.nn.utils.clip_grad_norm_(D_cl.parameters(), 1.0)
    opt_D_cl.step()
    ## D_lc
    opt_D_lc.zero_grad()
    fake_carm_1 = torch.cat([lab_1[:,:2], G_lc(lab_1)], 1)
    fake_carm_2 = torch.cat([lab_2[:,:2], G_lc(lab_2)], 1)
    loss_real = bceloss(D_lc(Xcarm), real) + bceloss(D_lc(Xlab), fake)
    loss_fake = bceloss(D_lc(torch.cat([fake_carm_1, fake_carm_2], 1)), fake)
    loss_D_lc = (loss_real + loss_fake) / 3
    loss_D_lc.backward()
    # BUG FIX: clip after backward(), as above.
    torch.nn.utils.clip_grad_norm_(D_lc.parameters(), 1.0)
    opt_D_lc.step()
    return dict(
        discriminator_CL=loss_D_cl,
        discriminator_LC=loss_D_lc,
        cycle=lambda_cycle * loss_cycle,
        adversarial=lambda_adv * loss_adv,
        ident=lambda_ident * loss_ident,
        comp=lambda_comp * loss_comp,
        generator=loss_G
    )
def train_model():
val_losses = np.array([])
min_val_loss_total = np.inf
num_iterations = min(len(lab_dataloader), len(carm_dataloader))
for model_num in range(ensembles):
#### Discriminators ####
## D for c-arm --> lab conversion
D_cl = CycleGANDiscriminatorNetwork().to(cuda)
initialize_weights_normal(D_cl)
opt_D_cl = optim.Adam(D_cl.parameters(), lr=discriminator_lr, betas=betas)
## D for lab --> c-arm conversion
D_lc = CycleGANDiscriminatorNetwork().to(cuda)
initialize_weights_normal(D_lc)
opt_D_lc = optim.Adam(D_lc.parameters(), lr=discriminator_lr, betas=betas)
#### Generators ####
## G for c-arm --> lab conversion
G_cl = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normal(G_cl)
## G for lab --> c-arm conversion
G_lc = CycleGANGeneratorNetwork().to(cuda)
initialize_weights_normal(G_lc)
opt_G = optim.Adam(chain(G_lc.parameters(), G_cl.parameters()), lr=generator_lr, betas=betas)
min_val_loss = np.inf
min_val_index = 0
hist_epoch = np.array([])
hist_train_losses = {}
hist_val_loss = np.array([])
if enable_scheduling:
sched_G = optim.lr_scheduler.LambdaLR(opt_G, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_cl = optim.lr_scheduler.LambdaLR(opt_D_cl, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
sched_D_lc = optim.lr_scheduler.LambdaLR(opt_D_lc, lr_lambda=DecayLambda(num_epochs, 0, num_epochs // 2).step)
## adversarial training
for epoch in range(num_epochs):
train_losses = {}
for iteration in range(num_iterations):
lab_batch = next(iter(lab_dataloader))
carm_batch = next(iter(carm_dataloader))
Xlab = lab_batch['x'].float().to(cuda)
Xcarm = carm_batch['x'].float().to(cuda)
ycarm = carm_batch['gt'].float().to(cuda)
losses = train_iteration(
epoch,
iteration,
D_cl, opt_D_cl,
D_lc, opt_D_lc,
G_cl, G_lc, opt_G,
Xlab, Xcarm,
ycarm
)
for key, value in losses.items():
if key not in train_losses:
train_losses[key] = | np.array([]) | numpy.array |
from assay import assert_raises
from numpy import abs, arange, sqrt
from skyfield import constants
from skyfield.api import Distance, load, wgs84, wms
from skyfield.functions import length_of
from skyfield.positionlib import Apparent, Barycentric
from skyfield.toposlib import ITRSPosition, iers2010
# Candidate angle values in degrees; presumably injected into tests by the
# assay framework via parameter-name matching — TODO confirm.
angle = (-15, 15, 35, 45)
# assay-style fixture: yields a fresh timescale to tests taking a `ts` argument.
def ts():
    yield load.timescale()
def test_latitude_longitude_elevation_str_and_repr():
    """str()/repr() of GeographicPosition for scalar and array coordinates."""
    w = wgs84.latlon(36.7138, -112.2169, 2400.0)
    assert str(w) == ('WGS84 latitude +36.7138 N'
                      ' longitude -112.2169 E elevation 2400.0 m')
    assert repr(w) == ('<GeographicPosition WGS84 latitude +36.7138 N'
                       ' longitude -112.2169 E elevation 2400.0 m>')
    # Short arrays are printed in full.
    w = wgs84.latlon([1.0, 2.0], [3.0, 4.0], [5.0, 6.0])
    assert str(w) == (
        'WGS84 latitude [+1.0000 +2.0000] N'
        ' longitude [3.0000 4.0000] E'
        ' elevation [5.0 6.0] m'
    )
    assert repr(w) == '<GeographicPosition {0}>'.format(w)
    # Longer arrays are elided with an ellipsis in the middle.
    w = wgs84.latlon(arange(6.0), arange(10.0, 16.0), arange(20.0, 26.0))
    assert str(w) == (
        'WGS84 latitude [+0.0000 +1.0000 ... +4.0000 +5.0000] N'
        ' longitude [10.0000 11.0000 ... 14.0000 15.0000] E'
        ' elevation [20.0 21.0 ... 24.0 25.0] m'
    )
    assert repr(w) == '<GeographicPosition {0}>'.format(w)
def test_raw_itrs_position():
    """An ITRSPosition built from a raw distance vector is evaluable at a time."""
    position = ITRSPosition(Distance(au=[1, 2, 3]))
    timescale = load.timescale()
    moment = timescale.utc(2020, 12, 16, 12, 59)
    # Smoke test: evaluation must not raise.
    position.at(moment)
def test_wgs84_velocity_matches_actual_motion():
    """A finite difference of position over 1/300 s should match .velocity."""
    # It looks like this is a sweet spot for accuracy: presumably a
    # short enough fraction of a second that the vector does not time to
    # change direction much, but long enough that the direction does not
    # get lost down in the noise.
    factor = 300.0
    ts = load.timescale()
    t = ts.utc(2019, 11, 2, 3, 53, [0, 1.0 / factor])
    jacob = wgs84.latlon(36.7138, -112.2169)
    p = jacob.at(t)
    # Position difference across the interval approximates velocity * dt.
    velocity1 = p.position.km[:,1] - p.position.km[:,0]
    velocity2 = p.velocity.km_per_s[:,0]
    # Tolerance in km/s, after scaling the finite difference back to 1 s.
    assert length_of(velocity2 - factor * velocity1) < 0.0007
def test_lst():
    """Local sidereal time at (0 N, 0 E) should agree with a HORIZONS value."""
    ts = load.timescale()
    # Pin delta-T to a constant so the result does not drift as bundled
    # data files are updated.
    ts.delta_t_table = [-1e99, 1e99], [69.363285] * 2  # from finals2000A.all
    t = ts.utc(2020, 11, 27, 15, 34)
    top = wgs84.latlon(0.0, 0.0)
    expected = 20.0336663100  # see "authorities/horizons-lst"
    actual = top.lst_hours_at(t)
    # Compare in milliarcseconds: hours -> arcseconds (x3600 x15) -> mas.
    difference_mas = (actual - expected) * 3600 * 15 * 1e3
    # Constant offset relative to HORIZONS; presumably a known RA offset of
    # that ephemeris service — TODO confirm source of the 51.25 mas value.
    horizons_ra_offset_mas = 51.25
    difference_mas -= horizons_ra_offset_mas
    assert abs(difference_mas) < 1.0
def test_itrs_xyz_attribute_and_itrf_xyz_method():
top = wgs84.latlon(45.0, 0.0, elevation_m=constants.AU_M - constants.ERAD)
x, y, z = top.itrs_xyz.au
assert abs(x - sqrt(0.5)) < 2e-7
assert abs(y - 0.0) < 1e-14
assert abs(z - sqrt(0.5)) < 2e-7
ts = load.timescale()
t = ts.utc(2019, 11, 2, 3, 53)
x, y, z = top.at(t).itrf_xyz().au
assert abs(x - sqrt(0.5)) < 1e-4
assert abs(y - 0.0) < 1e-14
assert abs(z - | sqrt(0.5) | numpy.sqrt |
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_transform.tf_utils."""
import os
import numpy as np
from packaging import version
import tensorflow as tf
from tensorflow_transform import analyzers
from tensorflow_transform import annotators
from tensorflow_transform import tf_utils
from tensorflow_transform import test_case
import unittest
from tensorflow.python.framework import composite_tensor # pylint: disable=g-direct-tensorflow-import
# Parametrization for table-construction tests: the vocabulary asset path is
# supplied either as a plain Python string or as a string tensor.
_CONSTRUCT_TABLE_PARAMETERS = [
    dict(testcase_name='_string', asset_path_input_fn=lambda x: x),
    dict(testcase_name='_string_tensor', asset_path_input_fn=tf.constant),
]
def _construct_table(asset_file_path,
                     key_dtype=tf.string,
                     key_index=0,
                     value_dtype=tf.int64,
                     value_index=1,
                     default_value=-1):
  """Builds a StaticHashTable whose entries come from a text vocabulary file."""
  column_spec = dict(
      key_dtype=key_dtype,
      key_index=key_index,
      value_dtype=value_dtype,
      value_index=value_index)
  initializer = tf.lookup.TextFileInitializer(asset_file_path, **column_spec)
  return tf.lookup.StaticHashTable(initializer, default_value=default_value)
def _value_to_tensor(value):
  """Converts a *TensorValue (or plain value) into the live tensor type."""
  if isinstance(value, tf.compat.v1.ragged.RaggedTensorValue):
    return tf.ragged.constant(value.to_list())
  if isinstance(value, tf.compat.v1.SparseTensorValue):
    return tf.compat.v1.convert_to_tensor_or_sparse_tensor(value)
  return tf.constant(value)
class _SparseTensorSpec:
  """Minimal stand-in for `tf.SparseTensorSpec` on TF versions lacking it.

  Only records the constructor arguments so that test parametrizations
  referencing `tf.SparseTensorSpec` can still be built.
  """

  def __init__(self, shape, dtype):
    # Stored but, as far as this file shows, never read back — TODO confirm.
    self._shape = shape
    self._dtype = dtype
# Older TF releases have no SparseTensorSpec; patch in the minimal stand-in
# so the parametrized test cases below can be constructed at import time.
if not hasattr(tf, 'SparseTensorSpec'):
  tf.SparseTensorSpec = _SparseTensorSpec
class TFUtilsTest(test_case.TransformTestCase):
  def _assertCompositeRefEqual(self, left, right):
    """Asserts that two `tf_utils._CompositeTensorRef`s are equal."""
    self.assertEqual(left.type_spec, right.type_spec)
    self.assertAllEqual(left.list_of_refs, right.list_of_refs)
  def test_copy_tensors_produces_different_tensors(self):
    """copy_tensors must return fresh tensors, not aliases of its inputs."""
    with tf.compat.v1.Graph().as_default():
      tensors = {
          'dense':
              tf.compat.v1.placeholder(
                  tf.int64, (None,), name='my_dense_input'),
          'sparse':
              tf.compat.v1.sparse_placeholder(tf.int64, name='my_sparse_input'),
          'ragged':
              tf.compat.v1.ragged.placeholder(
                  tf.int64, ragged_rank=2, name='my_ragged_input')
      }
      copied_tensors = tf_utils.copy_tensors(tensors)
      # Composites are checked component-by-component: every underlying
      # tensor object must differ from the original.
      self.assertNotEqual(tensors['dense'], copied_tensors['dense'])
      self.assertNotEqual(tensors['sparse'].indices,
                          copied_tensors['sparse'].indices)
      self.assertNotEqual(tensors['sparse'].values,
                          copied_tensors['sparse'].values)
      self.assertNotEqual(tensors['sparse'].dense_shape,
                          copied_tensors['sparse'].dense_shape)
      self.assertNotEqual(tensors['ragged'].values,
                          copied_tensors['ragged'].values)
      self.assertNotEqual(tensors['ragged'].row_splits,
                          copied_tensors['ragged'].row_splits)
  def test_copy_tensors_produces_equivalent_tensors(self):
    """Tensors returned by copy_tensors must evaluate to the same values."""
    with tf.compat.v1.Graph().as_default():
      tensors = {
          'dense':
              tf.compat.v1.placeholder(
                  tf.int64, (None,), name='my_dense_input'),
          'sparse':
              tf.compat.v1.sparse_placeholder(tf.int64, name='my_sparse_input'),
          'ragged':
              tf.compat.v1.ragged.placeholder(
                  tf.int64, ragged_rank=1, name='my_ragged_input')
      }
      copied_tensors = tf_utils.copy_tensors(tensors)
      with tf.compat.v1.Session() as session:
        dense_value = [1, 2]
        sparse_value = tf.compat.v1.SparseTensorValue(
            indices=[[0, 0], [0, 2], [1, 1]],
            values=[3, 4, 5],
            dense_shape=[2, 3])
        ragged_value = tf.compat.v1.ragged.RaggedTensorValue(
            values=np.array([3, 4, 5], dtype=np.int64),
            row_splits=np.array([0, 2, 3], dtype=np.int64))
        # Feed the ORIGINAL placeholders and fetch the COPIES: values must
        # round-trip unchanged through the copied graph nodes.
        sample_tensors = session.run(
            copied_tensors,
            feed_dict={
                tensors['dense']: dense_value,
                tensors['sparse']: sparse_value,
                tensors['ragged']: ragged_value
            })
        self.assertAllEqual(sample_tensors['dense'], dense_value)
        self.assertAllEqual(sample_tensors['sparse'].indices,
                            sparse_value.indices)
        self.assertAllEqual(sample_tensors['sparse'].values,
                            sparse_value.values)
        self.assertAllEqual(sample_tensors['sparse'].dense_shape,
                            sparse_value.dense_shape)
        self.assertAllEqual(sample_tensors['ragged'].values,
                            ragged_value.values)
        self.assertAllEqual(sample_tensors['ragged'].row_splits,
                            ragged_value.row_splits)
  @test_case.named_parameters(
      test_case.cross_with_function_handlers([
          dict(
              testcase_name='2d',
              tensor=tf.compat.v1.ragged.RaggedTensorValue(
                  values=np.array([1.2, 1., 1.2, 1.]),
                  row_splits=np.array([0, 2, 4])),
              rowids=[0, 0, 1, 1],
              tensor_spec=tf.RaggedTensorSpec([None, None], tf.float32)),
          dict(
              testcase_name='3d',
              tensor=tf.compat.v1.ragged.RaggedTensorValue(
                  values=tf.compat.v1.ragged.RaggedTensorValue(
                      values=np.array([1.2, 1., 1.2, 1.]),
                      row_splits=np.array([0, 3, 4])),
                  row_splits=np.array([0, 1, 1, 2])),
              rowids=[0, 0, 0, 2],
              tensor_spec=tf.RaggedTensorSpec([None, None, None], tf.float32)),
      ]))
  def test_get_ragged_batch_value_rowids(self, tensor, rowids, tensor_spec,
                                         function_handler):
    """_get_ragged_batch_value_rowids maps each flat value to its batch row."""

    @function_handler(input_signature=[tensor_spec])
    def get_ragged_batch_value_rowids(tensor):
      return tf_utils._get_ragged_batch_value_rowids(tensor)

    self.assertAllEqual(get_ragged_batch_value_rowids(tensor), rowids)
  @test_case.named_parameters(
      test_case.cross_with_function_handlers([
          dict(
              testcase_name='rank1',
              x=['a', 'b', 'a'],
              x_spec=tf.TensorSpec(None, tf.string),
              weights=[1, 1, 2],
              filter_regex=None,
              expected_unique_x=[b'a', b'b'],
              expected_summed_weights_per_x=[3, 1]),
          dict(
              testcase_name='rank2',
              x=[['a', 'b\n', 'a'], ['b\n', 'a', 'b\n']],
              x_spec=tf.TensorSpec(None, tf.string),
              weights=[[1, 2, 1], [1, 2, 2]],
              filter_regex=None,
              expected_unique_x=[b'a', b'b\n'],
              expected_summed_weights_per_x=[4, 5]),
          dict(
              testcase_name='rank3',
              x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
                 [['a', 'b', 'a'], ['b', 'a', 'b']]],
              x_spec=tf.TensorSpec(None, tf.string),
              weights=[[[1, 1, 2], [1, 2, 1]], [[1, 2, 1], [1, 2, 1]]],
              filter_regex=None,
              expected_unique_x=[b'a', b'b'],
              expected_summed_weights_per_x=[9, 7]),
          dict(
              testcase_name='sparse',
              x=tf.compat.v1.SparseTensorValue(
                  indices=[[0, 0], [0, 1], [2, 1]],
                  values=['a', 'a', 'b'],
                  dense_shape=[4, 2]),
              x_spec=tf.SparseTensorSpec([4, 2], tf.string),
              weights=[2, 3, 4],
              filter_regex=None,
              expected_unique_x=[b'a', b'b'],
              expected_summed_weights_per_x=[5, 4]),
          dict(
              testcase_name='ragged',
              x=tf.compat.v1.ragged.RaggedTensorValue(  # pylint: disable=g-long-lambda
                  values=tf.compat.v1.ragged.RaggedTensorValue(
                      values=np.array(['a', 'b', 'b', 'a']),
                      row_splits=np.array([0, 2, 4])),
                  row_splits=np.array([0, 2])),
              x_spec=tf.RaggedTensorSpec([None, None, None], tf.string),
              weights=[2, 3, 4, 6],
              filter_regex=None,
              expected_unique_x=[b'a', b'b'],
              expected_summed_weights_per_x=[8, 7]),
          dict(
              testcase_name='regex_filtering',
              x=[['a\n', '', '\n\r'], ['\r', 'a', 'b']],
              x_spec=tf.TensorSpec(None, tf.string),
              weights=[[1, 2, 1], [1, 2, 2]],
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
              expected_unique_x=[b'a', b'b'],
              expected_summed_weights_per_x=[2, 2]),
          dict(
              testcase_name='regex_filtering_invalid_utf8',
              x=[[b'\xe1\n', b'\xa9', b'\n\xb8\r'],
                 [b'\xe8\r', b'\xc6', b'\n\xb3']],
              x_spec=tf.TensorSpec(None, tf.string),
              weights=[[1, 3, 1], [1, 4, 2]],
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
              expected_unique_x=[b'\xa9', b'\xc6'],
              expected_summed_weights_per_x=[3, 4]),
      ]))
  def test_reduce_batch_weighted_counts(self, x, x_spec, weights, filter_regex,
                                        expected_unique_x,
                                        expected_summed_weights_per_x,
                                        function_handler):
    """Weights are summed per unique value of x.

    Covers dense ranks 1-3, sparse and ragged inputs, and regex filtering
    (including invalid-UTF8 byte strings).
    """
    input_signature = [x_spec, tf.TensorSpec(None, tf.float32)]

    @function_handler(input_signature=input_signature)
    def _reduce_batch_weighted_counts(x, weights):
      (unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
       counts_per_x) = tf_utils.reduce_batch_weighted_counts(
           x, weights, filter_regex=filter_regex)
      # Without a label y, only unique values + summed weights are produced.
      self.assertIsNone(summed_positive_per_x_and_y)
      self.assertIsNone(counts_per_x)
      return unique_x, summed_weights_per_x

    unique_x, summed_weights_per_x = _reduce_batch_weighted_counts(x, weights)
    self.assertAllEqual(unique_x,
                        expected_unique_x)
    self.assertAllEqual(summed_weights_per_x,
                        expected_summed_weights_per_x)
  @test_case.named_parameters(
      test_case.cross_with_function_handlers([
          dict(
              testcase_name='rank1',
              x=['a', 'b', 'a'],
              filter_regex=None,
              expected_result=[b'a', b'b', b'a'],
          ),
          dict(
              testcase_name='rank2',
              x=[['a', 'b\r', 'a'], ['b\r', 'a', 'b\r']],
              filter_regex=None,
              expected_result=[b'a', b'b\r', b'a', b'b\r', b'a', b'b\r'],
          ),
          dict(
              testcase_name='rank3',
              x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
                 [['a', 'b', 'a'], ['b', 'a', 'b']]],
              filter_regex=None,
              expected_result=[
                  b'a', b'b', b'a', b'b', b'a', b'b', b'a', b'b', b'a', b'b',
                  b'a', b'b'
              ],
          ),
          dict(
              testcase_name='regex_filtering_empty_result',
              x=['a\n\r', 'b\n', 'a\r', '', 'a\rsd', ' \r', '\nas'],
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
              expected_result=[],
          ),
      ]))
  def test_reduce_batch_weighted_counts_weights_none(self, x, filter_regex,
                                                     expected_result,
                                                     function_handler):
    """With weights=None and force=False, x is returned flattened.

    The expected results contain repeats, i.e. no deduplication or
    counting happens on this path.
    """
    input_signature = [tf.TensorSpec(None, tf.string)]

    @function_handler(input_signature=input_signature)
    def _reduce_batch_weighted_counts(x):
      (unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
       counts_per_x) = tf_utils.reduce_batch_weighted_counts(
           x, force=False, filter_regex=filter_regex)
      # All aggregate outputs stay None when no weights are supplied.
      self.assertIsNone(summed_weights_per_x)
      self.assertIsNone(summed_positive_per_x_and_y)
      self.assertIsNone(counts_per_x)
      return unique_x

    unique_x = _reduce_batch_weighted_counts(x)
    self.assertAllEqual(unique_x, expected_result)
  @test_case.named_parameters(
      test_case.cross_with_function_handlers([
          dict(
              testcase_name='rank1',
              x=['a', 'b', 'a'],
              filter_regex=None,
              expected_result=([b'a', b'b'], [2, 1]),
          ),
          dict(
              testcase_name='rank3',
              x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
                 [['a', 'b', 'a'], ['b', 'a', 'b']]],
              filter_regex=None,
              expected_result=([b'a', b'b'], [6, 6]),
          ),
          dict(
              testcase_name='regex_filtering',
              x=['a\n\r', 'b\n', 'a\r', '', 'asd', ' ', '\nas'],
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
              expected_result=([b'asd', b' '], [1, 1]),
          ),
          dict(
              testcase_name='regex_filtering_empty_result',
              x=['a\n\r', 'b\n', 'a\r', '', 'a\rsd', ' \r', '\nas'],
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
              expected_result=([], []),
          ),
      ]))
  def test_reduce_batch_weighted_counts_weights_none_force(
      self, x, filter_regex, expected_result, function_handler):
    """With weights=None but force=True, unique values and counts ARE computed."""
    input_signature = [tf.TensorSpec(None, tf.string)]

    @function_handler(input_signature=input_signature)
    def _reduce_batch_weighted_counts(x):
      (unique_x, summed_weights_per_x, summed_positive_per_x_and_y,
       counts_per_x) = tf_utils.reduce_batch_weighted_counts(
           x, force=True, filter_regex=filter_regex)
      # Weighted aggregates stay None when no weights are supplied.
      self.assertIsNone(summed_weights_per_x)
      self.assertIsNone(summed_positive_per_x_and_y)
      return unique_x, counts_per_x

    expected_unique_x, expected_counts_per_x = expected_result
    unique_x, counts_per_x = _reduce_batch_weighted_counts(x)
    self.assertAllEqual(unique_x, expected_unique_x)
    self.assertAllEqual(counts_per_x, expected_counts_per_x)
  @test_case.named_parameters([
      dict(testcase_name='constant', get_value_fn=lambda: tf.constant([1.618])),
      dict(testcase_name='op', get_value_fn=lambda: tf.identity),
      dict(testcase_name='int', get_value_fn=lambda: 4),
      dict(testcase_name='object', get_value_fn=object),
      dict(
          testcase_name='sparse',
          get_value_fn=lambda: tf.SparseTensor(  # pylint: disable=g-long-lambda
              indices=[[0, 0], [2, 1]],
              values=['a', 'b'],
              dense_shape=[4, 2])),
      dict(
          testcase_name='ragged',
          get_value_fn=lambda: tf.RaggedTensor.from_row_splits(  # pylint: disable=g-long-lambda
              values=['a', 'b'],
              row_splits=[0, 1, 2])),
      dict(
          testcase_name='ragged_multi_dimension',
          get_value_fn=lambda: tf.RaggedTensor.from_row_splits(  # pylint: disable=g-long-lambda
              values=tf.RaggedTensor.from_row_splits(
                  values=[[0, 1], [2, 3]], row_splits=[0, 1, 2]),
              row_splits=[0, 2])),
  ])
  def test_hashable_tensor_or_op(self, get_value_fn):
    """hashable_tensor_or_op / deref_tensor_or_op round-trip.

    The ref must also be usable as a dict key, for plain values, ops,
    sparse and ragged tensors alike.
    """
    with tf.compat.v1.Graph().as_default():
      input_value = get_value_fn()
      input_ref = tf_utils.hashable_tensor_or_op(input_value)
      input_dict = {input_ref: input_value}
      input_deref = tf_utils.deref_tensor_or_op(input_ref)
      if isinstance(input_value, composite_tensor.CompositeTensor):
        self._assertCompositeRefEqual(
            input_ref, tf_utils.hashable_tensor_or_op(input_deref))
      else:
        self.assertAllEqual(input_ref,
                            tf_utils.hashable_tensor_or_op(input_deref))
      # For sparse inputs, compare only the values component below.
      if isinstance(input_value, tf.SparseTensor):
        input_deref = input_deref.values
        input_dict[input_ref] = input_dict[input_ref].values
        input_value = input_value.values
      self.assertAllEqual(input_value, input_deref)
      self.assertAllEqual(input_value, input_dict[input_ref])
  @test_case.named_parameters(
      test_case.cross_with_function_handlers([
          dict(
              testcase_name='rank1_with_weights_and_binary_y',
              x=['a', 'b', 'a'],
              weights=[1, 1, 2],
              y=[0, 1, 1],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'b', b'global_y_count_sentinel'], [3, 1, 4],
                  [[1, 2], [0, 1], [1, 3]], [2, 1, 3]),
              filter_regex=None,
          ),
          dict(
              testcase_name='rank1_with_weights_and_multi_class_y',
              x=['a', 'b\n', 'a', 'a'],
              weights=[1, 1, 2, 2],
              y=[0, 2, 1, 1],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'b\n', b'global_y_count_sentinel'], [5, 1, 6],
                  [[1, 4, 0], [0, 0, 1], [1, 4, 1]], [3, 1, 4]),
              filter_regex=None,
          ),
          dict(
              testcase_name='rank1_with_weights_and_missing_y_values',
              x=['a', 'b', 'a', 'a'],
              weights=[1, 1, 2, 2],
              y=[3, 5, 6, 6],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'b', b'global_y_count_sentinel'], [5, 1, 6],
                  [[0, 0, 0, 1, 0, 0, 4], [0, 0, 0, 0, 0, 1, 0],
                   [0, 0, 0, 1, 0, 1, 4]], [3, 1, 4]),
              filter_regex=None,
          ),
          dict(
              testcase_name='rank2_with_weights_and_binary_y',
              x=[['a', 'b', 'a'], ['b', 'a', 'b']],
              weights=[[1, 2, 1], [1, 2, 2]],
              y=[[1, 0, 1], [1, 0, 0]],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'b', b'global_y_count_sentinel'], [4, 5, 9],
                  [[2, 2], [4, 1], [6, 3]], [3, 3, 6]),
              filter_regex=None,
          ),
          dict(
              testcase_name='rank3_with_weights_and_binary_y',
              x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
                 [['a', 'b', 'a'], ['b', 'a', 'b']]],
              weights=[[[1, 1, 2], [1, 2, 1]], [[1, 2, 1], [1, 2, 1]]],
              y=[[[1, 1, 0], [1, 0, 1]], [[1, 0, 1], [1, 0, 1]]],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'b', b'global_y_count_sentinel'], [9, 7, 16],
                  [[6, 3], [2, 5], [8, 8]], [6, 6, 12]),
              filter_regex=None,
          ),
          dict(
              testcase_name='rank1_with_weights_multi_class_y_and_filtering',
              x=['\na\r', '', '\na\r', 'a', ''],
              weights=[1, 1, 2, 2, 3],
              y=[0, 2, 1, 1, 2],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'a', b'global_y_count_sentinel'], [2, 9],
                  [[0, 2, 0], [1, 4, 4]], [1, 5]),
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
          ),
          dict(
              testcase_name='rank1_with_weights_filtering_empty_result',
              x=['\na\r', '', '\na\r', '\ra', ''],
              weights=[1, 1, 2, 2, 3],
              y=[0, 2, 1, 1, 2],
              expected_result=tf_utils.ReducedBatchWeightedCounts(
                  [b'global_y_count_sentinel'], [9], [[1, 4, 4]], [5]),
              filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX,
          ),
      ]))
  def test_reduce_batch_coocurrences(self, x, weights, y, expected_result,
                                     filter_regex, function_handler):
    """Weighted (x, y) co-occurrence counts, including the global sentinel row."""
    input_signature = [tf.TensorSpec(None, tf.string),
                       tf.TensorSpec(None, tf.int64),
                       tf.TensorSpec(None, tf.int64)]

    @function_handler(input_signature=input_signature)
    def _reduce_batch_weighted_cooccurrences(x, y, weights):
      return tf_utils.reduce_batch_weighted_cooccurrences(
          x, y, weights, filter_regex=filter_regex)

    result = _reduce_batch_weighted_cooccurrences(x, y, weights)
    self.assertAllEqual(result.unique_x,
                        expected_result.unique_x)
    self.assertAllEqual(result.summed_weights_per_x,
                        expected_result.summed_weights_per_x)
    self.assertAllEqual(result.summed_positive_per_x_and_y,
                        expected_result.summed_positive_per_x_and_y)
    self.assertAllEqual(result.counts_per_x,
                        expected_result.counts_per_x)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='rank1_with_binary_y',
x=['a', 'b', 'a'],
y=[0, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [2, 1, 3],
[[1, 1], [0, 1], [1, 2]], [2, 1, 3]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank1_with_multi_class_y',
x=['yes', 'no', 'yes', 'may\rbe', 'yes'],
y=[1, 1, 0, 2, 3],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'yes', b'no', b'may\rbe', b'global_y_count_sentinel'],
[3, 1, 1, 5],
[[1, 1, 0, 1], [0, 1, 0, 0], [0, 0, 1, 0], [1, 2, 1, 1]],
[3, 1, 1, 5]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_binary_y',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[1, 0, 1], [1, 0, 0]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 2], [2, 1], [3, 3]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_missing_y_values',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[2, 0, 2], [2, 0, 0]],
# The label 1 isn't in the batch but it will have a position (with
# weights of 0) in the resulting array.
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 0, 2], [2, 0, 1], [3, 0, 3]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank2_with_multi_class_y',
x=[['a', 'b', 'a'], ['b', 'a', 'b']],
y=[[1, 0, 1], [1, 0, 2]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [3, 3, 6],
[[1, 2, 0], [1, 1, 1], [2, 3, 1]], [3, 3, 6]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank3_with_binary_y',
x=[[['a', 'b', 'a'], ['b', 'a', 'b']],
[['a', 'b', 'a'], ['b', 'a', 'b']]],
y=[[[1, 1, 0], [1, 0, 1]], [[1, 0, 1], [1, 0, 1]]],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [6, 6, 12],
[[3, 3], [1, 5], [4, 8]], [6, 6, 12]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=None),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [2, 1]],
values=['a', 'b'],
dense_shape=[4, 2]),
y=[0, 1, 0, 0],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [1, 1, 4],
[[1, 0], [1, 0], [3, 1]], [1, 1, 4]),
input_signature=[
tf.SparseTensorSpec([None, 2], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='empty_sparse',
x=tf.compat.v1.SparseTensorValue(
indices=np.empty([0, 2]), values=[], dense_shape=[4, 2]),
y=[1, 0, 1, 1],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'global_y_count_sentinel'], [4], [[1, 3]], [4]),
input_signature=[
tf.SparseTensorSpec([None, 2], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array(['a', 'b', 'a', 'b', 'b']),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
y=[1, 0],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'a', b'b', b'global_y_count_sentinel'], [2, 3, 2],
[[0, 2], [1, 2], [1, 1]], [2, 3, 2]),
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.string),
tf.TensorSpec([None], tf.int64)
],
filter_regex=None),
dict(
testcase_name='rank1_with_filtering',
x=['yes\n', 'no', 'yes\n', '', 'yes\n'],
y=[1, 1, 0, 2, 3],
expected_result=tf_utils.ReducedBatchWeightedCounts(
[b'no', b'global_y_count_sentinel'], [1, 5],
[[0, 1, 0, 0], [1, 2, 1, 1]], [1, 5]),
input_signature=[
tf.TensorSpec(None, tf.string),
tf.TensorSpec(None, tf.int64)
],
filter_regex=analyzers._EMPTY_STRING_OR_NEWLINE_CHARS_REGEX),
]))
def test_reduce_batch_coocurrences_no_weights(self, x, y, expected_result,
                                              input_signature, filter_regex,
                                              function_handler):
  """Checks reduce_batch_weighted_cooccurrences when no weights are given."""

  @function_handler(input_signature=input_signature)
  def _unweighted_cooccurrences(x, y):
    return tf_utils.reduce_batch_weighted_cooccurrences(
        x, y, filter_regex=filter_regex)

  result = _unweighted_cooccurrences(x, y)
  # Compare every field of the ReducedBatchWeightedCounts result.
  for field in ('unique_x', 'summed_weights_per_x',
                'summed_positive_per_x_and_y', 'counts_per_x'):
    self.assertAllEqual(getattr(result, field), getattr(expected_result, field))
@test_case.parameters(
    ([[1], [2]], [[1], [2], [3]], None, None, tf.errors.InvalidArgumentError,
     'Condition x == y did not hold element-wise:'),
    ([[1], [2], [3]], [[1], [2], [3]], [None, None], [None], ValueError,
     r'Shapes \(None, None\) and \(None,\) are incompatible'),
)
def test_same_shape_exceptions(self, x_input, y_input, x_shape, y_shape,
                               exception_cls, error_string):
  """Asserts that assert_same_shape raises for mismatched shapes."""
  with tf.compat.v1.Graph().as_default():
    x = tf.compat.v1.placeholder(tf.int32, x_shape)
    y = tf.compat.v1.placeholder(tf.int32, y_shape)
    with tf.compat.v1.Session() as sess:
      # Fix: assertRaisesRegexp is a deprecated alias (removed in
      # Python 3.12); assertRaisesRegex is the supported spelling.
      with self.assertRaisesRegex(exception_cls, error_string):
        sess.run(tf_utils.assert_same_shape(x, y), {x: x_input, y: y_input})
@test_case.named_parameters(test_case.FUNCTION_HANDLERS)
def test_same_shape(self, function_handler):
  """assert_same_shape returns its first argument when shapes agree."""
  signature = [
      tf.TensorSpec(None, tf.int64),
      tf.TensorSpec(None, tf.int64),
  ]

  @function_handler(input_signature=signature)
  def _passthrough(x, y):
    checked_x, _ = tf_utils.assert_same_shape(x, y)
    return checked_x

  values = [[1], [2], [3]]
  self.assertAllEqual(_passthrough(values, values), values)
@test_case.named_parameters([
dict(
testcase_name='_all_keys_in_vocab',
query_list=['a', 'a', 'b', 'a', 'b'],
key_vocab_list=['a', 'b'],
query_shape=[None],
expected_output=[0, 0, 1, 0, 1]),
dict(
testcase_name='_missing_keys_in_vocab',
query_list=['a', 'c', 'b', 'a', 'b'],
key_vocab_list=['a', 'b'],
query_shape=[None],
expected_output=[0, -1, 1, 0, 1]),
dict(
testcase_name='_nd_keys',
query_list=[['a', 'c', 'b'], ['a', 'b', 'a']],
key_vocab_list=['a', 'b'],
query_shape=[None, None],
expected_output=[[0, -1, 1], [0, 1, 0]]),
dict(
testcase_name='_empty_vocab',
query_list=['a', 'c', 'b', 'a', 'b'],
key_vocab_list=[],
query_shape=[None],
expected_output=[-1, -1, -1, -1, -1]),
dict(
testcase_name='_empty_query',
query_list=[],
key_vocab_list=['a'],
query_shape=[None],
expected_output=[]),
])
def test_lookup_key(self, query_list, key_vocab_list, query_shape,
                    expected_output):
  """Checks tf_utils.lookup_key: maps each query to its vocab index, -1 if OOV."""
  with tf.compat.v1.Graph().as_default():
    query_ph = tf.compat.v1.placeholder(
        dtype=tf.string, shape=query_shape, name='query')
    key_vocab_ph = tf.compat.v1.placeholder(
        dtype=tf.string, shape=[None], name='key_vocab')
    key_indices = tf_utils.lookup_key(query_ph, key_vocab_ph)
    with tf.compat.v1.Session().as_default() as sess:
      # Feed by tensor name (placeholders were given explicit names above).
      output = sess.run(
          key_indices,
          feed_dict={
              query_ph.name: query_list,
              key_vocab_ph.name: key_vocab_list
          })
      self.assertAllEqual(expected_output, output)
@test_case.named_parameters([
dict(
testcase_name='_with_default',
with_default_value=True,
input_keys=['<KEY>']),
dict(
testcase_name='_wihout_default',
with_default_value=False,
input_keys=['<KEY>']),
dict(
testcase_name='_single_oov_key',
with_default_value=False,
input_keys=['e'])
])
def test_apply_per_key_vocab(self, with_default_value, input_keys):
  """Checks apply_per_key_vocabulary lookups from a written vocab file.

  Each vocab line is '<value-pair> <key>'.  Keys missing from the vocab fall
  back to the parsed `default_value` when one is given, otherwise to zeros.
  """
  default_value = '-7,-5' if with_default_value else None
  vocab_data = [('0,0', 'a'), ('1,-1', 'b'), ('-1,1', 'c'), ('-2,2', 'd')]
  expected_missing_key_result = [-7, -5] if default_value else [0, 0]
  expected_lookup_results = {
      'a': [0, 0],
      'b': [1, -1],
      'c': [-1, 1],
      'd': [-2, 2],
  }
  with tf.compat.v1.Graph().as_default():
    input_tensor = _value_to_tensor(input_keys)
    vocab_filename = os.path.join(self.get_temp_dir(), 'test.txt')
    encoded_vocab = '\n'.join([' '.join(pair) for pair in vocab_data])
    with tf.io.gfile.GFile(vocab_filename, 'w') as f:
      f.write(encoded_vocab)
    output_tensor = tf_utils.apply_per_key_vocabulary(
        tf.constant(vocab_filename),
        input_tensor,
        default_value=default_value)
    with tf.compat.v1.Session() as sess:
      # The vocabulary is backed by a lookup table, so tables must be
      # initialized before evaluating the output tensor.
      sess.run(tf.compat.v1.tables_initializer())
      output = output_tensor.eval()
    expected_data = [
        expected_lookup_results.get(key, expected_missing_key_result)
        for key in input_keys
    ]
    self.assertAllEqual(output, expected_data)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[1], [2]]],
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.int64)]),
dict(
testcase_name='dense_with_nans',
x=[[[1], [np.nan]], [[1], [2]]],
expected_result=3,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[1], [2]]],
expected_result=[[2], [2]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.int64)]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[[1], [2]], [[1], [np.nan]]],
expected_result=[[2], [1]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4, 1]),
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0],
[1, 3, 0]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4, 1]),
expected_result=4,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4, 1]),
expected_result=[[1], [1], [2], [0]],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='sparse_elementwise_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [1, 1, 0], [1, 2, 0],
[1, 3, 0]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4, 1]),
expected_result=[[1], [1], [2], [0]],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 4, 1], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=5,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 3, 4, 6])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=5,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 2, 4, 5])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=[[[2, 1], [0., 0], [1, 1]],
[[0, 0], [0, 0], [0, 0]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 2, 4, 6])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_result=[[[2, 1], [0., 0], [1, 1]],
[[0, 0], [0, 0], [0, 0]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count(self, x, input_signature, expected_result,
                            reduce_instance_dims, function_handler):
  """reduce_batch_count returns the expected non-NaN element counts."""

  @function_handler(input_signature=input_signature)
  def _count(x):
    counts = tf_utils.reduce_batch_count(
        x, reduce_instance_dims=reduce_instance_dims)
    # Verify that the output shape is maintained for elementwise reductions
    # of non-ragged inputs.
    # TODO(b/178189903): This will fail if _dense_shape_default isn't set in
    # reduce_batch_count.
    shape_is_checked = (not isinstance(x, tf.RaggedTensor) and
                        not reduce_instance_dims and x.get_shape().ndims)
    if shape_is_checked:
      self.assertEqual(x.get_shape()[1:].as_list(),
                       counts.get_shape().as_list())
    return counts

  self.assertAllEqual(_count(x), expected_result)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[3], [4]]],
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_with_nans',
x=[[[1], [2]], [[3], [np.nan]], [[np.nan], [4]]],
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[3], [4]]],
expected_count=[[2.], [2.]],
expected_mean=[[2.], [3.]],
expected_var=[[1.], [1.]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[[1], [2]], [[3], [np.nan]], [[np.nan], [4]]],
expected_count=[[2.], [2.]],
expected_mean=[[2.], [3.]],
expected_var=[[1.], [1.]],
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2]],
values=[1., 2., 3., 4.],
dense_shape=[2, 4]),
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [1, 3]],
values=[1., 2., 3., 4., np.nan],
dense_shape=[2, 4]),
expected_count=4,
expected_mean=2.5,
expected_var=1.25,
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 3], [1, 1], [1, 3]],
values=[1., 2., 3., 4.],
dense_shape=[2, 5]),
expected_count=[1.0, 1.0, 0.0, 2.0, 0.0],
expected_mean=[1.0, 3.0, 0.0, 3.0, 0.0],
expected_var=[0.0, 0.0, 0.0, 1.0, 0.0],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5], tf.float32)]),
dict(
testcase_name='sparse_elementwise_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 3], [1, 1], [1, 2], [1, 3]],
values=[1., 2., 3., np.nan, 4.],
dense_shape=[2, 5]),
expected_count=[1.0, 1.0, 0.0, 2.0, 0.0],
expected_mean=[1.0, 3.0, 0.0, 3.0, 0.0],
expected_var=[0.0, 0.0, 0.0, 1.0, 0.0],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5], tf.float32)]),
dict(
testcase_name='sparse_3d_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 3], [0, 1, 0], [0, 1, 3], [1, 1, 1],
[1, 1, 3]],
values=[-10., 1., 2., 3., 4.],
dense_shape=[2, 3, 5]),
expected_count=[[0, 0, 0, 1, 0], [1, 1, 0, 2, 0], [0] * 5],
expected_mean=[[0, 0, 0, -10, 0], [1, 3, 0, 3, 0], [0] * 5],
expected_var=[[0] * 5, [0, 0, 0, 1, 0], [0] * 5],
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 3, 5], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=5,
expected_mean=3,
expected_var=2,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 3, 4, 6])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=5,
expected_mean=3,
expected_var=2,
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 2, 4, 5])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=[[[2., 1.], [0., 0.], [1., 1.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_mean=[[[3., 2.], [0., 0.], [3., 4.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_var=[[[4., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
dict(
testcase_name='ragged_elementwise_with_nans',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5., np.nan],
np.float32),
row_splits=np.array([0, 2, 2, 4, 6])),
row_splits=np.array([0, 3, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_count=[[[2., 1.], [0., 0.], [1., 1.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_mean=[[[3., 2.], [0., 0.], [3., 4.]],
[[0., 0.], [0., 0.], [0., 0.]]],
expected_var=[[[4., 0.], [0., 0.], [0., 0.]],
[[0., 0.], [0., 0.], [0., 0.]]],
reduce_instance_dims=False,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count_mean_and_var(
    self, x, input_signature, expected_count, expected_mean, expected_var,
    reduce_instance_dims, function_handler):
  """Checks count, mean and variance from reduce_batch_count_mean_and_var."""
  @function_handler(input_signature=input_signature)
  def _reduce_batch_count_mean_and_var(x):
    result = tf_utils.reduce_batch_count_mean_and_var(
        x, reduce_instance_dims=reduce_instance_dims)
    # Verify that the output shapes are maintained.
    # TODO(b/178189903): This will fail if _dense_shape_default isn't set in
    # reduce_batch_count.
    if (not isinstance(x, tf.RaggedTensor) and not reduce_instance_dims and
        x.get_shape().ndims):
      for tensor in result:
        self.assertEqual(x.get_shape()[1:].as_list(),
                         tensor.get_shape().as_list())
    return result
  # Result is the tuple (count, mean, variance).
  count, mean, var = _reduce_batch_count_mean_and_var(x)
  self.assertAllEqual(expected_count, count)
  self.assertAllEqual(expected_mean, mean)
  self.assertAllEqual(expected_var, var)
@test_case.named_parameters([
dict(
testcase_name='num_samples_1',
num_samples=1,
dtype=tf.float32,
expected_counts=np.array([1, 0, 0, 0], np.float32),
expected_factors=np.array([[1.0], [0.0], [0.0], [0.0]], np.float32)),
dict(
testcase_name='num_samples_2',
num_samples=2,
dtype=tf.float32,
expected_counts=np.array([2, 1, 0, 0], np.float32),
expected_factors=np.array(
[[1. / 2., 1. / 2.], [-1. / 2., 1. / 2.], [0., 0.], [0., 0.]],
np.float32)),
dict(
testcase_name='num_samples_3',
num_samples=3,
dtype=tf.float32,
expected_counts=np.array([3, 3, 1, 0], np.float32),
expected_factors=np.array(
[[1. / 3., 1. / 3., 1. / 3.], [-1. / 3., 0., 1. / 3.],
[1. / 3., -2. / 3., 1. / 3.], [0., 0., 0.]], np.float32)),
dict(
testcase_name='num_samples_4',
num_samples=4,
dtype=tf.float32,
expected_counts=np.array([4, 6, 4, 1], np.float32),
expected_factors=np.array(
[[1. / 4., 1. / 4., 1. / 4., 1. / 4.],
[-3. / 12., -1. / 12., 1. / 12., 3. / 12.],
[1. / 4., -1. / 4., -1. / 4., 1. / 4.],
[-1. / 4., 3. / 4., -3. / 4., 1. / 4.]], np.float32))
])
def test_num_terms_and_factors(
    self, num_samples, dtype, expected_counts, expected_factors):
  """_num_terms_and_factors yields four term counts then four factor sets."""
  results = tf_utils._num_terms_and_factors(num_samples, dtype)
  # The first four entries are the term counts.
  counts = results[:4]
  assert len(expected_counts) == len(counts), (expected_counts, counts)
  for actual, expected in zip(counts, expected_counts):
    self.assertEqual(actual.dtype, dtype)
    self.assertAllClose(actual, expected)
  # The remaining entries are the factors.
  factors = results[4:]
  assert len(expected_factors) == len(factors), (expected_factors, factors)
  for actual, expected in zip(factors, expected_factors):
    self.assertEqual(actual.dtype, dtype)
    self.assertAllClose(actual, expected)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[[1], [2]], [[3], [4]]],
expected_counts=np.array([4., 6., 4., 1.], np.float32),
expected_moments=np.array([2.5, 10.0 / 12.0, 0.0, 0.0],
np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_large',
x=[2.0, 3.0, 4.0, 2.4, 5.5, 1.2, 5.4, 2.2, 7.1, 1.3, 1.5],
expected_counts=np.array(
[11, 11 * 10 // 2, 11 * 10 * 9 // 6, 11 * 10 * 9 * 8 // 24],
np.float32),
expected_moments=np.array([
3.2363636363636363, 1.141818181818182, 0.31272727272727263,
0.026666666666666616
], np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_very_large',
x=-np.log(1.0 - np.arange(0, 1, 1e-6, dtype=np.float32)),
expected_counts=np.array([
1000000, 499999500000.0, 1.66666166667e+17,
4.1666416667125e+22
], np.float32),
expected_moments=np.array([
0.99999217330, 0.4999936732947, 0.166660839941,
0.0833278399134
], np.float32),
reduce_instance_dims=True,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='dense_elementwise',
x=[[[1], [2]], [[3], [4]]],
expected_counts=np.array(
[[[2], [2]], [[1], [1]], [[0], [0]], [[0], [0]]], np.float32),
expected_moments=np.array([[[2.0], [3.0]], [[1.0], [1.0]],
[[0.0], [0.0]], [[0.0], [0.0]]],
np.float32),
reduce_instance_dims=False,
input_signature=[tf.TensorSpec(None, tf.float32)]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [2, 0], [2, 2]],
values=[1., 2., 3., 4.],
dense_shape=[3, 4]),
expected_counts=np.array([4, 6, 4, 1], np.float32),
expected_moments=np.array([2.5, 10.0 / 12.0, 0.0, 0.0],
np.float32),
reduce_instance_dims=True,
input_signature=[tf.SparseTensorSpec([None, 4], tf.float32)]),
dict(
testcase_name='sparse_elementwise',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0, 0], [0, 2, 0], [2, 0, 0], [2, 2, 0],
[3, 3, 0]],
values=[1., 2., 3., 4., 5.],
dense_shape=[3, 5, 1]),
expected_counts=np.array(
[[[2], [0], [2], [1], [0]], [[1], [0], [1], [0], [0]],
[[0], [0], [0], [0], [0]], [[0], [0], [0], [0], [0]]],
np.float32),
expected_moments=np.array([[[2.0], [0.0], [3.0], [5.0], [0.0]],
[[1.0], [0.0], [1.0], [0.0], [0.0]],
[[0.0], [0.0], [0.0], [0.0], [0.0]],
[[0.0], [0.0], [0.0], [0.0], [0.0]]],
np.float32),
reduce_instance_dims=False,
input_signature=[tf.SparseTensorSpec([None, 5, 1], tf.float32)]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=np.array([1., 2., 3., 4., 5.], np.float32),
row_splits=np.array([0, 2, 3, 4, 5])),
row_splits=np.array([0, 2, 3, 4])),
row_splits=np.array([0, 2, 3])),
expected_counts=np.array([5., 10., 10., 5.], np.float32),
expected_moments=np.array([3., 1., 0., 0.], np.float32),
reduce_instance_dims=True,
input_signature=[
tf.RaggedTensorSpec([None, None, None, None], tf.float32)
]),
]))
def test_reduce_batch_count_l_moments(
    self, x, input_signature, expected_counts, expected_moments,
    reduce_instance_dims, function_handler):
  """Checks the four L-moment counts and moments returned for a batch."""
  @function_handler(input_signature=input_signature)
  def _reduce_batch_count_l_moments(x):
    result = tf_utils.reduce_batch_count_l_moments(
        x, reduce_instance_dims=reduce_instance_dims)
    for tensor in result:
      # Elementwise reductions must preserve the per-instance shape.
      if not reduce_instance_dims and x.get_shape().ndims:
        self.assertEqual(x.get_shape()[1:].as_list(),
                         tensor.get_shape().as_list())
    return result
  count_and_moments = _reduce_batch_count_l_moments(x)
  # The result interleaves (count_1, l_1, count_2, l_2, ...).
  counts = count_and_moments[0::2]
  moments = count_and_moments[1::2]
  for i in range(0, 4):
    self.assertEqual(counts[i].dtype, expected_counts[i].dtype)
    self.assertAllClose(counts[i], expected_counts[i], rtol=1e-8)
    self.assertEqual(moments[i].dtype, expected_moments[i].dtype)
    self.assertAllClose(moments[i], expected_moments[i], rtol=1e-8)
@test_case.named_parameters(
test_case.cross_with_function_handlers([
dict(
testcase_name='dense',
x=[[1], [2], [3], [4], [4]],
key=['a', 'a', 'a', 'b', 'a'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4., 1.],
expected_mean=[2.5, 4.],
expected_var=[1.25, 0.],
reduce_instance_dims=True,
input_signature=[
tf.TensorSpec([None, 1], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_with_nans',
x=[[1], [2], [3], [4], [4], [np.nan], [np.nan]],
key=['a', 'a', 'a', 'b', 'a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4., 1.],
expected_mean=[2.5, 4.],
expected_var=[1.25, 0.],
reduce_instance_dims=True,
input_signature=[
tf.TensorSpec([None, 1], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_elementwise',
x=[[1, 2], [3, 4], [1, 2]],
key=['a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[[2., 2.], [1., 1.]],
expected_mean=[[2., 3.], [1., 2.]],
expected_var=[[1., 1.], [0., 0.]],
reduce_instance_dims=False,
input_signature=[
tf.TensorSpec([None, 2], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='dense_elementwise_with_nans',
x=[[1, 2], [3, 4], [1, 2], [np.nan, np.nan]],
key=['a', 'a', 'b', 'a'],
expected_key_vocab=[b'a', b'b'],
expected_count=[[2., 2.], [1., 1.]],
expected_mean=[[2., 3.], [1., 2.]],
expected_var=[[1., 1.], [0., 0.]],
reduce_instance_dims=False,
input_signature=[
tf.TensorSpec([None, 2], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='sparse',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=[1., 2., 3., 4., 4.],
dense_shape=[3, 4]),
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=['a', 'a', 'a', 'a', 'b'],
dense_shape=[3, 4]),
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.SparseTensorSpec([None, 4], tf.string)
]),
dict(
testcase_name='sparse_with_nans',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 2], [2, 3]],
values=[1., 2., 3., 4., np.nan, 4.],
dense_shape=[3, 4]),
key=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 2], [2, 3]],
values=['a', 'a', 'a', 'a', 'a', 'b'],
dense_shape=[3, 4]),
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.SparseTensorSpec([None, 4], tf.string)
]),
dict(
testcase_name='sparse_x_dense_key',
x=tf.compat.v1.SparseTensorValue(
indices=[[0, 0], [0, 2], [1, 1], [1, 2], [2, 3]],
values=[1., 2., 3., 4., 4.],
dense_shape=[3, 4]),
key=['a', 'a', 'b'],
expected_key_vocab=[b'a', b'b'],
expected_count=[4, 1],
expected_mean=[2.5, 4],
expected_var=[1.25, 0],
reduce_instance_dims=True,
input_signature=[
tf.SparseTensorSpec([None, 4], tf.float32),
tf.TensorSpec([None], tf.string)
]),
dict(
testcase_name='ragged',
x=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values=tf.compat.v1.ragged.RaggedTensorValue(
values= | np.array([3., 2., 3., 4., 5.], np.float32) | numpy.array |
import tensorflow as tf
import cv2
import os
import numpy as np
from environment import ROOT_DIR
def lip_reading_image_processing(image):
    """Illumination-normalize one video frame.

    Divides the frame by a heavily blurred copy of itself (33x33 Gaussian)
    and rescales by 255, which flattens slow illumination gradients while
    keeping local contrast such as lip edges.

    Args:
        image: frame as a numpy array (as returned by cv2.VideoCapture.read).

    Returns:
        The normalized frame as a float numpy array.
    """
    blurred = cv2.GaussianBlur(image, (33, 33), 0)
    # Bug fix: pixels where the blurred image is 0 previously caused a
    # division by zero (inf/NaN in the output); treat those pixels as 1.
    safe_blurred = np.where(blurred == 0, 1, blurred)
    image = image / safe_blurred * 255
    return image
def lip_reading_augmentation(images):
    """Return the frame sequence unchanged.

    Geometric augmentations (rotation/shear/translation) were tried here and
    are currently disabled, so this is a deliberate pass-through.
    """
    return images
def LR_preprocessor(ID, augmentation=False):
    """Read a video and return its illumination-normalized frames.

    Args:
        ID: path of the video file to read.
        augmentation: when True, run the (currently pass-through)
            augmentation step on the frame list.

    Returns:
        List of processed frames; empty if the video cannot be read.
    """
    images = []
    vidcap = cv2.VideoCapture(ID)
    success, image = vidcap.read()
    # Bug fix: the original forced `success = True` after the first read,
    # so an unreadable video passed image=None into the processing step and
    # crashed.  Only loop while frames are actually returned.
    while success:
        images.append(lip_reading_image_processing(image))
        success, image = vidcap.read()
    vidcap.release()
    if augmentation:
        images = lip_reading_augmentation(images)
    return images
def augment_brightness_camera_images(image):
    """Randomly scale the brightness (HSV V channel) of an RGB image.

    The scale factor is drawn uniformly from [0.25, 1.25).

    Args:
        image: RGB image as a uint8 numpy array.

    Returns:
        RGB image of the same shape with perturbed brightness.
    """
    image1 = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    random_bright = .25 + np.random.uniform()
    # Bug fix: scaling the V channel can exceed 255, which wrapped around
    # under uint8 assignment; clip before writing back.
    image1[:, :, 2] = np.clip(image1[:, :, 2] * random_bright, 0, 255)
    image1 = cv2.cvtColor(image1, cv2.COLOR_HSV2RGB)
    return image1
def transform_image(images, ang_range, shear_range, trans_range, brightness=0):
# Rotation
ang_rot = np.random.uniform(ang_range) - ang_range / 2
rows, cols = images[0].shape
Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)
# Translation
tr_x = trans_range * np.random.uniform() - trans_range / 2
tr_y = trans_range * np.random.uniform() - trans_range / 2
Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])
# Shear
pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
pt1 = 5 + shear_range * | np.random.uniform() | numpy.random.uniform |
#!/usr/bin/env python3
# coding: utf-8
# coding: utf-8
# Created on Sun Jun 14 2020
# @author: <NAME>
import os, sys
import numpy as np
#import corner as corner
import corner as corner
import dill
from scipy.ndimage import gaussian_filter
import time
sys.path.append('../lib/') #RV_mod directory must be in your path
import RV_mod as rv
################# Plotting #############################
import matplotlib.pyplot as plt
from matplotlib import gridspec
import matplotlib as mpl
from pathos.pools import ProcessPool as Pool
###### For nice plotting ##############
# Global Matplotlib styling: thicker axes and larger, padded tick marks.
mpl.rcParams['axes.linewidth'] = 2.0 #set the value globally
mpl.rcParams['xtick.major.pad']='8'
mpl.rcParams['ytick.major.pad']='2'
# set tick width
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.major.width'] = 2
mpl.rcParams['xtick.minor.size'] = 5
mpl.rcParams['xtick.minor.width'] = 2
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.major.width'] = 2
mpl.rcParams['ytick.minor.size'] = 5
mpl.rcParams['ytick.minor.width'] = 2
# NOTE(review): usetex=True requires a working LaTeX installation, and
# 'family': 'normal' is not a standard font family — confirm intended font.
mpl.rc('text',usetex=True)
font = {'family' : 'normal','weight' : 'bold','size' : 18,'serif':['Helvetica']}
mpl.rc('font', **font)
################# Check script arguments ####################
# Usage: script -ses <session_file>.  The session is a dill pickle produced
# by the Exo-Striker/RV_mod fitting tools; `fit` is used by everything below.
arguments = len(sys.argv) - 1
if arguments != 0 and sys.argv[1] == '-ses' and os.path.exists(sys.argv[2]):
    try:
        # Use a context manager so the file is closed even if dill fails.
        with open(sys.argv[2], 'rb') as file_pi:
            fit = dill.load(file_pi)
    except (ImportError, KeyError, AttributeError):
        print("You have entered wrong session. %s cannot be recognized or it does not exist" % sys.argv[2])
        # Bug fix: the original fell through here with `fit` undefined and
        # crashed later; also exit non-zero to signal failure.
        sys.exit(1)
else:
    # Guard against missing arguments before indexing sys.argv[2].
    session_name = sys.argv[2] if arguments >= 2 else '(none)'
    print("You have entered wrong session. %s cannot be recognized or it does not exist" % session_name)
    # Bug fix: exiting with status 0 signalled success on a usage error.
    sys.exit(1)
# Optional command-line switches: each flag simply toggles a boolean.
make_mass = '-mass' in sys.argv
make_a = '-semimajor' in sys.argv
make_stab = '-stab' in sys.argv
best = '-best' in sys.argv
median = '-median' in sys.argv
mean = '-mean' in sys.argv
print_output = '-print_output' in sys.argv
help = '-help' in sys.argv
################# Work starts here ########################
#### load the samples, labels and lnL values
# Flatten the per-walker log-likelihoods into one 1-D array; `samples` is the
# (n_samples x n_parameters) posterior chain and `labels` names each column.
ln = np.hstack(fit.mcmc_sampler.lnL)
samples = np.array(fit.mcmc_sampler.samples)
labels = fit.e_for_mcmc
# Print usage information plus the index of each fitted parameter, then exit.
if help:
    help_text ="""
    Permitted keywords are:
    -mass (calculates and includes the pl. mass)
    -semimajor (calculates and includes the pl. semimajor axis)
    -median (posterior medians showed)
    -mean (posterior medians showed)
    -print_output (prints |best|median|mean| parameters and 1sigma errors)
    -help (shows this)
    """
    print(help_text)
    print(" ")
    # NOTE(review): "indecies" is a typo in user-facing output ("indices").
    print("parameter indecies are:")
    for i in range(len(labels)):
        print("%s %s"%(i,labels[i]))
    sys.exit(0)
# Choose which posterior summary is reported: -mean takes priority over
# -median; with neither flag, the maximum-likelihood ("best") sample is used.
if mean:
    best_fit_par = fit.mcmc_stat["mean"]
    median = False  # later planet-mass code branches on `median`; force "mean" mode
elif median:
    best_fit_par = fit.mcmc_stat["median"]
else:
    best_fit_par = fit.mcmc_stat["best"]
############### make "Gaussan" samples of the stellar parameters ##############
m_s = np.random.normal(loc=fit.stellar_mass, scale=fit.stellar_mass_err, size=len(samples[:,0]))
r_s = np.random.normal(loc=fit.stellar_radius, scale=fit.stellar_radius_err, size=len(samples[:,0]))
L_s = np.random.normal(loc=fit.stellar_luminosity,scale=fit.stellar_luminosity_err,size=len(samples[:,0]))
vsini = np.random.normal(loc=fit.stellar_vsini, scale=fit.stellar_vsini_err, size=len(samples[:,0]))
######### define new samples, labels and best-fit params to be refilled again
######### with masses, semi-major axes, etc. (to be re-worked).
samp = []
samp_labels = []
samp_best_fit_par = []
for i in range(len(labels)):
ss = np.hstack(samples[:,i])
samp.append(ss)
samp_labels.append(labels[i])
samp_best_fit_par.append(best_fit_par[i])
letters = ['b','c','d','e'] #... For the planets
if make_mass:
for i in range(fit.npl):
let = letters[i]
K = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'K$_%s$'%let]])
P = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'P$_%s$'%let]])
ecc = np.hstack(samples[:,[ii for ii, j in enumerate(labels) if j == 'e$_%s$'%let]])
# i = samples[:,[ii for ii, j in enumerate(labels) if j == 'i$_%s$'%let]]
samp.append(np.array(rv.get_mass(K,P, ecc, 90.0, m_s)))
samp_labels.append(r'm $\sin i_%s$'%let)
if mean:
samp_best_fit_par.append(rv.get_mass(np.mean(K),np.mean(P),np.mean(ecc), 90.0, np.mean(m_s)))
elif median:
samp_best_fit_par.append(rv.get_mass(np.median(K),np.median(P),np.median(ecc), 90.0, | np.median(m_s) | numpy.median |
import numpy as np
import random
from scipy.spatial.transform import Rotation as R
def rotation_2_quaternion(rot):
    """Convert a 3x3 rotation matrix to a quaternion.

    Returns a numpy array in scipy's [x, y, z, w] ordering.
    """
    return R.from_matrix(rot).as_quat()
def rotateVecRot(vec, rot):
    """Rotate vector(s) `vec` by the 3x3 rotation matrix `rot`."""
    return R.from_matrix(rot).apply(vec)
def rotateVecQuat(vec, quat):
    """Rotate vector(s) `vec` by the quaternion `quat` ([x, y, z, w])."""
    return R.from_quat(quat).apply(vec)
def quaternion_2_rotation(q):
    """Convert a quaternion to a 3x3 matrix using this project's convention.

    NOTE(review): this is NOT the textbook quaternion-to-rotation formula —
    the diagonal terms of rows 1 and 2 are negated (e.g. -(1-(q11+q33))),
    which bakes an axis flip into the result.  quaternion_2_matrix below
    uses the same sign pattern, so it appears deliberate (presumably a
    camera-coordinate convention) — verify against callers before changing.
    """
    n = np.dot(q, q)
    # epsilon for testing whether a number is close to zero
    _EPS = np.finfo(float).eps * 4.0
    if n < _EPS:
        # Near-zero quaternion: fall back to the identity rotation.
        return np.identity(3)
    q *= np.sqrt(2.0 / n)  # NOTE(review): mutates the caller's array in place.
    q = np.outer(q, q)
    return np.array([
        [1.0-(q[1, 1]+q[2, 2]), -(q[2, 3]-q[1, 0]), (q[1, 3]+q[2, 0])],
        [q[2, 3]+q[1, 0], -(1.0-(q[1, 1]+q[3, 3])), (q[1, 2]-q[3, 0])],
        [-(q[1, 3]-q[2, 0]), (q[1, 2]+q[3, 0]), -(1.0-(q[2, 2]+q[3, 3]))]])
def quaternion_2_matrix(quat):
    """Convert a 7-element pose to a 4x4 homogeneous matrix.

    quat[0:3] is used as the translation column and quat[3:7] as the
    quaternion.  NOTE(review): the rotation sub-matrix follows the same
    non-standard sign convention as quaternion_2_rotation above (negated
    diagonals on rows 1 and 2) — confirm the intended quaternion component
    ordering and coordinate convention against the callers.
    """
    q = np.array(quat[3:7],dtype=np.float64, copy=True)
    n = np.dot(q, q)
    # epsilon for testing whether a number is close to zero
    _EPS = np.finfo(float).eps * 4.0
    if n < _EPS:
        # Degenerate quaternion: return identity (translation is dropped here).
        return np.identity(4)
    q *= np.sqrt(2.0 / n)
    q = np.outer(q, q)
    return np.array([
        [1.0-(q[1, 1]+q[2, 2]), -(q[2, 3]-q[1, 0]), (q[1, 3]+q[2, 0]), quat[0]],
        [q[2, 3]+q[1, 0], -(1.0-(q[1, 1]+q[3, 3])), (q[1, 2]-q[3, 0]), quat[1]],
        [-(q[1, 3]-q[2, 0]), (q[1, 2]+q[3, 0]), -(1.0-(q[2, 2]+q[3, 3])), quat[2]],
        [0.0, 0.0, 0.0, 1.0]])
def isRotationMatrix(R):
    """Return True if R is numerically orthogonal, i.e. R^T R is the identity.

    (Does not check det(R) == +1, so reflections also pass.)
    """
    gram = np.dot(np.transpose(R), R)
    deviation = np.linalg.norm(np.identity(3, dtype=R.dtype) - gram)
    return deviation < 1e-8
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
    """Decompose rotation matrix R into [x, y, z] Euler angles in radians."""
    # assert(isRotationMatrix(R))
    cos_y = np.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
    if cos_y < 1e-8:
        # Gimbal lock: pitch near +/-90 degrees, yaw is not recoverable.
        roll = np.arctan2(-R[1, 2], R[1, 1])
        pitch = np.arctan2(-R[2, 0], cos_y)
        yaw = 0
    else:
        roll = np.arctan2(R[2, 1], R[2, 2])
        pitch = np.arctan2(-R[2, 0], cos_y)
        yaw = np.arctan2(R[1, 0], R[0, 0])
    return np.array([roll, pitch, yaw])
def eulerAnglesToRotationMatrix(theta):
    """Compose R = Rz(theta[2]) @ Ry(theta[1]) @ Rx(theta[0]) from Euler angles."""
    cx, sx = np.cos(theta[0]), np.sin(theta[0])
    cy, sy = np.cos(theta[1]), np.sin(theta[1])
    cz, sz = np.cos(theta[2]), np.sin(theta[2])
    rot_x = np.array([[1, 0, 0],
                      [0, cx, -sx],
                      [0, sx, cx]])
    rot_y = np.array([[cy, 0, sy],
                      [0, 1, 0],
                      [-sy, 0, cy]])
    rot_z = np.array([[cz, -sz, 0],
                      [sz, cz, 0],
                      [0, 0, 1]])
    return rot_z.dot(rot_y).dot(rot_x)
def randamPose(translation):
    """Build a 4x4 homogeneous transform with a uniformly random yaw.

    :param translation: length-3 translation placed in the last column
    :return: 4x4 numpy array with a random rotation about z and the given
             translation

    Bug fix: the original used ``math.pi`` but this module never imports
    ``math``, so calling it raised NameError; ``np.pi`` is numerically
    identical and already in scope.
    """
    random_angles = [0, 0, random.random() * np.pi * 2.0]
    transform_matrix = np.eye(4)
    transform_matrix[:3, :3] = eulerAnglesToRotationMatrix(random_angles)
    transform_matrix[:3, -1] = np.array(translation)
    return transform_matrix
def refineRotationTransform(T):
    """Zero the roll/pitch of T's rotation, keeping only its yaw (z) component.

    NOTE(review): ``return_T = T`` aliases the input, so T is modified in
    place *and* returned — callers still holding T see the change; confirm
    that in-place mutation is intended.
    """
    return_T = T
    R = T[:3,:3]
    # Decompose into Euler angles via the sibling helper above.
    angles = rotationMatrixToEulerAngles(R)
    # Drop roll (x) and pitch (y); keep yaw (z) only.
    angles[0] = 0
    angles[1] = 0
    R_new = eulerAnglesToRotationMatrix(angles)
    return_T[:3,:3] = R_new
    return return_T
def lookat2rotation(vec_x, vec_y, vec_z):
    """Normalise three axis vectors and return them as the columns of a 3x3 matrix."""
    axes = [np.reshape(v, (1, 3)) for v in (vec_x, vec_y, vec_z)]
    unit_axes = [axis / np.linalg.norm(axis) for axis in axes]
    return np.vstack(unit_axes).T
## Verified
def lookat2RotationTransform(v,t,option = "RightHand"):
# ref link: https://www.scichart.com/documentation/win/current/Orientation%20(3D%20Space)%20in%20the%20SciChart3DSurface.html
T = np.eye(4)
if option == "RightHand":# Zup
world_up = np.array([0,0,1]).reshape((1,3)) # all in 3 by 1 vector
if v.shape[0] == 1:
cam_forward = v
else:
cam_forward = np.transpose(v)
if np.sum(np.cross(cam_forward, world_up)) == 0:
world_up = np.array([0,1,1]).reshape((1,3))
cam_right = np.cross(cam_forward, world_up)
cam_right= cam_right/np.linalg.norm(cam_right)
cam_up= np.cross(cam_right, cam_forward)
cam_up = cam_up/np.linalg.norm(cam_up)
# ### right hand trial
R = np.transpose(np.vstack((cam_right, -cam_up, cam_forward))) #ok
T[:3,:3] = R
T[:3,3] = | np.transpose(t) | numpy.transpose |
# -*- coding: utf-8 -*-
"""
pipeline
data pipeline from image root folder to processed tensors of train test batches
for images and labels
"""
import os
import functools
import collections
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn import model_selection, preprocessing
def folder_traverse(root_dir, ext=None):
    """Recursively map all files under ``root_dir``.

    :param root_dir: directory to walk
    :param ext: filename suffix filter (e.g. ``'.jpg'``); ``None`` matches all
                files — the original crashed with TypeError on the default,
                since ``str.endswith(None)`` is invalid
    :return: dict mapping each directory path to a list of matching filenames
    :raises RuntimeError: if ``root_dir`` does not exist
    """
    if not os.path.exists(root_dir):
        raise RuntimeError('{0} doesn\'t exist.'.format(root_dir))
    file_structure = collections.defaultdict(list)
    for item in os.scandir(root_dir):
        if item.is_dir():
            # Merge the sub-directory's mapping into ours.
            file_structure.update(folder_traverse(item.path, ext))
        elif item.is_file() and (ext is None or item.name.endswith(ext)):
            file_structure[os.path.dirname(item.path)].append(item.name)
    return file_structure
def resample(feature_index, labels, balance='auto'):
    """use oversampling to balance class, after split of training set."""
    # Imported lazily so the module works without imbalanced-learn installed
    # unless resampling is actually requested.
    from imblearn.over_sampling import RandomOverSampler
    # NOTE(review): ``ratio=`` was renamed ``sampling_strategy=`` in
    # imbalanced-learn 0.4 and removed in 0.6, and ``fit_sample`` became
    # ``fit_resample`` — confirm the pinned imblearn version.
    ros = RandomOverSampler(ratio=balance)
    # The sampler expects a 2-D feature array, hence the reshape to a column.
    feature_index = np.array(feature_index).reshape(-1, 1)
    resampled_index, _ = ros.fit_sample(feature_index, labels)
    # Flatten the (n, 1) column back to a plain list of indices.
    resampled_index = [i for nested in resampled_index for i in nested]
    return resampled_index
def generate_data_skeleton(root_dir,
ext=None,
valid_size=None,
oversample=False):
"""turn file structure into human-readable pandas dataframe"""
file_structure = folder_traverse(root_dir, ext=ext)
reversed_fs = {k + '/' + f: os.path.splitext(f)[0]
for k, v in file_structure.items() for f in v}
# find the first csv and load it in memory and remove it from dictionary
for key in reversed_fs:
if key.endswith('.csv'):
df_csv = pd.read_csv(key, dtype=np.str)
reversed_fs.pop(key)
break
df = pd.DataFrame.from_dict(data=reversed_fs, orient='index').reset_index()
df.rename(columns={'index': 'path_to_file', 0: 'filename'}, inplace=True)
df.reset_index(inplace=True, drop=True)
df = df_csv.merge(right=df,
how='left',
left_on='image_name',
right_on='filename').dropna(axis=0)
discrete_labels = [string.split(' ') for string in df['tags'].tolist()]
mlb = preprocessing.MultiLabelBinarizer()
mlb.fit(discrete_labels)
X = | np.array(df['path_to_file']) | numpy.array |
import numpy as np
import random
class ReplayBuffer(object):
def __init__(self, max_size=1e6):
self.storage = []
self.max_size = max_size
self.ptr = 0
self.k = 0
def add(self, transition):
self.k += 1
if len(self.storage) == self.max_size:
self.storage[int(self.ptr)] = transition
self.ptr = (self.ptr + 1) % self.max_size
else:
self.storage.append(transition)
def sample(self, batch_size):
ind = np.random.randint(0, len(self.storage), size=batch_size)
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
for i in ind:
state, next_state, action, reward, done = self.storage[i]
batch_states.append(np.array(state, copy=False))
batch_next_states.append(np.array(next_state, copy=False))
batch_actions.append(np.array(action, copy=False))
batch_rewards.append(np.array(reward, copy=False))
batch_dones.append(np.array(done, copy=False))
return np.array(batch_states), np.array(batch_next_states), np.array(batch_actions), np.array(batch_rewards).reshape(-1, 1), np.array(batch_dones).reshape(-1, 1)
def get_last_k_trajectories(self):
print("get k ", self.k)
ind = [x for x in range(self.ptr - self.k, self.ptr)]
batch_states, batch_next_states, batch_actions, batch_rewards, batch_dones = [], [], [], [], []
for i in ind:
state, next_state, action, reward, done = self.storage[i]
batch_states.append(np.array(state, copy=False))
batch_next_states.append(np.array(next_state, copy=False))
batch_actions.append( | np.array(action, copy=False) | numpy.array |
import numpy as np
import scipy.misc
import tensorflow as tf
import tensorflow.contrib.slim as slim
import cv2
import os
import sys
import slim_net
NUM_CLASSES = 2
COLOR_SET = [
[0, 0, 0], [1, 1, 1], [1, 1, 1], [1, 1, 1], [1, 1, 1],
[187, 119, 132], [142, 6, 59], [74, 111, 227], [133, 149, 225],
[181, 187, 227], [230, 175, 185], [224, 123, 145], [211, 63, 106],
[17, 198, 56], [141, 213, 147], [198, 222, 199], [234, 211, 198],
[240, 185, 141], [239, 151, 8], [15, 207, 192], [156, 222, 214],
[213, 234, 231], [243, 225, 235], [246, 196, 225], [247, 156, 212]
]
def build_image(filename):
    """Load an image as a (1, H, W, 3) float array in BGR order with the
    VGG/Caffe channel means subtracted.

    Fix: ``scipy.misc.imread`` was deprecated in SciPy 1.0 and removed in
    1.2, so the original no longer runs on current SciPy. ``cv2.imread``
    (cv2 is already imported by this module) returns BGR directly, which is
    exactly what the original produced via an RGB read followed by the
    ``[:, :, ::-1]`` channel reversal.

    NOTE(review): cv2.imread returns None (instead of raising) for an
    unreadable path; the subsequent ``img.shape`` then raises AttributeError.
    """
    MEAN_VALUES = np.array([104.00698793, 116.66876762, 122.67891434])
    MEAN_VALUES = MEAN_VALUES.reshape((1, 1, 1, 3))
    img = cv2.imread(filename, cv2.IMREAD_COLOR)  # BGR, 3 channels
    height, width, _ = img.shape
    img = np.reshape(img, (1, height, width, 3)) - MEAN_VALUES
    return img
def save_masked_image(result, srcfilename, filename):
srcimg = scipy.misc.imread(srcfilename, mode='RGB')
height, width, _ = srcimg.shape
srcimg = np.reshape(srcimg, (height, width, 3))
srcimg = | np.asarray(srcimg, np.float32) | numpy.asarray |
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 11:30:35 2017
@author: Bartek
"""
# from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as dsp
import scipy.stats as stats
import os
import pandas as pd
from numpy.core.multiarray import ndarray
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from pandas import read_csv, DataFrame
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from keras.wrappers.scikit_learn import KerasClassifier
from keras.models import Sequential
from keras.layers import Dense, Conv1D, Flatten, Dropout, MaxPooling1D, Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras import backend as K
# TODO:
# 1 zastąpić CSP FBCSP: najpierw porwnać z oryginalnym csp, potem dać domylne pasma...done
# 2 zastąpić SelectKBest na RFE...pass
# 3 dopisać więcej cech...ale jakich??
# 4 przykład działania dla kNN, SVM linear, SVN rbf, RandomTrees...done
# 5 usuwanie jakich podobnych predyktorw?...pass
# 6 normalizowanie cech wczeniej1!!...done
class Log:
    """Tiny debug helper that dumps array-likes to CSV files on drive C:."""
    counter = 0  # class-wide counter shared by every instance
    def LogInc(self, object):
        """Write ``object`` to an auto-numbered CSV dump and bump the counter."""
        target = "c:\\test%d.txt" % Log.counter
        pd.DataFrame(object).to_csv(target)
        Log.counter = Log.counter + 1
    def Log(self, object):
        """Overwrite the fixed CSV dump file with ``object``."""
        target = "c:\\test24.txt"
        pd.DataFrame(object).to_csv(target)
class CSP:
    """Two-class Common Spatial Patterns filter.

    NOTE(review): ``fit`` passes a *tuple* of the two covariance matrices to
    ``np.linalg.eig``, which numpy treats as a stack of two independent
    ordinary eigenproblems — not the generalized problem
    eig(cov1, cov1 + cov2) that textbook CSP requires. ``transform`` then
    uses only ``cspmat[0]``, i.e. the eigenvectors of cov(class1) alone —
    confirm this is intentional.
    """
    def __init__(self):
        # Spatial-filter matrices; filled in by fit() as a (2, n, n) stack.
        self.cspmat = []
    def transform(self, signal):
        # Project the (channels x time) signal onto the first eig() result.
        return signal.transpose().dot(self.cspmat[0, :, :])
    def fit(self, class1, class2):
        # eig over the stacked pair of covariance matrices (see class note).
        self.val, self.cspmat = np.linalg.eig((np.cov(class1), np.cov(class1) + np.cov(class2)))
class FBCSP:
    """Filter-Bank CSP: one spatial-filter matrix per frequency band."""
    def __init__(self, noBands):
        # cspmat becomes a (channels, channels, noBands) stack after fit().
        self.cspmat = []
        self.noBands = noBands
    def transform(self, signal):
        """Project a (channels x time x bands) signal onto the per-band filters.

        :return: (time, channels * noBands) array; band ``fid`` occupies
                 columns [fid*channels, (fid+1)*channels).
        """
        n_chan = signal.shape[0]
        cspsig = np.zeros((signal.shape[1], n_chan * self.noBands))
        for fid in range(self.noBands):
            projected = signal[:, :, fid].transpose().dot(self.cspmat[:, :, fid])
            cspsig[:, fid * n_chan:fid * n_chan + n_chan] = projected
        return cspsig
    def fit(self, class1, class2):
        """Fit one CSP matrix per band from (channels x time x bands) trials.

        Bug fix: the original sized ``self.cspmat`` from a module-level
        global ``signal`` (and from its *time* dimension), so ``fit`` could
        not run standalone and the per-band eigenvector matrices did not fit
        their slots; it now uses class1's channel count.
        """
        n_chan = class1.shape[0]
        self.cspmat = np.zeros((n_chan, n_chan, self.noBands))
        for fid in range(self.noBands):
            C1 = class1[:, :, fid]
            C2 = class2[:, :, fid]
            # eig over the stacked pair: temp[0] holds the eigenvectors of
            # cov(C1). NOTE(review): as in CSP.fit above, this is not the
            # generalized eigenproblem textbook CSP uses — confirm intent.
            self.val, temp = np.linalg.eig((np.cov(C1), np.cov(C1) + np.cov(C2)))
            self.cspmat[:, :, fid] = temp[0, :, :]
class featureExtractor:
def __init__(self, name):
self.name = name
def transform(self, signal, labels1, labels2, labels3):
noEvents = int(labels1.max())
noRepetitedSignals = int(labels3.max()) + 1
labels = np.zeros((2 * noEvents * noRepetitedSignals, 1))
features = np.zeros((2 * noEvents * noRepetitedSignals, signal.shape[1]))
if self.name == "all":
features = np.zeros((2 * noEvents * noRepetitedSignals, 2 * signal.shape[1]))
# Log.Log(signal)
for tid in range(0, noEvents):
for rid in range(0, noRepetitedSignals):
# tid+1
if self.name == "logvar":
features[rid + tid * noRepetitedSignals, :] = np.log(
np.var(signal[((labels1 == tid + 1) & (labels3 == rid)), :], axis=0))
# Log.Log(features[:, range(0, 4)])
features[rid + tid * noRepetitedSignals + noEvents * noRepetitedSignals, :] = np.log(
np.var(signal[((labels2 == tid + 1) & (labels3 == rid)), :], axis=0))
# Log.Log(features[:, range(0, 4)])
dataA = signal[((labels1 == tid + 1) & (labels3 == rid)), :]
# Log.LogInc(dataA)
dataA = signal[((labels2 == tid + 1) & (labels3 == rid)), :]
# Log.LogInc(dataA)
if self.name == "pearsonr":
features[tid, :] = stats.pearsonr(signal[labels1 == tid + 1, :], signal[labels1 == tid + 1, :])
features[tid + noEvents, :] = stats.pearsonr(signal[labels2 == tid + 1, :],
signal[labels2 == tid + 1, :])
if self.name == "lyapunov":
features[tid, :] = 0 # (signal[labels1==tid+1,:],axis=0)
features[tid + noEvents, :] = 0 # np.log(np.var(signal[labels2==tid+1,:],axis=0))
if self.name == "all":
# wyznacz wszystkie i złącz...kolumnami w jedną duż cechę
a1 = np.log(np.var(signal[labels1 == tid + 1, :], axis=0))
b1 = np.log( | np.var(signal[labels2 == tid + 1, :], axis=0) | numpy.var |
"""
Bayesian Degree Corrected Stochastic Block Model
roughly based on Infinite-degree-corrected stochastic block model by Herlau et. al.,
but with a fixed number of cluster sizes and a different update equation for the collapsed Gibbs sampler;
see accompanying documentation
"""
import numpy as np
import sys
sys.path.append("/home/victor/Documents/community_detection/MCMC")
from cgs_llhds import diri_multi_llhd
from multi_sbm_helpers import comp_edge_cts, softmax
from dcsbm_helpers import GD, BD, samp_shape_post_step, samp_rate_post_step, samp_gam_post_step
class gen_data:
def __init__(self, n, phis, eta):
"""
:param n: number of vertices in each community
:param phis: list of probability distributions, phis[l] should be length n[l] and sum to 1
:param eta: symmetric matrix, eta[k,l] is expected number of edges between vertex in k and vertex in l
"""
self.n = n
self.n_vert = sum(n)
self.n_comm = len(n)
self.phis = phis
self.eta = eta
z = np.repeat(0, self.n_vert)
acc = 0
for l in range(self.n_comm - 1):
acc += self.n[l]
z[acc: acc + self.n[l + 1]] = l + 1
self.z = z
phi = | np.repeat(0., self.n_vert) | numpy.repeat |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
    """
    Space group

    All possible space group objects are created in this module. Other
    modules should access these objects through the dictionary
    space_groups rather than create their own space group objects.
    """

    def __init__(self, number, symbol, transformations):
        """
        :param number: the number assigned to the space group by
                       international convention
        :type number: int
        :param symbol: the Hermann-Mauguin space-group symbol as used
                       in PDB and mmCIF files
        :type symbol: str
        :param transformations: a list of space group transformations,
                                each a tuple (rot, tn, td) of integer
                                arrays: rotation matrix plus numerator and
                                denominator of the translation vector, in
                                fractional coordinates.
        :type transformations: list
        """
        self.number = number
        self.symbol = symbol
        self.transformations = transformations
        # Pre-transpose every rotation so reflection generation is one dot().
        self.transposed_rotations = N.array(
            [N.transpose(rot) for rot, _, _ in transformations])
        # Per-transformation phase factors exp(-2*pi*i * tn/td), one triple
        # of complex factors per transformation.
        fractional_translations = N.array(
            [(-2j * N.pi * tn) / td for _, tn, td in transformations])
        self.phase_factors = N.exp(fractional_translations)

    def __repr__(self):
        return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))

    def __len__(self):
        """
        :return: the number of space group transformations
        :rtype: int
        """
        return len(self.transformations)

    def symmetryEquivalentMillerIndices(self, hkl):
        """
        :param hkl: a set of Miller indices
        :type hkl: Scientific.N.array_type
        :return: a tuple (miller_indices, phase_factor): the symmetry
                 equivalents of hkl (hkl itself first) and the phase factor
                 to apply to hkl's structure factor to obtain each
                 equivalent reflection's structure factor.
        :rtype: tuple
        """
        equivalents = N.dot(self.transposed_rotations, hkl)
        phases = N.prod(self.phase_factors ** hkl, axis=-1)
        return equivalents, phases
# --- Machine-generated space-group tables -----------------------------------
# Registry of all SpaceGroup objects, keyed both by IT number and by
# Hermann-Mauguin symbol. Each block below builds one group's list of
# (rotation, translation-numerator, translation-denominator) tuples.
space_groups = {}
transformations = []
# Space group 1 (P 1): the identity transformation only.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
# Space group 21 ('C 2 2 2'): build its symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
# Space group 22 ('F 2 2 2'): 16 symmetry operators, each stored as a
# (3x3 rotation, translation numerator, translation denominator) triple.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,0,1], [2,1,2]),
        ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
# Space group 23 ('I 2 2 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
# Space group 24 ('I 21 21 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
# Note: translations in the second half are the base translations plus the
# body-centering vector, left un-reduced (e.g. numerator [1,1,1] over [2,2,1]).
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
# Space group 25 ('P m m 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
# Space group 26 ('P m c 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
# Space group 27 ('P c c 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
# Space group 28 ('P m a 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,0,0], [2,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,0,0], [2,1,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
# Space group 29 ('P c a 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,0,1], [2,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,0,0], [2,1,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
# Space group 30 ('P n c 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,1,1], [1,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,1,1], [1,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
# Space group 31 ('P m n 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
# Space group 32 ('P b a 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
# Space group 33 ('P n a 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
# Space group 34 ('P n n 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
# Space group 35 ('C m m 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
# Space group 36 ('C m c 21'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
# Space group 37 ('C c c 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
# Space group 38 ('A m m 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,1,1], [1,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,1,1], [1,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
# Space group 39 ('A b m 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
# Note: translations in the second half are base + centering vector, left
# un-reduced (e.g. numerator [0,1,1] over denominator [1,1,2]).
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,1,0], [1,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,1,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,1,1], [1,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
# Space group 40 ('A m a 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,0,0], [2,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,0,0], [2,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
# Space group 41 ('A b a 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
# Note: translations in the second half are base + centering vector, left
# un-reduced (e.g. numerator [1,1,1] over denominator [2,1,2]).
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,1,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
# Space group 42 ('F m m 2'): 16 symmetry operators, each stored as a
# (3x3 rotation, translation numerator, translation denominator) triple.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [0,1,1], [1,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,1,1], [1,2,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,0,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,0,1], [2,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,0,1], [2,1,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,0], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
# Space group 43 ('F d d 2'): 16 symmetry operators, each stored as a
# (3x3 rotation, translation numerator, translation denominator) triple.
# The quarter-cell translations (denominator 4) come from the d-glides.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [4,4,4]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [4,4,4]),
        ([1,0,0,0,1,0,0,0,1],   [0,1,1], [1,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,1], [1,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,3,3], [4,4,4]),
        ([1,0,0,0,-1,0,0,0,1],  [1,3,3], [4,4,4]),
        ([1,0,0,0,1,0,0,0,1],   [1,0,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,1,0,0,0,1],  [3,1,3], [4,4,4]),
        ([1,0,0,0,-1,0,0,0,1],  [3,1,3], [4,4,4]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,1],  [3,3,1], [4,4,4]),
        ([1,0,0,0,-1,0,0,0,1],  [3,3,1], [4,4,4])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
# Space group 44 ('I m m 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,2])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
# Space group 45 ('I b a 2'): symmetry operators as
# (3x3 rotation, translation numerator, translation denominator) triples.
# Note: translations in the second half are base + body-centering vector,
# left un-reduced (e.g. numerator [1,1,1] over denominator [2,2,1]).
transformations = []
for rot, trans_num, trans_den in [
        ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,1],  [0,0,1], [1,1,2]),
        ([1,0,0,0,1,0,0,0,1],   [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,1],  [1,1,1], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,1],  [1,1,1], [2,2,1])]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
# Register under both the group number and the Hermann-Mauguin symbol.
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
# Space group 65, 'C m m m' (C-centred orthorhombic, point group mmm).
# Each symmetry operation is stored as a (rotation, trans_num, trans_den)
# triple: a 3x3 integer rotation matrix plus a fractional translation
# given as elementwise numerator/denominator triples (presumably combined
# as trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
_rots_65 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
# The eight point-group operations appear once at the origin and once
# shifted by the C-centring translation (1/2, 1/2, 0).
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,0], [2,2,1])):
    for _elems in _rots_65:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
# Space group 66, 'C c c m' (C-centred orthorhombic).  Generated table of
# symmetry operations: each entry is a (rotation, trans_num, trans_den)
# triple — a flat 9-element integer list reshaped to a 3x3 rotation
# matrix, plus a fractional translation given as elementwise
# numerator/denominator triples (presumably combined as
# trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the C-centring translation
# (1/2, 1/2, 0) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
# Space group 68, 'C c c a :2' (C-centred orthorhombic, origin choice 2).
# Generated table of symmetry operations: each entry is a
# (rotation, trans_num, trans_den) triple — a flat 9-element integer list
# reshaped to a 3x3 rotation matrix, plus a fractional translation given
# as elementwise numerator/denominator triples (presumably combined as
# trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the C-centring translation
# (1/2, 1/2, 0) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
# Space group 69, 'F m m m' (face-centred orthorhombic, point group mmm).
# Each symmetry operation is stored as a (rotation, trans_num, trans_den)
# triple: a 3x3 integer rotation matrix plus a fractional translation
# given as elementwise numerator/denominator triples (presumably combined
# as trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
_rots_69 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
# The eight point-group operations are replicated over the four
# F-lattice translations: (0,0,0), (0,1/2,1/2), (1/2,0,1/2), (1/2,1/2,0).
_shifts_69 = (
    ([0,0,0], [1,1,1]),
    ([0,1,1], [1,2,2]),
    ([1,0,1], [2,1,2]),
    ([1,1,0], [2,2,1]),
)
for _num, _den in _shifts_69:
    for _elems in _rots_69:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
# Space group 70, 'F d d d :2' (face-centred orthorhombic with diamond
# glides, origin choice 2).  Generated table of symmetry operations:
# each entry is a (rotation, trans_num, trans_den) triple — a flat
# 9-element integer list reshaped to a 3x3 rotation matrix, plus a
# fractional translation given as elementwise numerator/denominator
# triples (presumably combined as trans_num/trans_den by SpaceGroup —
# confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the F-centring translation
# (0, 1/2, 1/2) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the F-centring translation
# (1/2, 0, 1/2) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the F-centring translation
# (1/2, 1/2, 0) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
# Space group 71, 'I m m m' (body-centred orthorhombic, point group mmm).
# Each symmetry operation is stored as a (rotation, trans_num, trans_den)
# triple: a 3x3 integer rotation matrix plus a fractional translation
# given as elementwise numerator/denominator triples (presumably combined
# as trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
_rots_71 = [
    [1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,1],
    [1,0,0,0,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
]
# The eight point-group operations appear once at the origin and once
# shifted by the I-centring translation (1/2, 1/2, 1/2).
for _num, _den in (([0,0,0], [1,1,1]), ([1,1,1], [2,2,2])):
    for _elems in _rots_71:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
# Space group 72, 'I b a m' (body-centred orthorhombic).  Generated table
# of symmetry operations: each entry is a (rotation, trans_num, trans_den)
# triple — a flat 9-element integer list reshaped to a 3x3 rotation
# matrix, plus a fractional translation given as elementwise
# numerator/denominator triples (presumably combined as
# trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the I-centring translation
# (1/2, 1/2, 1/2) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
# Space group 73, 'I b c a' (body-centred orthorhombic).  Generated table
# of symmetry operations: each entry is a (rotation, trans_num, trans_den)
# triple — a flat 9-element integer list reshaped to a 3x3 rotation
# matrix, plus a fractional translation given as elementwise
# numerator/denominator triples (presumably combined as
# trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the I-centring translation
# (1/2, 1/2, 1/2) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
# Space group 74, 'I m m a' (body-centred orthorhombic).  Generated table
# of symmetry operations: each entry is a (rotation, trans_num, trans_den)
# triple — a flat 9-element integer list reshaped to a 3x3 rotation
# matrix, plus a fractional translation given as elementwise
# numerator/denominator triples (presumably combined as
# trans_num/trans_den by SpaceGroup — confirm there).
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
# The same eight operations repeated with the I-centring translation
# (1/2, 1/2, 1/2) added to each fractional translation.
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
# Register the group under both its IT number and its Hermann-Mauguin symbol.
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
# Space group 75, 'P 4' (primitive tetragonal, point group 4).
# Four pure rotations about the c axis; every operation carries the
# zero fractional translation (numerators 0, denominators 1).
transformations = []
for _elems in ([1,0,0,0,1,0,0,0,1],
               [0,-1,0,1,0,0,0,0,1],
               [0,1,0,-1,0,0,0,0,1],
               [-1,0,0,0,-1,0,0,0,1]):
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
# Space groups 79-80 ('I 4', 'I 41'), written as a data-driven loop
# instead of the generated statement-per-element form.  Each entry is
# (IT number, Hermann-Mauguin symbol, operations), where an operation is
# (row-major 3x3 rotation elements, translation numerators, translation
# denominators); the translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (79, 'I 4', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ]),
    (80, 'I 41', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space groups 81-82 ('P -4', 'I -4'), written as a data-driven loop
# instead of the generated statement-per-element form.  Each entry is
# (IT number, Hermann-Mauguin symbol, operations), where an operation is
# (row-major 3x3 rotation elements, translation numerators, translation
# denominators); the translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (81, 'P -4', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ]),
    (82, 'I -4', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space groups 83-86 ('P 4/m', 'P 42/m', 'P 4/n :2', 'P 42/n :2'),
# written as a data-driven loop instead of the generated
# statement-per-element form.  Each entry is (IT number, Hermann-Mauguin
# symbol, operations), where an operation is (row-major 3x3 rotation
# elements, translation numerators, translation denominators); the
# translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (83, 'P 4/m', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (84, 'P 42/m', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
        ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (85, 'P 4/n :2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
        ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ]),
    (86, 'P 42/n :2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,1,1], [1,2,2]),
        ([0,1,0,-1,0,0,0,0,1], [1,0,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
        ([0,-1,0,1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
        ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space groups 87-88 ('I 4/m', 'I 41/a :2'), written as a data-driven
# loop instead of the generated statement-per-element form.  Each entry
# is (IT number, Hermann-Mauguin symbol, operations), where an operation
# is (row-major 3x3 rotation elements, translation numerators,
# translation denominators); the translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (87, 'I 4/m', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
        ([1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ]),
    (88, 'I 41/a :2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,3,3], [4,4,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [4,4,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
        ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,-1], [-1,-3,-3], [4,4,4]),
        ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [4,4,4]),
        ([1,0,0,0,1,0,0,0,-1], [0,-1,0], [1,2,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [3,5,5], [4,4,4]),
        ([0,1,0,-1,0,0,0,0,1], [3,3,3], [4,4,4]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
        ([-1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,-1], [1,-1,-1], [4,4,4]),
        ([0,-1,0,1,0,0,0,0,-1], [1,1,1], [4,4,4]),
        ([1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space groups 89-92 ('P 4 2 2', 'P 4 21 2', 'P 41 2 2', 'P 41 21 2'),
# written as a data-driven loop instead of the generated
# statement-per-element form.  Each entry is (IT number, Hermann-Mauguin
# symbol, operations), where an operation is (row-major 3x3 rotation
# elements, translation numerators, translation denominators); the
# translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (89, 'P 4 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (90, 'P 4 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (91, 'P 41 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,4]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,3], [1,1,4]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,3], [1,1,4]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,4]),
    ]),
    (92, 'P 41 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,3], [2,2,4]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,3], [2,2,4]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space groups 93-96 ('P 42 2 2', 'P 42 21 2', 'P 43 2 2', 'P 43 21 2'),
# written as a data-driven loop instead of the generated
# statement-per-element form.  Each entry is (IT number, Hermann-Mauguin
# symbol, operations), where an operation is (row-major 3x3 rotation
# elements, translation numerators, translation denominators); the
# translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (93, 'P 42 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ]),
    (94, 'P 42 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ]),
    (95, 'P 43 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,3], [1,1,4]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,4]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,4]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,3], [1,1,4]),
    ]),
    (96, 'P 43 21 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,3], [2,2,4]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,4]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,4]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,3], [2,2,4]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space group 97 ('I 4 2 2'), written as a data-driven loop instead of
# the generated statement-per-element form.  Each operation is
# (row-major 3x3 rotation elements, translation numerators, translation
# denominators); the translation is trans_num / trans_den.
for _number, _symbol, _ops in [
    (97, 'I 4 2 2', [
        ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
        ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
        ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
        ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
        ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
        ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
        ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
        ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
        ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ]),
]:
    transformations = []
    for _rot, _num, _den in _ops:
        rot = N.array(_rot)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
    sg = SpaceGroup(_number, _symbol, transformations)
    # Register under both the numeric and the symbolic key, as elsewhere.
    space_groups[_number] = sg
    space_groups[_symbol] = sg
# Space group 98 ('I 41 2 2'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
# Space group 99 ('P 4 m m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; all eight operations here have
# zero translation.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
# Space group 100 ('P 4 b m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
# Space group 101 ('P 42 c m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
# Space group 102 ('P 42 n m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
# Space group 103 ('P 4 c c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
# Space group 104 ('P 4 n c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
# Space group 105 ('P 42 m c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
# Space group 106 ('P 42 b c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
# Space group 107 ('I 4 m m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the second half of the table is
# the first half shifted by the (1/2, 1/2, 1/2) centering translation.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
# Space group 108 ('I 4 c m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate
# (including the [1,1,1]/[2,2,1] entries, reproduced verbatim).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
# Space group 109 ('I 41 m d'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
# Space group 110 ('I 41 c d'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate
# (including the [1,1,1]/[2,2,1] entries, reproduced verbatim).
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,0,3], [2,1,4]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [1,0,1], [2,1,4]),
    ([0,1,0,1,0,0,0,0,1], [1,0,1], [2,1,4]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,5], [1,2,4]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [1,1,1], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,3], [1,2,4]),
    ([0,1,0,1,0,0,0,0,1], [1,1,3], [1,2,4]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
# Space group 111 ('P -4 2 m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; all eight operations here have
# zero translation.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
# Space group 112 ('P -4 2 c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
# Space group 113 ('P -4 21 m'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [1,1,0], [2,2,1]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
# Space group 114 ('P -4 21 c'): build its symmetry operations and register the
# group under both its IT number and Hermann-Mauguin symbol.
# Each operation is a (rotation matrix, translation numerator, translation
# denominator) triple consumed by SpaceGroup; the data-driven loop below
# produces exactly the same triples as the original repeated boilerplate.
transformations = []
for rot_elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
]:
    rot = N.array(rot_elems)
    rot.shape = (3, 3)
    trans_num = N.array(num)
    trans_den = N.array(den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
# Space group No. 115 ('P -4 m 2').  All eight operations are pure
# rotations/rotoinversions: every translation part is zero.
transformations = []
for rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
# Space group No. 116 ('P -4 c 2').  The second four operations carry a
# c-glide translation of 0 0 1/2 (trans_num/trans_den).
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
# Space group No. 117 ('P -4 b 2').  The second four operations carry a
# b-glide translation of 1/2 1/2 0.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
# Space group No. 118 ('P -4 n 2').  The second four operations carry an
# n-glide translation of 1/2 1/2 1/2.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
# Space group No. 119 ('I -4 m 2').  Body-centred: the eight primitive
# operations appear twice, the second time shifted by the (1/2,1/2,1/2)
# centring vector.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
# Space group No. 120 ('I -4 c 2').  Body-centred; the last four entries keep
# the generator's unreduced translation fractions (e.g. z numerator 1 over
# denominator 1) exactly as originally emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
# Space group No. 121 ('I -4 2 m').  Body-centred: the eight primitive
# operations appear twice, the second set shifted by (1/2,1/2,1/2).
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,1,0,0,0,0,1), (1,1,1), (2,2,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
# Space group No. 122 ('I -4 2 d').  Body-centred; the d-glide translations
# (1/2, 0, 3/4) and the generator's unreduced centred forms (numerators
# (1,1,5) over denominators (1,2,4)) are reproduced exactly as emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,3), (2,1,4)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((0,1,0,1,0,0,0,0,1), (1,0,3), (2,1,4)),
    ((1,0,0,0,1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,5), (1,2,4)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,1), (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1), (1,1,5), (1,2,4)),
    ((0,1,0,1,0,0,0,0,1), (1,1,5), (1,2,4)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
# Space group No. 123 ('P 4/m m m').  A symmorphic group: all sixteen
# operations are point-group rotations with zero translation.
transformations = []
for rot in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (1,0,0,0,1,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
# Space group No. 124 ('P 4/m c c').  c-glide operations carry a +-(0,0,1/2)
# translation; the inversion-related half uses the negated numerators the
# generator emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1), (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (0,0,-1), (1,1,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
# Space group No. 125 ('P 4/n b m :2', origin choice 2).  Translations mix
# (1/2,0,0), (0,1/2,0) and (1/2,1/2,0) parts; the inversion-related half
# uses the negated numerators the generator emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,0), (2,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,0), (1,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,0), (1,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,0), (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,0), (2,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,0), (1,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,0), (2,1,1)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
# Space group No. 126 ('P 4/n n c :2', origin choice 2).  n-glide and
# diagonal translation parts; the inversion-related half uses the negated
# numerators the generator emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (1,0,0), (2,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,1,0), (1,2,1)),
    ((1,0,0,0,-1,0,0,0,-1), (0,1,1), (1,2,2)),
    ((-1,0,0,0,1,0,0,0,-1), (1,0,1), (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1), (1,1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,-1), (0,0,1), (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1), (2,2,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (-1,0,0), (2,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,-1,0), (1,2,1)),
    ((-1,0,0,0,1,0,0,0,1), (0,-1,-1), (1,2,2)),
    ((1,0,0,0,-1,0,0,0,1), (-1,0,-1), (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1), (-1,-1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,1), (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,-1), (2,2,2)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
# Space group No. 127 ('P 4/m b m').  b-glide operations carry a
# +-(1/2,1/2,0) translation; the inversion-related half uses the negated
# numerators the generator emitted.
transformations = []
for rot, trans_num, trans_den in [
    ((1,0,0,0,1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1), (0,0,0), (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1), (0,0,0), (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,0), (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1), (0,0,0), (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1), (0,0,0), (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
    ((0,1,0,1,0,0,0,0,1), (-1,-1,0), (2,2,1)),
]:
    rot = N.array(rot)
    rot.shape = (3, 3)
    trans_num = N.array(trans_num)
    trans_den = N.array(trans_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
# Space group 128, Hermann-Mauguin symbol 'P 4/m n c'.
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are stored as exact integer
# fractions, presumably to avoid float round-off — matches the rest of
# the table.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
# Space group 129, Hermann-Mauguin symbol 'P 4/n m m :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
# Space group 130, Hermann-Mauguin symbol 'P 4/n c c :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,0], [2,1,1]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,0], [2,1,1]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
# Space group 131, Hermann-Mauguin symbol 'P 42/m m c'.
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
# Space group 132, Hermann-Mauguin symbol 'P 42/m c m'.
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
# Space group 133, Hermann-Mauguin symbol 'P 42/n b c :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
# Space group 134, Hermann-Mauguin symbol 'P 42/n n m :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
# Space group 135, Hermann-Mauguin symbol 'P 42/m b c'.
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
# Space group 136, Hermann-Mauguin symbol 'P 42/m n m'.
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,1,0,-1,0,0,0,0,1], [1,1,1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [2,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([0,-1,0,1,0,0,0,0,-1], [-1,-1,-1], [2,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,-1,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
# Space group 137, Hermann-Mauguin symbol 'P 42/n m c :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,0], [1,2,1]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,1], [2,2,2]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,0], [2,1,1]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,-1], [2,2,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
# Space group 138, Hermann-Mauguin symbol 'P 42/n c m :2' (origin choice 2).
# Each entry below is (flat 3x3 rotation, translation numerator,
# translation denominator); translations are exact integer fractions.
transformations = []
for spec in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,0,0,0,0,1], [1,0,1], [2,1,2]),
    ([0,1,0,-1,0,0,0,0,1], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,-1], [1,1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,0,0,0,0,-1], [-1,0,-1], [2,1,2]),
    ([0,-1,0,1,0,0,0,0,-1], [0,-1,-1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,1], [-1,0,-1], [2,1,2]),
    ([1,0,0,0,-1,0,0,0,1], [0,-1,-1], [1,2,2]),
    ([1,0,0,0,1,0,0,0,-1], [-1,-1,0], [2,2,1]),
    ([0,-1,0,-1,0,0,0,0,1], [-1,-1,0], [2,2,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(spec[0])
    rot.shape = (3, 3)
    trans_num = N.array(spec[1])
    trans_den = N.array(spec[2])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
# Space group 139, 'I 4/m m m' (body-centred tetragonal).
# The sixteen point-group rotations are each combined with the (0,0,0)
# and the I-centring (1/2,1/2,1/2) translations.  Every symmetry operation
# is stored as (rotation matrix, translation numerators, denominators),
# i.e. x' = rot . x + num/den, in the same order as the generated table.
transformations = []
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,0,0,0,0,1),
    (0,1,0,-1,0,0,0,0,1),
    (1,0,0,0,-1,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,1),
    (0,1,0,1,0,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,0,0,0,0,-1),
    (0,-1,0,1,0,0,0,0,-1),
    (-1,0,0,0,1,0,0,0,1),
    (1,0,0,0,-1,0,0,0,1),
    (1,0,0,0,1,0,0,0,-1),
    (0,-1,0,-1,0,0,0,0,1),
    (0,1,0,1,0,0,0,0,1),
]
for _num, _den in [((0,0,0), (1,1,1)), ((1,1,1), (2,2,2))]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
# Space group 140, 'I 4/m c m'.
# Each entry below is one symmetry operation x' = rot . x + num/den,
# stored as (3x3 rotation, translation numerators, translation
# denominators), appended in the original generated order.
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0),  (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (0,0,0),  (1,1,1)),
    ((0,1,0,-1,0,0,0,0,1),   (0,0,0),  (1,1,1)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,1),  (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,0,1),  (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,0,0),  (1,1,1)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,1),  (1,1,2)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,1),  (1,1,2)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0),  (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),  (0,0,0),  (1,1,1)),
    ((0,-1,0,1,0,0,0,0,-1),  (0,0,0),  (1,1,1)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,-1), (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),   (0,0,-1), (1,1,2)),
    ((1,0,0,0,1,0,0,0,-1),   (0,0,0),  (1,1,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (0,0,-1), (1,1,2)),
    ((0,1,0,1,0,0,0,0,1),    (0,0,-1), (1,1,2)),
    # The same sixteen rotations with the I-centring shift folded in.
    ((1,0,0,0,1,0,0,0,1),    (1,1,1),  (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),   (1,1,1),  (2,2,2)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,1),  (2,2,2)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,1),  (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,1),  (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (1,1,1),  (2,2,2)),
    ((0,1,0,1,0,0,0,0,-1),   (1,1,1),  (2,2,1)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1),  (2,2,1)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1),  (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1),  (1,1,1),  (2,2,2)),
    ((0,-1,0,1,0,0,0,0,-1),  (1,1,1),  (2,2,2)),
    ((-1,0,0,0,1,0,0,0,1),   (1,1,0),  (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1),   (1,1,0),  (2,2,1)),
    ((1,0,0,0,1,0,0,0,-1),   (1,1,1),  (2,2,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (1,1,0),  (2,2,1)),
    ((0,1,0,1,0,0,0,0,1),    (1,1,0),  (2,2,1)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
# Space group 141, 'I 41/a m d :2' (origin choice 2).
# Each entry below is one symmetry operation x' = rot . x + num/den,
# stored as (3x3 rotation, translation numerators, translation
# denominators), appended in the original generated order.
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0),    (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,3,1),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,3),    (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,0),    (1,1,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (0,1,0),    (1,2,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,1,0),    (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1),   (1,3,1),    (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,3),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0),    (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),  (-1,-3,-1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),  (-1,-1,-3), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,0),    (1,1,1)),
    ((1,0,0,0,-1,0,0,0,1),   (0,-1,0),   (1,2,1)),
    ((1,0,0,0,1,0,0,0,-1),   (0,-1,0),   (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (-1,-3,-1), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1),    (-1,-1,-3), (4,4,4)),
    # The same sixteen rotations with the I-centring shift folded in.
    ((1,0,0,0,1,0,0,0,1),    (1,1,1),    (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),   (3,5,3),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),   (3,3,5),    (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,1),    (2,2,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,1),    (2,1,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (1,1,1),    (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (3,5,3),    (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,5),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1),    (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1),  (1,-1,1),   (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),  (1,1,-1),   (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1),   (1,1,1),    (2,2,2)),
    ((1,0,0,0,-1,0,0,0,1),   (1,0,1),    (2,1,2)),
    ((1,0,0,0,1,0,0,0,-1),   (1,0,1),    (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (1,-1,1),   (4,4,4)),
    ((0,1,0,1,0,0,0,0,1),    (1,1,-1),   (4,4,4)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
# Space group 142, 'I 41/a c d :2' (origin choice 2).
# Each entry below is one symmetry operation x' = rot . x + num/den,
# stored as (3x3 rotation, translation numerators, translation
# denominators), appended in the original generated order.
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0),    (1,1,1)),
    ((0,-1,0,1,0,0,0,0,1),   (1,3,1),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),   (1,1,3),    (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (0,0,1),    (1,1,2)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,0,0),    (2,1,1)),
    ((-1,0,0,0,-1,0,0,0,1),  (0,1,0),    (1,2,1)),
    ((0,1,0,1,0,0,0,0,-1),   (1,3,3),    (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (1,1,1),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (0,0,0),    (1,1,1)),
    ((0,1,0,-1,0,0,0,0,-1),  (-1,-3,-1), (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),  (-1,-1,-3), (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1),   (0,0,-1),   (1,1,2)),
    ((1,0,0,0,-1,0,0,0,1),   (-1,0,0),   (2,1,1)),
    ((1,0,0,0,1,0,0,0,-1),   (0,-1,0),   (1,2,1)),
    ((0,-1,0,-1,0,0,0,0,1),  (-1,-3,-3), (4,4,4)),
    ((0,1,0,1,0,0,0,0,1),    (-1,-1,-1), (4,4,4)),
    # The same sixteen rotations with the I-centring shift folded in.
    ((1,0,0,0,1,0,0,0,1),    (1,1,1),    (2,2,2)),
    ((0,-1,0,1,0,0,0,0,1),   (3,5,3),    (4,4,4)),
    ((0,1,0,-1,0,0,0,0,1),   (3,3,5),    (4,4,4)),
    ((1,0,0,0,-1,0,0,0,-1),  (1,1,1),    (2,2,1)),
    ((-1,0,0,0,1,0,0,0,-1),  (1,1,1),    (1,2,2)),
    ((-1,0,0,0,-1,0,0,0,1),  (1,1,1),    (2,1,2)),
    ((0,1,0,1,0,0,0,0,-1),   (3,5,5),    (4,4,4)),
    ((0,-1,0,-1,0,0,0,0,-1), (3,3,3),    (4,4,4)),
    ((-1,0,0,0,-1,0,0,0,-1), (1,1,1),    (2,2,2)),
    ((0,1,0,-1,0,0,0,0,-1),  (1,-1,1),   (4,4,4)),
    ((0,-1,0,1,0,0,0,0,-1),  (1,1,-1),   (4,4,4)),
    ((-1,0,0,0,1,0,0,0,1),   (1,1,0),    (2,2,1)),
    ((1,0,0,0,-1,0,0,0,1),   (0,1,1),    (1,2,2)),
    ((1,0,0,0,1,0,0,0,-1),   (1,0,1),    (2,1,2)),
    ((0,-1,0,-1,0,0,0,0,1),  (1,-1,-1),  (4,4,4)),
    ((0,1,0,1,0,0,0,0,1),    (1,1,1),    (4,4,4)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
# Space group 143, 'P 3' (trigonal): the three rotations of the 3-fold
# axis, all with zero translation.  Operations stored as
# (rotation matrix, translation numerators, translation denominators).
transformations = []
for _elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
# Space group 144, 'P 31': 3-fold screw axis with +1/3 and +2/3 shifts
# along c.  Operations stored as (rotation, numerators, denominators).
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,2), (1,1,3)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
# Space group 145, 'P 32': 3-fold screw axis, enantiomorph of P 31
# (the +2/3 and +1/3 shifts along c are swapped relative to P 31).
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),   (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1), (0,0,2), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1), (0,0,1), (1,1,3)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
# Space group 146, 'R 3 :H' (rhombohedral, hexagonal axes): the three
# 3-fold rotations combined with the rhombohedral centring translations
# (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3), in that order.
transformations = []
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
]
for _num, _den in [((0,0,0), (1,1,1)), ((1,2,2), (3,3,3)), ((2,1,1), (3,3,3))]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
# Space group 147, 'P -3': the 3-fold rotations and their inversions,
# all with zero translation.
transformations = []
for _elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
# Space group 148, 'R -3 :H' (rhombohedral, hexagonal axes): the six
# point-group operations of -3 combined with the rhombohedral centring
# translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3), in that order.
transformations = []
_rotations = [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (-1,0,0,0,-1,0,0,0,-1),
    (0,1,0,-1,1,0,0,0,-1),
    (1,-1,0,1,0,0,0,0,-1),
]
for _num, _den in [((0,0,0), (1,1,1)), ((1,2,2), (3,3,3)), ((2,1,1), (3,3,3))]:
    for _elems in _rotations:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        trans_num = N.array(_num)
        trans_den = N.array(_den)
        transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
# Space group 149, 'P 3 1 2': 3-fold rotations plus the 2-fold axes
# perpendicular to [100]/[010]/[110], all with zero translation.
transformations = []
for _elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (0,-1,0,-1,0,0,0,0,-1),
    (-1,1,0,0,1,0,0,0,-1),
    (1,0,0,1,-1,0,0,0,-1),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
# Space group 150, 'P 3 2 1': 3-fold rotations plus 2-fold axes along
# the a, b and [110] directions, all with zero translation.
transformations = []
for _elems in [
    (1,0,0,0,1,0,0,0,1),
    (0,-1,0,1,-1,0,0,0,1),
    (-1,1,0,-1,0,0,0,0,1),
    (1,-1,0,0,-1,0,0,0,-1),
    (-1,0,0,-1,1,0,0,0,-1),
    (0,1,0,1,0,0,0,0,-1),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array((0,0,0))
    trans_den = N.array((1,1,1))
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
# Space group 151, 'P 31 1 2': screw variant of P 3 1 2 with the 3(1)
# axis; translations along c in thirds.  Each operation is stored as
# (rotation, translation numerators, translation denominators).
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1),  (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1),  (0,0,2), (1,1,3)),
    ((0,-1,0,-1,0,0,0,0,-1), (0,0,2), (1,1,3)),
    ((-1,1,0,0,1,0,0,0,-1),  (0,0,1), (1,1,3)),
    ((1,0,0,1,-1,0,0,0,-1),  (0,0,0), (1,1,1)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
# Space group 152, 'P 31 2 1': screw variant of P 3 2 1 with the 3(1)
# axis; translations along c in thirds.  Each operation is stored as
# (rotation, translation numerators, translation denominators).
transformations = []
for _elems, _num, _den in [
    ((1,0,0,0,1,0,0,0,1),    (0,0,0), (1,1,1)),
    ((0,-1,0,1,-1,0,0,0,1),  (0,0,1), (1,1,3)),
    ((-1,1,0,-1,0,0,0,0,1),  (0,0,2), (1,1,3)),
    ((1,-1,0,0,-1,0,0,0,-1), (0,0,2), (1,1,3)),
    ((-1,0,0,-1,1,0,0,0,-1), (0,0,1), (1,1,3)),
    ((0,1,0,1,0,0,0,0,-1),   (0,0,0), (1,1,1)),
]:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
# Space group 153 ('P 32 1 2'): operators as (rotation elements,
# translation numerator, translation denominator) triples.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
# Space group 154 ('P 32 2 1'): operators as (rotation elements,
# translation numerator, translation denominator) triples.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
# Space group 155 ('R 3 2 :H'): six point operators, each repeated with the
# three lattice translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3),
# in the same order as the original expanded listing.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _point_ops:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
# Space group 156 ('P 3 m 1'): six point operators, all with zero translation.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _elems in _point_ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
# Space group 157 ('P 3 1 m'): six point operators, all with zero translation.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]
for _elems in _point_ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
# Space group 158 ('P 3 c 1'): three pure rotations followed by three
# operators that carry a 1/2 translation along the third axis.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
# Space group 159 ('P 3 1 c'): three pure rotations followed by three
# operators that carry a 1/2 translation along the third axis.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
# Space group 160 ('R 3 m :H'): six point operators, each repeated with the
# three lattice translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3),
# in the same order as the original expanded listing.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _point_ops:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
# Space group 161 ('R 3 c :H'): two triplets of rotations; inside each lattice
# translation block the first triplet uses one fractional translation and the
# second triplet another, exactly as in the original expanded listing.
transformations = []
_ops_a = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
]
_ops_b = [
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
_trans_blocks = [
    (([0,0,0], [1,1,1]), ([0,0,1], [1,1,2])),
    (([1,2,2], [3,3,3]), ([1,2,7], [3,3,6])),
    (([2,1,1], [3,3,3]), ([2,1,5], [3,3,6])),
]
for (_num_a, _den_a), (_num_b, _den_b) in _trans_blocks:
    for _elems in _ops_a:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num_a), N.array(_den_a)))
    for _elems in _ops_b:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num_b), N.array(_den_b)))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
# Space group 162 ('P -3 1 m'): twelve point operators, all with zero
# translation.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]
for _elems in _point_ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
# Space group 163 ('P -3 1 c'): twelve operators; two triplets carry a +1/2
# or -1/2 translation along the third axis, the rest none.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,-1], [1,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
# Space group 164 ('P -3 m 1'): twelve point operators, all with zero
# translation.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _elems in _point_ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
# Space group 165 ('P -3 c 1'): twelve operators; two triplets carry a +1/2
# or -1/2 translation along the third axis, the rest none.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
]
for _elems, _num, _den in _ops:
    rot = N.array(_elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
# Space group 166 ('R -3 m :H'): twelve point operators, each repeated with
# the three lattice translations (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3),
# in the same order as the original expanded listing.
transformations = []
_point_ops = [
    [1,0,0,0,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _num, _den in [([0,0,0], [1,1,1]), ([1,2,2], [3,3,3]), ([2,1,1], [3,3,3])]:
    for _elems in _point_ops:
        rot = N.array(_elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(_num), N.array(_den)))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
# Space group 167, 'R -3 c' (hexagonal axes setting).
# Each symmetry operation is stored as a (rotation, numerator, denominator)
# triple: a 3x3 integer rotation matrix plus an exact fractional translation
# given as per-axis numerator/denominator arrays.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,1], [1,2,2], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [1,2,2], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [1,2,2], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [1,2,7], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [1,2,7], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [1,2,2], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [1,2,2], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [1,2,1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [1,2,1], [3,3,6]),
    ([1,0,0,0,1,0,0,0,1], [2,1,1], [3,3,3]),
    ([0,-1,0,1,-1,0,0,0,1], [2,1,1], [3,3,3]),
    ([-1,1,0,-1,0,0,0,0,1], [2,1,1], [3,3,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,-1,1,0,0,0,-1], [2,1,5], [3,3,6]),
    ([0,1,0,1,0,0,0,0,-1], [2,1,5], [3,3,6]),
    ([-1,0,0,0,-1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([0,1,0,-1,1,0,0,0,-1], [2,1,1], [3,3,3]),
    ([1,-1,0,1,0,0,0,0,-1], [2,1,1], [3,3,3]),
    ([-1,1,0,0,1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([1,0,0,1,-1,0,0,0,1], [2,1,-1], [3,3,6]),
    ([0,-1,0,-1,0,0,0,0,1], [2,1,-1], [3,3,6]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
# Space group 168, 'P 6': six pure rotations about the hexagonal axis,
# stored as (rotation, translation-numerator, translation-denominator).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(168, 'P 6', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[168] = sg
space_groups['P 6'] = sg
# Space group 169, 'P 61': 6_1 screw axis — each rotation carries a
# fractional c-axis translation (numerator/denominator arrays).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(169, 'P 61', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[169] = sg
space_groups['P 61'] = sg
# Space group 170, 'P 65': 6_5 screw axis (enantiomorph of P 61).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(170, 'P 65', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[170] = sg
space_groups['P 65'] = sg
# Space group 171, 'P 62': 6_2 screw axis.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(171, 'P 62', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[171] = sg
space_groups['P 62'] = sg
# Space group 172, 'P 64': 6_4 screw axis (enantiomorph of P 62).
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(172, 'P 64', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[172] = sg
space_groups['P 64'] = sg
# Space group 173, 'P 63': 6_3 screw axis.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(173, 'P 63', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[173] = sg
space_groups['P 63'] = sg
# Space group 174, 'P -6': rotoinversion group, all translations zero.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(174, 'P -6', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[174] = sg
space_groups['P -6'] = sg
# Space group 175, 'P 6/m': twelve point-group operations, all with zero
# translation, so only the rotation matrices vary.
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(175, 'P 6/m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[175] = sg
space_groups['P 6/m'] = sg
# Space group 176, 'P 63/m': 6_3 screw axis combined with a mirror; the
# screw components appear as +/- 1/2 translations along c.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1], [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1], [0,0,-1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(176, 'P 63/m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[176] = sg
space_groups['P 63/m'] = sg
# Space group 177, 'P 6 2 2': twelve rotations, all translations zero.
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
# Space group 178, 'P 61 2 2': 6_1 screw axis plus twofold axes; the screw
# components appear as fractional c translations.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,5], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,6]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
# Space group 179, 'P 65 2 2': enantiomorph of P 61 2 2.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,5], [1,1,6]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,6]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,6]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,5], [1,1,6]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
# Space group 180, 'P 62 2 2': 6_2 screw axis plus twofold axes.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,2], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,1], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,3]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
# Space group 181, 'P 64 2 2': enantiomorph of P 62 2 2.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,3]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,1], [1,1,3]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,2], [1,1,3]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,2], [1,1,3]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,3]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,2], [1,1,3]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
# Space group 182, 'P 63 2 2': 6_3 screw axis plus twofold axes.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1], [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
# Space group 183, 'P 6 m m': twelve operations, all translations zero.
transformations = []
for rot_elements in [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [-1,0,0,0,-1,0,0,0,1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(183, 'P 6 m m', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[183] = sg
space_groups['P 6 m m'] = sg
# Space group 184, 'P 6 c c': the mirror-related operations carry a 1/2
# glide translation along c.
transformations = []
for rot_elements, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]:
    rot = N.array(rot_elements)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(184, 'P 6 c c', transformations)
# Register under both the IT number and the Hermann-Mauguin symbol.
space_groups[184] = sg
space_groups['P 6 c c'] = sg
# Space group 185, 'P 63 c m'.  Symmetry operations tabulated as
# (row-major 3x3 rotation, translation numerator, translation denominator)
# and expanded in a loop; identical to the original unrolled code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,0], [1,1,1]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
# Space group 186, 'P 63 m c'.  Symmetry operations tabulated as
# (row-major 3x3 rotation, translation numerator, translation denominator)
# and expanded in a loop; identical to the original unrolled code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,1,0,0,1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],  [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],   [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1], [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1], [0,0,1], [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
# Space group 187, 'P -6 m 2'.  Every operation of this group has a zero
# translation, so only the row-major 3x3 rotations are tabulated; the
# loop expands them exactly as the original unrolled code did.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
]
for _rot in _rotations:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
# Space group 188, 'P -6 c 2'.  Symmetry operations tabulated as
# (row-major 3x3 rotation, translation numerator, translation denominator)
# and expanded in a loop; identical to the original unrolled code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,0], [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],    [0,0,1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,1], [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
# Space group 189, 'P -6 2 m'.  All operations of this group carry a zero
# translation, so only the row-major 3x3 rotations are tabulated; the loop
# reproduces the original unrolled code exactly.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,-1],
    [1,0,0,0,1,0,0,0,-1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]
for _rot in _rotations:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
# Space group 190, 'P -6 2 c'.  Symmetry operations tabulated as
# (row-major 3x3 rotation, translation numerator, translation denominator)
# and expanded in a loop; identical to the original unrolled code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0], [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0], [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,0], [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0], [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,1], [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
# Space group 191, 'P 6/m m m' (24 operations).  Every operation has a
# zero translation, so only the row-major 3x3 rotations are tabulated;
# the loop reproduces the original unrolled code exactly.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [1,-1,0,1,0,0,0,0,1],
    [0,1,0,-1,1,0,0,0,1],
    [0,-1,0,1,-1,0,0,0,1],
    [-1,1,0,-1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,-1],
    [-1,0,0,-1,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
    [0,1,0,1,0,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,-1],
    [1,0,0,1,-1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,-1],
    [-1,1,0,-1,0,0,0,0,-1],
    [0,-1,0,1,-1,0,0,0,-1],
    [0,1,0,-1,1,0,0,0,-1],
    [1,-1,0,1,0,0,0,0,-1],
    [-1,1,0,0,1,0,0,0,1],
    [1,0,0,1,-1,0,0,0,1],
    [1,0,0,0,1,0,0,0,-1],
    [0,-1,0,-1,0,0,0,0,1],
    [0,1,0,1,0,0,0,0,1],
    [1,-1,0,0,-1,0,0,0,1],
    [-1,0,0,-1,1,0,0,0,1],
]
for _rot in _rotations:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
# Space group 192, 'P 6/m c c' (24 operations).  Symmetry operations
# tabulated as (row-major 3x3 rotation, translation numerator, translation
# denominator) and expanded in a loop; identical to the original unrolled
# code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([0,1,0,-1,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,-1], [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(192, 'P 6/m c c', transformations)
space_groups[192] = sg
space_groups['P 6/m c c'] = sg
# Space group 193, 'P 63/m c m' (24 operations).  Symmetry operations
# tabulated as (row-major 3x3 rotation, translation numerator, translation
# denominator) and expanded in a loop; identical to the original unrolled
# code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,1],  [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,1,-1,0,0,0,1],    [0,0,-1], [1,1,2]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,0],  [1,1,1]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(193, 'P 63/m c m', transformations)
space_groups[193] = sg
space_groups['P 63/m c m'] = sg
# Space group 194, 'P 63/m m c' (24 operations).  Symmetry operations
# tabulated as (row-major 3x3 rotation, translation numerator, translation
# denominator) and expanded in a loop; identical to the original unrolled
# code.
transformations = []
_ops = [
    ([1,0,0,0,1,0,0,0,1],     [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,1],    [0,0,1],  [1,1,2]),
    ([0,1,0,-1,1,0,0,0,1],    [0,0,1],  [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,-1,1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,0,0,0,-1,0,0,0,1],   [0,0,1],  [1,1,2]),
    ([0,1,0,1,0,0,0,0,-1],    [0,0,0],  [1,1,1]),
    ([0,-1,0,-1,0,0,0,0,-1],  [0,0,1],  [1,1,2]),
    ([-1,1,0,0,1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([1,0,0,1,-1,0,0,0,-1],   [0,0,1],  [1,1,2]),
    ([-1,0,0,0,-1,0,0,0,-1],  [0,0,0],  [1,1,1]),
    ([-1,1,0,-1,0,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,-1,0,1,-1,0,0,0,-1],  [0,0,-1], [1,1,2]),
    ([0,1,0,-1,1,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([1,-1,0,1,0,0,0,0,-1],   [0,0,0],  [1,1,1]),
    ([-1,1,0,0,1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,1,-1,0,0,0,1],    [0,0,0],  [1,1,1]),
    ([1,0,0,0,1,0,0,0,-1],    [0,0,-1], [1,1,2]),
    ([0,-1,0,-1,0,0,0,0,1],   [0,0,0],  [1,1,1]),
    ([0,1,0,1,0,0,0,0,1],     [0,0,-1], [1,1,2]),
    ([1,-1,0,0,-1,0,0,0,1],   [0,0,-1], [1,1,2]),
    ([-1,0,0,-1,1,0,0,0,1],   [0,0,-1], [1,1,2]),
]
for _rot, _num, _den in _ops:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array(_num)
    trans_den = N.array(_den)
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(194, 'P 63/m m c', transformations)
space_groups[194] = sg
space_groups['P 63/m m c'] = sg
# Space group 195, 'P 2 3' (cubic).  Every operation has a zero
# translation, so only the row-major 3x3 rotations are tabulated; the
# loop reproduces the original unrolled code exactly.
transformations = []
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
for _rot in _rotations:
    rot = N.array(_rot)
    rot.shape = (3, 3)
    trans_num = N.array([0,0,0])
    trans_den = N.array([1,1,1])
    transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(195, 'P 2 3', transformations)
space_groups[195] = sg
space_groups['P 2 3'] = sg
# Space group 196 ('F 2 3'): the twelve rotations of P 2 3 repeated for
# each of the four translations (0,0,0), (0,1/2,1/2), (1/2,0,1/2),
# (1/2,1/2,0) -- presumably the F-centring vectors.  The outer loop runs
# over the translations so the original ordering (all twelve rotations
# per translation) is preserved.
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([0,1,1], [1,2,2]),
                 ([1,0,1], [2,1,2]),
                 ([1,1,0], [2,2,1])]:
    for elems in _rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(196, 'F 2 3', transformations)
space_groups[196] = sg
space_groups['F 2 3'] = sg
# Space group 197 ('I 2 3'): the twelve rotations of P 2 3, once with a
# zero translation and once shifted by (1/2,1/2,1/2) -- presumably the
# body-centring vector.  Outer loop over translations keeps the original
# operation order.
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
transformations = []
for num, den in [([0,0,0], [1,1,1]),
                 ([1,1,1], [2,2,2])]:
    for elems in _rotations:
        rot = N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(197, 'I 2 3', transformations)
space_groups[197] = sg
space_groups['I 2 3'] = sg
# Space group 198 ('P 21 3'): twelve operations, each an individual
# (rotation elements, translation numerators, translation denominators)
# triple; the screw axes carry non-zero fractional translations.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,0,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,0], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [0,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,0,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,0], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,0], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,0,1], [2,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(198, 'P 21 3', transformations)
space_groups[198] = sg
space_groups['P 21 3'] = sg
# Space group 199 ('I 21 3'): twenty-four operations.  The second twelve
# reuse the same rotations with translation numerators (1,1,1) over
# varying denominators; the translations are irregular, so every triple
# is listed explicitly in the original order.
transformations = []
for elems, num, den in [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [0,1,0], [1,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,0,1], [1,1,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,0], [2,1,1]),
    ([0,0,-1,-1,0,0,0,1,0], [0,1,0], [1,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,0], [2,1,1]),
    ([0,1,0,0,0,-1,-1,0,0], [0,0,1], [1,1,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,0,1], [1,1,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,0], [2,1,1]),
    ([-1,0,0,0,-1,0,0,0,1], [0,1,0], [1,2,1]),
    ([1,0,0,0,1,0,0,0,1], [1,1,1], [2,2,2]),
    ([0,0,1,1,0,0,0,1,0], [1,1,1], [2,2,2]),
    ([0,1,0,0,0,1,1,0,0], [1,1,1], [2,2,2]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,1], [2,1,2]),
    ([0,0,1,-1,0,0,0,-1,0], [1,1,1], [2,2,1]),
    ([0,-1,0,0,0,1,-1,0,0], [1,1,1], [1,2,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,1], [2,1,2]),
    ([0,0,-1,1,0,0,0,-1,0], [1,1,1], [1,2,2]),
    ([0,1,0,0,0,-1,-1,0,0], [1,1,1], [2,2,1]),
    ([1,0,0,0,-1,0,0,0,-1], [1,1,1], [2,2,1]),
    ([-1,0,0,0,1,0,0,0,-1], [1,1,1], [1,2,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,1], [2,1,2]),
]:
    rot = N.array(elems)
    rot.shape = (3, 3)
    transformations.append((rot, N.array(num), N.array(den)))
sg = SpaceGroup(199, 'I 21 3', transformations)
space_groups[199] = sg
space_groups['I 21 3'] = sg
# Space group 200 ('P m -3'): twenty-four operations with zero
# translations.  Operations 13-24 are exactly the negations of
# operations 1-12 (inversion through the origin), so the second pass
# multiplies every rotation by -1 instead of listing it again.
_rotations = [
    [1,0,0,0,1,0,0,0,1],
    [0,0,1,1,0,0,0,1,0],
    [0,1,0,0,0,1,1,0,0],
    [0,-1,0,0,0,-1,1,0,0],
    [0,0,1,-1,0,0,0,-1,0],
    [0,-1,0,0,0,1,-1,0,0],
    [0,0,-1,-1,0,0,0,1,0],
    [0,0,-1,1,0,0,0,-1,0],
    [0,1,0,0,0,-1,-1,0,0],
    [1,0,0,0,-1,0,0,0,-1],
    [-1,0,0,0,1,0,0,0,-1],
    [-1,0,0,0,-1,0,0,0,1],
]
transformations = []
for sign in (1, -1):
    for elems in _rotations:
        rot = sign * N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, N.array([0,0,0]), N.array([1,1,1])))
sg = SpaceGroup(200, 'P m -3', transformations)
space_groups[200] = sg
space_groups['P m -3'] = sg
# Space group 201 ('P n -3 :2'): twenty-four operations.  Operations
# 13-24 are the negations of operations 1-12 in both the rotation and
# the translation numerators (denominators unchanged), so the second
# pass applies sign = -1 to both instead of listing them again.
_ops = [
    ([1,0,0,0,1,0,0,0,1], [0,0,0], [1,1,1]),
    ([0,0,1,1,0,0,0,1,0], [0,0,0], [1,1,1]),
    ([0,1,0,0,0,1,1,0,0], [0,0,0], [1,1,1]),
    ([0,-1,0,0,0,-1,1,0,0], [1,1,0], [2,2,1]),
    ([0,0,1,-1,0,0,0,-1,0], [0,1,1], [1,2,2]),
    ([0,-1,0,0,0,1,-1,0,0], [1,0,1], [2,1,2]),
    ([0,0,-1,-1,0,0,0,1,0], [1,1,0], [2,2,1]),
    ([0,0,-1,1,0,0,0,-1,0], [1,0,1], [2,1,2]),
    ([0,1,0,0,0,-1,-1,0,0], [0,1,1], [1,2,2]),
    ([1,0,0,0,-1,0,0,0,-1], [0,1,1], [1,2,2]),
    ([-1,0,0,0,1,0,0,0,-1], [1,0,1], [2,1,2]),
    ([-1,0,0,0,-1,0,0,0,1], [1,1,0], [2,2,1]),
]
transformations = []
for sign in (1, -1):
    for elems, num, den in _ops:
        rot = sign * N.array(elems)
        rot.shape = (3, 3)
        transformations.append((rot, sign * N.array(num), N.array(den)))
sg = SpaceGroup(201, 'P n -3 :2', transformations)
space_groups[201] = sg
space_groups['P n -3 :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(202, 'F m -3', transformations)
space_groups[202] = sg
space_groups['F m -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,-1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,-1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,1,-1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,-1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,0,0,-1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,-1,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,-1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,0,0,1,1,0,0])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,0,1,1,0,0,0,1,0])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = | N.array([2,2,1]) | numpy.array |
"""
Copyright: Intel Corp. 2018
Author: <NAME>
Email: <EMAIL>
Created Date: Oct 19th 2018
For ICLR 2019 Reproducibility Challenge
"""
import numpy as np
import tensorflow as tf
import tqdm
from VSR.Util import Config, to_list
from ..Arch import Discriminator
from ..Framework.GAN import (
gradient_penalty, inception_score, loss_bce_gan,
loss_lsgan, loss_relative_bce_gan,
loss_relative_lsgan, loss_wgan
)
from ..Framework.SuperResolution import SuperResolution
from ..Framework.Trainer import VSR
class GAN(SuperResolution):
    """Base class of GAN.

    Args:
      name: model name.
      patch_size: generated image size.
      z_dim: latent space dimension.
      init_filter: filter size. (ideally, 512 for 32x32, 1024 for 64x64).
      linear: boolean, toggle FC layer after random vector.
      norm_g: normalization of G.
      norm_d: normalization of D.
      use_bias: boolean, use bias variables.
      optimizer: str: 'adam', 'rmsprop', 'momentum', 'sgd'.
      arch: G/D architecture: 'dcgan' or 'resnet'.
      nd_iter: number of D updates for each G update.
    """

    def __init__(self, name='gan', patch_size=32, z_dim=128, init_filter=512,
                 linear=False, norm_g=None, norm_d=None, use_bias=False,
                 optimizer=None, arch=None, nd_iter=1, **kwargs):
        super(GAN, self).__init__(**kwargs)
        self.name = name
        self._trainer = GanTrainer
        self.output_size = patch_size
        self.z_dim = z_dim
        self.init_filter = init_filter
        self.linear = linear
        self.bias = use_bias
        self.nd_iter = nd_iter
        # BUGFIX: default both normalization flags to False.  Previously they
        # were only assigned inside the `isinstance(norm_g, str)` branch, so
        # the default `norm_g=None` left `self.bn` / `self.sn` unset and the
        # first call into the generator raised AttributeError.
        self.bn = False
        self.sn = False
        if isinstance(norm_g, str):
            # 'bn'/'batch' selects batch norm; 'sn'/'spectral' selects
            # spectral norm.  Both may appear in the same string.
            self.bn = np.any([word in norm_g for word in ('bn', 'batch')])
            self.sn = np.any([word in norm_g for word in ('sn', 'spectral')])
        self.d_outputs = []  # (real, fake) discriminator logits
        self.g_outputs = []  # (real, fake) images in [-1, 1]
        # monitor probability of being real and fake
        self.p_fake = None
        self.p_real = None
        self.opt = optimizer
        if self.opt is None:
            self.opt = Config(name='adam')
        if arch is None or arch == 'dcgan':
            self.G = self.dcgan_g
            self.D = Discriminator.dcgan_d(
                self, [patch_size, patch_size, self.channel],
                norm=norm_d, name_or_scope='D')
        elif arch == 'resnet':
            self.G = self.resnet_g
            self.D = Discriminator.resnet_d(
                self, [patch_size, patch_size, self.channel], times_pooling=4,
                norm=norm_d, name_or_scope='D')

    @staticmethod
    def _normalize(x):
        # Map pixel values [0, 255] -> [-1, 1] (matches tanh output range).
        return x / 127.5 - 1

    @staticmethod
    def _denormalize(x):
        # Map [-1, 1] back to pixel values [0, 255].
        return (x + 1) * 127.5

    def dcgan_g(self, inputs):
        """DCGAN-style generator: dense projection to a 4x4 map, then
        strided deconvolutions up to `self.output_size`, tanh output."""
        with tf.variable_scope('G', reuse=tf.AUTO_REUSE):
            f = self.init_filter
            size = 4
            n_up = int(np.log2(self.output_size // size)) + 1
            kwargs = dict(use_sn=self.sn,
                          kernel_initializer='random_normal_0.02')
            x = self.dense(inputs, f * size * size, use_sn=self.sn,
                           kernel_initializer='random_normal_0.02')
            if self.bn:
                x = self.batch_norm(x, self.training_phase, epsilon=2e-5)
            x = tf.nn.relu(x)
            x = tf.reshape(x, [-1, size, size, f])
            for i in range(1, n_up):
                # Halve filters and double spatial size each step.
                x = self.deconv2d(x, f // 2 ** i, 4, 2, **kwargs)
                if self.bn:
                    x = self.batch_norm(x, self.training_phase, epsilon=2e-5)
                x = tf.nn.relu(x)
            # NOTE: the original code applied `tf.nn.relu` a second time
            # right after the loop; ReLU is idempotent, so the duplicate
            # call is dropped with no behavior change.
            x = self.deconv2d(x, self.channel, 3, 1, **kwargs)
            x = tf.nn.tanh(x)
            return x

    def resnet_g(self, inputs):
        """ResNet-style generator: dense projection, then nearest-neighbor
        upscaling interleaved with residual blocks, tanh-activated conv."""
        with tf.variable_scope('G', reuse=tf.AUTO_REUSE):
            f = self.init_filter // 2
            size = 4
            n_up = int(np.log2(self.output_size // size))
            x = self.dense(inputs, f * size * size, use_sn=self.sn,
                           kernel_initializer='random_normal_0.02')
            x = tf.reshape(x, [-1, size, size, f])
            for _ in range(n_up):
                # up
                x = self.upscale(x, 'nearest', 2)
                x = self.resblock(x, 256, 3, activation='relu',
                                  use_batchnorm=self.bn,
                                  use_sn=self.sn, use_bias=self.bias)
            x = self.batch_norm(x, self.training_phase)
            x = tf.nn.relu(x)
            x = self.tanh_conv2d(x, self.channel, 3)
            return x

    def build_graph(self):
        """Wire noise -> G -> fake image and label -> D, and expose the
        discriminator logits / image pairs for the subclass losses."""
        self.inputs.append(
            tf.placeholder('float32', (None, self.z_dim,), name='input/noise'))
        self.label.append(tf.placeholder(
            'float32', [None, self.output_size, self.output_size, self.channel],
            name='label/image'))
        with tf.variable_scope(self.name, reuse=tf.AUTO_REUSE):
            fake_image = self.G(self.inputs[0])
            real_image = self._normalize(self.label[0])
            real_disc = self.D(real_image)
            fake_disc = self.D(fake_image)
            self.outputs.append(self._denormalize(fake_image))
            self.d_outputs = (real_disc, fake_disc)
            self.g_outputs = (real_image, fake_image)
            # Mean sigmoid of the logits: monitored probabilities of the
            # discriminator judging fake/real samples as "real".
            self.p_fake = tf.reduce_mean(tf.sigmoid(fake_disc))
            self.p_real = tf.reduce_mean(tf.sigmoid(real_disc))

    def _build_loss(self, g_loss, d_loss):
        """Create the G/D train ops and metrics from the given losses.

        Called by subclasses from inside their `tf.name_scope('Loss')`.
        """
        var_d = tf.trainable_variables(self.name + '/D')
        var_g = tf.trainable_variables(self.name + '/G')
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            opt_d = self.get_optimizer(self.opt)
            opt_g = self.get_optimizer(self.opt)
            # Only the D step advances the global step counter.
            op_d = opt_d.minimize(d_loss, self.global_steps, var_list=var_d)
            op_g = opt_g.minimize(g_loss, var_list=var_g)
            self.loss = [op_g, op_d]
        self.train_metric = {'gloss': g_loss, 'd_loss': d_loss,
                             'p_real': self.p_real, 'p_fake': self.p_fake}
        self.metrics['inception-score'] = inception_score(self.outputs[0], 1)
        self.metrics['p_real'] = self.p_real
        self.metrics['p_fake'] = self.p_fake

    def build_summary(self):
        tf.summary.scalar('FakeD', self.p_fake)
        tf.summary.scalar('RealD', self.p_real)
        tf.summary.scalar('Inception_Score', self.metrics['inception-score'])

    def build_saver(self):
        """Separate savers for G (+ global step), D, and loss variables."""
        var_g = tf.global_variables(self.name + '/G')
        var_d = tf.global_variables(self.name + '/D')
        var_loss = tf.global_variables('Loss')
        steps = [self.global_steps]
        self.savers['gen'] = tf.train.Saver(var_g + steps, max_to_keep=1)
        self.savers['disc'] = tf.train.Saver(var_d, max_to_keep=1)
        self.savers['loss'] = tf.train.Saver(var_loss, max_to_keep=1)

    def get_optimizer(self, config=None):
        """Build a fresh tf optimizer from `config` (defaults to `self.opt`).

        Returns None for an unknown optimizer name.
        """
        if config is None:
            config = self.opt
        name = config.name
        # NOTE(review): `**config` forwards every config entry (including
        # `name`) to the optimizer constructor; tf.train optimizers accept a
        # `name` keyword, so this is presumably intentional — confirm for any
        # extra keys added to the config.
        if name == 'adam':
            return tf.train.AdamOptimizer(self.learning_rate, **config)
        elif name == 'rmsprop':
            return tf.train.RMSPropOptimizer(self.learning_rate, **config)
        elif name == 'momentum':
            return tf.train.MomentumOptimizer(self.learning_rate, **config)
        elif name == 'sgd':
            return tf.train.GradientDescentOptimizer(
                self.learning_rate, **config)
        return None

    def train_batch(self, feature, label, learning_rate=1e-4, **kwargs):
        """Run one training step.

        D is updated every call; G is updated once every `nd_iter` calls
        (when `steps % nd_iter == 0`).  Returns the train-metric values.
        """
        feature = to_list(feature)
        label = to_list(label)
        self.feed_dict.update(
            {self.training_phase: True, self.learning_rate: learning_rate})
        for i in range(len(self.inputs)):
            self.feed_dict[self.inputs[i]] = feature[i]
        for i in range(len(self.label)):
            self.feed_dict[self.label[i]] = label[i]
        loss = kwargs.get('loss') or self.loss
        loss = to_list(loss)
        step = kwargs['steps']
        sess = tf.get_default_session()
        if step % self.nd_iter == 0:
            # update G-net
            sess.run(loss[0], feed_dict=self.feed_dict)
        # update D-net
        sess.run(loss[1:], feed_dict=self.feed_dict)
        values = sess.run(list(self.train_metric.values()),
                          feed_dict=self.feed_dict)
        return dict(zip(self.train_metric, values))
class SGAN(GAN):
    """Standard GAN trained with binary cross-entropy losses."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_bce_gan(real_disc, fake_disc)
            self._build_loss(g_loss, d_loss)
class SGANGP(GAN):
    """Standard GAN with a gradient penalty (lambda=10) added to D's loss."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_bce_gan(real_disc, fake_disc)
            with tf.variable_scope(self.name, reuse=True):
                gp = gradient_penalty(*self.g_outputs, graph_fn=self.D, lamb=10)
            d_loss += gp
            self._build_loss(g_loss, d_loss)
class LSGAN(GAN):
    """Least-squares GAN."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_lsgan(real_disc, fake_disc)
            self._build_loss(g_loss, d_loss)
class WGAN(GAN):
    """Wasserstein GAN: critic loss plus hard weight clipping on D."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_wgan(real_disc, fake_disc)
            self._build_loss(g_loss, d_loss)
            # Clip every critic weight into [-0.01, 0.01] after each D step
            # to enforce the Lipschitz constraint.
            lower, upper = -.01, .01
            clip_ops = []
            for var in tf.trainable_variables(self.name + '/D'):
                clip_ops.append(
                    tf.assign(var, tf.clip_by_value(var, lower, upper)))
            self.loss.append(tf.group(*clip_ops))

    def build_saver(self):
        """Savers for G (with global step) and D only — no loss variables."""
        step_vars = [self.global_steps]
        gen_vars = tf.global_variables(self.name + '/G')
        disc_vars = tf.global_variables(self.name + '/D')
        self.savers['gen'] = tf.train.Saver(gen_vars + step_vars,
                                            max_to_keep=1)
        self.savers['disc'] = tf.train.Saver(disc_vars, max_to_keep=1)
class WGANGP(GAN):
    """WGAN-GP: Wasserstein loss with a gradient penalty (lambda=10)."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_wgan(real_disc, fake_disc)
            with tf.variable_scope(self.name, reuse=True):
                gp = gradient_penalty(*self.g_outputs, graph_fn=self.D, lamb=10)
            d_loss += gp
            self._build_loss(g_loss, d_loss)
class RGAN(GAN):
    """Relativistic GAN (non-averaged) with BCE losses."""

    def build_loss(self):
        real_disc, fake_disc = self.d_outputs
        with tf.name_scope('Loss'):
            g_loss, d_loss = loss_relative_bce_gan(real_disc, fake_disc,
                                                   average=False)
            self._build_loss(g_loss, d_loss)
class RGANGP(GAN):
    """Relativistic GAN with a gradient penalty on the discriminator."""

    def build_loss(self):
        with tf.name_scope('Loss'):
            gen_loss, disc_loss = loss_relative_bce_gan(*self.d_outputs, average=False)
            with tf.variable_scope(self.name, reuse=True):
                penalty = gradient_penalty(*self.g_outputs, graph_fn=self.D, lamb=10)
            self._build_loss(gen_loss, disc_loss + penalty)
class RaGAN(GAN):
    """Relativistic average GAN (BCE form with averaged comparisons)."""

    def build_loss(self):
        with tf.name_scope('Loss'):
            gen_loss, disc_loss = loss_relative_bce_gan(*self.d_outputs, average=True)
            self._build_loss(gen_loss, disc_loss)
class RaGANGP(GAN):
    """Relativistic average GAN with a gradient penalty on the discriminator."""

    def build_loss(self):
        with tf.name_scope('Loss'):
            gen_loss, disc_loss = loss_relative_bce_gan(*self.d_outputs, average=True)
            with tf.variable_scope(self.name, reuse=True):
                penalty = gradient_penalty(*self.g_outputs, graph_fn=self.D, lamb=10)
            self._build_loss(gen_loss, disc_loss + penalty)
class RLSGAN(GAN):
    """Relativistic least-squares GAN (non-averaged comparison)."""

    def build_loss(self):
        with tf.name_scope('Loss'):
            gen_loss, disc_loss = loss_relative_lsgan(*self.d_outputs, average=False)
            self._build_loss(gen_loss, disc_loss)
class RaLSGAN(GAN):
    """Relativistic average least-squares GAN."""

    def build_loss(self):
        with tf.name_scope('Loss'):
            gen_loss, disc_loss = loss_relative_lsgan(*self.d_outputs, average=True)
            self._build_loss(gen_loss, disc_loss)
class GanTrainer(VSR):
    def query_config(self, config, **kwargs):
        """Collect GAN-specific settings into the trainer's value collector,
        then delegate the remaining configuration to the parent trainer.

        :param config: configuration object exposing `batch` and `patch_size`.
        :param kwargs: forwarded unchanged to the parent implementation.
        :return: whatever the parent `query_config` returns.
        """
        # add [batch, patch_size] to collector `self.v`
        self.v.batch = config.batch
        self.v.patch_size = config.patch_size
        return super(GanTrainer, self).query_config(config, **kwargs)
    def fit_init(self):
        """Prepare for training: turn off the loader's data augmentation
        (the `feature` input is replaced by random noise during GAN training,
        see `fn_train_each_step`) and run the parent initialization.
        """
        # disable data augmentation of GAN training
        self.v.train_loader.aug = False
        return super(GanTrainer, self).fit_init()
def fn_train_each_step(self, label=None, feature=None, name=None,
post=None):
"""override this method for:
- sample feature from random noise (uniform distributed from [-1,1]).
- pass step number to `train_batch` call.
"""
v = self.v
feature = | np.random.uniform(-1, 1, [v.batch, self.model.z_dim]) | numpy.random.uniform |
import numpy as np
from sklearn.model_selection import train_test_split

# Load file names and labels
x, y = np.load("/home/ubuntu/capstone/filenames.npy"), np.load("/home/ubuntu/capstone/labels.npy")
print(x.shape)
print(y.shape)

# One-hot label columns to exclude:
#   5 -> Disgust, 7 -> Contempt, 8 -> Uncertain, 9 -> None, 10 -> Non-face
# (add 4 back to this list to also drop the Fear category)
DROP_COLUMNS = [5, 7, 8, 9, 10]

# A sample is dropped when any of the unwanted columns is set. Vectorized
# replacement for the original per-row Python loop over len(y).
drop_indices = np.where((y[:, DROP_COLUMNS] == 1).any(axis=1))[0]

# Drop label rows where indices match, then remove the now all-zero columns
# in one call (equivalent to deleting columns 10, 9, 8, 7, 5 one at a time).
y = np.delete(y, drop_indices, axis=0)
y = np.delete(y, DROP_COLUMNS, axis=1)

# Drop image names where indices match
x = np.delete(x, drop_indices)
import numpy as np
import torch
import gym
import pybulletgym
import os
import sys
import warnings
from datetime import datetime
import mujoco_py
import ray
from ray import tune
from ray.tune.schedulers import ASHAScheduler
import utils
import TD3
import OurDDPG
import DDPG
import plotter
from parser import parse_our_args
# Runs policy for X episodes and returns average reward
# A fixed seed is used for the eval environment
def eval_policy(policy, env_name, seed, eval_episodes=10, utils_object=None):
    """Run the policy for `eval_episodes` episodes on a freshly built eval env.

    A fixed seed offset (+100) keeps evaluation independent of training.

    :param policy: agent exposing `select_action(state)`.
    :param env_name: gym environment id (ignored when a custom env is configured).
    :param seed: base seed; the eval env is seeded with seed + 100.
    :param eval_episodes: number of evaluation rollouts.
    :param utils_object: project helper carrying parsed args and goal logic.
    :return: [avg, std] of episode returns; for the custom env additionally
        the [avg, std] of the environment's original (unshaped) returns.
    """
    env_id = 'OurReacher-v0' if utils_object.args.custom_env else env_name
    eval_env = gym.make(env_id)
    if utils_object.fetch_reach and utils_object.args.fetch_reach_dense:
        eval_env.env.reward_type = "dense"
    eval_env.seed(int(seed) + 100)

    episode_returns = []
    episode_original_returns = []
    for _ in range(eval_episodes):
        total = 0.0
        original_total = 0.0
        state, done = eval_env.reset(), False
        while not done:
            # Deterministic evaluation: no exploration noise (sigma=0).
            x, goal = utils_object.compute_x_goal(state, eval_env, sigma=0)
            action = policy.select_action(x)
            if utils_object.args.custom_env:
                eval_env.set_goal(goal)
            state, reward, done, _ = eval_env.step(action)
            total += reward
            if utils_object.args.custom_env:
                original_total += eval_env.original_rewards
        episode_returns.append(total)
        episode_original_returns.append(original_total)

    rewards = np.asarray(episode_returns)
    original_rewards = np.asarray(episode_original_returns)
    avg_reward = np.mean(rewards)
    std_reward = np.std(rewards)
    avg_original_reward = np.mean(original_rewards)
    std_original_reward = np.std(original_rewards)

    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f} from {rewards} with original reward as {avg_original_reward:.3f}")
    print("---------------------------------------")
    if utils_object.args.custom_env:
        return [avg_reward, std_reward, avg_original_reward, std_original_reward]
    return [avg_reward, std_reward]
def train(config, args):
if not os.path.exists("./results"):
os.makedirs("./results")
if args.save_model and not os.path.exists("./models"):
os.makedirs("./models")
import pybulletgym
warnings.filterwarnings("ignore")
eps_bounds = args.reacher_epsilon_bounds # just aliasing with shorter variable name
utils_object = utils.GeneralUtils(args)
if args.tune_run:
if args.prioritized_replay:
args.alpha = float(config["alpha"])
args.beta = float(config["beta"])
args.discount = float(config.get("discount", args.discount))
args.tau = float(config.get("tau", args.tau))
elif args.custom_env and args.use_hindsight:
eps_bounds = [float(config["epsilons"][0]), float(config["epsilons"][1])]
args.seed = int(config["seed"])
else:
args.discount = float(config.get("discount", args.discount))
args.tau = float(config.get("tau", args.tau))
if args.custom_env:
gym.envs.register(
id='OurReacher-v0',
entry_point='our_reacher_env:OurReacherEnv',
max_episode_steps=50,
reward_threshold=100.0,
)
# this is assuming we only use epsilon for custom env or fetch reach, where episode tsteps is 50 !!!!
max_episode_steps = 50
# retrieve epsilon range
[a, b] = eps_bounds
epsilons = utils_object.epsilon_calc(a, b, max_episode_steps)
env = gym.make('OurReacher-v0', epsilon=epsilons[0], render=False)
else:
env = gym.make(args.env)
if utils_object.fetch_reach and utils_object.args.fetch_reach_dense:
env.env.reward_type = "dense"
# Set seeds
env.seed(int(args.seed))
torch.manual_seed(args.seed)
np.random.seed(args.seed)
if utils_object.fetch_reach:
state_dim = env.reset()["observation"].shape[0]
else:
state_dim = env.observation_space.shape[0]
if args.use_hindsight: # include both current state and goal state
if args.custom_env:
state_dim += 2 # reacher nonsense; goal = (x, y)
elif utils_object.fetch_reach:
state_dim += 3 # include fetchreach goal state (x,y,z position)
else:
state_dim *= 2
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": args.discount,
"tau": args.tau,
}
# Initialize policy
if args.policy == "TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
kwargs["prioritized_replay"] = args.prioritized_replay
kwargs["use_rank"] = args.use_rank
kwargs["use_hindsight"] = args.use_hindsight
policy = TD3.TD3(**kwargs)
elif args.policy == "OurDDPG":
policy = OurDDPG.DDPG(**kwargs)
elif args.policy == "DDPG":
policy = DDPG.DDPG(**kwargs)
exp_descriptors = [
args.policy, 'CustomReacher' if args.custom_env else args.env,
f"{'rank' if args.use_rank else 'proportional'}PER" if args.prioritized_replay else '',
'HER' if args.use_hindsight else '',
f"{args.decay_type}decay-eps{f'{eps_bounds[0]}-{eps_bounds[1]}' if eps_bounds[0] != eps_bounds[1] else f'{eps_bounds[0]}'}" if args.custom_env else "",
f"k{args.k}",
datetime.now().strftime('%Y%m%d%H%M')
]
if args.tune_run:
# fudgy: assumes tune_run for non-HER experiments
exp_descriptors = [
args.policy, 'CustomReacher' if args.custom_env else args.env,
f"{'rank' if args.use_rank else 'proportional'}PER" if args.prioritized_replay else '',
f"tau{args.tau}", f"discount{args.discount}",
f"alpha{args.alpha}" if args.prioritized_replay else '',
f"beta{args.beta}" if args.prioritized_replay else '',
f"k{args.k}",
datetime.now().strftime('%Y%m%d%H%M')
]
exp_descriptors = [x for x in exp_descriptors if len(x) > 0]
file_name = "_".join(exp_descriptors)
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load(f"./models/{policy_file}")
if args.prioritized_replay:
replay_buffer = utils.PrioritizedReplayBuffer(state_dim, action_dim,
args.max_timesteps, args.start_timesteps,
alpha=args.alpha, beta=args.beta)
else:
replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
# Evaluate untrained policy
evaluations = [eval_policy(policy, args.env, args.seed, utils_object=utils_object)]
state, done = env.reset(), False
original_episode_reward = 0
episode_reward = 0
episode_timesteps = 0
episode_num = 0
trajectory = []
for t in range(int(args.max_timesteps)):
episode_timesteps += 1
x, goal = utils_object.compute_x_goal(state, env)
# Select action randomly or according to policy
if t < args.start_timesteps:
action = env.action_space.sample()
else:
action = (
policy.select_action(np.array(x))
+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
if args.use_hindsight:
if utils_object.fetch_reach:
goal = state["desired_goal"]
next_x = np.concatenate([np.array(next_state["observation"]), goal])
else:
# env.set_goal(goal)
next_x = np.concatenate([np.array(next_state), goal])
elif utils_object.fetch_reach:
next_x = | np.array(next_state["observation"]) | numpy.array |
from typing import List
import numpy as np
from .simplex_method import SimplexMethod
from ._log_modes import FULL_LOG, MEDIUM_LOG, LOG_OFF
class CuttingPlaneMethod:
"""
In mathematical optimization, the cutting-plane method is any of a variety of optimization methods that
iteratively refine a feasible set or objective function by means of linear inequalities, termed cuts.
Such procedures are commonly used to find integer solutions to mixed integer linear programming (MILP) problems,
as well as to solve general, not necessarily differentiable convex optimization problems.
The use of cutting planes to solve MILP was introduced by <NAME>.
"""
    def __init__(self, func_vec: List[int or float] or np.ndarray,
                 conditions_matrix: List[List[int or float]] or np.ndarray,
                 constraints_vec: List[int or float] or np.ndarray,
                 var_tag: str = "x", func_tag: str = "F", log_mode: int = LOG_OFF):
        """
        Initialization of an object of the "Cutting Plane method" class.

        :param func_vec: Coefficients of the objective function (vector c).
        :param conditions_matrix: The left part of the restriction system (matrix A).
        :param constraints_vec: The right part of the restriction system (vector b).
        :param var_tag: The name of the variables, default is "x".
        :param func_tag: The name of the function, default is "F".
        :param log_mode: How much information about the solution to write to the console.
        """
        self.c_vec = np.array(func_vec)              # objective coefficients c
        self.a_matrix = np.array(conditions_matrix)  # constraint matrix A
        self.b_vec = np.array(constraints_vec)       # right-hand side b
        # Number of decision variables in the original (uncut) problem.
        self._num_of_vars = self.c_vec.shape[0]
        self.var_tag = var_tag
        self.func_tag = func_tag
        self.log_mode = log_mode
        # The simplex solver is created in solve(); NotImplemented marks "not built yet".
        self._simplex: SimplexMethod = NotImplemented
self._simplex: SimplexMethod = NotImplemented
def solve(self) -> (np.ndarray, np.float64):
"""
Solve the integer problem of linear programming by the Cutting Plane method.
:return: The solution and the value of the function.
"""
self._simplex = SimplexMethod(self.c_vec, self.a_matrix, self.b_vec, var_tag=self.var_tag,
func_tag=self.func_tag, log_mode=self.log_mode)
start_table = np.copy(self._simplex.table.table)
self._simplex.solve()
while self._check_solution():
table = self._simplex.table.table
fractional_parts = np.modf(table[:-1, 0])[0]
row_index = fractional_parts.argmax()
coefficients = table[row_index]
coefficients -= np.floor(coefficients)
coefficients[0] *= -1
new_equation = np.zeros(self.c_vec.shape[0] + 1)
for i_1, x_i in enumerate(self._simplex.table.column_indices):
if x_i >= self.c_vec.shape[0]:
for i_2, y_i in enumerate(np.arange(table.shape[0] - 1) + table.shape[1]):
if x_i == y_i:
for i_3 in range(new_equation.shape[0]):
new_equation[i_3] += coefficients[i_1] * start_table[i_2, i_3]
break
else:
for column_index in range(self.c_vec.shape[0]):
if column_index == x_i:
new_equation[column_index] += coefficients[i_1]
break
if self.log_mode in [FULL_LOG, MEDIUM_LOG]:
var_tags = [f"{self.var_tag}{i + 1}" for i in range(self._num_of_vars)]
equation = " + ".join([f"{c}*{v}" for c, v in zip(np.around(new_equation[1:], 3), var_tags)])
print(f"New equation: {equation} <= {new_equation[0]}")
self.a_matrix = np.vstack((self.a_matrix, new_equation[1:]))
self.b_vec = | np.append(self.b_vec, new_equation[0]) | numpy.append |
from functools import total_ordering
import pyarrow as pa
import numpy as np
from numba import jit, prange
from spatialpandas.geometry.base import Geometry, GeometryArray
from ._algorithms.bounds import (
total_bounds_interleaved, total_bounds_interleaved_1d, bounds_interleaved
)
def _validate_nested_arrow_type(nesting_levels, pyarrow_type):
    """
    Validate that a pyarrow type consists of exactly `nesting_levels` layers of
    pa.ListType wrapping a numeric element type, and return that element type.

    :param nesting_levels: Expected number of nested list layers.
    :param pyarrow_type: The pyarrow DataType to validate.
    :return: The innermost element type; pa.null() passes through unchanged.
    :raises ValueError: If the nesting depth is wrong or the element type is
        not numeric (signed int, unsigned int, or float).
    """
    if pyarrow_type == pa.null():
        return pa.null()

    # Peel off one ListType layer per expected nesting level.
    pyarrow_element_type = pyarrow_type
    for _ in range(nesting_levels):
        if not isinstance(pyarrow_element_type, pa.ListType):
            raise ValueError(
                "Expected input data to have {} nested layer(s)".format(
                    nesting_levels)
            )
        pyarrow_element_type = pyarrow_element_type.value_type

    # (removed a dead no-op self-assignment that was here)
    numpy_element_dtype = pyarrow_element_type.to_pandas_dtype()
    # numpy dtype kinds: 'i' signed int, 'u' unsigned int, 'f' float.
    if (numpy_element_dtype() is None
            or numpy_element_dtype().dtype.kind not in ('i', 'u', 'f')):
        raise ValueError(
            "Invalid nested element type {}, expected numeric type".format(
                pyarrow_element_type
            ))
    return pyarrow_element_type
class _ListArrayBufferMixin:
    """
    Mixin of buffer utilities for classes that store a pyarrow ListArray as their
    listarray property. The numpy data type of the inner ListArray elements must be
    stored as the numpy_dtype property
    """
    @property
    def buffer_values(self):
        # Leaf value buffer of the ListArray viewed as a flat numpy array.
        # NOTE: for a sliced array this still spans the full original buffer;
        # use flat_values for only the values owned by the current slice.
        value_buffer = self.listarray.buffers()[-1]
        if value_buffer is None:
            # Fully-null/empty array: no value buffer is allocated.
            return np.array([], dtype=self.numpy_dtype)
        else:
            return np.asarray(value_buffer).view(self.numpy_dtype)

    @property
    def buffer_offsets(self):
        """
        Tuple of offsets arrays, one for each nested level.
        """
        buffers = self.listarray.buffers()
        if len(buffers) < 2:
            # No offsets buffer present at all.
            return (np.array([0]),)
        elif len(buffers) < 3:
            # offset values that include everything
            return (np.array([0, len(self.listarray)]),)

        # Slice first offsets array to match any current extension array slice
        # All other buffers remain unchanged
        start = self.listarray.offset
        stop = start + len(self.listarray) + 1
        offsets1 = np.asarray(buffers[1]).view(np.uint32)[start:stop]

        # Deeper-level offsets buffers sit at the odd indices 3, 5, ... (the
        # even-indexed buffers in between are presumably validity bitmaps —
        # TODO confirm against the Arrow buffer layout) and are used in full.
        remaining_offsets = tuple(
            np.asarray(buffers[i]).view(np.uint32)
            for i in range(3, len(buffers) - 1, 2)
        )

        return (offsets1,) + remaining_offsets

    @property
    def flat_values(self):
        """
        Flat array of the valid values. This differs from buffer_values if the pyarrow
        ListArray backing this object is a slice. buffer_values will contain all
        values from the original (pre-sliced) object whereas flat_values will contain
        only the sliced values.
        """
        # Compute valid start/stop index into buffer values array.
        buffer_offsets = self.buffer_offsets
        start = buffer_offsets[0][0]
        stop = buffer_offsets[0][-1]
        # Follow the offset indirection through every nesting level.
        for offsets in buffer_offsets[1:]:
            start = offsets[start]
            stop = offsets[stop]

        return self.buffer_values[start:stop]

    @property
    def buffer_outer_offsets(self):
        """
        Array of the offsets into buffer_values that separate the outermost nested
        structure of geometry object(s), regardless of the number of nesting levels.
        """
        buffer_offsets = self.buffer_offsets
        flat_offsets = buffer_offsets[0]
        # Compose the per-level offset indirections so each outer boundary
        # points directly into the flat value buffer.
        for offsets in buffer_offsets[1:]:
            flat_offsets = offsets[flat_offsets]

        return flat_offsets

    @property
    def buffer_inner_offsets(self):
        """
        Array of the offsets into buffer_values that separate the innermost nested
        structure of geometry object(s), regardless of the number of nesting levels.
        """
        buffer_offsets = self.buffer_offsets
        start = buffer_offsets[0][0]
        stop = buffer_offsets[0][-1]
        # Resolve all levels except the innermost, then slice the innermost
        # offsets array (stop + 1 so the closing boundary is included).
        for offsets in buffer_offsets[1:-1]:
            start = offsets[start]
            stop = offsets[stop]

        return buffer_offsets[-1][start:stop + 1]
@total_ordering
class GeometryList(Geometry, _ListArrayBufferMixin):
"""
Base class for elements of GeometryListArray subclasses
"""
_nesting_levels = 0
    @staticmethod
    def _pa_element_value_type(data):
        """
        Get value type of pyarrow ListArray element for different versions of pyarrow.

        :param data: pyarrow list-typed element backing this geometry.
        :return: the pyarrow DataType of the element's values.
        """
        try:
            # Try pyarrow 1.0 API
            return data.values.type
        except AttributeError:
            # Try pre 1.0 API
            return data.value_type
    @staticmethod
    def _pa_element_values(data):
        """
        Get values of nested pyarrow ListArray element for different versions of pyarrow.

        :param data: pyarrow list-typed element backing this geometry.
        :return: a pyarrow Array of the element's values.
        """
        try:
            # Try pyarrow 1.0 API
            return data.values
        except AttributeError:
            # Try pre 1.0 API: rebuild the values array from the Python repr.
            return pa.array(data.as_py(), data.value_type)
    def __init__(self, data, dtype=None):
        """
        Construct a geometry element, validating the pyarrow nesting structure
        of non-empty data against the subclass's `_nesting_levels`.

        :param data: nested list data accepted by the Geometry base class.
        :param dtype: accepted for extension-API compatibility.
            # NOTE(review): dtype is never read here — confirm it is intentionally ignored.
        """
        super().__init__(data)
        if len(self.data) > 0:
            value_type = GeometryList._pa_element_value_type(self.data)
            _validate_nested_arrow_type(self._nesting_levels, value_type)
        # create listarray for _ListArrayBufferMixin
        self.listarray = GeometryList._pa_element_values(self.data)
    def __lt__(self, other):
        """Lexicographic ordering on the flat coordinate buffers; the
        @total_ordering decorator derives the remaining comparisons from this."""
        if type(other) is not type(self):
            # Only instances of the exact same geometry subclass are comparable.
            return NotImplemented
        return _lexographic_lt(np.asarray(self.listarray), np.asarray(other.listarray))
# -*- coding: utf-8 -*-
import os
import tempfile
import zipfile
from mantarray_magnet_finding.utils import calculate_magnetic_flux_density_from_memsic
from mantarray_magnet_finding.utils import load_h5_folder_as_array
import numpy as np
from pulse3D import magnet_finding
from pulse3D import plate_recording
from pulse3D.constants import BASELINE_MEAN_NUM_DATA_POINTS
from pulse3D.magnet_finding import fix_dropped_samples
from pulse3D.magnet_finding import format_well_file_data
from pulse3D.plate_recording import load_files
from pulse3D.plate_recording import PlateRecording
import pytest
from stdlib_utils import get_current_file_abs_directory
PATH_OF_CURRENT_FILE = get_current_file_abs_directory()
def test_load_files__loads_zipped_folder_with_calibration_recordings_correctly():
    """load_files should find 24 tissue and 24 baseline recordings when the
    archive contains the recording directory zipped as a nested folder."""
    path = os.path.join(
        PATH_OF_CURRENT_FILE,
        "magnet_finding",
        "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        zf = zipfile.ZipFile(path)
        zf.extractall(path=tmpdir)
        tissue_recordings, baseline_recordings = load_files(tmpdir)
        # one tissue and one baseline recording per well (24 wells)
        assert len(tissue_recordings) == 24
        assert len(baseline_recordings) == 24
def test_load_files__loads_zipped_files_with_calibration_recordings_correctly():
    """Same as the zipped-folder test, but the h5 files are zipped directly
    at the archive root instead of inside a folder."""
    path = os.path.join(
        PATH_OF_CURRENT_FILE,
        "magnet_finding",
        "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_files.zip",
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        zf = zipfile.ZipFile(path)
        zf.extractall(path=tmpdir)
        tissue_recordings, baseline_recordings = load_files(tmpdir)
        # one tissue and one baseline recording per well (24 wells)
        assert len(tissue_recordings) == 24
        assert len(baseline_recordings) == 24
def test_PlateRecording__uses_mean_of_baseline_by_default(mocker):
    """PlateRecording should invoke _process_plate_data with
    use_mean_of_baseline=True when the caller does not override it."""
    # mock instead of spy so magnet finding alg doesn't run
    mocked_process_data = mocker.patch.object(PlateRecording, "_process_plate_data", autospec=True)

    pr = PlateRecording(
        os.path.join(
            PATH_OF_CURRENT_FILE,
            "magnet_finding",
            "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
        )
    )
    mocked_process_data.assert_called_once_with(pr, mocker.ANY, use_mean_of_baseline=True)
def test_PlateRecording__creates_mean_of_baseline_data_correctly(mocker):
    """The baseline array passed to find_magnet_positions must be the mean of
    the final BASELINE_MEAN_NUM_DATA_POINTS samples of each (well, sensor,
    axis) baseline trace, shaped (24, 3, 3, 1)."""
    # spy for easy access to baseline data array
    spied_mfd_from_memsic = mocker.spy(plate_recording, "calculate_magnetic_flux_density_from_memsic")
    # mock instead of spy so magnet finding alg doesn't run
    mocked_find_positions = mocker.patch.object(
        plate_recording,
        "find_magnet_positions",
        autospec=True,
        side_effect=lambda x, y: {"X": np.zeros((x.shape[-1], 24))},
    )

    PlateRecording(
        os.path.join(
            PATH_OF_CURRENT_FILE,
            "magnet_finding",
            "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
        )
    )

    raw_baseline_data = spied_mfd_from_memsic.spy_return
    # second positional arg of find_magnet_positions is the baseline mean
    actual_baseline_mean_arr = mocked_find_positions.call_args[0][1]
    assert actual_baseline_mean_arr.shape == (24, 3, 3, 1)
    for well_idx in range(actual_baseline_mean_arr.shape[0]):
        for sensor_idx in range(actual_baseline_mean_arr.shape[1]):
            for axis_idx in range(actual_baseline_mean_arr.shape[2]):
                # mean over the trailing window of the raw baseline signal
                expected_mean = np.mean(
                    raw_baseline_data[well_idx, sensor_idx, axis_idx, -BASELINE_MEAN_NUM_DATA_POINTS:]
                )
                assert actual_baseline_mean_arr[well_idx, sensor_idx, axis_idx] == expected_mean, (
                    well_idx,
                    sensor_idx,
                    axis_idx,
                )
def test_PlateRecording__wrties_time_force_csv_with_no_errors(mocker):
    """write_time_force_csv should emit one CSV per recording without raising,
    and the h5 recording's DataFrame should have 7975 rows x 25 columns.

    NOTE(review): "wrties" in the test name is a typo for "writes"; left
    unchanged so selecting the test by name keeps working.
    """
    # mock instead of spy so magnet finding alg doesn't run
    mocker.patch.object(
        plate_recording,
        "find_magnet_positions",
        autospec=True,
        side_effect=lambda x, y: {"X": np.zeros((x.shape[-1], 24))},
    )
    zip_pr = PlateRecording(
        os.path.join(
            PATH_OF_CURRENT_FILE,
            "magnet_finding",
            "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
        )
    )
    h5_pr = PlateRecording.from_directory(
        os.path.join(
            PATH_OF_CURRENT_FILE,
            "h5",
            "v0.3.2",
        )
    )
    # raw_baseline_data = spied_mfd_from_memsic.spy_return
    with tempfile.TemporaryDirectory() as output_dir:
        zip_pr.write_time_force_csv(output_dir)
        for pr in h5_pr:
            df, _ = pr.write_time_force_csv(output_dir)

        assert len(df.index) == 7975
        assert len(df.columns) == 25
        # one output CSV per input recording, named after the recording
        assert (
            "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.csv"
            in os.listdir(output_dir)
        )
        assert "MA20223322__2020_09_02_173919.csv" in os.listdir(output_dir)
def test_PlateRecording__removes_dropped_samples_from_raw_tissue_signal_before_converting_to_mfd(mocker):
    """fix_dropped_samples must receive the formatted raw well data, and its
    output must be what the first MFD conversion consumes."""
    spied_fix = mocker.spy(plate_recording, "fix_dropped_samples")
    spied_mfd = mocker.spy(plate_recording, "calculate_magnetic_flux_density_from_memsic")
    # mock instead of spy so magnet finding alg doesn't run
    mocker.patch.object(
        plate_recording,
        "find_magnet_positions",
        autospec=True,
        side_effect=lambda x, y: {"X": np.zeros((x.shape[-1], 24))},
    )

    pr = PlateRecording(
        os.path.join(
            PATH_OF_CURRENT_FILE,
            "magnet_finding",
            "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
        )
    )

    actual_plate_data = spied_fix.call_args[0][0]
    expected_plate_data = format_well_file_data(pr.wells)
    np.testing.assert_array_equal(actual_plate_data, expected_plate_data)
    # first MFD call (tissue data) must consume fix_dropped_samples' output
    np.testing.assert_array_equal(spied_mfd.call_args_list[0][0][0], spied_fix.spy_return)
def test_PlateRecording__passes_data_to_magnet_finding_alg_correctly__using_mean_of_baseline_data(
    mocker,
):
    """The array handed to get_positions must equal the tissue MFD minus the
    per-channel mean of the trailing window of the baseline MFD."""
    # mock so slow function doesn't actually run
    mocked_get_positions = mocker.patch.object(
        magnet_finding,
        "get_positions",
        autospec=True,
        side_effect=lambda x: {"X": np.zeros((x.shape[-1], 24))},
    )

    test_zip_file_path = os.path.join(
        PATH_OF_CURRENT_FILE,
        "magnet_finding",
        "MA200440001__2020_02_09_190359__with_calibration_recordings__zipped_as_folder.zip",
    )

    # create expected input
    with tempfile.TemporaryDirectory() as tmpdir:
        zf = zipfile.ZipFile(test_zip_file_path)
        zf.extractall(path=tmpdir)
        tissue_data_memsic, baseline_data_memsic = load_h5_folder_as_array(
            os.path.join(tmpdir, "MA200440001__2020_02_09_190359")
        )
        tissue_data_mt = calculate_magnetic_flux_density_from_memsic(tissue_data_memsic)
        baseline_data_mt = calculate_magnetic_flux_density_from_memsic(baseline_data_memsic)
        # mean over the trailing window, reshaped to broadcast over time
        baseline_data_mt_mean = np.mean(
            baseline_data_mt[:, :, :, -BASELINE_MEAN_NUM_DATA_POINTS:], axis=3
        ).reshape((24, 3, 3, 1))
        expected_input_data = tissue_data_mt - baseline_data_mt_mean

        # test alg input
        PlateRecording(test_zip_file_path)
        mocked_get_positions.assert_called_once()
        np.testing.assert_array_almost_equal(mocked_get_positions.call_args[0][0], expected_input_data)
@pytest.mark.parametrize(
"test_array,expected_array",
[
(np.array([0, 1, 2, 0, 4, 5, 0]), | np.array([1, 1, 2, 3, 4, 5, 5]) | numpy.array |
# RCS14_entrainment_naive.py
# Generate timeseries analysis and power estimate
# Author: maria.olaru@
"""
Created on Mon May 3 18:22:44 2021
@author: mariaolaru
"""
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.dates as md
import scipy.signal as signal
import pandas as pd
import math
import os
from preprocess_script import *
def subset_md(md, ts_range):
    """
    Subset meta-data around a settings-change timestamp, annotating each row
    with the stimulation amplitude in effect at that time.

    NOTE(review): the body references `msc`, `i_int`, and `padding`, none of
    which are parameters of this function, and the `ts_range` parameter is
    never used. As written, a call raises NameError unless those names exist
    as globals. The intended signature appears to be
    (md, msc, i_int, padding) — confirm with callers before changing.

    Parameters (as apparently intended)
    -----------------------------------
    md : meta data output from preprocess_data()
    msc : meta settings data
    i_int : row index of interest in msc
    padding : 2-integer vector of +/- seconds to pad around the timestamp

    Returns
    -------
    a subset of the meta data with preferred times
    """
    ts_int = msc['timestamp_unix'].iloc[i_int]
    # Pad the change point by `padding` seconds on each side; timestamps are
    # UNIX milliseconds, hence the * 1000.
    ts_start = ts_int - padding[0] * 1000
    ts_stop = ts_int + padding[1] * 1000

    # Positional indices of the md rows nearest the padded start/stop times.
    ts_starta = md['timestamp_unix'].sub(ts_start).abs().idxmin()
    ts_stopa = md['timestamp_unix'].sub(ts_stop).abs().idxmin()

    mds = md.iloc[ts_starta:ts_stopa, :]
    mds = mds.reset_index(drop=True)

    # Amplitude before vs. after the settings change at ts_int.
    amp1 = msc['amplitude_ma'].iloc[i_int-1]
    amp2 = msc['amplitude_ma'].iloc[i_int]
    mds = mds.assign(amp=np.where(mds['timestamp_unix'] < ts_int, amp1, amp2))

    # Human-readable datetime column alongside the raw UNIX column.
    ts_dt = convert_unix2dt(mds['timestamp_unix'])
    mds.insert(1, 'timestamp', ts_dt)
    return mds
def melt_mds(mds, step_size, fs):
"""
Parameters
----------
df : wide-form meta-data as pandas object
step_size : increment in seconds with which to group data
fs : sample rate
Returns
-------
long-form of meta-data
"""
step_rep = step_size * fs
num_steps = round(len(mds)/step_rep) #Note: Can be buggy if user does not enter in reasonable times and step-size combos
steps = | np.arange(0, num_steps, 1) | numpy.arange |
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.