from astropy.io import fits
import pandas as pd
import numpy as np
from lightkurve import KeplerLightCurve
def find_flares(flux, error, N1=4, N2=4, N3=3):
'''
The algorithm for local changes due to flares defined by
<NAME> et al. (2015), Eqn. 3a-d
http://arxiv.org/abs/1510.01005
Note: these equations were originally in magnitude units, i.e. smaller
values are increases in brightness. The signs have been changed, but
coefficients have not been adjusted to change from log(flux) to flux.
Parameters:
----------
flux : numpy array
data to search over
error : numpy array
errors corresponding to data.
N1 : int, optional
Coefficient from original paper (Default is 3 in paper, 4 here)
How many times above the stddev is required.
N2 : int, optional
Coefficient from original paper (Default is 1 in paper, 4 here)
How many times above the stddev and uncertainty is required
N3 : int, optional
Coefficient from original paper (Default is 3)
The number of consecutive points required to flag as a flare
Return:
------------
isflare : numpy array of int
    data points are flagged with 1 if they belong to a flare candidate, 0 otherwise
'''
median = np.nanmedian(flux)
sigma = np.nanstd(flux)
T0 = flux - median # excursion should be positive #"N0"
T1 = np.abs(flux - median) / sigma #N1
T2 = np.abs(flux - median - error) / sigma #N2
# apply thresholds N0-N2:
pass_thresholds = np.where((T0 > 0) & (T1 > N1) & (T2 > N2))
#array of indices where thresholds are exceeded:
is_pass_thresholds = np.zeros_like(flux)
is_pass_thresholds[pass_thresholds] = 1
# Need to find cumulative number of points that pass_thresholds
# Counted in reverse!
# Examples reverse_counts = [0 0 0 3 2 1 0 0 1 0 4 3 2 1 0 0 0 1 0 2 1 0]
# isflare = [0 0 0 1 1 1 0 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0]
reverse_counts = np.zeros_like(flux, dtype='int')
for k in range(2, len(flux)):
reverse_counts[-k] = (is_pass_thresholds[-k]
* (reverse_counts[-(k-1)]
+ is_pass_thresholds[-k]))
# find flare start where values in reverse_counts switch from 0 to >=N3
istart_i = np.where((reverse_counts[1:] >= N3) &
(reverse_counts[:-1] - reverse_counts[1:] < 0))[0] + 1
# use the value of reverse_counts to determine how many points away stop is
istop_i = istart_i + (reverse_counts[istart_i])
isflare = np.zeros_like(flux, dtype='int')
for (l,r) in list(zip(istart_i,istop_i)):
isflare[l:r+1] = 1
return isflare
def wrapper(lc, minsep=3):
'''
Main wrapper to obtain and process a light curve.
Parameters:
-------------
lc : light curve
FlareLightCurve object
minsep : int, optional (default 3)
minimum distance between two candidate start times in datapoints
Return:
----------
numpy arrays of start and stop cadence numbers of flare candidates
'''
#find continuous observing periods
lc.find_gaps()
istart = np.array([], dtype='int')
    istop = np.array([], dtype='int')
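# A minimal usage sketch for `find_flares` (illustrative, not from the
# original source): inject a fast-rise, exponentially decaying flare into a
# flat, noisy light curve and recover its cadences. All values are arbitrary.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    flux = 1.0 + rng.normal(0.0, 1e-3, 500)               # flat light curve + noise
    error = np.full(500, 1e-3)                            # per-point uncertainty
    flux[200:210] += 0.03 * np.exp(-np.arange(10) / 2.0)  # injected flare
    print("flagged cadences:", np.where(find_flares(flux, error) == 1)[0])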
"""
Alternative method for estimating the canopy inertial properties. Very ugly and
hacky, but still useful for checking the much improved mesh-based method.
"""
import numpy as np
from scipy.integrate import simps
import pfh.glidersim as gsim
# Yanked from from `AirfoilGeometry`
def airfoil_mass_properties(airfoil, r_upper=0, r_lower=0, N=200):
"""
Calculate the inertial properties for the curves and planar area.
These unitless magnitudes, centroids, and inertia matrices can be
scaled by the physical units of the target application in order to
calculate the upper and lower surface areas, internal volume, and
inertia matrix of a 3D wing.
This procedure treats the 2D geometry as perfectly flat 3D objects,
with a new `z` axis added according to the right-hand rule. See
"Notes" for more details.
Parameters
----------
r_upper, r_lower : float
The starting coordinates of the upper and lower surfaces. Requires
        that `-1 <= r_lower <= r_upper <= 1`.
N : integer
The number of chordwise sample points. Used to create the vertical
strips for calculating the area, and for creating line segments of
the parametric curves for the upper and lower surfaces.
Returns
-------
dictionary
upper_length : float
The total length of the upper surface curve
upper_centroid : array of float, shape (2,)
The centroid of the upper surface curve as (x, y) in acs
upper_inertia : array of float, shape (3,3)
The inertia matrix of the upper surface curve
area : float
The area of the airfoil
area_centroid : array of float, shape (2,)
The centroid of the area as (x, y) in acs
area_inertia : array of float, shape (3,3)
The inertia matrix of the area
lower_length : float
The total length of the lower surface curve
lower_centroid : array of float, shape (2,)
The centroid of the lower surface curve as (x, y) in acs
lower_inertia : array of float, shape (3,3)
The inertia matrix of the lower surface curve
These are unitless quantities. The inertia matrices for each
    component are for rotations about that component's centroid.
Notes
-----
In traditional airfoil definitions, the positive x-axis lies along the
chord, directed from the leading edge to the trailing edge, and the
positive y-axis points towards the upper surface.
Here, a z-axis that satisfies the right hand rule is added for the
purpose of creating a well-defined inertia matrix. Let this set of axes
be called the "airfoil coordinate system" (acs).
Translating these acs coordinates into the front-right-down (frd)
coordinate system requires reordering and reversing the direction of
vector components. To convert acs -> frd: [x, y, z] -> [-x, -z, -y]
In terms of code, to convert from acs to frd coordinates:
>>> C = np.array([[-1, 0, 0], [0, 0, -1], [0, -1, 0]])
>>> centroid_frd = C @ [*centroid_acs, 0] # Augment with z_acs=0
>>> inertia_frd = C @ inertia_acs @ C
"""
if r_lower < -1:
raise ValueError("Required: r_lower >= -1")
if r_lower > r_upper:
raise ValueError("Required: r_lower <= r_upper")
if r_upper > 1:
raise ValueError("Required: r_upper <= 1")
# -------------------------------------------------------------------
# 1. Area calculations
r = (1 - np.cos(np.linspace(0, np.pi, N))) / 2 # `0 <= r <= 1`
top = airfoil.profile_curve(r).T # Top half (above r = 0)
bottom = airfoil.profile_curve(-r).T # Bottom half (below r = 0)
Tx, Ty = top[0], top[1]
Bx, By = bottom[0], bottom[1]
area = simps(Ty, Tx) - simps(By, Bx)
xbar = (simps(Tx * Ty, Tx) - simps(Bx * By, Bx)) / area
ybar = (simps(Ty**2 / 2, Tx) + simps(By**2 / 2, Bx)) / area
area_centroid = np.array([xbar, ybar])
# Area moments of inertia about the origin
# FIXME: verify, especially `Ixy_o`. Check airfoils where some `By > 0`
Ixx_o = 1 / 3 * (simps(Ty**3, Tx) - simps(By**3, Bx))
Iyy_o = simps(Tx**2 * Ty, Tx) - simps(Bx**2 * By, Bx)
Ixy_o = 1 / 2 * (simps(Tx * Ty**2, Tx) - simps(Bx * By**2, Bx))
# Use the parallel axis theorem to find the inertias about the centroid
Ixx = Ixx_o - area * ybar**2
Iyy = Iyy_o - area * xbar**2
Ixy = Ixy_o - area * xbar * ybar
Izz = Ixx + Iyy # Perpendicular axis theorem
# Inertia matrix for the area about the origin
# fmt: off
area_inertia = np.array([
[ Ixx, -Ixy, 0], # noqa: E201, E241
[-Ixy, Iyy, 0], # noqa: E201, E241
[ 0, 0, Izz], # noqa: E201, E241
])
# fmt: on
# -------------------------------------------------------------------
# 2. Surface line calculations
su = np.linspace(r_upper, 1, N)
sl = np.linspace(r_lower, -1, N)
upper = airfoil.profile_curve(su).T
lower = airfoil.profile_curve(sl).T
# Line segment lengths and midpoints
norm_U = np.linalg.norm(np.diff(upper), axis=0) # Segment lengths
norm_L = np.linalg.norm(np.diff(lower), axis=0)
mid_U = (upper[:, :-1] + upper[:, 1:]) / 2 # Segment midpoints
mid_L = (lower[:, :-1] + lower[:, 1:]) / 2
# Total line lengths and centroids
upper_length = norm_U.sum()
lower_length = norm_L.sum()
upper_centroid = np.einsum("ij,j->i", mid_U, norm_U) / upper_length
lower_centroid = np.einsum("ij,j->i", mid_L, norm_L) / lower_length
# Surface line moments of inertia about their centroids
# FIXME: not proper line integrals: treats segments as point masses
cmUx, cmUy = upper_centroid
mid_Ux, mid_Uy = mid_U[0], mid_U[1]
Ixx_U = np.sum(mid_Uy**2 * norm_U) - upper_length * cmUy**2
Iyy_U = np.sum(mid_Ux**2 * norm_U) - upper_length * cmUx**2
Ixy_U = np.sum(mid_Ux * mid_Uy * norm_U) - upper_length * cmUx * cmUy
Izz_U = Ixx_U + Iyy_U
cmLx, cmLy = lower_centroid
mid_Lx, mid_Ly = mid_L[0], mid_L[1]
Ixx_L = np.sum(mid_Ly**2 * norm_L) - lower_length * cmLy**2
    Iyy_L = np.sum(mid_Lx**2 * norm_L) - lower_length * cmLx**2
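    # Dimensional-scaling note (an illustrative assumption, not from the
    # original docstring): with a physical chord length c, the unitless
    # results above scale as
    #   area          -> area * c**2
    #   area_centroid -> area_centroid * c
    #   area_inertia  -> area_inertia * c**4   (times surface density)
    #   upper_length  -> upper_length * c
    #   Ixx_U, etc.   -> Ixx_U * c**3          (times line density)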
import numpy as np
from rnmu.pme.proj_geom_utils import keep_finite, remove_repeated, normalize_2d
class Fundamental(object):
def __init__(self, data=None, weights=None):
self.F = None
if data is not None:
self.fit(data, weights=weights)
@property
def min_sample_size(self):
return 8
def fit(self, data, weights=None):
mask1 = keep_finite(data)
mask2 = remove_repeated(data)
mask = np.logical_and(mask1, mask2)
data = data[mask, :]
if weights is not None:
weights = weights[mask]
if len(data) < self.min_sample_size:
self.F = None
return
if (weights is not None and
np.count_nonzero(weights) < self.min_sample_size):
self.F = None
return
if data.shape[1] != 6:
raise ValueError('Points must be 6D (2 x 3D)')
pts1, trans1 = normalize_2d(data[:, :3], weights=weights)
pts2, trans2 = normalize_2d(data[:, 3:], weights=weights)
mat = np.zeros((9, 9))
for i in range(np.count_nonzero(mask)):
row = np.hstack([pts1[i, :] * pts2[i, 0],
pts1[i, :] * pts2[i, 1],
pts1[i, :]])
if weights is not None:
row *= weights[i]
mat += np.outer(row, row)
try:
_, v = np.linalg.eigh(mat)
u, s, vt = np.linalg.svd(v[:, 0].reshape((3, 3)))
s[2] = 0
self.F = u.dot(np.diag(s)).dot(vt)
self.F = trans1.dot(self.F.T.dot(trans2.T))
self.F /= self.F[2, 2]
except np.linalg.LinAlgError:
self.F = None
def distances(self, data):
if self.F is None:
return np.ones((len(data),)) * np.inf
pts1 = data[:, :3]
pts2 = data[:, 3:]
epi_lines1 = np.dot(pts1, self.F)
        epi_lines1 /= np.linalg.norm(epi_lines1[:, :2], axis=1)[:, np.newaxis]
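# A self-contained sketch of the symmetric point-to-epipolar-line distance
# that `distances` computes (illustrative; assumes the convention
# x2' F x1 = 0 for homogeneous points stored as rows, which must match how
# F was estimated):
def epipolar_distances(F, pts1, pts2):
    lines2 = pts1.dot(F.T)  # epipolar lines in image 2
    lines1 = pts2.dot(F)    # epipolar lines in image 1
    # point-to-line distance |ax + by + c| / sqrt(a**2 + b**2), per row
    d2 = np.abs(np.sum(lines2 * pts2, axis=1)) / np.linalg.norm(lines2[:, :2], axis=1)
    d1 = np.abs(np.sum(lines1 * pts1, axis=1)) / np.linalg.norm(lines1[:, :2], axis=1)
    return (d1 + d2) / 2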
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# sunpos.py is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors/Copyright(2012-2018):
# -<NAME> (<EMAIL>)
# -<NAME> (<EMAIL>)
# -<NAME> (<EMAIL>)
'''
sunpos.py is a library for calculation of the sun position, based on
an algorithm from the Astronomical Almanac. This algorithm was
compared in Michalsky (1988a,1988b) with other popular approximate
formulae and was found to be the most accurate. It is thus recommended
by the WMO Guide to Meteorological Instruments and Methods of
Observations for practical application.
References
----------
.. [1] United States Naval Observatory, 1993: The Astronomical Almanac,
Nautical Almanac Office, Washington DC.
.. [2] <NAME>., 1988a: The Astronomical Almanac’s algorithm for
approximate solar position (1950–2050).
.. [3] <NAME>., 1988b: Errata. The astronomical almanac’s algorithm
for approximate solar position (1950–2050).
.. [4] World Meteorological Organization, 2014: Guide to Meteorological
Instruments and Methods of Observation. Geneva, Switzerland, World
Meteorological Organization, 1128p. (WMO-No.8, 2014).
'''
import numpy as np
from numpy import sin, cos
from numpy import arcsin as asin, arccos as acos, arctan2 as atan2
from numpy import deg2rad, rad2deg
import datetime, pytz
#: Default epoch used for sun position calculation
EPOCH_J2000_0 = np.datetime64("2000-01-01T12:00:00")
#: UNIX standard epoch
EPOCH_UNIX = np.datetime64("1970-01-01T00:00:00")
def sincos(x):
'''
Evaluate sin/cos simultaneously.
Parameters
----------
x: float or ndarray of float
Angle in radians.
Returns
-------
sin : float or ndarray of float
the sine of x
cos : float or ndarray of float
the cosine of x
'''
return sin(x),cos(x)
def datetime2julday(dt, epoch=EPOCH_J2000_0):
'''
Convert datetime to Julian day number.
    Parameters
    ----------
    dt: datetime64 or ndarray of datetime64
        The datetime(s) to convert.
    epoch: datetime64
        The epoch used as reference.
    Returns
    -------
    jd: double or ndarray of double
        The Julian day, relative to the epoch.
    '''
jd = (dt-epoch)/np.timedelta64(1,"D")
return jd
def julday2datetime(jd,epoch=EPOCH_J2000_0):
'''
Convert Julian day to datetime.
Parameters
----------
jd: double or ndarray of double:
The Julian day, relative to the epoch
epoch: datetime64
The epoch used as reference
Returns
-------
dt: datetime64 or ndarray of datetime64
The datetime64
'''
return epoch + np.timedelta64(1,'D')*jd
def julday2gmst(jd):
'''
Convert Julian day to Greenwich mean sideral time.
Parameters
----------
jd: double or ndarray of double
Returns
-------
gmst : double or ndarray of double
The Greenwich mean sideral time [in hours].
Examples
--------
    >>> import numpy as np, sunpos
>>> dt = np.datetime64('2012-07-01T12:00')
>>> jd = sunpos.datetime2julday(dt)
>>> gmst = sunpos.julday2gmst(jd)
'''
hh = np.remainder(jd-0.5,1.0)*24.0
gmst = 6.697375 + 0.0657098242 *jd + hh
return np.remainder(gmst,24.0)
def mean_longitude(jd):
'''
Mean solar longitude.
Parameters
----------
    jd : double or ndarray
        The Julian day.
Returns
-------
mnlon: double or ndarray
The mean solar longitude (in degrees).
'''
    mnlon = 280.460 + 0.9856474*jd
return np.remainder(mnlon,360.0)
def mean_anomaly(jd):
'''
Mean solar anomaly.
Args:
jd(double): the Julian day.
Returns:
double: the mean solar anomaly [in degrees].
'''
mnanom = 357.528 + 0.9856003*jd
return np.remainder(mnanom,360.0)
def ecliptic_longitude(jd):
'''
Calculate the ecliptic longitude of the sun.
Parameters
----------
jd : double or ndarray
The Julian day.
Returns
------
eclon : double or ndarray
The ecliptic longitude [in degrees].
'''
eclon = 280.460 + 0.9856474*jd
mnanom = 357.528 + 0.9856003*jd
mnanom = np.deg2rad(mnanom)
eclon += 1.915*sin(mnanom)+ 0.020*sin(2.0*mnanom)
return np.remainder(eclon,360.0)
def oblq_ecliptic(jd):
'''
Get obliquity of ecliptic.
Parameters
----------
jd : double or ndarray
The Julian day.
Returns
-------
ep: double or ndarray
The obliquity of ecliptic [in degrees].
'''
    oblqec = 23.439 - 0.0000004*jd
return oblqec
def celestial_coords(jd):
'''
Get celestial coordinates of sun.
Parameters
----------
jd : double or ndarray
The Julian day.
Returns
-------
(dec,ra): tuple of doubles or ndarrays
The declination/right ascension of the sun [in radians].
'''
# get ecliptic longitude and obliquity of ecliptic
# and convert to radians
eclon = deg2rad(ecliptic_longitude(jd))
oblqec = deg2rad(oblq_ecliptic(jd))
# get trig. functions
(sin_eclon,cos_eclon) = sincos(eclon)
(sin_oblqec,cos_oblqec) = sincos(oblqec)
# Calculate declination
dec = asin(sin_oblqec*sin_eclon)
# Calculate right ascension
num = cos_oblqec*sin_eclon
den = cos_eclon
ra = atan2(num,den)
ra = np.remainder(ra,2.0*np.pi)
return (dec,ra)
def zenith_azimuth(jd, lat, lon):
'''
Get solar zenith/azimuth angle for a specific geographic location.
Parameters
----------
jd: double or array_like
The Julian day
lat: double or array_like
The latitude [in degrees]
lon: double or array_like
The longitude [in degrees]
Returns
-------
tuple of zenith/azimuth angle [in degrees].
'''
# Get celestial coordinates and
# Greenwich mean sideral time
(dec,ra) = celestial_coords(jd)
(sin_dec,cos_dec) = sincos(dec)
gmst = julday2gmst(jd)
# Calculate Greenwich hour angle, trig. funcs.
gha = deg2rad((gmst*15.0))-ra
(sin_gha,cos_gha) = sincos(gha)
# Calc. trig functions of lat/lon
(sin_lat,cos_lat) = sincos(deg2rad(lat))
(sin_lon,cos_lon) = sincos(deg2rad(lon))
# Calculate trig. functions of hour angle
sin_ha = sin_gha*cos_lon + cos_gha*sin_lon
cos_ha = cos_gha*cos_lon - sin_gha*sin_lon
# Calculate cos(sun zenith)
mu0 = sin_dec*sin_lat + cos_dec*cos_lat*cos_ha
# Calculate azimuth
#azi = asin(-cos_dec*sin_ha/np.sqrt(1.0-mu0**2))
#if np.isscalar(azi):
# if sin_dec > mu0*sin_lat:
# if azi<0.0:
# azi += 2.0*pi
# else:
# azi = pi-azi
#else:
# i = sin_dec<=mu0*sin_lat
# azi[i]=pi-azi[i]
# i = np.logical_and(azi<0.0,np.logical_not(i))
# azi[i]+=2.0*pi
sin_azi = -cos_dec*sin_ha ## skip divide by cos_el
cos_azi = (sin_dec-mu0*sin_lat)/cos_lat
azi = atan2(sin_azi,cos_azi)
azi = np.remainder(azi,2.0*np.pi)
return (np.rad2deg(acos(mu0)),np.rad2deg(azi))
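# Example usage (illustrative; coordinates are Berlin, and the result carries
# the roughly 0.01 degree accuracy of this approximate algorithm):
#
# >>> jd = datetime2julday(np.datetime64('2018-06-21T12:00'))
# >>> zenith, azimuth = zenith_azimuth(jd, 52.52, 13.41)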
def earth_sun_distance(jd):
'''
Calculate the sun-earth distance
Args:
jd(double): the Julian day.
Returns:
        double: the Earth-sun distance [in AU].
'''
g = deg2rad(mean_anomaly(jd))
esd = 1.00014-0.01671*cos(g)+0.00014*cos(2.0*g)
return esd
def hour_angle(jd,lon):
'''
Calculate the sun hour angle.
    Args:
        jd(double): the Julian day.
        lon(double): the longitude of the location [in degrees].
    Returns:
        double: the hour angle [in degrees].
'''
gmst = julday2gmst(jd)
(dec,ra) = celestial_coords(jd)
ha = np.remainder(gmst*15.0+lon-rad2deg(ra),360.0)-180.0
return ha
def noon(jd,lon):
'''
Calculate the time of local noon.
Args:
jd(double): date given as Julian day.
lon(double): the longitude of the location [in degrees].
Returns:
double: the date and time of local noon.
'''
# convergence limit, gives msec resolution
eps = 1.0e-8
# first guess based on longitude
noon = np.double(np.round(jd))
noon = noon-lon/360.0
# iterate noon till convergence
for i in np.arange(0,10):
prev = noon
ha = hour_angle(noon,lon)
noon = noon-ha/360.0
if np.fabs(prev-noon)<eps: break
return noon
def sunrise(jd,lat,lon,mu0=np.cos((90.0+34.0/60.0)*np.pi/180.0)):
'''
Calculate the time of sunrise.
Args:
jd(double): date given as Julian day.
lat(double): the latitude of the location [in degrees].
lon(double): the longitude of the location [in degrees].
Returns:
double: the date and time of sunrise.
'''
# get noon time
jd_noon = noon(jd,lon)
# get min/max mu0
(sin_lat,cos_lat) = sincos(np.pi/180.0*lat)
(dec,ra_noon) = celestial_coords(jd_noon)
(sin_dec,cos_dec) = sincos(dec)
# Check if we do have a sunset ...
mu0_min = sin_dec*sin_lat-cos_dec*cos_lat
mu0_max = sin_dec*sin_lat+cos_dec*cos_lat
if mu0_max<mu0 or mu0_min>mu0: return None
    # Iteratively adjust hour angle at sunrise time
dra = 0.0
for i in np.arange(0,5):
# Calculate hour angle at sunset
cos_ha = (mu0-(sin_dec*sin_lat))/(cos_dec*cos_lat)
ha = -np.arccos(cos_ha)*12.0/np.pi
# relation: dha = (1.0+0.0657098242/24.0)*delta - dra
delta = (ha+dra)/(1.0+0.0657098242/24.0)
(dec,ra) = celestial_coords(jd_noon+delta/24.0)
(sin_dec,cos_dec) = sincos(dec)
dra = (ra-ra_noon)*12.0/np.pi
return jd_noon+delta/24.0
def sunset(jd,lat,lon,mu0=np.cos((90.0+34.0/60.0)*np.pi/180.0)):
'''
Calculate the time of sunset.
Args:
jd(double): the date given as Julian day.
lat(double): the latitude of the location [in degrees].
lon(double): the longitude of the location [in degrees].
Returns:
double: the date and time of sunset.
'''
# get noon time
jd_noon = noon(jd,lon)
# get min/max mu0
(sin_lat,cos_lat) = sincos(np.pi/180.0*lat)
(dec,ra_noon) = celestial_coords(jd_noon)
(sin_dec,cos_dec) = sincos(dec)
# Check if we do have a sunset ...
mu0_min = sin_dec*sin_lat-cos_dec*cos_lat
mu0_max = sin_dec*sin_lat+cos_dec*cos_lat
if mu0_max<mu0 or mu0_min>mu0: return None
    # Iteratively adjust hour angle at sunset time
dra = 0.0
    for i in np.arange(0,5):
        # Calculate hour angle at sunset (positive root of cos_ha; cf. sunrise)
        cos_ha = (mu0-(sin_dec*sin_lat))/(cos_dec*cos_lat)
        ha = np.arccos(cos_ha)*12.0/np.pi
        # relation: dha = (1.0+0.0657098242/24.0)*delta - dra
        delta = (ha+dra)/(1.0+0.0657098242/24.0)
        (dec,ra) = celestial_coords(jd_noon+delta/24.0)
        (sin_dec,cos_dec) = sincos(dec)
        dra = (ra-ra_noon)*12.0/np.pi
    return jd_noon+delta/24.0
import cv2
import sys
import math
import numpy as np
ROI_THRESHOLD=[10, 100, 200]
def flatten(img):
    # note: cv2.imread returns channels in BGR order, so cv2.split yields
    # (b, g, r); the names below keep the original code's RGB assumption
    r, g, b = cv2.split(img)
r_filter = (r == np.maximum(np.maximum(r, g), b)) & (r >= 120) & (g < 150) & (b < 150)
g_filter = (g == np.maximum(np.maximum(r, g), b)) & (g >= 120) & (r < 150) & (b < 150)
b_filter = (b == np.maximum(np.maximum(r, g), b)) & (b >= 120) & (r < 150) & (g < 150)
y_filter = ((r >= 128) & (g >= 128) & (b < 100))
r[y_filter], g[y_filter] = 255, 255
b[np.invert(y_filter)] = 0
b[b_filter], b[np.invert(b_filter)] = 255, 0
r[r_filter], r[np.invert(r_filter)] = 255, 0
g[g_filter], g[np.invert(g_filter)] = 255, 0
flattened = cv2.merge((r, g, b))
return flattened
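# In effect, `flatten` posterizes the image: each pixel is snapped to a pure
# red, green, blue, or yellow value (or black) according to which channel
# dominates and the fixed thresholds above.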
def _mask(img):
ga = cv2.GaussianBlur(img, (5,5), 0)
rgb = flatten(img)
b, g, r = cv2.split(rgb)
mask = cv2.threshold(r, 200, 255, cv2.THRESH_BINARY)[1]
blur = cv2.blur(mask, (5,5))
mask = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)[1]
# cv2.imshow("mask", mask)
return mask
def r_mask(img):
color_low = np.array([10, 10, 120])
color_high =np.array([70, 60, 200])
ga = cv2.GaussianBlur(img, (5,5), 0)
mask = cv2.inRange(ga, color_low, color_high)
blur = cv2.blur(mask, (5,5))
mask = cv2.threshold(blur, 127, 255, cv2.THRESH_BINARY)[1]
return mask
def draw_box(img, locs):
# print("draw box locs:", locs)
max_x = locs[0][0]
max_y = locs[0][1]
min_x = locs[1][0]
min_y = locs[1][1]
if max_x < 0 or min_x < 0 or max_y < 0 or min_y < 0:
return
img = cv2.rectangle(img, (max_x, max_y), (min_x, min_y), (0, 255, 0), 1)
cv2.imshow("box", img)
# cv2.waitKey(1)
def get_rectangle_locs(contour):
h, w, l = contour.shape
locs = contour.reshape((h, l))
x_locs = locs[0:h, 0]
y_locs = locs[0:h, 1]
max_x = np.max(x_locs)
max_y = np.max(y_locs)
min_x = np.min(x_locs)
min_y = np.min(y_locs)
return np.array([[max_x, max_y], [min_x, min_y]])
def locs_distance(loc1, loc2):
d = loc1 - loc2
d = d * d
d = math.sqrt(np.sum(d))
return d
def locs_filter(mask, locs):
h, w = mask.shape[:2]
max_x = locs[0]
max_y = locs[1]
min_x = locs[2]
min_y = locs[3]
xd = locs[0] - locs[2]
yd = locs[1] - locs[3]
# print("height/3:", h/3, "weight/3:", h/3)
# print("xd:", xd, "yd:", yd)
if xd > h*2/3 or xd > w/3 or xd < 6 or yd < 6:
return [-1, -1, -1, -1]
ratio = 0.2
xd = max_x - min_x
yd = max_y - min_y
max_x = min(max_x + int(ratio*xd), h)
if min_x - int(ratio*xd) > 0:
min_x = min_x - int(ratio*xd)
else:
min_x = 0
max_y = min(max_y + int(ratio*yd), w)
if min_y - int(ratio*yd) > 0:
min_y = min_y - int(ratio*yd)
else:
min_y = 0
return locs
def detect(img, sen = 0):
mask = _mask(img)
binary, contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
    total_area = 0
    if len(contours) < 1:
        return False, mask
    for i in range(len(contours)):
        total_area += cv2.contourArea(contours[i])
    nums = np.sum(mask != 0)
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Lucas Asset Pricing Model
#
# ## A notebook by [<NAME>](http://www.econ2.jhu.edu/people/ccarroll/) and [<NAME>](https://mv77.github.io/)
# ### Inspired by its [Quantecon counterpart](https://julia.quantecon.org/multi_agent_models/lucas_model.html)
#
# This notebook presents simple computational tools to solve Lucas' asset-pricing model when the logarithm of the asset's dividend follows an autoregressive process of order 1,
#
# \begin{equation*}
# \ln d_{t+1} = \alpha \ln d_t + \varepsilon_{t+1}, \qquad \varepsilon \sim \mathcal{N}(\mu, \sigma).
# \end{equation*}
#
# A presentation of this model can be found in [Christopher D. Carroll's lecture notes](http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/LucasAssetPrice/).
#
# Those notes [use the Bellman equation to derive](http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/LucasAssetPrice/#pofc) a relationship between the price of the asset in the current period $t$ and the next period $t+1$:
#
# \begin{equation*}
# P_{t} =
# \overbrace{\left(\frac{1}{1+\vartheta}\right)}
# ^{\beta}\mathbb{E}_{t}\left[ \frac{u^{\prime}(d_{t+1})}{u^{\prime}(d_t)} (P_{t+1} + d_{t+1}) \right]
# \end{equation*}
#
# The equilibrium pricing equation is a relationship between the price and the dividend (a "pricing kernel") $P^{*}(d)$ such that, if everyone _believes_ that to be the pricing kernel, everyone's Euler equation will be satisfied:
#
# \begin{equation*}
# P^*(d_t) = \left(\frac{1}{1+\vartheta}\right)\mathbb{E}_{t}\left[ \frac{u^{\prime}(d_{t+1})}{u^{\prime}(d_t)} (P^*(d_{t+1}) + d_{t+1}) \right]
# \end{equation*}
#
# As noted in the handout, there are some special circumstances in which it is possible to solve for $P^{*}$ analytically:
#
# | Shock Process | CRRA | Solution for Pricing Kernel |
# | --- | --- | --- |
# | bounded | 1 (log) | $P^*(d) = \frac{d}{\vartheta}$ |
# | lognormal, mean 1 | $\rho$ | $P^*(d) = d_t^\rho\ e^{\rho(\rho-1)\sigma^2/2}\frac{\beta}{1-\beta}$ |
#
# However, under less special circumstances, the only way to obtain the pricing function $P^{*}$ is by solving for it numerically, as outlined below.
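# %% [markdown]
# To see why the log-utility row holds, substitute $u^{\prime}(c) = 1/c$ and the guess $P^*(d) = d/\vartheta$ into the pricing equation:
#
# \begin{equation*}
# \left(\frac{1}{1+\vartheta}\right)\mathbb{E}_{t}\left[ \frac{d_{t}}{d_{t+1}} \left(\frac{d_{t+1}}{\vartheta} + d_{t+1}\right) \right]
# = \left(\frac{1}{1+\vartheta}\right) d_{t} \left(\frac{1}{\vartheta} + 1\right)
# = \frac{d_{t}}{\vartheta},
# \end{equation*}
#
# so the guess satisfies the equation exactly: $d_{t+1}$ cancels inside the expectation, which is why only boundedness of the shock is needed.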
# %% [markdown]
# # Finding the equilibrium pricing function.
#
# We know that the equilibrium pricing function must satisfy the equation above. Let's define an operator that allows us to evaluate whether any candidate pricing function satisfies this requirement.
#
# Let $T$ be an operator which takes as argument a function and returns another function (these are usually called [functionals or higher-order functions](https://en.wikipedia.org/wiki/Functional_(mathematics))). For some function $f$, denote with $T[f]$ the function that results from applying $T$ to $f$. Then, for any real number $x$, $T[f](x)$ will be the real number that one obtains when the function $T[f]$ is given $x$ as an input.
#
# We define our particular operator as follows. For any function $g:\mathbb{R}\rightarrow\mathbb{R}$, $T[g]$ is obtained as
#
# \begin{equation*}
# \forall~d_t \in \mathbb{R},\,\,\,\, T[g](d_t) := \beta~\mathbb{E}_{t}\left[ \frac{u^{\prime}(d_{t+1})}{u^{\prime}(d_t)} (g(d_{t+1}) + d_{t+1}) \right].
# \end{equation*}
#
#
# We can use $T$ to re-express our pricing equation. If $P^*(\bullet)$ is our equilibrium pricing function, it must satisfy
#
# \begin{equation*}
# \forall~d_t,\,\,\,\,P^*(d_t) = \beta\mathbb{E}_{t}\left[ \frac{u^{\prime}(d_{t+1})}{u^{\prime}(d_t)} (P^*(d_{t+1}) + d_{t+1}) \right] = T[P^*](d_t).
# \end{equation*}
# or, expressed differently,
# \begin{equation*}
# P^* = T[P^*].
# \end{equation*}
#
# Our equilibrium pricing function is therefore a *fixed point* of the operator $T$.
#
# It turns out that $T$ is a [contraction mapping](https://en.wikipedia.org/wiki/Contraction_mapping). This is useful because it implies, through [Banach's fixed-point theorem](https://en.wikipedia.org/wiki/Banach_fixed-point_theorem), that:
# - $T$ has **exactly one** fixed point.
# - Starting from an arbitrary function $f$, the sequence $\{T^n[f]\}_{n=1}^{\infty}$ converges to such fixed point.
#
# For our purposes, this translates to:
# - Our equilibrium pricing function not only exists, but it is unique.
# - We can get arbitrarily close to the equilibrium pricing function by making some initial guess $f$ and applying the operator $T$ to it repeatedly.
#
# The code below creates a representation of our model and implements a solution routine to find $P^*$. The main components of this routine are:
#
# - `priceOnePeriod`: this is operator $T$ from above. It takes a function $f$, computes $\beta~\mathbb{E}_{t}\left[ \frac{u^{\prime}(d_{t+1})}{u^{\prime}(d_t)} (f(d_{t+1}) + d_{t+1}) \right]$ for a grid of $d_t$ values, and uses the result to construct a linear interpolator that approximates $T[f]$.
#
# - `solve`: this is our iterative solution procedure. It generates an initial guess $f$ and applies `priceOnePeriod` to it iteratively. At each application, it constructs a measure of how much the candidate pricing function changed. Once changes between successive iterations are small enough, it declares that the solution has converged.
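# %% [markdown]
# In pseudocode, `solve` has the familiar fixed-point shape sketched below (a generic illustration only; `T`, `f0`, and `grid` are placeholders rather than this notebook's actual API):
# %%
import numpy as np

def fixed_point_iterate(T, f0, grid, tol=1e-8, max_iter=500):
    """Apply operator T repeatedly until successive iterates agree on `grid`."""
    f = f0
    for _ in range(max_iter):
        f_next = T(f)
        # sup-norm of the change, measured on a grid of dividend values
        if np.max(np.abs(f_next(grid) - f(grid))) < tol:
            return f_next
        f = f_next
    return f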
# %% [markdown]
# # A computational representation of the problem and its solution.
# %% [markdown]
# `Uninteresting setup:`
# %% Preamble {"code_folding": [0], "jupyter": {"source_hidden": true}, "tags": []}
# Setup
import numpy as np
import matplotlib.pyplot as plt
from copy import copy
from HARK.utilities import CRRAutilityP
from HARK.distribution import Normal, calc_expectation
from HARK.interpolation import LinearInterp, ConstantFunction
# %% Definitions {"code_folding": [0]}
# A python class representing log-AR1 dividend processes.
class DivProcess:
def __init__(self, α, σ, μ = 0.0, nApprox = 7):
self.α = α
self.σ = σ
self.μ = μ
self.nApprox = nApprox
# Create a discrete approximation to the random shock
self.ShkAppDstn = Normal(mu = μ, sigma = σ).approx(N = nApprox)
def getLogdGrid(self, n = 100):
'''
A method for creating a reasonable grid for log-dividends.
'''
uncond_sd = self.σ / np.sqrt(1 - self.α**2)
uncond_mean = self.μ/(1-self.α)
logDGrid = np.linspace(-5*uncond_sd, 5*uncond_sd, n) + uncond_mean
return(logDGrid)
# A class representing economies with Lucas' trees.
class LucasEconomy:
'''
A representation of an economy in which there are Lucas trees
whose dividends' logarithm follows an AR1 process.
'''
def __init__(self, CRRA, DiscFac, DivProcess):
self.CRRA = CRRA
self.DiscFac = DiscFac
self.DivProcess = DivProcess
self.uP = lambda c: CRRAutilityP(c, self.CRRA)
def priceOnePeriod(self, Pfunc_next, logDGrid):
# Create a function that, given current dividends
# and the value of next period's shock, returns
# the discounted value derived from the asset next period.
def discounted_value(shock, log_d_now):
# Find dividends
d_now = np.exp(log_d_now)
log_d_next = self.DivProcess.α * log_d_now + shock
            d_next = np.exp(log_d_next)
import logging
import time
import random
import pickle
import os
from sys import maxsize
from collections import OrderedDict
import torch
from tensorboardX import SummaryWriter
from baselines.common.schedules import LinearSchedule
import numpy as np
from copy import deepcopy
from abp.utils import clear_summary_path
from abp.models import DQNModel
# TODO: Generalize it
from abp.examples.pysc2.tug_of_war.models_mb.transition_model import TransModel
from abp.utils.search_tree import Node
logger = logging.getLogger('root')
use_cuda = torch.cuda.is_available()
FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor
IntTensor = torch.cuda.IntTensor if use_cuda else torch.IntTensor
ByteTensor = torch.cuda.ByteTensor if use_cuda else torch.ByteTensor
Tensor = FloatTensor
building_types = {
'Marine': 0,
'Viking': 1,
'Colossus': 2,
'Pylon': 3
}
class MBTSAdaptive(object):
"""Adaptive which uses the Model base Tree search algorithm"""
def __init__(self, name, state_length, network_config, reinforce_config, models_path, env, player = 1):
super(MBTSAdaptive, self).__init__()
self.name = name
#self.choices = choices
self.network_config = network_config
self.reinforce_config = reinforce_config
self.explanation = False
        self.state_length = state_length
# Global
self.steps = 0
self.episode = 0
self.transition_model_HP = TransModel(state_length, 2)
self.transition_model_unit = TransModel(state_length, 6)
self.value_model = DQNModel(self.name + "_eval", self.network_config, use_cuda)
self.load_model(models_path)
self.env = env
self.player = player
self.index_hp = np.array([4, 9])
self.index_units = np.array(range(11, 17))
self.look_forward_step = 1
# Generalize it
self.load_model(models_path)
self.eval_mode()
self.normalization_array = np.array([30, 30, 30, 30, 2000,
30, 30, 30, 30, 2000,
1500, 60, 60, 60, 60, 60, 60])
self.reset()
def reward_func(self, state, next_states):
# print("reward func")
# print(state.shape, next_states.shape)
# for n_s in next_states:
# print("===================================")
# print(state.tolist())
# print(n_s.tolist())
# # print(state, next_states)
# print(next_states[:, self.index_hp] > 2000)
next_states[next_states > 2000] = 2000
rewards = (next_states - state.reshape(-1,))[:, self.index_hp]
rewards[rewards > 0] = 1
rewards[:, 1] *= -1
rewards = np.sum(rewards, axis = 1)
# print(rewards)
# input()
return rewards
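    # In words: HP is capped at 2000, the change in the two HP columns
    # (indices 4 and 9) is taken, positive changes are clipped to +1, the
    # second (enemy) column is negated, and the columns are summed per state.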
def eval_mode(self):
self.value_model.eval_mode()
self.transition_model_HP.eval_mode()
self.transition_model_unit.eval_mode()
def load_model(self, models_path):
HP_state_dict = torch.load(models_path + 'transition_model_HP.pt')
unit_state_dict = torch.load(models_path + 'transition_model_unit.pt')
# print(HP_state_dict.model)
new_HP_state_dict = OrderedDict()
new_unit_state_dict = OrderedDict()
for old_key_value_hp, old_key_value_unit in zip(list(HP_state_dict.items()), list(unit_state_dict.items())):
new_key_hp, new_value_hp = "module." + old_key_value_hp[0], old_key_value_hp[1]
new_key_unit, new_value_unit = "module." + old_key_value_unit[0], old_key_value_unit[1]
# print(new_key_hp, new_key_unit)
# print(old_key_hp, old_key_unit)
new_HP_state_dict[new_key_hp] = new_value_hp
new_unit_state_dict[new_key_unit] = new_value_unit
self.transition_model_HP.load_weight(new_HP_state_dict)
# TODO: get unit transition model
self.transition_model_unit.load_weight(new_unit_state_dict)
self.value_model.load_weight(torch.load(models_path + 'value_model.pt'))
def predict(self, state, minerals_enemy):
# Get actions of self
root = Node('root', state)
parents = [root]
if self.player == 1:
fifo_self = self.env.fifo_player_1
fifo_enemy = self.env.fifo_player_2
else:
fifo_self = self.env.fifo_player_2
fifo_enemy = self.env.fifo_player_1
leaf_node = []
leaf_node_states = []
leaf_fifo = []
for i in range(self.look_forward_step):
for n in parents:
next_states, next_fifo_self, length_enemy_action = self.expand_node(n, minerals_enemy, fifo_self, fifo_enemy)
if i == (self.look_forward_step - 1):
next_states = self.same_self_action_block(next_states, length_enemy_action)
children = self.same_self_action_block(np.array(n.children), length_enemy_action)
leaf_node_states.append(next_states)
leaf_fifo.append(next_fifo_self)
leaf_node.append(children)
# print(len(leaf_node_states[0]), len(leaf_fifo[0]), len(leaf_node[0]))
# input()
if self.look_forward_step == 0:
self.rollout_root([parents[0].state], parents, [fifo_self])
else:
for lns, ln, ff in zip(leaf_node_states, leaf_node, leaf_fifo):
self.rollout(lns, ln, ff)
# print(root.best_reward)
# action, _ = self.value_model.predict(state, 0, False)
# print(root.best_action)
return root.best_action
def same_self_action_block(self, states_or_nodes, length_enemy_action):
        return np.array(np.split(states_or_nodes, length_enemy_action))
import matplotlib.pyplot as plt
from matplotlib.patches import RegularPolygon, Polygon
import numpy as np
from typing import NamedTuple
import time
import math
import libhex, hex_pieces
## Settings
SIZE_MAP = 4 # Map size
# Hex_Grid class. Contains hex position, and grid status.
# Grid val:
# 0 - not occupied
# 1 - occupied
class Hex_Grid(NamedTuple):
hex: libhex.Hex
val: int
# Generate hex map
def Hex_generate_map(map_size):
Map = []
for x in range(-map_size, map_size+1):
for y in range(max(-map_size, -x-map_size), min(+map_size, -x+map_size)+1):
z = -x-y
this_grid = Hex_Grid(libhex.Hex(x, y, z), 0)
Map.append(this_grid)
return Map
# Toggle grid value if found
def Hex_find_and_set_grid_conditional(Map, q, r, s, new_val):
val_set_success = False
found_match = False
for i in range(len(Map)):
grid = Map[i]
hex = grid.hex
if hex.q == q and hex.r == r and hex.s == s:
found_match = True
if grid.val != new_val:
val_set_success = True
Map[i] = grid._replace(val=new_val)
break
return found_match, val_set_success
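# A small usage sketch (illustrative): build a radius-4 map, then try to
# occupy the grid at cube coordinates (0, 0, 0).
#
# Map = Hex_generate_map(SIZE_MAP)
# found, changed = Hex_find_and_set_grid_conditional(Map, 0, 0, 0, 1)
# # `found` is True if (q, r, s) exists on the map; `changed` is True only
# # if the grid's value actually changed (it was not already occupied).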
def Hex_reachable(Map, hex_start, movement):
t0 = time.time()
# add start to visited
fringes = [] # array of arrays of hexes
Hex_find_and_set_grid_conditional(Map, hex_start.q, hex_start.r, hex_start.s, 1)
fringes.append([hex_start])
cavity_num = 1 # hex start is the first cavity
for k in range(1, movement):
fringes.append([])
for hex in fringes[k-1]:
for dir in range(6):
hex_neighbor = libhex.hex_neighbor(hex, dir)
found_match, val_set_success = Hex_find_and_set_grid_conditional(Map, hex_neighbor.q, hex_neighbor.r, hex_neighbor.s, 1)
if found_match and val_set_success:
# if neighbor not in visited and not blocked:
#add neighbor to visited
fringes[k].append(hex_neighbor)
cavity_num = cavity_num + 1
# print("Time elapsed - Hex_reachable:", time.time()-t0)
return fringes, cavity_num
def Hex_check_if_all_remaining_hex_good(Map):
t0 = time.time()
Map_copy = Map.copy()
for grid in Map_copy:
if grid.val == 0:
fringes, cavity_num = Hex_reachable(Map_copy, grid.hex, 30)
# print(fringes, cavity_num)
if cavity_num % 5 != 0:
return False
# print("Time elapsed - Hex_check_if_all_remaininng_hex_good:", time.time()-t0)
return True
_hex_global_ctr = 0
def Hex_verify_candi_with_level(Map_copy, Puzzle_pieces, Candi):
if Candi:
level = len(Candi)
else:
level = 0
# check if next level has a solution (recursively)
for j in range(3):
for grid in Map_copy:
# skip grids that are occupied
if (grid.val == 1):
continue
q_offset = grid.hex.q
r_offset = grid.hex.r
s_offset = grid.hex.s
Candi_next = [level, j, q_offset, r_offset, s_offset]
result, Map_out, Candi_out = Hex_verify_candi_with_next_level(Map_copy, Puzzle_pieces, Candi, Candi_next)
if result:
if len(Candi_out) != hex_pieces.NUM_OF_PIECES:
# if the current solution is bad with impossible vacant grids to fill, skip and continue
if not Hex_check_if_all_remaining_hex_good(Map_out):
continue
global _hex_global_ctr
_hex_global_ctr = _hex_global_ctr + 1
if _hex_global_ctr % 20 == 0:
# print(_hex_global_ctr)
Hex_plot_current_candi(Hex_Map_Original_Copy, pin_hex, Candi_out)
Hex_verify_candi_with_level(Map_out, Puzzle_pieces, Candi_out)
else:
# Found a solution! Return it
print("Found a solution:", Candi_out)
Hex_plot_current_candi(Hex_Map_Original_Copy, pin_hex, Candi_out)
return Candi_out
# Verify whether a new piece (defined by Candi_next) can fit in a map status
# Return success/fail with updated map and candi
def Hex_verify_candi_with_next_level(Map_copy, Puzzle_pieces, Candi_old, Candi_next):
t0 = time.time()
Map_test = Map_copy.copy()
this_piece = Puzzle_pieces[Candi_next[0]]
this_piece_form = this_piece.form(Candi_next[1])
q_offset = Candi_next[2]
r_offset = Candi_next[3]
s_offset = Candi_next[4]
for k in range(5):
this_hex = this_piece_form[k]
q_in = this_hex.q + q_offset
r_in = this_hex.r + r_offset
s_in = this_hex.s + s_offset
found_match, success = Hex_find_and_set_grid_conditional(Map_test, q_in, r_in, s_in, 1)
# print(found_match, success)
if not found_match or not success:
return False, Map_copy, Candi_old
# if there is a fit
if Candi_old:
Candi_new = Candi_old.copy()
Candi_new.append(Candi_next)
else:
Candi_new = [Candi_next]
# print("Time elapsed - Hex_verify_candi_with_next_level:", time.time()-t0)
return True, Map_test, Candi_new
def Hex_plot_cube_edges(x, y):
plt.plot(x + 0.75*np.array([0, 0]), y + 0.75*np.array([0, -1]), 'k', alpha=0.3)
plt.plot(x + 0.75*np.array([0, math.sqrt(3)/2]), y + 0.75*np.array([0, 0.5]), 'k', alpha=0.3)
plt.plot(x + 0.75*np.array([0, -math.sqrt(3)/2]), y + 0.75*np.array([0, 0.5]), 'k', alpha=0.3)
def Hex_plot_current_candi(Hex_Map, pin_hex, Sol_candi):
t0 = time.time()
if not Sol_candi:
return
flag_show_number = False
plt.close()
fig, ax = plt.subplots(1)
ax.set_aspect('equal')
cube_array = 0.75 * np.array([[-math.sqrt(3)/2, 0], [-math.sqrt(3)/2, 0.5], [-math.sqrt(3)/4, 0.75], [0, 0.5],
[math.sqrt(3)/4, 0.75], [math.sqrt(3)/2, 0.5], [math.sqrt(3)/2, 0], [math.sqrt(3)/4, -0.25],
[math.sqrt(3)/4, -0.75], [0, -1], [-math.sqrt(3)/4, -0.75], [-math.sqrt(3)/4, -0.25]])
for grid in Hex_Map:
hex = grid.hex
x = hex.q
y = 2. * np.sin(np.radians(60)) * (hex.r - hex.s) /3.
cube = Polygon([x, y] + cube_array, closed=True,
facecolor='orange', alpha=0.2, edgecolor='None')
ax.add_patch(cube)
# Plot pin hex
x = pin_hex.q
    y = 2. * np.sin(np.radians(60)) * (pin_hex.r - pin_hex.s) / 3.
import collections
import matplotlib.pyplot as plt
import numpy as np
from environment.random_walk_1000_states import RandomWalk1000
def constant_factory(n):
probability_list = np.ones(n)
return lambda: probability_list / np.sum(probability_list)
class StateAggregation:
def __init__(self, min_state, max_state, aggregation_size):
self.min_state = min_state
self.max_state = max_state
self.aggregation_size = aggregation_size
self.aggregation_num = int((max_state - min_state) / aggregation_size) + 1
if (max_state - min_state) % aggregation_size == 0:
self.aggregation_num -= 1
self.weight = np.zeros(self.aggregation_num)
def __call__(self, x):
current_position = int(x / self.aggregation_size)
return self.weight[current_position]
def derivation(self, x):
derivative = np.zeros(self.aggregation_num)
current_position = int(x / self.aggregation_size)
derivative[current_position] = 1.0
return derivative
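# A quick illustration (not from the original source) of the aggregation:
# with states 0..999 in groups of 100, state 250 falls in group 2, so its
# value estimate is weight[2] and its gradient is the one-hot vector e_2.
#
# v = StateAggregation(0, 1000, 100)
# v.weight[2] = 0.5
# v(250)            # -> 0.5
# v.derivation(250) # -> array of zeros with a single 1.0 at index 2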
class Agent:
def __init__(self, env, n, min_state, max_state, aggregation_size):
self.env = env
self.n = n
self.value_state = StateAggregation(min_state, max_state, aggregation_size)
self.policies = collections.defaultdict(constant_factory(env.action_space.n))
def select_action(self, state):
probability_distribution = self.policies[state]
action = np.random.choice(self.env.action_space.n, 1, p=probability_distribution)
return action[0]
def SGTD_n_app(self, number_of_episodes, learning_rate, state_num=1000, gamma=1.):
mu = np.zeros(state_num)
for _ in range(number_of_episodes):
n_queue = collections.deque()
state = self.env.reset()
action = self.select_action(state)
mu[state] += 1.0
new_state, reward, is_done, _ = self.env.step(action)
while True:
mu[new_state] += 1.0
n_queue.append([new_state, reward, is_done])
if is_done:
while len(n_queue) != 0:
state_updated, _, _ = n_queue.popleft()
if state_updated is None:
break
gamma_temp = 1.0
g_value = 0.0
for iter_n in n_queue:
g_value += gamma_temp * iter_n[1]
gamma_temp *= gamma
                        delta_value = self.value_state.derivation(state_updated)
                        if new_state is not None:
self.value_state.weight += learning_rate * (reward +
gamma * self.value_state(new_state) -
self.value_state(state_updated)) * delta_value
else:
self.value_state.weight += learning_rate * (
reward - self.value_state(state_updated)) * delta_value
break
else:
if len(n_queue) == self.n + 1:
state_updated, _, _ = n_queue.popleft()
gamma_temp = 1.0
g_value = 0.0
for iter_n in n_queue:
g_value += gamma_temp * iter_n[1]
gamma_temp *= gamma
action_next = self.select_action(new_state)
new_state, reward, is_done, _ = env.step(action_next)
if new_state is not None:
g_value += (reward * gamma_temp + self.value_state(new_state))
else:
g_value += reward * gamma_temp
delta_value = self.value_state.derivation(state_updated)
if new_state is not None:
self.value_state.weight += learning_rate * (reward +
gamma * self.value_state(new_state) -
self.value_state(state_updated)) * delta_value
else:
self.value_state.weight += learning_rate * (
reward - self.value_state(state_updated)) * delta_value
else:
action_next = self.select_action(new_state)
new_state, reward, is_done, _ = env.step(action_next)
return mu
if __name__ == '__main__':
env = RandomWalk1000()
agent = Agent(env, 0, 0, 1000, 100)
mu = agent.SGTD_n_app(10000, 1e-2, gamma=0.99)
    mu = mu / np.sum(mu)
# Import both skimage and cv
from skimage import transform as tf
from skimage import io
import cv2
import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# Could use either skimage or cv to read the image
# img = cv2.imread('label.png')
img = cv2.imread('./picture/mask.jpg')
img = cv2.resize(img, (img.shape[1]//2, img.shape[0]//2))
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray_image, 0, 255, cv2.THRESH_BINARY)
edges = cv2.Canny(thresh, 0, 200)
cv2.imshow("edges", edges)
cv2.waitKey(0)
# Find largest contour (should be the label)
_, contours, hierarchy = cv2.findContours(edges, 0, 1)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
# Create a mask of the label
mask = np.zeros(img.shape, np.uint8)
"""Test gates defined in `qibo/core/gates.py`."""
import pytest
import numpy as np
from qibo import gates, K
from qibo.config import raise_error
from qibo.tests.utils import random_state, random_density_matrix
def apply_gates(gatelist, nqubits=None, initial_state=None):
if initial_state is None:
state = K.qnp.zeros(2 ** nqubits)
state[0] = 1
elif isinstance(initial_state, np.ndarray):
state = np.copy(initial_state)
if nqubits is None:
nqubits = int(np.log2(len(state)))
else: # pragma: no cover
assert nqubits == int(np.log2(len(state)))
else: # pragma: no cover
raise_error(TypeError, "Invalid initial state type {}."
"".format(type(initial_state)))
state = K.cast(state)
for gate in gatelist:
state = gate(state)
return state
def test__control_unitary(backend):
matrix = K.cast(np.random.random((2, 2)))
gate = gates.Unitary(matrix, 0)
unitary = gate._control_unitary(matrix)
target_unitary = np.eye(4, dtype=K._dtypes.get('DTYPECPX'))
target_unitary[2:, 2:] = K.to_numpy(matrix)
K.assert_allclose(unitary, target_unitary)
with pytest.raises(ValueError):
unitary = gate._control_unitary(np.random.random((16, 16)))
def test_h(backend):
final_state = apply_gates([gates.H(0), gates.H(1)], nqubits=2)
target_state = np.ones_like(final_state) / 2
K.assert_allclose(final_state, target_state)
def test_x(backend):
final_state = apply_gates([gates.X(0)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_y(backend):
final_state = apply_gates([gates.Y(1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[1] = 1j
K.assert_allclose(final_state, target_state)
def test_z(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.Z(0)], nqubits=2)
target_state = np.ones_like(final_state) / 2.0
target_state[2] *= -1.0
target_state[3] *= -1.0
K.assert_allclose(final_state, target_state)
def test_s(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.S(1)], nqubits=2)
target_state = np.array([0.5, 0.5j, 0.5, 0.5j])
K.assert_allclose(final_state, target_state)
def test_sdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.SDG(1)], nqubits=2)
target_state = np.array([0.5, -0.5j, 0.5, -0.5j])
K.assert_allclose(final_state, target_state)
def test_t(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.T(1)], nqubits=2)
target_state = np.array([0.5, (1 + 1j) / np.sqrt(8),
0.5, (1 + 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_tdg(backend):
final_state = apply_gates([gates.H(0), gates.H(1), gates.TDG(1)], nqubits=2)
target_state = np.array([0.5, (1 - 1j) / np.sqrt(8),
0.5, (1 - 1j) / np.sqrt(8)])
K.assert_allclose(final_state, target_state)
def test_identity(backend):
gatelist = [gates.H(0), gates.H(1), gates.I(0), gates.I(1)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gatelist = [gates.H(0), gates.H(1), gates.I(0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_align(backend):
gate = gates.Align(0, 1)
gatelist = [gates.H(0), gates.H(1), gate]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(final_state) / 2.0
K.assert_allclose(final_state, target_state)
gate_matrix = gate._construct_unitary()
K.assert_allclose(gate_matrix, np.eye(4))
# :class:`qibo.core.cgates.M` is tested separately in `test_measurement_gate.py`
def test_rx(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RX(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -1j * phase.imag],
[-1j * phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_ry(backend):
theta = 0.1234
final_state = apply_gates([gates.H(0), gates.RY(0, theta=theta)], nqubits=1)
phase = np.exp(1j * theta / 2.0)
gate = np.array([[phase.real, -phase.imag],
[phase.imag, phase.real]])
target_state = gate.dot(np.ones(2)) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [True, False])
def test_rz(backend, applyx):
theta = 0.1234
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.RZ(0, theta))
final_state = apply_gates(gatelist, nqubits=1)
target_state = np.zeros_like(final_state)
p = int(applyx)
target_state[p] = np.exp((2 * p - 1) * 1j * theta / 2.0)
K.assert_allclose(final_state, target_state)
def test_u1(backend):
theta = 0.1234
final_state = apply_gates([gates.X(0), gates.U1(0, theta)], nqubits=1)
target_state = np.zeros_like(final_state)
target_state[1] = np.exp(1j * theta)
K.assert_allclose(final_state, target_state)
def test_u2(backend):
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U2(0, phi, lam)], initial_state=initial_state)
matrix = np.array([[np.exp(-1j * (phi + lam) / 2), -np.exp(-1j * (phi - lam) / 2)],
[np.exp(1j * (phi - lam) / 2), np.exp(1j * (phi + lam) / 2)]])
target_state = matrix.dot(initial_state) / np.sqrt(2)
K.assert_allclose(final_state, target_state)
def test_u3(backend):
theta = 0.1111
phi = 0.1234
lam = 0.4321
initial_state = random_state(1)
final_state = apply_gates([gates.U3(0, theta, phi, lam)],
initial_state=initial_state)
cost, sint = np.cos(theta / 2), np.sin(theta / 2)
ep = np.exp(1j * (phi + lam) / 2)
em = np.exp(1j * (phi - lam) / 2)
matrix = np.array([[ep.conj() * cost, - em.conj() * sint],
[em * sint, ep * cost]])
target_state = matrix.dot(initial_state)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("applyx", [False, True])
def test_cnot(backend, applyx):
if applyx:
gatelist = [gates.X(0)]
else:
gatelist = []
gatelist.append(gates.CNOT(0, 1))
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.zeros_like(final_state)
target_state[3 * int(applyx)] = 1.0
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("controlled_by", [False, True])
def test_cz(backend, controlled_by):
initial_state = random_state(2)
matrix = np.eye(4)
matrix[3, 3] = -1
target_state = matrix.dot(initial_state)
if controlled_by:
gate = gates.Z(1).controlled_by(0)
else:
gate = gates.CZ(0, 1)
final_state = apply_gates([gate], initial_state=initial_state)
assert gate.name == "cz"
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("name,params",
[("CRX", {"theta": 0.1}),
("CRY", {"theta": 0.2}),
("CRZ", {"theta": 0.3}),
("CU1", {"theta": 0.1}),
("CU2", {"phi": 0.1, "lam": 0.2}),
("CU3", {"theta": 0.1, "phi": 0.2, "lam": 0.3})])
def test_cun(backend, name, params):
initial_state = random_state(2)
gate = getattr(gates, name)(0, 1, **params)
final_state = apply_gates([gate], initial_state=initial_state)
target_state = np.dot(K.to_numpy(gate.matrix), initial_state)
K.assert_allclose(final_state, target_state)
def test_swap(backend):
final_state = apply_gates([gates.X(1), gates.SWAP(0, 1)], nqubits=2)
target_state = np.zeros_like(final_state)
target_state[2] = 1.0
K.assert_allclose(final_state, target_state)
def test_multiple_swap(backend):
gatelist = [gates.X(0), gates.X(2), gates.SWAP(0, 1), gates.SWAP(2, 3)]
final_state = apply_gates(gatelist, nqubits=4)
gatelist = [gates.X(1), gates.X(3)]
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
def test_fsim(backend):
theta = 0.1234
phi = 0.4321
gatelist = [gates.H(0), gates.H(1), gates.fSim(0, 1, theta, phi)]
final_state = apply_gates(gatelist, nqubits=2)
target_state = np.ones_like(K.to_numpy(final_state)) / 2.0
rotation = np.array([[np.cos(theta), -1j * np.sin(theta)],
[-1j * np.sin(theta), np.cos(theta)]])
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state = matrix.dot(target_state)
K.assert_allclose(final_state, target_state)
def test_generalized_fsim(backend):
phi = np.random.random()
rotation = np.random.random((2, 2)) + 1j * np.random.random((2, 2))
gatelist = [gates.H(0), gates.H(1), gates.H(2)]
gatelist.append(gates.GeneralizedfSim(1, 2, rotation, phi))
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.ones_like(K.to_numpy(final_state)) / np.sqrt(8)
matrix = np.eye(4, dtype=target_state.dtype)
matrix[1:3, 1:3] = rotation
matrix[3, 3] = np.exp(-1j * phi)
target_state[:4] = matrix.dot(target_state[:4])
target_state[4:] = matrix.dot(target_state[4:])
K.assert_allclose(final_state, target_state)
def test_generalized_fsim_parameter_setter(backend):
phi = np.random.random()
matrix = np.random.random((2, 2))
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
K.assert_allclose(gate.parameters[0], matrix)
assert gate.parameters[1] == phi
matrix = np.random.random((4, 4))
with pytest.raises(ValueError):
gate = gates.GeneralizedfSim(0, 1, matrix, phi)
@pytest.mark.parametrize("applyx", [False, True])
def test_toffoli(backend, applyx):
if applyx:
gatelist = [gates.X(0), gates.X(1), gates.TOFFOLI(0, 1, 2)]
else:
gatelist = [gates.X(1), gates.TOFFOLI(0, 1, 2)]
final_state = apply_gates(gatelist, nqubits=3)
target_state = np.zeros_like(final_state)
if applyx:
target_state[-1] = 1
else:
target_state[2] = 1
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [2, 3])
def test_unitary(backend, nqubits):
initial_state = np.ones(2 ** nqubits) / np.sqrt(2 ** nqubits)
matrix = np.random.random(2 * (2 ** (nqubits - 1),))
target_state = np.kron(np.eye(2), matrix).dot(initial_state)
gatelist = [gates.H(i) for i in range(nqubits)]
gatelist.append(gates.Unitary(matrix, *range(1, nqubits), name="random"))
final_state = apply_gates(gatelist, nqubits=nqubits)
K.assert_allclose(final_state, target_state)
def test_unitary_initialization(backend):
matrix = np.random.random((4, 4))
gate = gates.Unitary(matrix, 0, 1)
K.assert_allclose(gate.parameters, matrix)
matrix = np.random.random((8, 8))
with pytest.raises(ValueError):
gate = gates.Unitary(matrix, 0, 1)
with pytest.raises(TypeError):
gate = gates.Unitary("abc", 0, 1)
def test_unitary_common_gates(backend):
target_state = apply_gates([gates.X(0), gates.H(1)], nqubits=2)
gatelist = [gates.Unitary(np.array([[0, 1], [1, 0]]), 0),
gates.Unitary(np.array([[1, 1], [1, -1]]) / np.sqrt(2), 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
thetax = 0.1234
thetay = 0.4321
gatelist = [gates.RX(0, theta=thetax), gates.RY(1, theta=thetay),
gates.CNOT(0, 1)]
target_state = apply_gates(gatelist, nqubits=2)
rx = np.array([[np.cos(thetax / 2), -1j * np.sin(thetax / 2)],
[-1j * np.sin(thetax / 2), np.cos(thetax / 2)]])
ry = np.array([[np.cos(thetay / 2), -np.sin(thetay / 2)],
[np.sin(thetay / 2), np.cos(thetay / 2)]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
gatelist = [gates.Unitary(rx, 0), gates.Unitary(ry, 1),
gates.Unitary(cnot, 0, 1)]
final_state = apply_gates(gatelist, nqubits=2)
K.assert_allclose(final_state, target_state)
def test_unitary_multiqubit(backend):
gatelist = [gates.H(i) for i in range(4)]
gatelist.append(gates.CNOT(0, 1))
gatelist.append(gates.CNOT(2, 3))
gatelist.extend(gates.X(i) for i in range(4))
h = np.array([[1, 1], [1, -1]]) / np.sqrt(2)
x = np.array([[0, 1], [1, 0]])
cnot = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
matrix = np.kron(np.kron(x, x), np.kron(x, x))
matrix = matrix @ np.kron(cnot, cnot)
matrix = matrix @ np.kron(np.kron(h, h), np.kron(h, h))
unitary = gates.Unitary(matrix, 0, 1, 2, 3)
if K.name == "qibotf":
with pytest.raises(NotImplementedError):
final_state = apply_gates([unitary], nqubits=4)
else:
final_state = apply_gates([unitary], nqubits=4)
target_state = apply_gates(gatelist, nqubits=4)
K.assert_allclose(final_state, target_state)
@pytest.mark.parametrize("nqubits", [5, 6])
def test_variational_layer(backend, nqubits):
theta = 2 * np.pi * np.random.random(nqubits)
gatelist = [gates.RY(i, t) for i, t in enumerate(theta)]
gatelist.extend(gates.CZ(i, i + 1) for i in range(0, nqubits - 1, 2))
target_state = apply_gates(gatelist, nqubits=nqubits)
pairs = list((i, i + 1) for i in range(0, nqubits - 1, 2))
gate = gates.VariationalLayer(range(nqubits), pairs,
gates.RY, gates.CZ,
theta)
final_state = apply_gates([gate], nqubits=nqubits)
K.assert_allclose(target_state, final_state)
def test_variational_layer__construct_unitary(backend):
pairs = list((i, i + 1) for i in range(0, 5, 2))
theta = 2 * np.pi * np.random.random(6)
gate = gates.VariationalLayer(range(6), pairs, gates.RY, gates.CZ, theta)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_flatten(backend):
target_state = np.ones(4) / 2.0
final_state = apply_gates([gates.Flatten(target_state)], nqubits=2)
K.assert_allclose(final_state, target_state)
target_state = np.ones(4) / 2.0
gate = gates.Flatten(target_state)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_callback_gate_errors():
from qibo import callbacks
entropy = callbacks.EntanglementEntropy([0])
gate = gates.CallbackGate(entropy)
with pytest.raises(ValueError):
gate._construct_unitary()
def test_general_channel(backend):
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0],
[0, 0, 0, 1], [0, 0, 1, 0]])
a1, a2 = K.cast(a1), K.cast(a2)
initial_rho = random_density_matrix(2)
gate = gates.KrausChannel([((1,), a1), ((0, 1), a2)])
assert gate.target_qubits == (0, 1)
final_rho = gate(np.copy(initial_rho))
m1 = np.kron(np.eye(2), K.to_numpy(a1))
m2 = K.to_numpy(a2)
target_rho = (m1.dot(initial_rho).dot(m1.conj().T) +
m2.dot(initial_rho).dot(m2.conj().T))
K.assert_allclose(final_rho, target_rho)
def test_krauss_channel_errors(backend):
# bad Kraus matrix shape
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
with pytest.raises(ValueError):
gate = gates.KrausChannel([((0, 1), a1)])
# Using KrausChannel on state vectors
channel = gates.KrausChannel([((0,), np.eye(2))])
with pytest.raises(ValueError):
channel._state_vector_call(np.random.random(4))
# Attempt to construct unitary for KrausChannel
with pytest.raises(ValueError):
channel._construct_unitary()
def test_controlled_by_channel_error():
with pytest.raises(ValueError):
gates.PauliNoiseChannel(0, px=0.5).controlled_by(1)
a1 = np.sqrt(0.4) * np.array([[0, 1], [1, 0]])
a2 = np.sqrt(0.6) * np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1],
[0, 0, 1, 0]])
config = [((1,), a1), ((0, 1), a2)]
with pytest.raises(ValueError):
gates.KrausChannel(config).controlled_by(1)
def test_unitary_channel(backend):
a1 = np.array([[0, 1], [1, 0]])
a2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
probs = [0.4, 0.3]
matrices = [((0,), a1), ((2, 3), a2)]
initial_state = random_density_matrix(4)
gate = gates.UnitaryChannel(probs, matrices)
gate.density_matrix = True
final_state = gate(K.cast(np.copy(initial_state)))
eye = np.eye(2)
ma1 = np.kron(np.kron(a1, eye), np.kron(eye, eye))
ma2 = np.kron(np.kron(eye, eye), a2)
target_state = (0.3 * initial_state
+ 0.4 * ma1.dot(initial_state.dot(ma1))
+ 0.3 * ma2.dot(initial_state.dot(ma2)))
K.assert_allclose(final_state, target_state)
def test_unitary_channel_errors():
"""Check errors raised by ``gates.UnitaryChannel``."""
a1 = np.array([[0, 1], [1, 0]])
a2 = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
probs = [0.4, 0.3]
matrices = [((0,), a1), ((2, 3), a2)]
# Invalid probability length
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([0.1, 0.3, 0.2], matrices)
# Probability > 1
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([1.1, 0.2], matrices)
# Probability sum = 0
with pytest.raises(ValueError):
gate = gates.UnitaryChannel([0.0, 0.0], matrices)
def test_pauli_noise_channel(backend):
initial_rho = random_density_matrix(2)
gate = gates.PauliNoiseChannel(1, px=0.3)
gate.density_matrix = True
final_rho = gate(K.cast(np.copy(initial_rho)))
gate = gates.X(1)
gate.density_matrix = True
initial_rho = K.cast(initial_rho)
target_rho = 0.3 * gate(K.copy(initial_rho))
target_rho += 0.7 * initial_rho
K.assert_allclose(final_rho, target_rho)
def test_reset_channel(backend):
initial_rho = random_density_matrix(3)
gate = gates.ResetChannel(0, p0=0.2, p1=0.2)
gate.density_matrix = True
final_rho = gate(K.cast(np.copy(initial_rho)))
dtype = initial_rho.dtype
collapsed_rho = np.copy(initial_rho).reshape(6 * (2,))
collapsed_rho[0, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
collapsed_rho[1, :, :, 0, :, :] = np.zeros(4 * (2,), dtype=dtype)
    collapsed_rho[1, :, :, 1, :, :] = np.zeros(4 * (2,), dtype=dtype)
"""
Created on Tue Jan 26 16:41:35 2021
@author: mark
"""
import numpy as np
import torch
import torch.nn as nn
from aircraft_detector.utils.utils import print_verbose
import aircraft_detector.utils.pytorch_earlystopping as es
"""
conv1d: in shape = (N, C_in, L_in) -> out shape = (N, C_out, L_out)
conv2d: in shape = (N, C_in, H_in, W_in) -> out_shape = (N, C_out, H_out, W_out)
linear: in_shape = (N, L_in) -> out shape = (N, L_out)
Gru/Lstm: in_shape = L_in, out_shape = L_out
batchnorm1d: num_features = L_out
batchnorm2d: num_features = C_out
all dropout
"""
class Net(nn.Module):
# assumes conv->fc, rec->fc, fc or conv+fc2->fc
def __init__(
self, config, input_shape, output_size, device=None, input_2_size=None
):
super().__init__()
# get number of layers per type
n_conv_layers = 0
n_rec_layers = 0
n_fc_layers = 0
n_fc2_layers = 0
for layer in config:
if "Conv" in layer["layer_type"]:
n_conv_layers += 1
elif layer["layer_type"] == "Linear":
n_fc_layers += 1
elif layer["layer_type"] == "Linear_2":
n_fc2_layers += 1
else:
n_rec_layers += 1
# set network type and generate empty network
if n_conv_layers > 0:
self._net_type = "CNN"
self._modules_conv, in_features = self._generate_conv_modules(
config, input_shape
)
if n_fc2_layers > 0:
self._modules_lin2, in_features_2 = self._generate_linear_modules(
config, input_2_size, layer_name="Linear_2"
)
in_features += in_features_2
self._modules_lin, in_features = self._generate_linear_modules(
config, in_features
)
elif n_rec_layers > 0:
self._net_type = "RNN"
self._rnn_type = config[0]["layer_type"]
self._device = device
(
self._modules_rec,
in_features,
self._hidden_sizes,
) = self._generate_recurrent_modules(config, input_shape)
self._modules_lin, in_features = self._generate_linear_modules(
config, in_features,
)
else:
self._net_type = "MLP"
self._modules_lin, in_features = self._generate_linear_modules(
config, np.prod(input_shape)
)
# use a linear output layer
self._out = nn.Linear(in_features, output_size)
def _generate_conv_modules(self, config, input_shape, modules=None):
# empty module list
if modules is None:
modules = nn.ModuleList()
in_channels = input_shape[0]
input_sizes = input_shape[1:] # HxW
# go over all type conv layers in config
i = 0
while i < len(config):
# empty module, to be filled with 'layers' from the config
layers_in_module = []
# current layer settings
layer_dict = config[i].copy()
layer_type = layer_dict.pop("layer_type")
# stop as soon as a fully-connected layer is found
if layer_type == "Linear" or layer_type == "Linear_2":
break
# set up layer and its parameters
            layer = getattr(nn, layer_type)  # e.g. nn.Conv2d
layer = layer(in_channels, **layer_dict)
# add to module
layers_in_module.append(layer)
i += 1
# set new 'in_channels' to current 'out_channels'
in_channels = layer_dict["out_channels"]
# calculate new 'input_sizes' (height, width)
input_sizes = _calc_conv_layer_output_shape(
layer_type, layer_dict, input_sizes
)
# apply batch normalization if in config and before relu
if i < len(config):
if (
"BatchNorm" in config[i]["layer_type"]
and config[i].get("location", None) == "before"
):
bn_dict = config[i].copy()
_ = bn_dict.pop("location")
bn_type = bn_dict.pop("layer_type")
bn = getattr(nn, bn_type)
# supply new 'in_channels' and eps, along with layer parameters
bn = bn(in_channels, eps=1e-8, **bn_dict)
layers_in_module.append(bn)
i += 1
# apply pooling if in config
if i < len(config):
if "MaxPool" in config[i]["layer_type"]:
pool_dict = config[i].copy()
pool_type = pool_dict.pop("layer_type")
pool = getattr(nn, pool_type)
# supply parameters
pool = pool(**pool_dict)
layers_in_module.append(pool)
i += 1
# calculate new 'input_sizes' (height, width)
input_sizes = _calc_conv_layer_output_shape(
pool_type, pool_dict, input_sizes
)
# add ReLU
layers_in_module.append(nn.ReLU())
# apply dropout if in config
if i < len(config):
if "Dropout" in config[i]["layer_type"]:
dropout_dict = config[i].copy()
_ = dropout_dict.pop("layer_type")
# supply parameters in dict
dropout = nn.Dropout(**dropout_dict)
layers_in_module.append(dropout)
i += 1
# apply batch normalization if in config and after relu (default)
if i < len(config):
if (
"BatchNorm" in config[i]["layer_type"]
and config[i].get("location", None) != "before"
):
bn_dict = config[i].copy()
_ = bn_dict.pop("location")
bn_type = bn_dict.pop("layer_type")
bn = getattr(nn, bn_type)
# supply new 'in_channels' and eps, along with layer parameters
bn = bn(in_channels, eps=1e-8, **bn_dict)
layers_in_module.append(bn)
i += 1
# add module to module list
module = nn.Sequential(*layers_in_module)
modules.append(module)
# calculate number of output units (required for FC layers)
output_units = in_channels * np.prod(input_sizes)
return modules, output_units
def _generate_recurrent_modules(self, config, input_shape, modules=None):
# empty module list
if modules is None:
modules = nn.ModuleList()
# hidden sizes are used in forward() to init. hidden states with 0s
hidden_sizes = []
input_size = input_shape[-1] # no. of input features (freq. bins)
# go over all recurrent layers in config
i = 0
while i < len(config):
# current layer settings
layer_dict = config[i].copy()
layer_type = layer_dict.pop("layer_type")
# stop as soon as a fully-connected layer is found
if layer_type == "Linear":
break
# set up layer and its parameters
            layer = getattr(nn, layer_type)  # e.g. nn.GRU or nn.LSTM
layer = layer(input_size, **layer_dict, batch_first=True)
# add to module list
modules.append(layer)
i += 1
# set new 'input_size' to current 'hidden_size'
input_size = layer_dict["hidden_size"]
hidden_sizes.append(input_size)
return modules, input_size, hidden_sizes
def _generate_linear_modules(
self, config, in_features, modules=None, layer_name="Linear"
):
# empty module list
if modules is None:
modules = nn.ModuleList()
# skip layers until the first fully-connected layer is found
i = 0
while config[i]["layer_type"] != layer_name:
i += 1
if i >= len(config):
# in case no linear layers in config (not recommended)
return modules, in_features
# search remaining layers
while i < len(config):
# check for interference with second linear module
if (config[i]["layer_type"] == "Linear" and layer_name == "Linear_2") or (
config[i]["layer_type"] == "Linear_2" and layer_name == "Linear"
):
break
# empty module, to be filled with 'layers' from the config
layers_in_module = []
# current layer settings
layer_dict = config[i].copy()
layer_type = layer_dict.pop("layer_type").split("_")[0]
# set up layer and its parameters
            layer = getattr(nn, layer_type)  # e.g. nn.Linear
layer = layer(in_features, **layer_dict)
# add to module
layers_in_module.append(layer)
i += 1
# set new 'in_features' to current 'out_features'
in_features = layer_dict["out_features"]
# add ReLU
layers_in_module.append(nn.ReLU())
# apply dropout if in config
if i < len(config):
if "Dropout" in config[i]["layer_type"]:
dropout_dict = config[i].copy()
_ = dropout_dict.pop("layer_type")
# supply parameters in dict
dropout = nn.Dropout(**dropout_dict)
layers_in_module.append(dropout)
i += 1
# add module to module list
module = nn.Sequential(*layers_in_module)
modules.append(module)
return modules, in_features
def forward(self, x, x2=None): # add support for double input net!!!
if self._net_type == "CNN":
x = self._forward_convolutional(x)
elif self._net_type == "RNN":
x = self._forward_recurrent(x)
else:
pass
# reshape output for fully-connected layer
x = x.reshape(x.size(0), -1)
if x2 is not None:
for module in self._modules_lin2:
x2 = module(x2)
x = torch.cat((x, x2), dim=1)
# forward pass: FC layers
for module in self._modules_lin:
x = module(x)
# linear output layer
x = self._out(x)
return x
def _forward_convolutional(self, x):
for module in self._modules_conv:
x = module(x)
return x
def _forward_recurrent(self, x):
for i, module in enumerate(self._modules_rec):
# initialize hidden state with zeros
hidden_size = self._hidden_sizes[i]
h0 = (
torch.zeros(1, x.size(0), hidden_size).requires_grad_().to(self._device)
)
# initialize hidden cell state with zeros if lstm is used
if self._rnn_type == "LSTM": # or check type in module??
c0 = (
torch.zeros(1, x.size(0), hidden_size)
.requires_grad_()
.to(self._device)
)
# detach h0, c0 to avoid BPTT through previous batches
x, _ = module(x, (h0.detach(), c0.detach()))
else:
# detach h0 to avoid BPTT through previous batches
x, _ = module(x, h0.detach())
# extract output from last timestep
x = x[:, -1, :]
return x
def _calc_conv_layer_output_shape(layer_type, layer_dict, input_shape):
assert layer_type in ["Conv1d", "Conv2d", "MaxPool1d", "MaxPool2d"]
if "1d" in layer_type:
in_shape = input_shape[0]
# get layer params or default
kernel = layer_dict["kernel_size"]
if layer_type == "Conv1d":
stride = layer_dict.get("stride", 1)
else:
stride = layer_dict.get("stride", 2)
padding = layer_dict.get("padding", 0)
dilation = layer_dict.get("dilation", 1)
# compute output length
out_shape = int(
(in_shape + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1
)
else:
in_height = input_shape[0]
in_width = input_shape[1]
# get layer params or default
kernel = layer_dict["kernel_size"]
if layer_type == "Conv2d":
stride = layer_dict.get("stride", (1, 1))
else:
stride = layer_dict.get("stride", (2, 2))
padding = layer_dict.get("padding", (0, 0))
dilation = layer_dict.get("dilation", (1, 1))
# compute output shape
out_height = int(
(in_height + 2 * padding[0] - dilation[0] * (kernel[0] - 1) - 1) / stride[0]
+ 1
)
out_width = int(
(in_width + 2 * padding[1] - dilation[1] * (kernel[1] - 1) - 1) / stride[1]
+ 1
)
out_shape = [out_height, out_width]
return out_shape
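
# Sanity check added for illustration (not in the original file): a 3x3 conv
# with default stride and no padding maps a 28x28 input to 26x26.
assert _calc_conv_layer_output_shape("Conv2d", {"kernel_size": (3, 3)}, [28, 28]) == [26, 26]
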
# network configuration
def set_net_configuration(layers, test_set):
# fetch input shape and output size from test set
input_shape = list(test_set.tensors[0].size()[1:])
output_size = int(np.prod(test_set.tensors[-1].size()[1:])) # cast to int for json
# set device
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
# set network configuration
net_config = {
"layers": layers,
"input_shape": input_shape,
"output_size": output_size,
"device": device,
}
if len(test_set.tensors) > 2:
net_config["input_2_size"] = np.prod(test_set.tensors[1].size()[1:])
return net_config
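
def _example_setup():
    """Hedged usage sketch (added; not in the original file): build a Net from
    a small synthetic TensorDataset. All shapes here are invented."""
    test_set = torch.utils.data.TensorDataset(
        torch.randn(8, 1, 40, 32), torch.randn(8, 3)
    )
    net_config = set_net_configuration(_EXAMPLE_CNN_CONFIG, test_set)
    return Net(
        net_config["layers"],
        net_config["input_shape"],
        net_config["output_size"],
        device=net_config["device"],
    )
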
def train_network(
train_settings,
train_set,
val_set,
net_config,
loss_fn,
verbose=True,
super_verbose=False,
):
# reset the seed
torch.manual_seed(42)
# create network from config
network = _create_network(net_config)
# copy optimizer settings to avoid modifying train_settings
dict_optimizer = train_settings["optimizer"].copy()
# select the optimizer in torch.optim from settings
optimizer = getattr(torch.optim, dict_optimizer.pop("optimizer"))
# bind network, unpack optimizer settings
optimizer = optimizer(network.parameters(), **dict_optimizer)
if "lr_scheduler" in train_settings:
# copy scheduler settings to avoid modifying train_settings
dict_scheduler = train_settings["lr_scheduler"].copy()
# select the lr scheduler in torch.optim from settings
lr_scheduler = getattr(
torch.optim.lr_scheduler, dict_scheduler.pop("scheduler")
)
# bind optimizer, unpack scheduler settings
lr_scheduler = lr_scheduler(optimizer, **dict_scheduler)
# create train dataloader
train_loader = torch.utils.data.DataLoader(
dataset=train_set,
batch_size=train_settings["batch_size"],
shuffle=True,
drop_last=False,
)
# create validation dataloader
if len(val_set) > 2048:
val_batch_size = 2048 # cap batch size to avoid memory issues
else:
val_batch_size = len(val_set)
val_loader = torch.utils.data.DataLoader(
dataset=val_set, batch_size=val_batch_size, drop_last=False
)
if "es_patience" in train_settings:
# set up early stopping checkpoint
fp_checkpoint = "checkpoint-es.pt"
early_stopping = es.EarlyStopping(
patience=train_settings["es_patience"],
delta=1e-7,
verbose=super_verbose,
output_fp=fp_checkpoint,
)
training_loss_history = []
validation_loss_history = []
# loop over epochs
for epoch in range(train_settings["epochs"]):
train_losses = []
# set in training mode
network.train()
for data in train_loader:
# to device (gpu/cpu)
x_train = data[0].to(net_config["device"])
if len(data) > 2:
x2_train = data[1].to(net_config["device"])
y_train = data[-1].to(net_config["device"])
# clear gradient of optimizer
optimizer.zero_grad()
# forward pass
if len(data) == 2:
yhat = network(x_train)
else:
yhat = network(x_train, x2_train)
# compute loss
loss = loss_fn(yhat, y_train)
# backward pass
loss.backward()
# record loss
train_losses.append(loss.item())
# update parameters
optimizer.step()
# record loss and update loss history
training_loss = np.mean(train_losses)
training_loss_history.append(training_loss)
# validation loss
with torch.no_grad():
val_losses = []
# set in eval mode
network.eval()
for data in val_loader:
# to device (gpu/cpu)
x_val = data[0].to(net_config["device"])
if len(data) > 2:
x2_val = data[1].to(net_config["device"])
y_val = data[-1].to(net_config["device"])
# forward pass
if len(data) == 2:
yhat = network(x_val)
else:
yhat = network(x_val, x2_val)
# compute loss
val_loss = loss_fn(yhat, y_val)
# record loss
val_losses.append(val_loss.item())
# record loss and update loss history
            validation_loss = np.mean(val_losses)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import logging
import contextlib
import os
import datetime
import json
import numpy as np
import cv2
import math
import torch
from PIL import Image
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode, PolygonMasks, Boxes
from fvcore.common.file_io import PathManager, file_lock
from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
"""
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
"""
DOTA_CATEGORIES = [
{"color": [220, 20, 60], "isthing": 1, "id": 0, "name": "small-vehicle"},
{"color": [119, 11, 32], "isthing": 1, "id": 1, "name": 'large-vehicle'},
{"color": [0, 0, 142], "isthing": 1, "id": 2, "name": 'ship'},
{"color": [0, 0, 230], "isthing": 1, "id": 3, "name": 'container-crane'},
{"color": [106, 0, 228], "isthing": 1, "id": 4, "name": 'storage-tank'},
{"color": [0, 60, 100], "isthing": 1, "id": 5, "name": 'plane'},
{"color": [0, 80, 100], "isthing": 1, "id": 6, "name": 'tennis-court'},
{"color": [0, 0, 70], "isthing": 1, "id": 7, "name": 'harbor'},
{"color": [0, 0, 192], "isthing": 1, "id": 8, "name": 'bridge'},
{"color": [250, 170, 30], "isthing": 1, "id": 9, "name": 'baseball-diamond'},
{"color": [100, 170, 30], "isthing": 1, "id": 10, "name": 'roundabout'},
{"color": [220, 220, 0], "isthing": 1, "id": 11, "name": 'basketball-court'},
{"color": [175, 116, 175], "isthing": 1, "id": 12, "name": 'swimming-pool'},
{"color": [250, 0, 30], "isthing": 1, "id": 13, "name": 'soccer-ball-field'},
{"color": [165, 42, 42], "isthing": 1, "id": 14, "name": 'ground-track-field'},
{"color": [0, 82, 0], "isthing": 1, "id": 15, "name": "helicopter"},
]
class DotaAPI:
def __init__(self, json_file):
with open(json_file) as f:
data = json.load(f)
self.features = data['features']
@staticmethod
def cvt_dota_to_detectron(dota_bbox: list, patch_size: tuple) -> list:
""" Processes a coordinate array from a geojson into (cy, cx, height, width, theta) format
:param (list) coords: an array of shape (N, 8) with 4 corner points of boxes
:return: (numpy.ndarray) an array of shape (N, 5) with coordinates in proper format
"""
coord = np.asarray(dota_bbox)
        pts = np.reshape(coord, (-1, 5))
import numpy as np
from skimage.exposure import equalize_adapthist
import torch
from scipy.ndimage import gaussian_filter
import scipy
import random
import torch as th
from PIL import Image
from scipy.interpolate import RectBivariateSpline
class MyRandomImageContrastTransform(object):
def __init__(self, random_state=None, is_labelmap=[False, True], clip_limit_range=[0.01, 1], nbins=256,
enable=False):
"""
Perform Contrast Limited Adaptive Histogram Equalization (CLAHE)
. An algorithm for local contrast enhancement, that uses histograms computed over different tile regions of the
image. Local details can therefore be enhanced even in regions that are darker or lighter than most of the image.
Based on https://scikit-image.org/docs/dev/api/skimage.exposure.html?highlight=equalize_adapthist#skimage
.exposure.equalize_adapthist
Arguments
---------
"""
self.random_state = random_state
self.clip_limit_range = clip_limit_range # [0,1] The larger the value, the higher the contrast
self.nbins = nbins
self.is_label_map = is_labelmap
self.enable = enable
def __call__(self, *inputs):
if self.enable:
outputs = []
assert len(self.is_label_map) == len(
inputs), 'for each input, must clarify whether this is a label map or not.'
clip_limit = np.random.uniform(low=self.clip_limit_range[0], high=self.clip_limit_range[1])
for idx, _input in enumerate(inputs):
_input = _input.numpy()
flag = self.is_label_map[idx]
if flag:
result = _input
else:
print(_input.shape)
result = np.zeros(_input.shape, dtype=_input.dtype)
for i in range(_input.shape[0]):
temp = _input[i]
print('temp shape', temp.shape)
_input_min = temp.min()
_input_max = temp.max()
## clahe requires intensity to be Uint16
temp = intensity_normalise(temp, perc_threshold=(0., 100.0), min_val=0, max_val=255)
temp = np.int16(temp)
clahe_output = equalize_adapthist(temp, clip_limit=clip_limit, nbins=self.nbins)
## recover intensity range
result[i] = intensity_normalise(clahe_output, perc_threshold=(0., 100.0), min_val=_input_min,
max_val=_input_max)
tensorresult = torch.from_numpy(result).float()
outputs.append(tensorresult)
return outputs if idx >= 1 else outputs[0]
else:
outputs = inputs
return outputs
class RandomGamma(object):
'''
Perform Random Gamma Contrast Adjusting
support 2D and 3D
'''
def __init__(self, p_thresh=0.5, gamma_range=[0.8, 1.4], gamma_flag=True, preserve_range=True):
"""
Randomly do gamma to a torch tensor
Arguments
--------
:param gamma_flag: [bool] list of flags for gamma aug
"""
self.gamma_range = gamma_range
self.p_thresh = p_thresh
self.gamma_flag = gamma_flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
if np.random.rand() < self.p_thresh:
gamma = random.random() * (self.gamma_range[1] - self.gamma_range[0]) + self.gamma_range[0] #
# print ('gamma: %f',gamma)
for idx, _input in enumerate(inputs):
assert inputs[0].size() == _input.size()
if (self.gamma_flag[idx]):
assert gamma > 0
if self.preserve_range:
self.c_min = _input.min()
self.c_max = _input.max()
_input = _input ** (1.0 / gamma)
if self.preserve_range:
_input[_input < self.c_min] = self.c_min
_input[_input > self.c_max] = self.c_max
outputs.append(_input)
else:
idx = len(inputs)
outputs = inputs
return outputs if idx >= 1 else outputs[0]
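
def _example_random_gamma():
    """Hedged usage sketch (added; not in the original file): apply RandomGamma
    to an invented image tensor and its label map; the gamma_flag list keeps
    the label map untouched so only intensities are transformed."""
    image = torch.rand(1, 64, 64)
    label = torch.zeros(1, 64, 64)
    aug = RandomGamma(p_thresh=1.0, gamma_range=[0.8, 1.4], gamma_flag=[True, False])
    return aug(image, label)
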
class RandomBrightnessFluctuation(object):
'''
Perform image contrast and brightness augmentation.
support 2D and 3D
'''
def __init__(self, p=0.5, contrast_range=[0.8, 1.2], brightness_range=[-0.1, 0.1], flag=True, preserve_range=True):
"""
Arguments
--------
:param flag: [bool] list of flags for aug
"""
self.contrast_range = contrast_range
self.brightness_range = brightness_range
self.p_thresh = p
self.flag = flag
self.preserve_range = preserve_range ## if preserve the range to be in [min,max]
def __call__(self, *inputs):
outputs = []
        if np.random.rand() < self.p_thresh:
from __future__ import division
import numpy as np
from data_tools import get_inputFileParameter
import matplotlib
matplotlib.use('PDF') # or Agg (for png), SVG, PS
import matplotlib.pyplot as plt
from data_tools import commentHdr
#--------------------------------------------------------------------------------------------
def uvel(DI, profName="uvel") :
mfile = DI['pdir']+"means_" + profName + ".dat"
sfile = DI['pdir']+"sig_" + profName + ".dat"
data = np.loadtxt(mfile, comments=commentHdr)
y = data[:,1:]
data = np.loadtxt(sfile, comments=commentHdr)
ys = data[:,1:]
npts, ntimes = np.shape(data)
ntimes = ntimes - 1
cLine = np.zeros(ntimes)
scLine = np.zeros(ntimes)
imid = int(npts/2) + 1
for it in range(ntimes) :
print("uvel: time %i of %i" %(it+1, ntimes))
cLine[it] = y[imid,it]
scLine[it] = ys[imid,it]
times = get_inputFileParameter(DI, ("dumpTimes",))
D = get_inputFileParameter(DI, ("initParams","djeti"))
times = np.array(times)
if len(times) != ntimes :
raise ValueError("uvel.py: wrong number of times in data file and in input file.")
data = np.vstack([times/D, cLine, scLine]).T
head = " x/D, centerline, rms_cL"
fname = DI['pdir']+ profName + "_cl.dat"
np.savetxt(fname, data, header=head, fmt="%15.8e ", comments=commentHdr)
#--------------------------------------------------------------------------------------------
def plot_uvel(DI, profName='uvel') :
fname = DI['pdir']+"uvel_cl.dat"
odt = np.loadtxt(fname, comments=commentHdr)
U0 = get_inputFileParameter(DI, ("initParams","vel_max"))
fit = 1.0/5.8*(odt[:,0]-4.0)
Exp = np.loadtxt("exp_u_cl.dat")
ua = get_inputFileParameter(DI, ("initParams","vel_min"))
matplotlib.rcParams.update({'font.size':20, 'figure.autolayout': True}) #, 'font.weight':'bold'})
fig, axL = plt.subplots()
#axL.plot(odt[:,0],(U0-ua)/(odt[:,1]-ua),'ko-')
axL.plot(odt[:,0],(U0-ua)/(odt[:,1]-ua),'k-')
axL.plot(Exp[:,0],Exp[:,1],'^')
axL.plot(odt[:,0],fit,'k--')
axL.set_ylabel(r'$v_0/v_{cL}$', fontsize=22)
axL.set_xlabel("y/D", fontsize=22)
axL.legend(("ODT", "EXP", "Fit"), loc='upper right', frameon=False, fontsize=16)
axL.set_ylim([0,25])
plt.savefig(DI['pdir']+"uvel_cl_" + DI['cn'].replace(".","o"))
#-----------------------------------------------
D = get_inputFileParameter(DI, ("initParams","djeti"))
    Exp = np.loadtxt("exp_u_rad.dat")
import gym
from gym.utils import seeding
import numpy as np
from gym import spaces
from os import path
import sys
def return_classes():
unwanted = ["EzPickle", "circleShape", "contactListener",
"edgeShape", "fixtureDef", "polygonShape",
"revoluteJointDef", "FrictionDetector", "ContactDetector", "Car"]
current_module = sys.modules[__name__]
class_names = []
for key in dir(current_module):
if key in unwanted: continue
if isinstance(getattr(current_module, key), type):
class_names.append(key)
return class_names
DEBUG = False
class Pendulum(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, reward=None):
self.max_speed = 8
self.max_torque = 2.
self.dt = .05
self.g = 10.0
self.m = 1.
self.l = 1.
self.reward_fn = reward
self.viewer = None
high = np.array([1., 1., self.max_speed], dtype=np.float32)
self.action_space = spaces.Box(
low=-self.max_torque,
high=self.max_torque, shape=(1,),
dtype=np.float32
)
self.observation_space = spaces.Box(
low=-high,
high=high,
dtype=np.float32
)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
th, thdot = self.state # th := theta
g = self.g
m = self.m
l = self.l
dt = self.dt
u = np.clip(u, -self.max_torque, self.max_torque)[0]
self.last_u = u # for rendering
costs = self.reward_fn.calculateReward(th, thdot, u)
newthdot = thdot + (-3 * g / (2 * l) * np.sin(th + np.pi) + 3. / (m * l ** 2) * u) * dt
newth = th + newthdot * dt
newthdot = np.clip(newthdot, -self.max_speed, self.max_speed)
if DEBUG: print("Inside step 1")
self.state = np.array([newth, newthdot])
return self._get_obs(), -costs, False, {}
def reset(self):
high = np.array([np.pi, 1])
print("Reset 1")
self.state = self.np_random.uniform(low=-high, high=high)
print("Reset 2")
self.last_u = None
print(self.state)
return self._get_obs()
def _get_obs(self):
theta, thetadot = self.state
print("Get obs 1")
return np.array([np.cos(theta), np.sin(theta), thetadot])
def render(self, mode='human'):
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.Viewer(500, 500)
self.viewer.set_bounds(-2.2, 2.2, -2.2, 2.2)
rod = rendering.make_capsule(1, .2)
rod.set_color(.8, .3, .3)
self.pole_transform = rendering.Transform()
rod.add_attr(self.pole_transform)
self.viewer.add_geom(rod)
axle = rendering.make_circle(.05)
axle.set_color(0, 0, 0)
self.viewer.add_geom(axle)
if DEBUG: print("Inside render 1")
try:
fname = path.join(path.dirname(__file__), "../assets/clockwise.png")
if DEBUG: print("Inside render 2")
self.img = rendering.Image(fname, 1., 1.)
except Exception as e:
print(e)
if DEBUG: print("Inside render 3")
self.imgtrans = rendering.Transform()
if DEBUG: print("Inside render 4")
self.img.add_attr(self.imgtrans)
if DEBUG: print("Inside render 5")
self.viewer.add_onetime(self.img)
if DEBUG: print("Inside render 6")
self.pole_transform.set_rotation(self.state[0] + np.pi / 2)
if DEBUG: print("Inside render 7")
if self.last_u:
if DEBUG: print("Inside render 8")
self.imgtrans.scale = (-self.last_u / 2, np.abs(self.last_u) / 2)
if DEBUG: print("Inside render 9")
return self.viewer.render(return_rgb_array=mode == 'rgb_array')
def close(self):
if self.viewer:
self.viewer.close()
self.viewer = None
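
def _example_pendulum_rollout(steps=10):
    """Hedged usage sketch (added; not in the original file): a short random
    rollout. The reward object must expose calculateReward(th, thdot, u); the
    quadratic stub below is invented for illustration."""
    class _QuadraticCost:
        def calculateReward(self, th, thdot, u):
            return th ** 2 + 0.1 * thdot ** 2 + 0.001 * u ** 2
    env = Pendulum(reward=_QuadraticCost())
    obs = env.reset()
    for _ in range(steps):
        obs, reward, done, info = env.step(env.action_space.sample())
    return obs
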
"""
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
permalink: https://perma.cc/6Z2N-PFWC
"""
import math
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
class MountainCarEnv(gym.Env):
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 30
}
def __init__(self, goal_velocity=0, reward=None):
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.goal_position = 0.5
self.goal_velocity = goal_velocity
self.force = 0.001
self.gravity = 0.0025
self.reward_fn = reward
self.low = np.array(
[self.min_position, -self.max_speed], dtype=np.float32
)
self.high = np.array(
[self.max_position, self.max_speed], dtype=np.float32
)
self.viewer = None
self.action_space = spaces.Discrete(3)
self.observation_space = spaces.Box(
self.low, self.high, dtype=np.float32
)
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action), "%r (%s) invalid" % (action, type(action))
position, velocity = self.state
velocity += (action - 1) * self.force + math.cos(3 * position) * (-self.gravity)
velocity = np.clip(velocity, -self.max_speed, self.max_speed)
position += velocity
position = np.clip(position, self.min_position, self.max_position)
if (position == self.min_position and velocity < 0):
velocity = 0
done = bool(
position >= self.goal_position and velocity >= self.goal_velocity
)
reward = self.reward_fn.calculateReward()
self.state = (position, velocity)
return np.array(self.state), reward, done, {}
def reset(self):
self.state = np.array([self.np_random.uniform(low=-0.6, high=-0.4), 0])
return np.array(self.state)
def _height(self, xs):
        return np.sin(3 * xs) * .45 + .55
import numpy as np
import pandas as pd
from keras.layers import Dense, CuDNNGRU
from keras.models import Sequential
from keras.utils import to_categorical
from keras_preprocessing.sequence import TimeseriesGenerator
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
window_size = 3
batch_size = 128
epochs_num = 32
split = 0.8
df = pd.read_csv('./data/processed/GloVe.csv')
print('Number of samples:', len(df))
X = df.loc[:, df.columns != 'Class'].values
y = to_categorical(df.loc[:, 'Class'])
X = np.concatenate((X[1:], y[:-1]), axis=1)
"""a module that houses TOV solvers in the "standard" formulation
"""
__author__ = "<NAME> (<EMAIL>)"
#-------------------------------------------------
import numpy as np
from scipy.integrate import odeint
from scipy.special import hyp2f1
from universality.utils.units import (G, c2, Msun)
#-------------------------------------------------
#DEFAULT_MAX_DR = 1e5 ### maximum step size allowed within the integrator (in standard units, which should be in cm)
DEFAULT_MAX_DR = 1e6
DEFAULT_MIN_DR = 1.0 ### the smallest step size we allow (in standard units, which should be cm)
DEFAULT_GUESS_FRAC = 0.1 ### how much of the way to the vanishing pressure we guess via Newton's method
DEFAULT_INITIAL_FRAC = 1e-3 ### the initial change in pressure we allow when setting the intial conditions
DEFAULT_RTOL = 1e-4
DEFAULT_MXSTEP = 10000
#------------------------
TWOPI = 2*np.pi
FOURPI = 2*TWOPI
Gc2 = G/c2
#-------------------------------------------------
### Standard formulation of the TOV equations
#-------------------------------------------------
### basic evolutionary equations
def dmdr(r, epsc2):
return FOURPI * r**2 * epsc2
def dmbdr(r, rho, m):
return dmdr(r, rho) * (1 - 2*Gc2*m/r)**-0.5
def dpc2dr(r, pc2, m, epsc2):
return - Gc2 * (epsc2 + pc2)*(m + FOURPI * r**3 * pc2)/(r * (r - 2*Gc2*m))
def detadr(r, pc2, m, eta, epsc2, cs2c2):
invf = (1. - 2.*Gc2*m/r)**-1
A = 2. * invf * (1. - 3.*Gc2*m/r - TWOPI*Gc2*r**2 * (epsc2 + 3.*pc2))
B = invf * (6. - FOURPI*Gc2*r**2 * (epsc2 + pc2)*(3. + 1./cs2c2))
return -1.*(eta*(eta - 1.) + A*eta - B)/r
def domegadr(r, pc2, m, omega, epsc2):
P = FOURPI * Gc2 * r**3 * (epsc2 + pc2)/ (r - 2.*Gc2*m)
return (P*(omega + 4.) - omega*(omega + 3.))/r
#-------------------------------------------------
# functions for values at the stellar surface
#-------------------------------------------------
def eta2lambda(r, m, eta): ### dimensionless tidal deformability
C = Gc2*m/r # compactness
fR = 1.-2.*C
F = hyp2f1(3., 5., 6., 2.*C) # a hypergeometric function
z = 2.*C
dFdz = (5./(2.*z**6.)) * (z*(z*(z*(3.*z*(5. + z) - 110.) + 150.) - 60.) / (z - 1.)**3 + 60.*np.log(1. - z))
RdFdr = -2.*C*dFdz # log derivative of hypergeometric function
k2el = 0.5*(eta - 2. - 4.*C/fR) / (RdFdr -F*(eta + 3. - 4.*C/fR)) # gravitoelectric quadrupole Love number
return (2./3.)*(k2el/C**5)
def omega2i(r, omega): ### moment of inertia
return (omega/(3. + omega)) * r**3/(2.*Gc2)
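
def _example_surface_observables():
    """Hedged sketch (added; not in the original module): convert invented
    surface values into observables for a roughly 1.4 Msun, 12 km star; eta
    and omega would normally come from integrating detadr and domegadr."""
    r = 12e5          # stellar radius in cm
    m = 1.4 * Msun    # gravitational mass (assumes Msun is in grams)
    eta, omega = 5.0, 1.0  # invented surface values of the perturbation functions
    return eta2lambda(r, m, eta), omega2i(r, omega)
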
#-------------------------------------------------
# initial conditions
#-------------------------------------------------
def initial_pc2(pc2i, frac):
return (1. - frac)*pc2i ### assume a constant slope over a small change in the pressure
def initial_r(pc2i, ec2i, frac):
return (frac*pc2i / ( G * (ec2i + pc2i) * (ec2i/3. + pc2i) * TWOPI ) )**0.5 ### solve for the radius that corresponds to that small change
def initial_m(r, ec2i):
return FOURPI * r**3 * ec2i / 3. # gravitational mass
def initial_mb(r, rhoi):
return FOURPI * r**3 * rhoi / 3. # gravitational mass
def initial_eta(r, pc2i, ec2i, cs2c2i):
return 2. + FOURPI * Gc2 * r**2 * (9.*pc2i + 13.*ec2i + 3.*(pc2i+ec2i)/cs2c2i)/21. # intial perturbation for dimensionless tidal deformability
def initial_omega(r, pc2i, ec2i):
return 16.*np.pi * Gc2 * r**2 * (pc2i + ec2i)/5. # initial frame-dgragging function
#-------------------------------------------------
# central loop that solves the TOV equations given a set of coupled ODEs
#-------------------------------------------------
def engine(
r,
vec,
eos,
dvecdr_func,
min_dr=DEFAULT_MIN_DR,
max_dr=DEFAULT_MAX_DR,
guess_frac=DEFAULT_GUESS_FRAC,
initial_frac=DEFAULT_INITIAL_FRAC,
rtol=DEFAULT_RTOL,
    mxstep=DEFAULT_MXSTEP,
):
"""integrate the TOV equations with central pressure "pc" and equation of state described by energy density "eps" and pressure "p"
expects eos = (pressure, energy_density)
"""
vec = np.array(vec, dtype=float)
while vec[0] > 0: ### continue until pressure vanishes
        vec0 = vec.copy() # store a copy of the current location as the old location
r0 = r
### estimate the radius at which this p will vanish via Newton's method
r = r0 + max(min_dr, min(max_dr, guess_frac * abs(vec[0]/dvecdr_func(vec, r, eos)[0])))
### integrate out until we hit that estimate
vec[:] = odeint(dvecdr_func, vec0, (r0, r), args=(eos,), rtol=rtol, hmax=max_dr, mxstep=mxstep)[-1,:] ### retain only the last point
### return to client, who will then interpolate to find the surface
### interpolate to find stellar surface
p = [vec0[0], vec[0]]
# radius
    r = np.interp(0, p, [r0, r])
import numpy as np
import iminuit as minuit
import time
import functools
import logging
from scipy.special import ndtri, erf
from collections import OrderedDict
from copy import deepcopy
# --- Helper functions ------------------------------------- #
def set_default(func = None, passed_kwargs = {}):
"""
Read in default keywords of the simulation and pass to function
"""
if func is None:
return functools.partial(set_default, passed_kwargs = passed_kwargs)
@functools.wraps(func)
def init(*args, **kwargs):
for k in passed_kwargs.keys():
kwargs.setdefault(k,passed_kwargs[k])
return func(*args, **kwargs)
return init
# ---------------------------------------------------------- #
# --- minuit defaults ------------------------------------- #
# The default tolerance is 0.1.
# Minimization will stop when the estimated vertical distance to the minimum (EDM)
# is less than 0.001*tolerance*UP (see SET ERR).
minuit_def = {
'verbosity': 0,
'int_steps': 1e-4,
'strategy': 2,
'tol': 1e-3,
'up': 1.,
'max_tol_increase': 3000.,
'tol_increase': 1000.,
'ncall': 5000,
'pedantic': True,
'precision': None,
'pinit': {'norm' : -10.,
'index': -3.,
'alphac': 1.,
'r': 17.},
'fix': {'norm' : False,
'index': False,
'alphac': False,
'r': False },
'limits': {'norm' : [-20,-5],
'index': [-5,5],
'alphac': [0.1,10.],
'r': [16.,18.]},
'islog': {'norm' : True,
'index': False,
'alphac': False,
'r': True},
}
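# Minimal demonstration (added for illustration; not in the original file):
# set_default fills any keyword the caller omits from minuit_def, while
# explicitly passed keywords win.
@set_default(passed_kwargs=minuit_def)
def _example_defaults(**kwargs):
    # _example_defaults() -> (0.001, 5000); _example_defaults(tol=1e-5) overrides tol
    return kwargs["tol"], kwargs["ncall"]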
# ---------------------------------------------------------- #
class FitCrab(object):
def __init__(self, crab_ssc, crab_data, fit_sync=False, fit_ic=False, dsys=0.):
self._parnames = None
self._par_islog = None
self._ssc = crab_ssc
self._data = crab_data
self._fit_sync = fit_sync
self._fit_ic = fit_ic
self._x = None
self._y = None
self._dy = None
self._y_theo = None
self._dsys = dsys
self._minimize_f = None
self._m = None
self._fitarg = None
self._n_pars = 0
# save the initial electron spectrum parameters
# and magnetic field for the IC fit
self._p0 = deepcopy(self._ssc.params_n_el)
self._p0['B'] = deepcopy(self._ssc.B)
pass
@property
def ssc(self):
return self._ssc
@property
def data(self):
return self._data
@property
def fit_sync(self):
return self._fit_sync
@fit_sync.setter
def fit_sync(self, fit_sync):
self._fit_sync = fit_sync
@property
def fit_ic(self):
return self._fit_ic
@fit_ic.setter
def fit_ic(self, fit_ic):
self._fit_ic = fit_ic
@property
def dsys(self):
return self._dsys
@property
def m(self):
return self._m
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def dy(self):
return self._dy
@property
def y_theo(self):
return self._y_theo
@property
def n_pars(self):
return self._n_pars
@dsys.setter
def dsys(self, dsys):
self._dsys= dsys
def init_values_crab_meyer(self,
islog_keys=('Nradio', 'gmin', 'gmax', 'gradio_min',
'gradio_max', 'Nwind', 'gwind_min', 'gwind_max', 'gwind_b', 'dust_norm'),
fix_keys=('dust_extension', 'gmin', 'gmax'),
exclude=('ic_dust', 'ic_sync', 'ic_cmb')
):
"""
Generate initial guess dictionaries needed for fitting
from current SSC model.
Tuned to the fit parameters of Meyer et al. const B-field model.
Returns
-------
dict with four sub dictionaries:
- pinit: initial guesses for parameters
- limits: limits for the parameters
- fix: whether parameters are fixed or not
- islog: whether parameters are fitted in log10 space
"""
pinit, limits, islog, fix = {}, {}, {}, {}
for d in [self._ssc.params_n_seed, self._ssc.params_n_el]:
for k, v in d.items():
if k in exclude:
continue
if k in islog_keys:
pinit[k] = np.log10(v)
limits[k] = [pinit[k] - 2., pinit[k] + 2.]
islog[k] = True
else:
pinit[k] = v
limits[k] = np.sort([pinit[k] * 0.3, pinit[k] * 3.])
islog[k] = False
if k in fix_keys:
fix[k] = True
else:
fix[k] = False
pinit['B'] = self._ssc.B
limits['B'] = [pinit['B'] * 0.1, pinit['B'] * 10.]
islog['B'] = False
fix['B'] = self._fit_sync
result = dict(pinit=pinit, islog=islog, fix=fix, limits=limits)
return result
def _fill_chisq(self, *args):
"""
likelihood function passed to iMinuit
"""
params = {}
for i, p in enumerate(self._parnames):
if self._par_islog[p]:
params[p] = np.power(10., args[i])
else:
params[p] = args[i]
return self.chisq(params)
def chisq(self, params):
"""Calculate the chi^2 sq"""
chi_sq = 0
# update the model parameters
# in the ssc module
for k, v in params.items():
if k in self._ssc.params_n_el.keys():
self._ssc.params_n_el[k] = v
elif k in self._ssc.params_n_seed.keys():
self._ssc.params_n_seed[k] = v
elif k == 'B':
self._ssc.B = v
# perform fit to synchrotron data only
if self._fit_sync and not self._fit_ic:
self._y_theo = self._x * (self._ssc.sync(self._x, g_steps=50) + self._ssc.grey_body(self._x))
# perform fit to IC data only
elif self._fit_ic and not self._fit_sync:
self._ssc.FSyncInterp = None # init new synchrotron interpolation upon each call
# rescale gamma factors to keep synchrotron part constant
self._ssc.params_n_el['gradio_min'] = self._p0['gradio_min'] * np.sqrt(self._p0['B']/ self._ssc.B)
self._ssc.params_n_el['gradio_max'] = self._p0['gradio_max'] * np.sqrt(self._p0['B']/ self._ssc.B)
# rescale normalization to keep synchrotron part constant
self._ssc.params_n_el['Nradio'] = self._p0['Nradio'] * \
(self._p0['B'] / self._ssc.B) ** \
((-self._ssc.params_n_el['Sradio'] + 1.)/2.)
# see Blumenthal & Gould (1970), Eq. 4.59
self._ssc.params_n_el['gwind_min'] = self._p0['gwind_min'] * np.sqrt(self._p0['B']/ self._ssc.B)
self._ssc.params_n_el['gwind_max'] = self._p0['gwind_max'] * np.sqrt(self._p0['B']/ self._ssc.B)
# divide since it's the inverse we are fitting
self._ssc.params_n_el['gwind_b'] = self._p0['gwind_b'] / np.sqrt(self._p0['B']/ self._ssc.B)
# rescale normalization to keep synchrotron part constant
# #ssc._ssc.params_n_el['Nwind'] = self._p0['Nwind'] * (self._p0['B']/ self._ssc.B) \
# ** ((-self._ssc.params_n_el['Swind']+ 1.)/2.)
# why like this and not as above???
self._ssc.params_n_el['Nwind'] = self._p0['Nwind'] * (self._p0['B']/ self._ssc.B) ** 0.5
self._y_theo = self._ssc.ic(self._x, g_steps=100, e_steps=50) * self._x
# perform fit over both IC and sync
else:
self._ssc.FSyncInterp = None # init new synchrotron interpolation upon each call
m_sync = self._x < 1e22
self._y_theo = np.zeros_like(self._x)
self._y_theo[m_sync] = self._x[m_sync] * (self._ssc.sync(self._x[m_sync], g_steps=50) +
self._ssc.grey_body(self._x[m_sync]))
self._y_theo[~m_sync] = self._x[~m_sync] * (self._ssc.sync(self._x[~m_sync], g_steps=50) +
self._ssc.ic(self._x[~m_sync], g_steps=100, e_steps=50))
chi_sq = np.sum((self._y_theo - self._y)**2.
/ (np.sqrt(self._dy ** 2. + self._dsys**2. * self._y**2.))**2.)
return chi_sq
@set_default(passed_kwargs = minuit_def)
def fill_fitarg(self, **kwargs):
"""
Helper function to fill the dictionary for minuit fitting
"""
# set the fit arguments
fitarg = {}
fitarg.update(kwargs['pinit'])
for k in kwargs['limits'].keys():
fitarg['limit_{0:s}'.format(k)] = kwargs['limits'][k]
fitarg['fix_{0:s}'.format(k)] = kwargs['fix'][k]
fitarg['error_{0:s}'.format(k)] = kwargs['pinit'][k] * kwargs['int_steps']
fitarg = OrderedDict(sorted(fitarg.items()))
# get the names of the parameters
self._parnames = kwargs['pinit'].keys()
self._par_islog = kwargs['islog']
return fitarg
@set_default(passed_kwargs=minuit_def)
def run_migrad(self, fitarg, **kwargs):
"""
Helper function to initialize migrad and run the fit.
Initial parameters are estimated with scipy fit.
"""
self._fitarg = fitarg
values, bounds = [],[]
for k in self._parnames:
values.append(fitarg[k])
bounds.append(fitarg['limit_{0:s}'.format(k)])
logging.info(self._parnames)
logging.info(values)
logging.info(self._fill_chisq(*values))
cmd_string = "lambda {0}: self.__fill_chisq({0})".format(
(", ".join(self._parnames), ", ".join(self._parnames)))
string_args = ", ".join(self._parnames)
global f # needs to be global for eval to find it
f = lambda *args: self._fill_chisq(*args)
cmd_string = "lambda %s: f(%s)" % (string_args, string_args)
logging.debug(cmd_string)
# work around so that the parameters get names for minuit
self._minimize_f = eval(cmd_string, globals(), locals())
self._m = minuit.Minuit(self._minimize_f,
print_level =kwargs['verbosity'],
errordef = kwargs['up'],
pedantic = kwargs['pedantic'],
**fitarg)
self._m.tol = kwargs['tol']
self._m.strategy = kwargs['strategy']
logging.debug("tol {0:.2e}, strategy: {1:n}".format(
self._m.tol, self._m.strategy))
self._m.migrad(ncall=kwargs['ncall']) #, precision = kwargs['precision'])
return
def __print_failed_fit(self):
"""print output if migrad failed"""
if not self._m.migrad_ok():
fmin = self._m.get_fmin()
logging.warning(
'*** migrad minimum not ok! Printing output of get_fmin'
)
logging.warning('{0:s}:\t{1}'.format('*** has_accurate_covar',
fmin.has_accurate_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_covariance',
fmin.has_covariance))
logging.warning('{0:s}:\t{1}'.format('*** has_made_posdef_covar',
fmin.has_made_posdef_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_posdef_covar',
fmin.has_posdef_covar))
logging.warning('{0:s}:\t{1}'.format('*** has_reached_call_limit',
fmin.has_reached_call_limit))
logging.warning('{0:s}:\t{1}'.format('*** has_valid_parameters',
fmin.has_valid_parameters))
logging.warning('{0:s}:\t{1}'.format('*** hesse_failed',
fmin.hesse_failed))
logging.warning('{0:s}:\t{1}'.format('*** is_above_max_edm',
fmin.is_above_max_edm))
logging.warning('{0:s}:\t{1}'.format('*** is_valid',
fmin.is_valid))
return
def __repeat_migrad(self, **kwargs):
"""Repeat fit if fit was above edm"""
fmin = self._m.get_fmin()
if not self._m.migrad_ok() and fmin['is_above_max_edm']:
logging.warning(
'Migrad did not converge, is above max edm. Increasing tol.'
)
tol = self._m.tol
self._m.tol *= self._m.edm / (self._m.tol * self._m.errordef) * kwargs['tol_increase']
logging.info('New tolerance : {0}'.format(self._m.tol))
if self._m.tol >= kwargs['max_tol_increase']:
logging.warning(
                'New tolerance too large for required precision'
)
else:
self._m.migrad(
ncall=kwargs['ncall'])#,
#precision = kwargs['precision']
#)
logging.info(
'Migrad status after second try: {0}'.format(
self._m.migrad_ok()
)
)
self._m.tol = tol
return
@set_default(passed_kwargs = minuit_def)
def fit(self, nu_min, nu_max, fit_sync=True, minos=0., exclude=(), **kwargs):
"""
Fit the SSC model
Parameters
----------
exclude: list
list with data set names that should be excluded
nu_min: float
minimum considered frequency of data points in Hz
nu_max: float
maximum considered frequency of data points in Hz
fit_sync: bool
only fit synchrotron part. If false, only fit IC part
kwargs
------
pinit: dict
initial guess for intrinsic spectral parameters
fix: dict
booleans for freezing parameters
bounds: dict
dict with list for each parameter with min and max value
Returns
-------
tuple with likelihood profile for distance of
gamma-ray emitting region
"""
self._fit_sync = fit_sync
self._x, self._y, self._dy = self._data.build_data_set_for_fitting(exclude=exclude,
nu_min=nu_min, nu_max=nu_max,
log=False, yunit='flux')
fitarg = self.fill_fitarg(**kwargs)
t1 = time.time()
self.run_migrad(fitarg, **kwargs)
try:
self._m.hesse()
logging.debug("Hesse matrix calculation finished")
except RuntimeError as e:
logging.warning(
"*** Hesse matrix calculation failed: {0}".format(e)
)
logging.debug(self._m.fval)
self.__repeat_migrad(**kwargs)
logging.debug(self._m.fval)
fmin = self._m.get_fmin()
if not fmin.hesse_failed:
try:
self._corr = self._m.np_matrix(correlation=True)
except:
self._corr = -1
logging.debug(self._m.values)
if self._m.migrad_ok():
if minos:
for k in self.m.values.keys():
if kwargs['fix'][k]:
continue
self.m.minos(k,minos)
logging.debug("Minos finished")
else:
self.__print_failed_fit()
logging.info('fit took: {0}s'.format(time.time() - t1))
        self._n_pars = 0
for k in self._m.values.keys():
if kwargs['fix'][k]:
err = np.nan
else:
err = self._m.errors[k]
                self._n_pars += 1
logging.info('best fit {0:s}: {1:.5e} +/- {2:.5e}'.format(k, self._m.values[k], err))
def write_best_fit(self):
"""
Return the current best-fit parameters as a dictionary
with which a new CrabSSC module can be initialized
"""
config = dict(params_n_el={}, params_n_seed={})
for k, v in self._m.values.items():
if self._par_islog[k]:
x = 10.**v
else:
x = v
if k in self._ssc.params_n_el.keys():
config['params_n_el'][k] = x
elif k in self._ssc.params_n_seed.keys():
config['params_n_seed'][k] = x
elif k == 'B':
config['B'] = x
for k in ['ic_sync', 'ic_cmb', 'ic_dust']:
config['params_n_seed'][k] = self._ssc.params_n_seed[k]
config['d'] = self._ssc.d
return config
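    # Hedged usage sketch (added for illustration; not in the original file):
    #   fitter = FitCrab(crab_ssc, crab_data, dsys=0.07)  # pre-built model/data objects
    #   kwargs = fitter.init_values_crab_meyer()
    #   fitter.fit(nu_min=1e7, nu_max=1e22, fit_sync=True, **kwargs)
    #   best = fitter.write_best_fit()
    # The frequency bounds are invented; unset fit options fall back to minuit_def.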
def llhscan(self, parname, bounds, steps, log = False):
"""
Perform a manual scan of the likelihood for one parameter
(inspired by mnprofile)
Parameters
----------
parname: str
parameter that is scanned
bounds: list or tuple
scan bounds for parameter
steps: int
number of scanning steps
        Options
        -------
log: bool
if true, use logarithmic scale
Returns
-------
tuple of 4 lists containing the scan values, likelihood values,
best fit values at each scanning step, migrad_ok status
"""
llh, pars, ok = [], [], []
if log:
            values = np.logspace(np.log10(bounds[0]), np.log10(bounds[1]), steps)
"""
Homework4.
Replace 'pass' by your implementation.
"""
import numpy as np
import helper
# Insert your package here
from sympy import *
from scipy.ndimage.filters import gaussian_filter
from scipy.optimize import leastsq, minimize
'''
Q2.1: Eight Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: F, the fundamental matrix
'''
def eightpoint(pts1, pts2, M):
# Replace pass by your implementation
T = np.eye(3) / M
T[2, 2] = 1;
pts1 = pts1.astype('float')/M
pts2 = pts2.astype('float')/M
A = np.vstack([
pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
pts2[:, 0],pts2[:, 1], np.ones(pts1.shape[0])
]).T
[U, S, V] = np.linalg.svd(A)
F = np.reshape(V[-1,:], (3,3))
F = helper.refineF(F, pts1, pts2)
F = T.T @ F @ T
return F
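
# Hedged usage sketch (added for illustration; not in the original file):
# calling convention for eightpoint with synthetic correspondences. Random
# points are not a geometrically valid configuration, so this demonstrates
# the interface rather than a meaningful fundamental matrix.
def _example_eightpoint(M=640):
    pts1 = np.random.rand(16, 2) * M
    pts2 = pts1 + np.random.randn(16, 2)  # invented near-identity motion
    return eightpoint(pts1, pts2, M)
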
'''
Q2.2: Seven Point Algorithm
Input: pts1, Nx2 Matrix
pts2, Nx2 Matrix
M, a scalar parameter computed as max (imwidth, imheight)
Output: Farray, a list of estimated fundamental matrix.
'''
def sevenpoint(pts1, pts2, M):
T = np.eye(3) / M
T[2, 2] = 1;
pts1 = pts1.astype('float')/M
pts2 = pts2.astype('float')/M
Fs = []
A = np.vstack([
pts1[:, 0]*pts2[:, 0],pts1[:, 0]*pts2[:, 1], pts1[:, 0],
pts1[:, 1]*pts2[:, 0],pts1[:, 1]*pts2[:, 1], pts1[:, 1],
pts2[:, 0],pts2[:, 1], np.ones(pts1.shape[0])
]).T
[U, S, V] = np.linalg.svd(A)
F1 = np.reshape(V[-1,:], (3,3))
F2 = np.reshape(V[-2,:], (3,3))
alpha = Symbol('alpha')
eqn = Matrix(F1 + alpha*F2).det()
solns = roots(eqn)
for i, sol in enumerate(solns):
if re(sol)==sol:
sol = float(sol)
F = F1 + sol*F2
F = helper.refineF(F, pts1, pts2)
Fs.append(T.T @ F @ T)
return Fs
'''
Q3.1: Compute the essential matrix E.
Input: F, fundamental matrix
K1, internal camera calibration matrix of camera 1
K2, internal camera calibration matrix of camera 2
Output: E, the essential matrix
'''
def essentialMatrix(F, K1, K2):
return K2.T @ F @ K1
'''
Q3.2: Triangulate a set of 2D coordinates in the image to a set of 3D points.
Input: C1, the 3x4 camera matrix
pts1, the Nx2 matrix with the 2D image coordinates per row
C2, the 3x4 camera matrix
pts2, the Nx2 matrix with the 2D image coordinates per row
Output: P, the Nx3 matrix with the corresponding 3D points per row
err, the reprojection error.
'''
def triangulate(C1, pts1, C2, pts2):
P = []
error = 0
for pt1, pt2 in zip(pts1, pts2):
x1, y1, x2, y2 = pt1[0], pt1[1], pt2[0], pt2[1]
A = np.vstack([(x1*C1[2, :]-C1[0, :]),
(y1*C1[2, :]-C1[1, :]),
(x2*C2[2, :]-C2[0, :]),
(y2*C2[2, :]-C2[1, :])])
[U, S, V] = np.linalg.svd(A)
w = V[-1,:]/V[-1,-1]
p1_reproj = C1 @ w
p2_reproj = C2 @ w
p1_reproj = p1_reproj/p1_reproj[-1]
p2_reproj = p2_reproj/p2_reproj[-1]
error += (np.linalg.norm(p1_reproj[:2]- pt1)**2 + np.linalg.norm(p2_reproj[:2]- pt2)**2)
P.append(w[:3])
P = np.vstack(P)
return P, error
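
# Hedged usage sketch (added for illustration; not in the original file):
# triangulating with two invented camera matrices C = K[R|t]; here K is the
# identity and the second camera is translated along x.
def _example_triangulate(pts1, pts2):
    C1 = np.hstack([np.eye(3), np.zeros((3, 1))])
    C2 = np.hstack([np.eye(3), np.array([[-1.0], [0.0], [0.0]])])
    return triangulate(C1, pts1, C2, pts2)
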
'''
Q4.1: 3D visualization of the temple images.
Input: im1, the first image
im2, the second image
F, the fundamental matrix
x1, x-coordinates of a pixel on im1
y1, y-coordinates of a pixel on im1
Output: x2, x-coordinates of the pixel on im2
y2, y-coordinates of the pixel on im2
'''
def epipolarCorrespondence(im1, im2, F, x1, y1):
sy, sx, _ = im2.shape
v = np.array([x1, y1, 1])
l = F.dot(v)
s = np.sqrt(l[0]**2+l[1]**2)
    if s == 0:
        raise ValueError('Zero line vector in displayEpipolar')
l = l/s
if l[0] != 0:
ye = sy-1;
ys = 0
Y = np.arange(np.ceil(min(ys,ye)), np.ceil(max(ys,ye)))
        X = np.round(-(l[1]*Y + l[2])/l[0])
"""CM3 algorithm for Checkers environment.
Same algorithm as alg_credit.py, except that Checkers global state is defined
by three parts (s_env, s^n, s^{-n}) instead of just (s^n, s^{-n})
"""
import numpy as np
import tensorflow.compat.v1 as tf
import sys
import networks
class Alg(object):
def __init__(self, experiment, dimensions, stage=1, n_agents=1,
tau=0.01, lr_V=0.001, lr_Q=0.001,
lr_actor=0.0001, gamma=0.99, use_Q_credit=1,
use_V=0, nn={}):
"""
Same as alg_credit. Checkers global state has two parts
Inputs:
experiment - string
dimensions - dictionary containing tensor dimensions
(h,w,c) for tensor
l for 1D vector
stage - 1: Q_global and actor, does not use Q_credit
2: Q_global, actor and Q_credit
tau - target variable update rate
lr_V, lr_Q, lr_actor - learning rates for optimizer
gamma - discount factor
use_Q_credit - if 1, activates Q_credit network for use in policy gradient
use_V - if 1, uses V_n(s) as the baseline in the policy gradient (this is an ablation)
nn : neural net architecture parameters
"""
self.experiment = experiment
if self.experiment == "checkers":
# Global state
self.rows_state = dimensions['rows_state']
self.columns_state = dimensions['columns_state']
self.channels_state = dimensions['channels_state']
self.l_state = n_agents * dimensions['l_state_one']
self.l_state_one_agent = dimensions['l_state_one']
self.l_state_other_agents = (n_agents-1) * dimensions['l_state_one']
# Agent observations
self.l_obs_others = dimensions['l_obs_others']
self.l_obs_self = dimensions['l_obs_self']
# Dimensions for image input
self.rows_obs = dimensions['rows_obs']
self.columns_obs = dimensions['columns_obs']
self.channels_obs = dimensions['channels_obs']
# Dimension of agent's observation of itself
self.l_action = dimensions['l_action']
self.l_goal = dimensions['l_goal']
self.n_agents = n_agents
self.tau = tau
self.lr_V = lr_V
self.lr_Q = lr_Q
self.lr_actor = lr_actor
self.gamma = gamma
self.use_V = use_V
self.use_Q_credit = use_Q_credit
self.nn = nn
self.agent_labels = np.eye(self.n_agents)
self.actions = np.eye(self.l_action)
# Initialize computational graph
self.create_networks(stage)
self.list_initialize_target_ops, self.list_update_target_ops = self.get_assign_target_ops(tf.trainable_variables())
# Use Q_global when n_agents == 1 (the choice is arbitrary,
# since both networks share the same Stage 1 architecture)
self.create_Q_global_train_op()
if self.n_agents > 1 and self.use_Q_credit:
self.list_initialize_credit_ops = self.get_assign_global_to_credit_ops()
self.create_Q_credit_train_op()
elif self.n_agents > 1 and self.use_V:
self.create_V_train_op()
self.create_policy_gradient_op()
# TF summaries
self.create_summary()
def create_networks(self, stage):
# Placeholders
self.state_env = tf.placeholder(tf.float32, [None, self.rows_state, self.columns_state, self.channels_state], 'state_env')
self.v_state_one_agent = tf.placeholder(tf.float32, [None, self.l_state_one_agent], 'v_state_one_agent')
self.v_state_m = tf.placeholder(tf.float32, [None, self.l_state_one_agent], 'v_state_m')
self.v_state_other_agents = tf.placeholder(tf.float32, [None, self.l_state_other_agents], 'v_state_other_agents')
self.v_goal = tf.placeholder(tf.float32, [None, self.l_goal], 'v_goal')
self.v_goal_others = tf.placeholder(tf.float32, [None, (self.n_agents-1)*self.l_goal], 'v_goal_others')
self.v_labels = tf.placeholder(tf.float32, [None, self.n_agents])
self.action_others = tf.placeholder(tf.float32, [None, self.n_agents-1, self.l_action], 'action_others')
self.action_one = tf.placeholder(tf.float32, [None, self.l_action], 'action_one')
if self.experiment == "checkers":
self.obs_self_t = tf.placeholder(tf.float32, [None, self.rows_obs, self.columns_obs, self.channels_obs], 'obs_self_t')
self.obs_self_v = tf.placeholder(tf.float32, [None, self.l_obs_self], 'obs_self_v')
self.obs_others = tf.placeholder(tf.float32, [None, self.l_obs_others], 'obs_others')
self.actions_prev = tf.placeholder(tf.float32, [None, self.l_action], 'action_prev')
# Actor network
self.epsilon = tf.placeholder(tf.float32, None, 'epsilon')
with tf.variable_scope("Policy_main"):
if self.experiment == 'checkers':
probs = networks.actor_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action, stage=stage)
# probs is normalized
self.probs = (1-self.epsilon) * probs + self.epsilon/float(self.l_action)
self.action_samples = tf.multinomial(tf.log(self.probs), 1)
with tf.variable_scope("Policy_target"):
if self.experiment == 'checkers':
probs_target = networks.actor_checkers(self.actions_prev, self.obs_self_t, self.obs_self_v, self.obs_others, self.v_goal, f1=self.nn['A_conv_f'], k1=self.nn['A_conv_k'], n_h1=self.nn['A_n_h1'], n_h2=self.nn['A_n_h2'], n_actions=self.l_action, stage=stage)
self.action_samples_target = tf.multinomial(tf.log( (1-self.epsilon)*probs_target + self.epsilon/float(self.l_action) ), 1)
# Q_n(s,\abf)
with tf.variable_scope("Q_global_main"):
if self.experiment == 'checkers':
self.Q_global = networks.Q_global_checkers(self.state_env, self.v_state_one_agent, self.v_goal, self.action_one, self.v_state_other_agents, self.action_others, self.obs_self_t, self.obs_self_v, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
with tf.variable_scope("Q_global_target"):
if self.experiment == 'checkers':
self.Q_global_target = networks.Q_global_checkers(self.state_env, self.v_state_one_agent, self.v_goal, self.action_one, self.v_state_other_agents, self.action_others, self.obs_self_t, self.obs_self_v, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
# Q_n(s,a^m)
if self.n_agents > 1 and self.use_Q_credit:
with tf.variable_scope("Q_credit_main"):
if self.experiment == 'checkers':
self.Q_credit = networks.Q_credit_checkers(self.state_env, self.v_state_one_agent, self.v_goal, self.action_one, self.v_state_m, self.v_state_other_agents, self.obs_self_t, self.obs_self_v, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
with tf.variable_scope("Q_credit_target"):
if self.experiment == 'checkers':
self.Q_credit_target = networks.Q_credit_checkers(self.state_env, self.v_state_one_agent, self.v_goal, self.action_one, self.v_state_m, self.v_state_other_agents, self.obs_self_t, self.obs_self_v, f1=self.nn['Q_conv_f'], k1=self.nn['Q_conv_k'], n_h1_1=self.nn['Q_n_h1_1'], n_h1_2=self.nn['Q_n_h1_2'], n_h2=self.nn['Q_n_h2'], stage=stage)
# V(s,g^n), used as ablation at stage 2
if self.n_agents > 1 and self.use_V:
with tf.variable_scope("V_main"):
self.V = networks.V_checkers_ablation(self.state_env, self.v_state_one_agent, self.v_goal, self.v_state_other_agents)
with tf.variable_scope("V_target"):
self.V_target = networks.V_checkers_ablation(self.state_env, self.v_state_one_agent, self.v_goal, self.v_state_other_agents)
def get_assign_target_ops(self, list_vars):
# ops for equating main and target
list_initial_ops = []
# ops for slow update of target toward main
list_update_ops = []
list_Q_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_global_main')
map_name_Q_main = {v.name.split('main')[1] : v for v in list_Q_main}
list_Q_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_global_target')
map_name_Q_target = {v.name.split('target')[1] : v for v in list_Q_target}
if len(list_Q_main) != len(list_Q_target):
raise ValueError("get_initialize_target_ops : lengths of Q_main and Q_target do not match")
for name, var in map_name_Q_main.items():
# create op that assigns value of main variable to target variable of the same name
list_initial_ops.append( map_name_Q_target[name].assign(var) )
for name, var in map_name_Q_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Q_target[name].assign( self.tau*var + (1-self.tau)*map_name_Q_target[name] ) )
# For policy
list_P_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Policy_main')
map_name_P_main = {v.name.split('main')[1] : v for v in list_P_main}
list_P_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Policy_target')
map_name_P_target = {v.name.split('target')[1] : v for v in list_P_target}
if len(list_P_main) != len(list_P_target):
raise ValueError("get_initialize_target_ops : lengths of P_main and P_target do not match")
for name, var in map_name_P_main.items():
# op that assigns value of main variable to target variable
list_initial_ops.append( map_name_P_target[name].assign(var) )
for name, var in map_name_P_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_P_target[name].assign( self.tau*var + (1-self.tau)*map_name_P_target[name] ) )
# Repeat for Q_credit
if self.n_agents > 1 and self.use_Q_credit:
list_Qc_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_credit_main')
map_name_Qc_main = {v.name.split('main')[1] : v for v in list_Qc_main}
list_Qc_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_credit_target')
map_name_Qc_target = {v.name.split('target')[1] : v for v in list_Qc_target}
if len(list_Qc_main) != len(list_Qc_target):
raise ValueError("get_initialize_target_ops : lengths of Q_credit_main and Q_credit_target do not match")
for name, var in map_name_Qc_main.items():
# create op that assigns value of main variable to target variable of the same name
list_initial_ops.append( map_name_Qc_target[name].assign(var) )
for name, var in map_name_Qc_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_Qc_target[name].assign( self.tau*var + (1-self.tau)*map_name_Qc_target[name] ) )
if self.n_agents > 1 and self.use_V:
list_V_main = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_main')
map_name_V_main = {v.name.split('main')[1] : v for v in list_V_main}
list_V_target = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_target')
map_name_V_target = {v.name.split('target')[1] : v for v in list_V_target}
if len(list_V_main) != len(list_V_target):
raise ValueError("get_initialize_target_ops : lengths of V_main and V_target do not match")
for name, var in map_name_V_main.items():
# create op that assigns value of main variable to target variable of the same name
list_initial_ops.append( map_name_V_target[name].assign(var) )
for name, var in map_name_V_main.items():
# incremental update of target towards main
list_update_ops.append( map_name_V_target[name].assign( self.tau*var + (1-self.tau)*map_name_V_target[name] ) )
return list_initial_ops, list_update_ops
def get_assign_global_to_credit_ops(self):
"""Get ops that assign value of Q_global network to Q_credit network.
To be used at the start of Stage 2, after Q_global network has been initialized
with the Stage 1 weights
"""
list_update_ops = []
list_Q_global = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_global_main')
map_name_Q_global = {v.name.split('main')[1] : v for v in list_Q_global}
list_Q_credit = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_credit_main')
map_name_Q_credit = {v.name.split('main')[1] : v for v in list_Q_credit}
if len(list_Q_global) != len(list_Q_credit):
raise ValueError("get_assign_global_to_credit_ops : lengths of Q_global and Q_credit do not match")
for name, var in map_name_Q_global.items():
list_split = name.split('/')
if ('stage-2' not in list_split):
# op that assigns value of Q_global variable to Q_credit variable of the same name
list_update_ops.append( map_name_Q_credit[name].assign(var) )
return list_update_ops
def run_actor(self, actions_prev, obs_others, obs_self_t, obs_self_v, goals, epsilon, sess):
"""Get actions for all agents as a batch
Args:
actions_prev: list of integers
obs_others: list of vector or tensor describing other agents
obs_self_t: list of observation grid centered on self
obs_self_v: list of 1D observation vectors
goals: [n_agents, n_lanes]
epsilon: float
sess: TF session
"""
# convert to batch
obs_others = np.array(obs_others)
obs_self_t = np.array(obs_self_t)
obs_self_v = np.array(obs_self_v)
actions_prev_1hot = np.zeros([self.n_agents, self.l_action])
actions_prev_1hot[np.arange(self.n_agents), actions_prev] = 1
feed = {self.obs_others:obs_others, self.obs_self_t:obs_self_t,
self.obs_self_v:obs_self_v, self.v_goal:goals,
self.actions_prev: actions_prev_1hot, self.epsilon:epsilon}
action_samples_res = sess.run(self.action_samples, feed_dict=feed)
return np.reshape(action_samples_res, action_samples_res.shape[0])
def run_actor_target(self, actions_prev, obs_others, obs_self_t, obs_self_v, goals, epsilon, sess):
"""Gets actions from the slowly-updating policy."""
feed = {self.obs_others:obs_others, self.obs_self_t:obs_self_t,
self.obs_self_v:obs_self_v, self.v_goal:goals,
self.actions_prev: actions_prev, self.epsilon:epsilon}
action_samples_res = sess.run(self.action_samples_target, feed_dict=feed)
return np.reshape(action_samples_res, action_samples_res.shape[0])
def create_Q_credit_train_op(self):
# TD target calculated in train_step() using V_target
self.Q_credit_td_target= tf.placeholder(tf.float32, [None], 'Q_credit_td_target')
# Q_credit network has only one output
self.loss_Q_credit = tf.reduce_mean(tf.square(self.Q_credit_td_target - tf.squeeze(self.Q_credit)))
self.Q_credit_opt = tf.train.AdamOptimizer(self.lr_Q)
self.Q_credit_op = self.Q_credit_opt.minimize(self.loss_Q_credit)
def create_Q_global_train_op(self):
# TD target calculated in train_step() using Q_target
self.Q_global_td_target = tf.placeholder(tf.float32, [None], 'Q_global_td_target')
# Q_global network has only one output
self.loss_Q_global = tf.reduce_mean(tf.square(self.Q_global_td_target - tf.squeeze(self.Q_global)))
self.Q_global_opt = tf.train.AdamOptimizer(self.lr_Q)
self.Q_global_op = self.Q_global_opt.minimize(self.loss_Q_global)
def create_V_train_op(self):
self.V_td_target = tf.placeholder(tf.float32, [None], 'V_td_target')
self.loss_V = tf.reduce_mean(tf.square(self.V_td_target - tf.squeeze(self.V)))
self.V_opt = tf.train.AdamOptimizer(self.lr_V)
self.V_op = self.V_opt.minimize(self.loss_V)
def create_policy_gradient_op(self):
# batch of 1-hot action vectors
self.action_taken = tf.placeholder(tf.float32, [None, self.l_action], 'action_taken')
# self.probs has shape [batch, l_action]
log_probs = tf.log(tf.reduce_sum(tf.multiply(self.probs, self.action_taken), axis=1)+1e-15)
# if stage==2, must be [batch*n_agents, n_agents], consecutive <n_agents> rows are same
self.Q_actual = tf.placeholder(tf.float32, [None, self.n_agents], 'Q_actual')
# First dim is n_agents^2 * batch;
# If n_agents=1, first dim is batch; second dim is l_action
# For <n_agents> > 1, the rows are Q_1(s,a^1),...,Q_N(s,a^1),Q_1(s,a^2),...,Q_N(s,a^2), ... , Q_1(s,a^N),...,Q_N(s,a^N)
# where each row contains <l_action> Q-values, one for each possible action
# Note that all Q networks have only one output, and the <l_action> dimension is due to evaluating all possible actions before feeding in feed_dict
self.Q_cf = tf.placeholder(tf.float32, [None, self.l_action], 'Q_cf')
# First dim is n_agents^2 * batch;
# If n_agents=1, first dim is batch; second dim is l_action
self.probs_evaluated = tf.placeholder(tf.float32, [None, self.l_action])
if self.n_agents == 1:
advantage2 = tf.reduce_sum(tf.multiply(self.Q_cf, self.probs_evaluated), axis=1)
advantage = tf.subtract(tf.squeeze(self.Q_actual), advantage2)
self.policy_loss = -tf.reduce_mean( tf.multiply(log_probs, advantage) )
else:
if self.use_Q_credit:
# For the general case of any number of agents (covers n_agents==1)
pi_mult_Q = tf.multiply( self.probs_evaluated, self.Q_cf )
# [batch*n_agents, n_agents]
counterfactuals = tf.reshape( tf.reduce_sum(pi_mult_Q, axis=1), [-1,self.n_agents] )
# [batch*n_agents, n_agents], each block of nxn is matrix A_{mn} at one time step
advantages = tf.subtract(self.Q_actual, counterfactuals)
# [batch, n_agents]
sum_n_A = tf.reshape( tf.reduce_sum(advantages, axis=1), [-1, self.n_agents] )
elif self.use_V:
self.V_evaluated = tf.placeholder(tf.float32, [None, self.n_agents], 'V_evaluated')
advantages = tf.subtract(self.Q_actual, self.V_evaluated)
sum_n_A = tf.reshape( tf.reduce_sum(advantages, axis=1), [-1, self.n_agents] )
else:
sum_n_A = tf.reshape( tf.reduce_sum(self.Q_actual, axis=1), [-1, self.n_agents] )
log_probs_shaped = tf.reshape(log_probs, [-1, self.n_agents]) # [batch, n_agents]
m_terms = tf.multiply( log_probs_shaped, sum_n_A ) # [batch, n_agents]
self.policy_loss = -tf.reduce_mean( tf.reduce_sum(m_terms, axis=1) )
self.policy_opt = tf.train.AdamOptimizer(self.lr_actor)
self.policy_op = self.policy_opt.minimize(self.policy_loss)
def create_summary(self):
summaries_Q_global = [tf.summary.scalar('loss_Q_global', self.loss_Q_global)]
Q_global_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_global_main')
for v in Q_global_variables:
summaries_Q_global.append( tf.summary.histogram(v.op.name, v) )
grads = self.Q_global_opt.compute_gradients(self.loss_Q_global, Q_global_variables)
for grad, var in grads:
if grad is not None:
summaries_Q_global.append( tf.summary.histogram(var.op.name+'/gradient', grad) )
self.summary_op_Q_global = tf.summary.merge(summaries_Q_global)
if self.n_agents > 1 and self.use_Q_credit:
summaries_Q_credit = [tf.summary.scalar('loss_Q_credit', self.loss_Q_credit)]
Q_credit_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Q_credit_main')
for v in Q_credit_variables:
summaries_Q_credit.append( tf.summary.histogram(v.op.name, v) )
grads = self.Q_credit_opt.compute_gradients(self.loss_Q_credit, Q_credit_variables)
for grad, var in grads:
if grad is not None:
summaries_Q_credit.append( tf.summary.histogram(var.op.name+'/gradient', grad) )
self.summary_op_Q_credit = tf.summary.merge(summaries_Q_credit)
elif self.n_agents > 1 and self.use_V:
summaries_V = [tf.summary.scalar('loss_V', self.loss_V)]
V_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'V_main')
for v in V_variables:
summaries_V.append( tf.summary.histogram(v.op.name, v) )
grads = self.V_opt.compute_gradients(self.loss_V, V_variables)
for grad, var in grads:
if grad is not None:
summaries_V.append( tf.summary.histogram(var.op.name+'/gradient', grad) )
self.summary_op_V = tf.summary.merge(summaries_V)
summaries_policy = [tf.summary.scalar('policy_loss', self.policy_loss)]
policy_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'Policy_main')
for v in policy_variables:
summaries_policy.append(tf.summary.histogram(v.op.name, v))
grads = self.policy_opt.compute_gradients(self.policy_loss, policy_variables)
for grad, var in grads:
if grad is not None:
summaries_policy.append( tf.summary.histogram(var.op.name+'/gradient', grad) )
self.summary_op_policy = tf.summary.merge(summaries_policy)
def process_actions(self, n_steps, actions):
"""Reformats actions for better matrix computation.
Args:
n_steps: int
actions: np.array shape [time, agents], values are action indices
Returns
1. actions_1hot [n_agents * n_steps, l_action] : each row is
action taken by one agent at one time step
2. actions_others_1hot [n_agents * n_steps, n_agents-1, l_action] :
each row is for one agent at one time step, containing all
(n-1) other agents' actions
"""
# Each row of actions is one time step,
# row contains action indices for all agents
# Convert to [time, agents, l_action]
# so each agent gets its own 1-hot row vector
actions_1hot = np.zeros([n_steps, self.n_agents, self.l_action], dtype=int)
grid = np.indices((n_steps, self.n_agents))
actions_1hot[grid[0], grid[1], actions] = 1
# Convert to format [time*agents, agents-1, l_action]
# so that the set of <n_agent> actions at each time step
# is duplicated <n_agent> times, and each duplicate
# now contains all <n_agent>-1 actions representing
        # the OTHER agents' actions
list_to_interleave = []
for n in range(self.n_agents):
# extract all actions except agent n's action
list_to_interleave.append( actions_1hot[:, np.arange(self.n_agents)!=n, :] )
# interleave
        actions_others_1hot = np.zeros([self.n_agents*n_steps, self.n_agents-1, self.l_action])
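        # -- The source is truncated here; the following completion is a sketch --
        # -- inferred from the comments above, not verbatim from the original.  --
        # Fill agent n's rows at positions n, n+n_agents, ... so that row
        # t*n_agents + n holds the other-agent actions seen by agent n at time t.
        for n in range(self.n_agents):
            actions_others_1hot[n::self.n_agents, :, :] = list_to_interleave[n]
        actions_1hot = np.reshape(actions_1hot, [n_steps * self.n_agents, self.l_action])
        return actions_1hot, actions_others_1hot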
'''
@FileName : init_guess.py
@EditTime : 2021-12-13 13:37:50
@Author : <NAME>
@Email : <EMAIL>
@Description :
'''
from core.utils.recompute3D import recompute3D
import torch
import numpy as np
from core.utils.umeyama import umeyama
import cv2
from core.utils.visualization3d import Visualization
from core.affinity.affinity import ComposedAffinity
from core.assignment.associate import simple_associate
from scipy import signal
def joint_interpolation(poses, n_joints):
""" Interpolate poses to a complete motion, the empty frame is None """
start = 0
# If the first frame is None
if poses[start] is None:
poses[start] = np.zeros((n_joints, 3))
for n, joint in enumerate(poses):
if joint is not None:
if n >= len(poses)-1:
break
if poses[start+1] is not None:
start += 1
if n != start:
j1 = poses[start]
start_t = start
det = (joint - j1) / (n - start_t)
for i in range(n - start_t):
poses[start] = j1 + det * i
start += 1
t = poses[start]
# If the last frame is None
while(start<n+1):
poses[start] = t
start += 1
return poses
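# Rough illustration (hypothetical poses): given poses = [None, p1, None, None, p4, None],
# the leading None becomes zeros, frames 2-3 are linearly interpolated between p1 and p4,
# and the trailing None is filled by holding the last available pose.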
def rec_3D_joints(keypoints, extris, intris, idx, filter_joints_idx, first_frame=False):
keypoints = np.array(keypoints, dtype=np.float32)
keypoints = keypoints[:,:,idx,filter_joints_idx]
n_views, n_frames, n_joints = keypoints.shape[:3]
joints = []
for f in range(n_frames):
if first_frame and f > 0:
break
# Filter out unreliable detection
pack = [[keypoints[v][f], extris[v], intris[v]] for v in range(n_views) if keypoints[v][f][:,2].max() > 0.2]
if len(pack) < 2: # Do not process single view case
joints.append(None)
continue
keps = np.array([p[0] for p in pack])
cam_extris = np.array([p[1] for p in pack])
cam_intris = np.array([p[2] for p in pack])
rec_joints3d = recompute3D(cam_extris, cam_intris, keps.copy())
joints.append(rec_joints3d)
# Interpolation
joints = joint_interpolation(joints, n_joints)
return joints
def physics_geometry_filter(keypoints, extris, intris, frames_seq, flags, img_paths, dataset_obj, filter_joints_idx):
"""
Filter out the noisy detection and recompute 3D joints using the filtered keypoints
"""
# Calculate the joints in first frame
last_js = []
for idx in range(dataset_obj.num_people):
rec_joints3d = rec_3D_joints(keypoints, extris, intris, idx, filter_joints_idx, first_frame=True)
last_js.append(rec_joints3d[0])
joints = []
affinity_model = ComposedAffinity(cameras=[extris, intris])
n_views = len(extris)
n_people = dataset_obj.num_people
n_joints = len(filter_joints_idx)
total_n_joints = dataset_obj.num_joints
Pall = np.array([intri @ extri[:3] for extri, intri in zip(extris, intris)])
# Container to save filtered results
filterd_keypoints = [[[np.zeros((total_n_joints,3)) for n in range(n_people)] for f in range(frames_seq)] for v in range(n_views)]
last_2d = [[keypoints[v][0][idx] for v in range(n_views)] for idx in range(n_people)]
total_joints = []
for i in range(frames_seq):
keyps = [keypoints[v][i] for v in range(n_views)]
joint = []
for idx, last_j in enumerate(last_js):
# Filter
affinity, dimGroups = affinity_model(keyps, None, last_2d[idx], last_j, images=img_paths)
keyps, output = simple_associate(keyps, affinity, dimGroups, Pall, idx)
# Recompute 3D joints from the filtered keypoints and the initial cameras
pack = [[k, extris[i], intris[i]] for i, k in enumerate(output) if k is not None]
if len(pack) < 2: # do not process single view case
joint.append(None)
continue
keps = np.array([p[0][filter_joints_idx] for p in pack])
            cam_extris = np.array([p[1] for p in pack])
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
from glob import glob
"""Prevent error while using GPU"""
physical_devices = tf.config.list_physical_devices("GPU")
tf.config.experimental.set_memory_growth(physical_devices[0], True)
print('TensorFlow version:',tf.__version__)
IMG_PATH ='./pokemonGen7Sprite/'
# Reshape to (64,64,3)
IMG_DIM = (64,64,3)
BATCH_SIZE = 128
BATCH_PER_EPOCH = np.floor(1076 / BATCH_SIZE).astype(int)
EPOCHS = 10000
LEARNING_RATE_DISC = 0.00001
LEARNING_RATE_GEN = 0.0005
random_normal_dimensions = 128
KERNEL_SIZE = (4,4)
n_critic = 5
# Network layers of discriminator and generator
PARAMETERS_DISC = [64,128,256]
PARAMETERS_GEN = [256,128,64]
gen_activation = 'tanh'
AUTOTUNE = tf.data.AUTOTUNE
data_augmentation = tf.keras.Sequential([
layers.experimental.preprocessing.Rescaling(scale=1./127.5, offset=-1),
layers.experimental.preprocessing.RandomFlip("horizontal"),
layers.experimental.preprocessing.RandomRotation(0.2),
layers.experimental.preprocessing.RandomContrast(0.2),
])
# Use tf.data to build the image-loading pipeline
@tf.function
def make_dataset(path, batch_size):
def parse_image(filename):
image = tf.io.read_file(filename)
image = tf.image.decode_png(image, channels=IMG_DIM[-1])
image = tf.image.resize(image, [IMG_DIM[0], IMG_DIM[1]])
return image
def configure_for_performance(ds):
ds = ds.shuffle(buffer_size=100)
ds = ds.batch(BATCH_SIZE, drop_remainder=True)
ds = ds.map(data_augmentation, num_parallel_calls=AUTOTUNE)
ds = ds.repeat()
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
return ds
filenames = glob(path + '/*/*')
np.random.shuffle(filenames)
filenames_ds = tf.data.Dataset.from_tensor_slices(filenames)
ds = filenames_ds.map(parse_image, num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = configure_for_performance(ds)
return ds
train_ds = make_dataset(IMG_PATH,BATCH_SIZE)
# clip model weights to a given hypercube
class ClipConstraint(tf.keras.constraints.Constraint):
# set clip value when initialized
def __init__(self, clip_value):
self.clip_value = clip_value
# clip model weights to hypercube
def __call__(self, weights):
return tf.keras.backend.clip(weights, -self.clip_value, self.clip_value)
# get the config
def get_config(self):
return {'clip_value': self.clip_value}
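# Usage sketch (mirrors the discriminator below): passing the constraint as
# kernel_constraint clamps every weight into [-0.01, 0.01] after each update, e.g.
#   layers.Conv2D(64, (4, 4), kernel_constraint=ClipConstraint(0.01))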
def discriminator():
model = tf.keras.Sequential()
activation = layers.LeakyReLU(0.2)
# weight initialization
init = tf.keras.initializers.RandomNormal(stddev=0.02)
# weight constraint
const = ClipConstraint(0.01)
firstLayer = True
for PARAMETER in PARAMETERS_DISC:
if firstLayer:
model.add(layers.Conv2D(PARAMETER,KERNEL_SIZE, strides=(2, 2),padding='same', kernel_initializer=init, kernel_constraint=const, input_shape=(64, 64, 3)))
model.add(activation)
firstLayer=False
else:
model.add(layers.Conv2D(PARAMETER,KERNEL_SIZE, strides=(2, 2),padding='same', kernel_initializer=init, kernel_constraint=const))
model.add(activation)
model.add(layers.Flatten())
model.add(layers.Dropout(0.2))
model.add(layers.Dense(1))
model.add(layers.Activation('linear'))
return model
discriminator = discriminator()
discriminator.summary()
def generator():
model = tf.keras.Sequential()
activation = layers.LeakyReLU(0.2)
# weight initialization
init = tf.keras.initializers.RandomNormal(stddev=0.02)
BOTTLE_DIM = IMG_DIM[0]//(2**len(PARAMETERS_GEN))
BOTTLENECK = (BOTTLE_DIM)**2*256
model.add(layers.Dense(BOTTLENECK,input_shape=(random_normal_dimensions,)))
model.add(activation)
model.add(layers.Reshape((BOTTLE_DIM, BOTTLE_DIM, 256)))
for PARAMETER in PARAMETERS_GEN:
model.add(layers.Conv2DTranspose(PARAMETER,KERNEL_SIZE, strides=(2, 2),padding='same', kernel_initializer=init))
model.add(activation)
model.add(layers.Conv2D(IMG_DIM[-1],(7,7),padding='same'))
model.add(layers.Activation(gen_activation))
return model
generator = generator()
generator.summary()
# wasserstein loss
def wasserstein_loss(y_true, y_pred):
return tf.keras.backend.mean(y_true * y_pred)
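# Sign convention used in training below: fake images are labelled +1 and real
# images -1, so minimizing mean(y_true * y_pred) pushes the critic score down on
# fakes and up on reals, i.e. the critic maximizes E[D(real)] - E[D(fake)].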
gan = tf.keras.models.Sequential([generator, discriminator])
discriminator.compile(loss=wasserstein_loss, optimizer=tf.keras.optimizers.RMSprop(learning_rate=LEARNING_RATE_DISC))
discriminator.trainable = False
gan.compile(loss=wasserstein_loss, optimizer=tf.keras.optimizers.RMSprop(learning_rate=LEARNING_RATE_GEN))
def plot_results(images, n_cols=None):
'''visualizes fake images'''
n_cols = n_cols or len(images)
n_rows = (len(images) - 1) // n_cols + 1
if images.shape[-1] == 1:
images = np.squeeze(images, axis=-1)
plt.figure(figsize=(n_cols, n_rows))
for index, image in enumerate(images):
plt.subplot(n_rows, n_cols, index + 1)
plt.imshow(((image+1) /2))
plt.axis("off")
train_dataset = iter(train_ds)
generator, discriminator = gan.layers
for epoch in range(EPOCHS):
print("Epoch {}/{}".format(epoch + 1, EPOCHS))
dis_loss_epoch = []
gen_loss_epoch = []
for _ in range(BATCH_PER_EPOCH):
real_images = next(train_dataset)
for _ in range(n_critic):
noise = tf.random.normal(shape=[BATCH_SIZE, random_normal_dimensions])
fake_images = generator(noise)
mixed_images = tf.concat([fake_images, real_images], axis=0)
# Create labels for the discriminator
discriminator_labels = tf.constant([[1.]] * BATCH_SIZE + [[-1.]] * BATCH_SIZE)
discriminator.trainable = True
dis_loss = discriminator.train_on_batch(mixed_images, discriminator_labels)
dis_loss_epoch.append(dis_loss)
real_images = next(train_dataset)
noise = tf.random.normal(shape=[BATCH_SIZE, random_normal_dimensions])
generator_labels = tf.constant([[-1.]] * BATCH_SIZE)
discriminator.trainable = False
gen_loss = gan.train_on_batch(noise, generator_labels)
gen_loss_epoch.append(gen_loss)
if epoch%5==0:
# Plot results
plot_results(fake_images[:16], 4)
plt.show()
    print('Discriminator loss=', round(np.mean(dis_loss_epoch), 5))
    print('Generator loss=', round(np.mean(gen_loss_epoch), 5))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 09:16:42 2022
@author: mbonnema
"""
import os
from netCDF4 import Dataset
import matplotlib.pyplot as plt
#import geopandas as geo
import datetime
import numpy as np
from datetime import timedelta
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import pandas as pd
import ee
ee.Initialize()
import shapely
import matplotlib.lines as mlines
import csv
from readCSV import readCSV
from FilterS1 import FilterS1
from FilterJRC import FilterJRC
from InterpS1 import InterpS1
from InterpJRC import InterpJRC
print('Preparing Data...')
dataDir = '../../Results/World_Ver3_CSV/'
print('\tReading data csv files...')
D,A,LE,WE,ND = readCSV(dataDir)
Ds1 = {}
As1 = {}
Dgsw = {}
Agsw = {}
#print(LE['1646'])
[Ds1, Dgsw] = map(lambda keys: {x: D[x] for x in keys}, [WE.keys(), ND.keys()])
[As1, Agsw] = map(lambda keys: {x: A[x] for x in keys}, [WE.keys(), ND.keys()])
print('\t\tComplete')
print('\tFiltering area data...')
Ds1,As1,WE,LE = FilterS1(Ds1,As1,WE,LE)
Dgsw,Agsw,ND = FilterJRC(Dgsw,Agsw,ND)
D = {}
A = {}
D.update(Ds1)
D.update(Dgsw)
A.update(As1)
A.update(Agsw)
print('\t\tComplete')
print('\tLoading Lake Database Fields...')
lakes = ee.FeatureCollection('users/matthewbonnema/HydroLAKES')
largeLakes = lakes.filter(ee.Filter.gte('Lake_area',1))
lakeID = largeLakes.aggregate_array('Hylak_id').getInfo()
lakeType = largeLakes.aggregate_array('Lake_type').getInfo()
lakeLat = largeLakes.aggregate_array('Pour_lat').getInfo()
lakeLon = largeLakes.aggregate_array('Pour_long').getInfo()
lakeArea = largeLakes.aggregate_array('Lake_area').getInfo()
print('\t\tComplete')
print('\tCompute Area Variations...')
Av = []
Avp = []
Am = []
A_database = []
Amin = []
Amax = []
lat = []
lon = []
Ltype = []
for key in D:
try:
a = A[key]
stda = np.std(a)
mina = np.nanmin(a)
maxa = np.nanmax(a)
vara = maxa - mina
meana = np.nanmean(a)
varap = vara/meana
ad = lakeArea[lakeID.index(int(key))]
index = lakeID.index(int(key))
        if np.isnan(mina) or np.isnan(maxa) or np.isnan(meana):
            continue  # assumed completion: skip lakes with missing statistics (source truncated here)
""" Misc utilities for the library
Authors: <NAME>, <NAME>, 2015
"""
import sys
import scipy.linalg as spl
import numpy as np
from scipy.stats import norm
from warnings import warn
import pandas as pd
py3 = sys.version_info[0] >= 3
_basestring = str if py3 else basestring  # used by the table checks below
def _check_list_length_match(list_1, list_2, var_name_1, var_name_2):
"""Check length match of two given lists to raise error if necessary"""
if len(list_1) != len(list_2):
raise ValueError(
'len(%s) %d does not match len(%s) %d'
% (str(var_name_1), len(list_1), str(var_name_2), len(list_2)))
def _check_and_load_tables(tables_, var_name):
"""Check tables can be loaded in DataFrame to raise error if necessary"""
tables = []
for table_idx, table in enumerate(tables_):
if isinstance(table, _basestring):
try:
loaded = pd.read_csv(table, index_col=0)
except:
raise ValueError('table path %s could not be loaded' % table)
tables.append(loaded)
elif isinstance(table, pd.DataFrame):
tables.append(table)
else:
raise TypeError('%s can only be a pandas DataFrames or a'
'string. A %s was provided at idx %d' %
(var_name, type(table), table_idx))
return tables
def _check_run_tables(run_imgs, tables_, tables_name):
"""Check fMRI runs and corresponding tables to raise error if necessary"""
if isinstance(tables_, (_basestring, pd.DataFrame)):
tables_ = [tables_]
_check_list_length_match(run_imgs, tables_, 'run_imgs', tables_name)
tables_ = _check_and_load_tables(tables_, tables_name)
return tables_
def z_score(pvalue):
""" Return the z-score corresponding to a given p-value.
"""
pvalue = np.minimum(np.maximum(pvalue, 1.e-300), 1. - 1.e-16)
return norm.isf(pvalue)
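# Example: z_score(0.05) ~= 1.6449, the one-sided upper 5% quantile of the
# standard normal; the clipping above keeps isf() finite for extreme p-values.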
def multiple_fast_inv(a):
"""Compute the inverse of a set of arrays.
Parameters
----------
a: array_like of shape (n_samples, n_dim, n_dim)
Set of square matrices to be inverted. A is changed in place.
Returns
-------
a: ndarray
yielding the inverse of the inputs
Raises
------
LinAlgError :
If `a` is singular.
ValueError :
If `a` is not square, or not 2-dimensional.
Notes
-----
This function is borrowed from scipy.linalg.inv,
but with some customizations for speed-up.
"""
if a.shape[1] != a.shape[2]:
raise ValueError('a must have shape (n_samples, n_dim, n_dim)')
from scipy.linalg import calc_lwork
from scipy.linalg.lapack import get_lapack_funcs
a1, n = a[0], a.shape[0]
getrf, getri = get_lapack_funcs(('getrf', 'getri'), (a1,))
for i in range(n):
if (getrf.module_name[:7] == 'clapack'
and getri.module_name[:7] != 'clapack'):
# ATLAS 3.2.1 has getrf but not getri.
lu, piv, info = getrf(np.transpose(a[i]), rowmajor=0,
overwrite_a=True)
a[i] = np.transpose(lu)
else:
a[i], piv, info = getrf(a[i], overwrite_a=True)
if info == 0:
if getri.module_name[:7] == 'flapack':
lwork = calc_lwork.getri(getri.prefix, a1.shape[0])
lwork = lwork[1]
# XXX: the following line fixes curious SEGFAULT when
# benchmarking 500x500 matrix inverse. This seems to
# be a bug in LAPACK ?getri routine because if lwork is
# minimal (when using lwork[0] instead of lwork[1]) then
# all tests pass. Further investigation is required if
# more such SEGFAULTs occur.
lwork = int(1.01 * lwork)
a[i], _ = getri(a[i], piv, lwork=lwork, overwrite_lu=1)
else: # clapack
a[i], _ = getri(a[i], piv, overwrite_lu=1)
else:
raise ValueError('Matrix LU decomposition failed')
return a
def multiple_mahalanobis(effect, covariance):
"""Returns the squared Mahalanobis distance for a given set of samples
Parameters
----------
effect: array of shape (n_features, n_samples),
Each column represents a vector to be evaluated
covariance: array of shape (n_features, n_features, n_samples),
Corresponding covariance models stacked along the last axis
Returns
-------
sqd: array of shape (n_samples,)
the squared distances (one per sample)
"""
# check size
if effect.ndim == 1:
effect = effect[:, np.newaxis]
if covariance.ndim == 2:
covariance = covariance[:, :, np.newaxis]
    if effect.shape[0] != covariance.shape[0]:
        raise ValueError('Inconsistent shape for effect and covariance')
    if covariance.shape[0] != covariance.shape[1]:
        raise ValueError('Inconsistent shape for covariance')
# transpose and make contuguous for the sake of speed
Xt, Kt = np.ascontiguousarray(effect.T), np.ascontiguousarray(covariance.T)
# compute the inverse of the covariances
Kt = multiple_fast_inv(Kt)
# derive the squared Mahalanobis distances
sqd = np.sum(np.sum(Xt[:, :, np.newaxis] * Xt[:, np.newaxis] * Kt, 1), 1)
return sqd
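# Sanity check (illustrative values): with identity covariances the squared
# Mahalanobis distance reduces to the squared Euclidean norm of each column:
#   effect = np.ones((3, 2))
#   covariance = np.repeat(np.eye(3)[:, :, np.newaxis], 2, axis=2)
#   multiple_mahalanobis(effect, covariance)  # -> array([3., 3.])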
def full_rank(X, cmax=1e15):
""" Computes the condition number of X and if it is larger than cmax,
returns a matrix with a condition number smaller than cmax.
Parameters
----------
X : array of shape (nrows, ncols)
input array
cmax : float, optional (default:1.e15),
tolerance for condition number
Returns
-------
X : array of shape (nrows, ncols)
output array
cond : float,
actual condition number
"""
U, s, V = spl.svd(X, full_matrices=False)
smax, smin = s.max(), s.min()
cond = smax / smin
if cond < cmax:
return X, cond
warn('Matrix is singular at working precision, regularizing...')
lda = (smax - cmax * smin) / (cmax - 1)
X = np.dot(U, np.dot(np.diag(s + lda), V))
return X, cmax
def pos_recipr(X):
""" Return element-wise reciprocal of array, setting `X`>=0 to 0
Return the reciprocal of an array, setting all entries less than or
equal to 0 to 0. Therefore, it presumes that X should be positive in
general.
Parameters
----------
X : array-like
Returns
-------
rX : array
array of same shape as `X`, dtype np.float, with values set to
1/X where X > 0, 0 otherwise
"""
    X = np.asarray(X)
    # minimal completion matching the docstring (the original body is truncated here)
    return np.where(X > 0, 1. / np.maximum(X, 1e-300), 0)
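# Example: pos_recipr(np.array([-1., 0., 2.])) -> array([0. , 0. , 0.5])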
#!/usr/bin/env python
from math import ceil, sqrt
import numpy as np
from functools import partial
from pyKrig.utilities import pairwise_distance, mmcriterion
def latin_hypercube(nsample, ndv):
"""
create sample points for latin hypercube sampling
:param nsample: number of samples
:param ndv: number of design variables
"""
min_level = (1 / nsample) * 0.5
max_level = 1 - min_level
levels = np.linspace(min_level, max_level, nsample)
lhs = np.empty((nsample, ndv))
index_levels = np.arange(nsample)
for j in range(ndv):
order = np.random.permutation(index_levels)
lhs[:, j] = levels[order]
return lhs
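# Example (illustrative): latin_hypercube(4, 2) yields a 4x2 design in which each
# column is an independent random permutation of the stratified levels
# [0.125, 0.375, 0.625, 0.875] -- one sample per row and per level.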
def perturbate(X):
"""
randomly choose a pair of sampling points, and interchange the values of a randomly chosen design variable
"""
ns, ndv = X.shape
is1 = np.random.randint(ns)
is2 = np.random.randint(ns)
idv = np.random.randint(ndv)
X[is1, idv], X[is2, idv] = X[is2, idv], X[is1, idv]
def optimize_lhs(X, criterion_func):
"""
optimize a latin hypercube via simulated annealing
:param X: a latin hypercube
:param criterion_func: the function used to evaluate the latin hypercube
"""
# initialize
phi = criterion_func(X)
phi_best = phi
    Xbest = np.array(X, copy=True)
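    # -- The source is truncated here. A standard simulated-annealing loop would
    # -- continue roughly as sketched below (temperature schedule, iteration
    # -- budget, and the "larger criterion is better" direction are assumptions):
    #   T = 1.0
    #   for _ in range(n_iter):
    #       Xtry = np.array(X, copy=True)
    #       perturbate(Xtry)
    #       phi_try = criterion_func(Xtry)
    #       if phi_try > phi or np.random.rand() < np.exp((phi_try - phi) / T):
    #           X, phi = Xtry, phi_try
    #           if phi > phi_best:
    #               Xbest, phi_best = np.array(X, copy=True), phi
    #       T *= 0.95
    #   return Xbest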
import clustertools as ctools
import numpy as np
units=['pckms','kpckms','nbody','galpy']
def test_init_default():
cluster=ctools.StarCluster()
assert cluster.tphys == 0.0
assert cluster.units is None
assert cluster.origin is None
assert cluster.ctype == 'snapshot'
assert cluster.projected == False
return None
def test_init_custom():
for u in units:
        cluster=ctools.StarCluster(10.0,u,'cluster','nbody6',True)
        assert cluster.tphys == 10.0
        assert cluster.units == u
        assert cluster.origin == 'cluster'
assert cluster.ctype == 'nbody6'
assert cluster.projected == True
return None
def test_add_stars_default():
nstar=100
cluster=ctools.StarCluster()
x,y,z=np.ones(nstar),np.ones(nstar),np.ones(nstar)
vx,vy,vz=np.ones(nstar),np.ones(nstar),np.ones(nstar)
cluster.add_stars(x,y,z,vx,vy,vz)
np.testing.assert_array_equal(x,cluster.x)
np.testing.assert_array_equal(y,cluster.y)
np.testing.assert_array_equal(z,cluster.z)
np.testing.assert_array_equal(vx,cluster.vx)
np.testing.assert_array_equal(vy,cluster.vy)
np.testing.assert_array_equal(vz,cluster.vz)
np.testing.assert_array_equal(cluster.m,np.ones(len(x)))
np.testing.assert_array_equal(cluster.id, np.linspace(0, len(x) - 1, len(x), dtype=int))
assert cluster.ntot == nstar
np.testing.assert_array_equal(cluster.kw,np.zeros(nstar))
def test_add_stars_custom():
nstar=100
cluster=ctools.StarCluster()
x,y,z=np.ones(nstar),np.ones(nstar),np.ones(nstar)
vx,vy,vz=np.ones(nstar),np.ones(nstar),np.ones(nstar)
m=np.ones(nstar)*0.1
id=np.linspace(0,nstar,nstar)
cluster.add_stars(x,y,z,vx,vy,vz,m,id)
np.testing.assert_array_equal(m,cluster.m)
np.testing.assert_array_equal(id,cluster.id)
def test_add_stars_lenghtdif():
nstar=100
cluster=ctools.StarCluster()
x,y=np.ones(nstar),np.ones(nstar)
vx,vy=np.ones(nstar),np.ones(nstar)
z,vz,m=1.,1.,1.
cluster.add_stars(x,y,z,vx,vy,vz,m)
np.testing.assert_array_equal(cluster.m,np.ones(nstar)*m)
np.testing.assert_array_equal(cluster.z,np.ones(nstar)*z)
np.testing.assert_array_equal(cluster.vz,np.ones(nstar)*vz)
def test_add_stars_radec():
nstar=100
cluster=ctools.StarCluster(units='radec',origin='sky')
x,y,z=np.ones(nstar),np.ones(nstar),np.ones(nstar)
vx,vy,vz=np.ones(nstar),np.ones(nstar),np.ones(nstar)
cluster.add_stars(x,y,z,vx,vy,vz)
np.testing.assert_array_equal(x,cluster.ra)
np.testing.assert_array_equal(y,cluster.dec)
np.testing.assert_array_equal(z,cluster.dist)
np.testing.assert_array_equal(vx,cluster.pmra)
np.testing.assert_array_equal(vy,cluster.pmdec)
    np.testing.assert_array_equal(vz,cluster.vlos)
import os
import time
from copy import deepcopy
import torch
import torch.nn as nn
import numpy as np
import numpy.random as rd
import gym
class ActorPPO(nn.Module):
def __init__(self, mid_dim, state_dim, action_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
nn.Linear(mid_dim, action_dim), )
self.a_std_log = nn.Parameter(torch.zeros((1, action_dim)) - 0.5, requires_grad=True) # trainable parameter
self.sqrt_2pi_log = 0.9189385332046727 # =np.log(np.sqrt(2 * np.pi))
layer_norm(self.net[-1], std=0.1) # output layer for action
def forward(self, state):
return self.net(state).tanh() # action
def get_action_noise(self, state):
a_avg = self.net(state)
a_std = self.a_std_log.exp()
noise = torch.randn_like(a_avg)
action = a_avg + noise * a_std
return action, noise
def compute_logprob(self, state, action):
a_avg = self.net(state)
a_std = self.a_std_log.exp()
delta = ((a_avg - action) / a_std).pow(2).__mul__(0.5) # __mul__(0.5) is * 0.5
logprob = -(self.a_std_log + self.sqrt_2pi_log + delta)
return logprob.sum(1)
class CriticAdv(nn.Module):
def __init__(self, state_dim, mid_dim):
super().__init__()
self.net = nn.Sequential(nn.Linear(state_dim, mid_dim), nn.ReLU(),
nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
nn.Linear(mid_dim, mid_dim), nn.Hardswish(),
nn.Linear(mid_dim, 1))
layer_norm(self.net[-1], std=0.5) # output layer for Q value
def forward(self, state):
return self.net(state) # Q value
def layer_norm(layer, std=1.0, bias_const=1e-6):
torch.nn.init.orthogonal_(layer.weight, std)
torch.nn.init.constant_(layer.bias, bias_const)
class AgentPPO:
def __init__(self):
super().__init__()
self.learning_rate = 1e-4
self.ratio_clip = 0.25 # ratio.clamp(1 - clip, 1 + clip)
self.lambda_entropy = 0.01 # could be 0.02
self.lambda_gae_adv = 0.98 # could be 0.95~0.99, GAE (Generalized Advantage Estimation. ICLR.2016.)
self.if_use_gae = True
self.compute_reward = None
self.state = None # set for self.update_buffer(), initialize before training
self.noise = None
self.act = self.act_target = None
self.cri = self.cri_target = None
self.optimizer = None
self.criterion = None
self.device = None
def init(self, net_dim, state_dim, action_dim):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.compute_reward = self.compute_reward_gae if self.if_use_gae else self.compute_reward_adv
self.act = ActorPPO(net_dim, state_dim, action_dim).to(self.device)
self.cri = CriticAdv(state_dim, net_dim).to(self.device)
self.criterion = torch.nn.SmoothL1Loss()
self.optimizer = torch.optim.Adam([{'params': self.act.parameters(), 'lr': self.learning_rate},
{'params': self.cri.parameters(), 'lr': self.learning_rate}])
def select_action(self, state):
states = torch.as_tensor((state,), dtype=torch.float32, device=self.device).detach()
actions, noises = self.act.get_action_noise(states)
return actions[0].cpu().numpy(), noises[0].cpu().numpy()
def store_transition(self, env, buffer, target_step, reward_scale, gamma):
buffer.empty_buffer_before_explore() # NOTICE! necessary for on-policy
actual_step = 0
while actual_step < target_step:
state = env.reset()
for _ in range(env.max_step):
action, noise = self.select_action(state)
next_state, reward, done, _ = env.step(np.tanh(action))
actual_step += 1
other = (reward * reward_scale, 0.0 if done else gamma, *action, *noise)
buffer.append_buffer(state, other)
if done:
break
state = next_state
return actual_step
def update_net(self, buffer, _target_step, batch_size, repeat_times=8):
buffer.update_now_len_before_sample()
max_memo = buffer.now_len # assert max_memo >= _target_step
with torch.no_grad(): # Trajectory using reverse reward
buf_reward, buf_mask, buf_action, buf_noise, buf_state = buffer.sample_for_ppo()
bs = 2 ** 10 # set a smaller 'bs: batch size' when out of GPU memory.
buf_value = torch.cat([self.cri(buf_state[i:i + bs]) for i in range(0, buf_state.size(0), bs)], dim=0)
buf_logprob = -(buf_noise.pow(2).__mul__(0.5) + self.act.a_std_log + self.act.sqrt_2pi_log).sum(1)
buf_r_sum, buf_advantage = self.compute_reward(max_memo, buf_reward, buf_mask, buf_value)
del buf_reward, buf_mask, buf_noise
obj_critic = None
for _ in range(int(repeat_times * max_memo / batch_size)): # PPO: Surrogate objective of Trust Region
indices = torch.randint(max_memo, size=(batch_size,), requires_grad=False, device=self.device)
state = buf_state[indices]
action = buf_action[indices]
r_sum = buf_r_sum[indices]
logprob = buf_logprob[indices]
advantage = buf_advantage[indices]
new_logprob = self.act.compute_logprob(state, action) # it is obj_actor
ratio = (new_logprob - logprob).exp()
obj_surrogate1 = advantage * ratio
obj_surrogate2 = advantage * ratio.clamp(1 - self.ratio_clip, 1 + self.ratio_clip)
obj_surrogate = -torch.min(obj_surrogate1, obj_surrogate2).mean()
obj_entropy = (new_logprob.exp() * new_logprob).mean() # policy entropy
obj_actor = obj_surrogate + obj_entropy * self.lambda_entropy
value = self.cri(state).squeeze(1) # critic network predicts the reward_sum (Q value) of state
obj_critic = self.criterion(value, r_sum)
obj_united = obj_actor + obj_critic / (r_sum.std() + 1e-5)
self.optimizer.zero_grad()
obj_united.backward()
self.optimizer.step()
return self.act.a_std_log.mean().item(), obj_critic.item()
def compute_reward_adv(self, max_memo, buf_reward, buf_mask, buf_value):
buf_r_sum = torch.empty(max_memo, dtype=torch.float32, device=self.device) # reward sum
pre_r_sum = 0 # reward sum of previous step
for i in range(max_memo - 1, -1, -1):
buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
pre_r_sum = buf_r_sum[i]
buf_advantage = buf_r_sum - (buf_mask * buf_value.squeeze(1))
buf_advantage = (buf_advantage - buf_advantage.mean()) / (buf_advantage.std() + 1e-5)
return buf_r_sum, buf_advantage
def compute_reward_gae(self, max_memo, buf_reward, buf_mask, buf_value):
buf_r_sum = torch.empty(max_memo, dtype=torch.float32, device=self.device) # old policy value
buf_advantage = torch.empty(max_memo, dtype=torch.float32, device=self.device) # advantage value
pre_r_sum = 0 # reward sum of previous step
pre_advantage = 0 # advantage value of previous step
for i in range(max_memo - 1, -1, -1):
buf_r_sum[i] = buf_reward[i] + buf_mask[i] * pre_r_sum
pre_r_sum = buf_r_sum[i]
buf_advantage[i] = buf_reward[i] + buf_mask[i] * pre_advantage - buf_value[i]
pre_advantage = buf_value[i] + buf_advantage[i] * self.lambda_gae_adv
buf_advantage = (buf_advantage - buf_advantage.mean()) / (buf_advantage.std() + 1e-5)
return buf_r_sum, buf_advantage
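    # Note: the recursion above is the standard GAE update written compactly.
    # With delta_i = r_i + gamma*V_{i+1} - V_i it computes
    # A_i = delta_i + gamma*lambda*A_{i+1}: pre_advantage carries
    # V_{i+1} + lambda*A_{i+1}, and buf_mask folds in gamma (0 at episode ends).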
class ReplayBuffer:
def __init__(self, max_len, state_dim, action_dim):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.max_len = max_len
self.now_len = 0
self.next_idx = 0
self.if_full = False
self.action_dim = action_dim # for self.sample_for_ppo(
self.if_gpu = False
other_dim = 1 + 1 + action_dim * 2
self.buf_other = np.empty((max_len, other_dim), dtype=np.float32)
self.buf_state = np.empty((max_len, state_dim), dtype=np.float32)
def append_buffer(self, state, other): # CPU array to CPU array
self.buf_state[self.next_idx] = state
self.buf_other[self.next_idx] = other
self.next_idx += 1
if self.next_idx >= self.max_len:
self.if_full = True
self.next_idx = 0
def extend_buffer(self, state, other): # CPU array to CPU array
size = len(other)
next_idx = self.next_idx + size
        if next_idx > self.max_len:
            self.buf_state[self.next_idx:self.max_len] = state[:self.max_len - self.next_idx]
            self.buf_other[self.next_idx:self.max_len] = other[:self.max_len - self.next_idx]
            self.if_full = True
self.if_full = True
next_idx = next_idx - self.max_len
self.buf_state[0:next_idx] = state[-next_idx:]
self.buf_other[0:next_idx] = other[-next_idx:]
else:
self.buf_state[self.next_idx:next_idx] = state
self.buf_other[self.next_idx:next_idx] = other
self.next_idx = next_idx
def sample_batch(self, batch_size):
indices = rd.randint(self.now_len - 1, size=batch_size)
r_m_a = self.buf_other[indices]
return (r_m_a[:, 0:1], # reward
r_m_a[:, 1:2], # mask = 0.0 if done else gamma
r_m_a[:, 2:], # action
self.buf_state[indices], # state
self.buf_state[indices + 1]) # next_state
def sample_for_ppo(self):
all_other = torch.as_tensor(self.buf_other[:self.now_len], device=self.device)
return (all_other[:, 0], # reward
all_other[:, 1], # mask = 0.0 if done else gamma
all_other[:, 2:2 + self.action_dim], # action
all_other[:, 2 + self.action_dim:], # noise
torch.as_tensor(self.buf_state[:self.now_len], device=self.device)) # state
def update_now_len_before_sample(self):
self.now_len = self.max_len if self.if_full else self.next_idx
def empty_buffer_before_explore(self):
self.next_idx = 0
self.now_len = 0
self.if_full = False
'''Utils'''
class Evaluator:
def __init__(self, cwd, agent_id, eval_times, show_gap, env, device):
self.recorder = [(0., -np.inf, 0., 0., 0.), ] # total_step, r_avg, r_std, obj_a, obj_c
self.r_max = -np.inf
self.total_step = 0
self.cwd = cwd # constant
self.device = device
self.agent_id = agent_id
self.show_gap = show_gap
self.eva_times = eval_times
self.env = env
self.target_reward = env.target_reward
self.used_time = None
self.start_time = time.time()
self.print_time = time.time()
print(f"{'ID':>2} {'Step':>8} {'MaxR':>8} |{'avgR':>8} {'stdR':>8} {'objA':>8} {'objC':>8}")
def evaluate_save(self, act, steps, obj_a, obj_c):
reward_list = [get_episode_return(self.env, act, self.device)
for _ in range(self.eva_times)]
r_avg = np.average(reward_list) # episode return average
r_std = float(np.std(reward_list)) # episode return std
if r_avg > self.r_max: # save checkpoint with highest episode return
self.r_max = r_avg # update max reward (episode return)
act_save_path = f'{self.cwd}/actor.pth'
torch.save(act.state_dict(), act_save_path)
print(f"{self.agent_id:<2} {self.total_step:8.2e} {self.r_max:8.2f} |")
self.total_step += steps # update total training steps
self.recorder.append((self.total_step, r_avg, r_std, obj_a, obj_c)) # update recorder
if_solve = bool(self.r_max > self.target_reward) # check if_solve
if if_solve and self.used_time is None:
self.used_time = int(time.time() - self.start_time)
print(f"{'ID':>2} {'Step':>8} {'TargetR':>8} |"
f"{'avgR':>8} {'stdR':>8} {'UsedTime':>8} ########\n"
f"{self.agent_id:<2} {self.total_step:8.2e} {self.target_reward:8.2f} |"
f"{r_avg:8.2f} {r_std:8.2f} {self.used_time:>8} ########")
if time.time() - self.print_time > self.show_gap:
self.print_time = time.time()
print(f"{self.agent_id:<2} {self.total_step:8.2e} {self.r_max:8.2f} |"
f"{r_avg:8.2f} {r_std:8.2f} {obj_a:8.2f} {obj_c:8.2f}")
return if_solve
def get_episode_return(env, act, device) -> float:
episode_return = 0.0 # sum of rewards in an episode
max_step = env.max_step
if_discrete = env.if_discrete
state = env.reset()
for _ in range(max_step):
s_tensor = torch.as_tensor((state,), device=device)
a_tensor = act(s_tensor)
if if_discrete:
a_tensor = a_tensor.argmax(dim=1)
        action = a_tensor.cpu().numpy()[0]  # no detach() needed: this runs under the caller's torch.no_grad()
state, reward, done, _ = env.step(action)
episode_return += reward
if done:
break
return env.episode_return if hasattr(env, 'episode_return') else episode_return
'''env.py'''
class PreprocessEnv(gym.Wrapper): # env wrapper
def __init__(self, env, if_print=True, data_type=np.float32):
super(PreprocessEnv, self).__init__(env)
self.env = env
self.data_type = data_type
(self.env_name, self.state_dim, self.action_dim, self.action_max,
self.if_discrete, self.target_reward, self.max_step
) = get_gym_env_info(env, if_print)
self.step = self.step_type
def reset(self):
state = self.env.reset()
return state.astype(self.data_type)
def step_type(self, action): # there are not type error of action
state, reward, done, info = self.env.step(action * self.action_max)
return state.astype(self.data_type), reward, done, info
def get_gym_env_info(env, if_print):
import gym # gym of OpenAI is not necessary for ElegantRL (even RL)
gym.logger.set_level(40) # Block warning: 'WARN: Box bound precision lowered by casting to float32'
assert isinstance(env, gym.Env)
env_name = env.unwrapped.spec.id
state_shape = env.observation_space.shape
state_dim = state_shape[0] if len(state_shape) == 1 else state_shape # sometimes state_dim is a list
target_reward = getattr(env, 'target_reward', None)
target_reward_default = getattr(env.spec, 'reward_threshold', None)
if target_reward is None:
target_reward = target_reward_default
if target_reward is None:
target_reward = 2 ** 16
max_step = getattr(env, 'max_step', None)
max_step_default = getattr(env, '_max_episode_steps', None)
if max_step is None:
max_step = max_step_default
if max_step is None:
max_step = 2 ** 10
if_discrete = isinstance(env.action_space, gym.spaces.Discrete)
if if_discrete: # make sure it is discrete action space
action_dim = env.action_space.n
action_max = int(1)
elif isinstance(env.action_space, gym.spaces.Box): # make sure it is continuous action space
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
else:
raise RuntimeError('| Please set these value manually: if_discrete=bool, action_dim=int, action_max=1.0')
print(f"\n| env_name: {env_name}, action space if_discrete: {if_discrete}"
f"\n| state_dim: {state_dim}, action_dim: {action_dim}, action_max: {action_max}"
f"\n| max_step: {max_step} target_reward: {target_reward}") if if_print else None
return env_name, state_dim, action_dim, action_max, if_discrete, target_reward, max_step
'''DEMO'''
class Arguments:
def __init__(self, agent=None, env=None, gpu_id=None, if_on_policy=False):
self.agent = agent # Deep Reinforcement Learning algorithm
self.cwd = None # current work directory. cwd is None means set it automatically
self.env = env # the environment for training
self.env_eval = None # the environment for evaluating
self.gpu_id = gpu_id # choose the GPU for running. gpu_id is None means set it automatically
'''Arguments for training (off-policy)'''
self.net_dim = 2 ** 8 # the network width
self.batch_size = 2 ** 8 # num of transitions sampled from replay buffer.
self.repeat_times = 2 ** 0 # repeatedly update network to keep critic's loss small
self.target_step = 2 ** 10 # collect target_step, then update network
self.max_memo = 2 ** 17 # capacity of replay buffer
if if_on_policy: # (on-policy)
self.net_dim = 2 ** 9
self.batch_size = 2 ** 8
self.repeat_times = 2 ** 4
self.target_step = 2 ** 12
self.max_memo = self.target_step
self.reward_scale = 2 ** 0 # an approximate target reward usually be closed to 256
self.gamma = 0.99 # discount factor of future rewards
self.num_threads = 4 # cpu_num for evaluate model, torch.set_num_threads(self.num_threads)
'''Arguments for evaluate'''
self.if_remove = True # remove the cwd folder? (True, False, None:ask me)
self.if_allow_break = True # allow break training when reach goal (early termination)
self.break_step = 2 ** 20 # break training after 'total_step > break_step'
self.eval_times = 2 ** 1 # evaluation times if 'eval_reward > target_reward'
self.show_gap = 2 ** 8 # show the Reward and Loss value per show_gap seconds
self.random_seed = 0 # initialize random seed in self.init_before_training(
def init_before_training(self):
self.gpu_id = '0' if self.gpu_id is None else str(self.gpu_id)
self.cwd = f'./{self.env.env_name}_{self.gpu_id}' if self.cwd is None else self.cwd
print(f'| GPU id: {self.gpu_id}, cwd: {self.cwd}')
import shutil # remove history according to bool(if_remove)
if self.if_remove is None:
self.if_remove = bool(input("PRESS 'y' to REMOVE: {}? ".format(self.cwd)) == 'y')
if self.if_remove:
shutil.rmtree(self.cwd, ignore_errors=True)
print("| Remove history")
os.makedirs(self.cwd, exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.gpu_id)
torch.set_num_threads(self.num_threads)
torch.set_default_dtype(torch.float32)
torch.manual_seed(self.random_seed)
        np.random.seed(self.random_seed)
import numpy
import scipy.ndimage
from . import _deform_grid
def deform_random_grid(X, sigma=25, points=3, order=3, mode='constant', cval=0.0, crop=None, prefilter=True, axis=None):
"""
Elastic deformation with a random deformation grid
This generates a random, square deformation grid with displacements
sampled from from a normal distribution with standard deviation `sigma`.
The deformation is then applied to the image or list of images,
See ``deform_grid`` for a full description of the parameters.
Parameters
----------
X : numpy array or list of arrays
image, or list of images of the same size
sigma : float
standard deviation of the normal distribution
points : array
number of points of the deformation grid
See Also
--------
deform_grid : for a full description of the parameters.
"""
# prepare inputs and axis selection
Xs = _normalize_inputs(X)
axis, deform_shape = _normalize_axis_list(axis, Xs)
if not isinstance(points, (list, tuple)):
points = [points] * len(deform_shape)
displacement = numpy.random.randn(len(deform_shape), *points) * sigma
return deform_grid(X, displacement, order, mode, cval, crop, prefilter, axis)
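# Usage sketch (shapes illustrative, mirroring the package README):
#   X = numpy.random.rand(200, 300)
#   X_deformed = deform_random_grid(X, sigma=25, points=3)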
def deform_grid(X, displacement, order=3, mode='constant', cval=0.0, crop=None, prefilter=True, axis=None):
"""
Elastic deformation with a deformation grid
The procedure generates a coarse displacement grid with a random displacement
for each grid point. This grid is then interpolated to compute a displacement for
each pixel in the input image. The input image is then deformed using the
displacement vectors and a spline interpolation.
Parameters
----------
X : numpy array or list of arrays
image, or list of images of the same size
If X is a list of images, the values for order, mode and cval can be lists
to specify a different value for every image in X.
displacement : numpy array
displacement vectors for each control point
displacement is a NumPy array with displacement vectors for each
control points. For example, to deform a 2D image with 3 x 5 control
points, provide a displacement matrix of shape 2 x 3 x 5.
order : {0, 1, 2, 3, 4}
interpolation order
mode : ({nearest, wrap, reflect, mirror, constant})
border mode
cval : float
constant value to be used if mode == 'constant'
crop : None or list
None, or a list of slice() objects to crop the output
crop can be a list of slice() objects to crop the output with.
Only very simple slicing is supported: the slice start and stop values must
be positive and should not be larger than the output. Note that this parameter
is dependent of the axis parameter: if an axis list is given, crop must only
contain slice() objects for the dimensions in axis.
prefilter : bool
if True the input X will be pre-filtered with a spline filter
axis : None, int, a list of ints, or a list of lists of ints
the axes to deform over
axis indicates the axes on which the deformation should be applied.
The default (None) is to apply a deformation to all dimensions of the input.
Giving a single axis (int) or a tuple of axes will apply the deformation only
to those axes. The shape of the displacement must match this number of axes.
If multiple inputs are given, axis should be None or a list of tuples with
the axes for each input.
Returns
-------
numpy array or list of arrays
The deformed image, or a list of deformed images if a list of inputs is given.
Notes
-----
See the SciPy documentation for scipy.ndimage.interpolation.map_coordinates
for more details on some of the parameters.
The elastic deformation approach is found in
* Ronneberger, Fischer, and Brox, "U-Net: Convolutional Networks for Biomedical
Image Segmentation" https://arxiv.org/abs/1505.04597
* Cicek et al., "3D U-Net: Learning Dense Volumetric
Segmentation from Sparse Annotation" https://arxiv.org/abs/1606.06650
Based on a Python implementation by <NAME>.
"""
# prepare inputs and axis selection
Xs = _normalize_inputs(X)
axis, deform_shape = _normalize_axis_list(axis, Xs)
# prepare output cropping
output_shapes, output_offset = _compute_output_shapes(Xs, axis, deform_shape, crop)
# prepare other parameters
displacement = _normalize_displacement(displacement, Xs, axis)
order = _normalize_order(order, Xs)
mode = _normalize_mode(mode, Xs)
cval = _normalize_cval(cval, Xs)
# prefilter inputs
Xs_f = []
for i, x in enumerate(Xs):
if prefilter and order[i] > 1:
x_f = numpy.zeros_like(x)
for d in axis[i]:
scipy.ndimage.spline_filter1d(x, axis=d, order=order[i], output=x_f)
x = x_f
Xs_f.append(x_f)
else:
Xs_f.append(x)
# prefilter displacement
    displacement_f = numpy.zeros_like(displacement)
import pytest
import numpy as np
import cmath
import math
from numpy.testing import assert_almost_equal
def test_phasor():
from electricpy.phasor import phasor
magnitude = 10
# basic angles test case 0
z1 = phasor(magnitude, 0)
z2 = phasor(magnitude, 30)
z3 = phasor(magnitude, 45)
z4 = phasor(magnitude, 60)
z5 = phasor(magnitude, 90)
assert_almost_equal(z1, complex(magnitude, 0))
assert_almost_equal(z2, complex(magnitude * np.sqrt(3) / 2, magnitude / 2))
assert_almost_equal(z3, complex(magnitude / np.sqrt(2), magnitude / np.sqrt(2)))
assert_almost_equal(z4, complex(magnitude / 2, magnitude * np.sqrt(3) / 2))
assert_almost_equal(z5, complex(0, magnitude))
# z(theta) = z(theta+360) test case 1
    theta = np.random.randint(360)
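    # plausible completion of the truncated test (exact assertion assumed):
    assert_almost_equal(phasor(magnitude, theta), phasor(magnitude, theta + 360))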
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 25 16:29:42 2019
@author: Weike (Vicky) Sun <EMAIL>/<EMAIL>
(c) 2020 <NAME>, all rights reserved
"""
"""
Load packages and Set reproduceble results
"""
from sklearn.preprocessing import StandardScaler
import RNN_feedback as RNN_fd
import matplotlib.pyplot as plt
# Seed value
seed_value= 1
# 1. Set `PYTHONHASHSEED` environment variable at a fixed value
import os
os.environ['PYTHONHASHSEED']=str(seed_value)
seed_value += 1
# 2. Set `python` built-in pseudo-random generator at a fixed value
import random
random.seed(seed_value)
seed_value += 1
# 3. Set `numpy` pseudo-random generator at a fixed value
import numpy as np
np.random.seed(seed_value)
seed_value += 1
# 4. Set `tensorflow` pseudo-random generator at a fixed value
import tensorflow as tf
tf.set_random_seed(seed_value)
def timeseries_RNN_feedback_single_train(X, Y, X_val = None, Y_val = None, X_test=None, Y_test=None, train_ratio = 0.8,\
cell_type='e',activation = 'tanh', state_size = 2,\
batch_size = 1, epoch_overlap = None,num_steps = 10,\
num_layers = 1, learning_rate = 1e-2, lambda_l2_reg = 1e-3,\
num_epochs =200, input_prob = 0.95, output_prob = 0.95, state_prob = 0.95,\
input_prob_test = 1, output_prob_test = 1, state_prob_test = 1,\
max_checks_without_progress = 100,epoch_before_val=50, location='RNN_feedback_0', round_number = '', plot=False):
'''This function fits an RNN_feedback model to training data, using validation data to determine when to stop.
When test data is given, it is used to choose the hyperparameters; otherwise AIC, computed on the training data,
is returned to select the hyperparameters.
Input:
X: training data predictors numpy array: Nxm
y: training data response numpy array: Nx1
X_test: testing data predictors numpy array: N_testxm
y_test: testing data response numpy array: N_test x 1
train_ratio: float, portion of training data used to train the model, and the rest is used as validation data
if X_val is provided, this value is overridden
cell_type: str, type of RNN cell, can be either LSTM, GRU, others for BasicRNN, default = basicRNN
activation: str, type of activation function, can be relu, tanh, sigmoid, linear, default = tanh
state_size: int, number of states in the model
batch_size: int, number of batch used in training
epoch_overlap: None or int, None indicates no overlap between training patches; an int represents the spacing between patches (e.g. 0 represents adjacent patches)
num_steps: int, number of steps of memory used in dynamic_RNN training
num_layers: int, number of RNN layers in the system, default = 1
learning_rate: float, learning rate for Adam, default= 1e-2
lambda_l2_reg: float, regularization weight, <=0 indicates no regularization, default = 1e-3
num_epochs: int, maximum number of epochs considered in the system
input_prob, output_prob, state_prob: float, (0, 1], the keep probability for dropout during training, default = 0.95
input_prob_test, output_prob_test, state_prob_test: float (0,1], the keep probability for dropout during testing, default = 1 (no dropout)
max_checks_without_progress: int, number of validation epochs without error improvement before early stopping, default = 100
epoch_before_val: int, number of epochs in training before using validation set to early stop, default = 50
location: str, name for saving the trained RNN-feedback model
plot: Boolean, whether to plot the training results or not
Output:
(AIC or test results, prediction_train, prediction_test)
'''
print('========= Loading data =========')
"""
Load and arrange data for regression
"""
#parameter for the data sets
if X_val is None:
num_train = round(X.shape[0]*train_ratio)
else:
num_train = X.shape[0]
if X_test is not None:
test = True
num_test = X_test.shape[0]
else:
test = False
x_num_features = X.shape[1]
y_num_features = Y.shape[1]
print('======== Pre-process Data =========')
if X_val is None:
scaler = StandardScaler()
scaler.fit(X[:num_train])
X_train = scaler.transform(X[:num_train])
X_val = scaler.transform(X[num_train:])
scalery = StandardScaler()
scalery.fit(Y[:num_train])
Y_train=scalery.transform(Y[:num_train])
Y_val = scalery.transform(Y[num_train:])
else:
scaler = StandardScaler()
scaler.fit(X)
X_train = scaler.transform(X)
X_val = scaler.transform(X_val)
scalery = StandardScaler()
scalery.fit(Y)
Y_train=scalery.transform(Y)
Y_val = scalery.transform(Y_val)
if test:
X_test = scaler.transform(X_test)
Y_test = scalery.transform(Y_test)
input_size_x = x_num_features
input_size_y = y_num_features
print('======== Training =========')
g_train=RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps=num_steps, num_layers=num_layers, input_size_x=input_size_x,
input_size_y=input_size_y , learning_rate=learning_rate, lambda_l2_reg=lambda_l2_reg)
train_loss,val_loss,num_parameter = RNN_fd.train_rnn(X_train,Y_train,X_val,Y_val,
g_train ,num_epochs, num_steps, batch_size, input_prob, output_prob, state_prob,
verbose=True, save=location, epoch_overlap=epoch_overlap, max_checks_without_progress=max_checks_without_progress,
epoch_before_val = epoch_before_val)
if train_loss is None:
return (None, None, None, (100000,100000,100000), 100000,100000,100000)
val_loss = np.array(val_loss)
if plot:
'''Plot the result'''
plt.figure()
s = 12
plt.plot(train_loss, color='xkcd:sky blue', label = 'train loss')
plt.plot(np.linspace(epoch_before_val-1,epoch_before_val+val_loss.shape[0]-1, num = val_loss.shape[0]), val_loss, color= 'xkcd:coral', label = 'val loss')
plt.title('Training and validation loss')
plt.ylabel('Loss')
plt.xlabel('# of epoch')
plt.legend(fontsize=s)
plt.tight_layout()
plt.savefig('Training and validation error round ' + round_number +'.png', dpi = 600,bbox_inches='tight')
############################################################################
"""Training Final Results"""
g_train_final = RNN_fd.build_multilayer_rnn_graph_with_dynamic_rnn(cell_type=cell_type, activation=activation,state_size=state_size,
num_steps= num_train , num_layers=num_layers, input_size_x=input_size_x,
input_size_y = input_size_y , learning_rate = learning_rate, lambda_l2_reg=lambda_l2_reg)
prediction_train,train_loss_final,_ = RNN_fd.test_rnn(X_train,Y_train, g_train_final, location, input_prob_test, output_prob_test, state_prob_test, num_train)
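# AIC = n*ln(SSE/m) + 2k penalizes the fit by the parameter count k (m = number of
# outputs); AICc adds the standard small-sample correction 2k(k+1)/(n-k-1).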
AIC = num_train*np.log(np.sum(train_loss_final)/y_num_features) + 2*num_parameter
AICc = num_train*np.log(np.sum(train_loss_final)/y_num_features) + 2*num_parameter*(num_parameter+1)/(num_train-num_parameter-1)
"""
Copyright (c) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import datetime
from functools import partial
import cv2 as cv
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms as t
from scipy.spatial.distance import cosine
from tqdm import tqdm
import numpy as np
def get_subset(container, subset_bounds):
"""Returns a subset of the given list with respect to the list of bounds"""
subset = []
for bound in subset_bounds:
subset += container[bound[0]: bound[1]]
return subset
def get_roc(scores_with_gt, n_threshs=400):
"""Computes a ROC cureve on the LFW dataset"""
thresholds = np.linspace(0., 4., n_threshs)
fp_rates = []
tp_rates = []
for threshold in thresholds:
fp = 0
tp = 0
for score_with_gt in scores_with_gt:
predict_same = score_with_gt['score'] < threshold
actual_same = score_with_gt['is_same']
if predict_same and actual_same:
tp += 1
elif predict_same and not actual_same:
fp += 1
fp_rates.append(float(fp) / len(scores_with_gt) * 2)
tp_rates.append(float(tp) / len(scores_with_gt) * 2)
return np.array(fp_rates), np.array(tp_rates)
def get_auc(fprs, tprs):
"""Computes AUC under a ROC curve"""
sorted_fprs, sorted_tprs = zip(*sorted(zip(*(fprs, tprs))))
sorted_fprs = list(sorted_fprs)
sorted_tprs = list(sorted_tprs)
if sorted_fprs[-1] != 1.0:
sorted_fprs.append(1.0)
sorted_tprs.append(sorted_tprs[-1])
return np.trapz(sorted_tprs, sorted_fprs)
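# Hedged usage sketch (synthetic scores; not part of the evaluation pipeline above):
#   scores = [{'score': 0.3, 'is_same': True}, {'score': 2.5, 'is_same': False}]
#   fprs, tprs = get_roc(scores, n_threshs=50)
#   print('AUC: %.3f' % get_auc(fprs, tprs))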
import skimage.io as io
import skimage.transform as skt
import numpy as np
from PIL import Image
from src.models.class_patcher import patcher
from src.utils.imgproc import *
class patcher(patcher):
def __init__(self, body='./body/body_hakka.png', **options):
super().__init__('薄荷', body=body, pantie_position=[0, 0], **options)
self.mask_front = io.imread('./mask/mask_hakka_front.png')
self.mask_back = io.imread('./mask/mask_hakka_back.png')
def convert_front(self, image):
pantie = np.array(image)
patch = np.copy(pantie[-120:-5, 546:, :])
[pr, pc, d] = patch.shape
pantie[125:125 + pr, :pc, :] = patch[::-1, ::-1]
front = pantie[:, :330]
arrx = np.zeros(64)
arrx[20:] += np.sin(np.linspace(0, np.pi / 2, 44)) * 79
arrx[10:20] += np.sin(np.linspace(0, np.pi, 10)) * 8
arrx -= 80
arry = np.zeros(64)
front = affine_transform_by_arr(front, arrx, arry)[:320]
front = np.uint8(resize(front, [1.2, 1.12]) * 255)[:, 8:]
front = np.bitwise_and(front, self.mask_front)
front = np.concatenate([front[:, ::-1], front], axis=1)
return Image.fromarray(front)
def convert_back(self, image):
pantie = np.array(image)
pantie[-120:, 546:, :] = 0
back = np.rot90(pantie[:-15, 330:][:, ::-1])
arrx = np.zeros(36)
arry = np.zeros(36)
"""
adapted from https://github.com/openai/spinningup/blob/master/spinup/algos/pytorch/vpg/vpg.py
"""
import torch
import carla.agents.core as core
import numpy as np
from collections import deque
from PIL import Image
from torch.utils.data import Dataset
class MultipleWorkerBuffer:
"""
A buffer for storing trajectories experienced of a multiple worker agent interacting
with the environment, and using Generalized Advantage Estimation (GAE-Lambda)
for calculating the advantages of state-action pairs.
"""
def __init__(self, obs_space, act_dim, size, device, gamma=0.99, lam=1., n_proc=1):
self.device = device
self.obs_space = obs_space
self.obs_dim = {}
self.obs_buf = {}
for space in self.obs_space:
obs_dim = self.obs_space[space].shape
self.obs_dim[space] = obs_dim
self.obs_buf[space] = np.zeros((n_proc,) + core.combined_shape(size, obs_dim), dtype=np.float32)
self.act_buf = np.zeros((n_proc,) + core.combined_shape(size, act_dim), dtype=np.float32)
self.adv_buf = np.zeros((n_proc, size), dtype=np.float32)
self.rew_buf = np.zeros((n_proc, size), dtype=np.float32)
self.ret_buf = np.zeros((n_proc, size), dtype=np.float32)
self.val_buf = np.zeros((n_proc, size), dtype=np.float32)
self.logp_buf = np.zeros((n_proc, size), dtype=np.float32)
self.gamma, self.lam = gamma, lam
self.ptr, self.path_start_idx, self.max_size = np.zeros(n_proc, dtype=np.int32), np.zeros(n_proc,
dtype=np.int32), size
self.n_proc = n_proc
def store(self, obs, act, rew, val, logp):
"""
Append one timestep of agent-environment interaction to the buffer.
"""
assert (self.ptr < self.max_size).all() # buffer has to have room so you can store
for i, ptr in enumerate(self.ptr):
for space in obs.keys():
self.obs_buf[space][i][ptr] = obs[space][i]
self.act_buf[i][ptr] = act[i]
self.rew_buf[i][ptr] = rew[i]
self.val_buf[i][ptr] = val[i]
self.logp_buf[i][ptr] = logp[i]
self.ptr[i] += 1
def finish_path(self, proc_idx, last_val=0):
"""
Call this at the end of a trajectory, or when one gets cut off
by an epoch ending. This looks back in the buffer to where the
trajectory started, and uses rewards and value estimates from
the whole trajectory to compute advantage estimates with GAE-Lambda,
as well as compute the rewards-to-go for each state, to use as
the targets for the value function.
The "last_val" argument should be 0 if the trajectory ended
because the agent reached a terminal state (died), and otherwise
should be V(s_T), the value function estimated for the last state.
This allows us to bootstrap the reward-to-go calculation to account
for timesteps beyond the arbitrary episode horizon (or epoch cutoff).
"""
path_slice = slice(self.path_start_idx[proc_idx], self.ptr[proc_idx])
rews = np.append(self.rew_buf[proc_idx][path_slice], last_val)
vals = np.append(self.val_buf[proc_idx][path_slice], last_val)
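# The GAE-Lambda update that typically follows in the spinup-style buffer (a sketch
# of the standard formula, not necessarily the exact continuation of this file):
#   deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]
#   self.adv_buf[proc_idx][path_slice] = core.discount_cumsum(deltas, self.gamma * self.lam)
#   self.ret_buf[proc_idx][path_slice] = core.discount_cumsum(rews, self.gamma)[:-1]
#   self.path_start_idx[proc_idx] = self.ptr[proc_idx]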
import warnings
import numpy as np
import numpy.linalg as la
from numpy.random import Generator, PCG64
from modpy.optimize._optim_util import OptimizeResult, OptimizePath, _function, _chk_callable
from modpy.optimize._constraints import _prepare_constraints
ID_INFEASIBLE = -12
ID_MAX_SIGMA = -11
ID_TOL_X = -10
ID_TOL_FUN = -9
ID_TOL_X_UP = -8
ID_COND_COV = -7
ID_STAGNATION = -6
ID_EQUAL_FUN = -5
ID_NO_EFFECT_COORD = -4
ID_NO_EFFECT_AXIS = -3
ID_MAX_RESTART = -2
ID_LIN_ALG = -1
ID_MAX_ITER = 0
ID_CONV = 1
TERMINATION_MESSAGES = {
ID_INFEASIBLE: 'No feasible candidates',
ID_MAX_SIGMA: 'MaxSigma criteria.',
ID_TOL_X: 'TolX criteria.',
ID_TOL_FUN: 'TolFun criteria.',
ID_TOL_X_UP: 'TolXUp criteria',
ID_COND_COV: 'ConditionCov criteria.',
ID_STAGNATION: 'Stagnation criteria.',
ID_EQUAL_FUN: 'EqualFunValues criteria.',
ID_NO_EFFECT_COORD: 'NoEffectCoord criteria.',
ID_NO_EFFECT_AXIS: 'NoEffectAxis criteria.',
ID_MAX_RESTART: 'Maximum number of restart reached.',
ID_LIN_ALG: 'LinAlgError due to indeterminate system.',
ID_MAX_ITER: 'Maximum number of iterations reached.',
ID_CONV: 'Tolerance termination condition is satisfied.',
}
MAX_SIGMA = 1e32
def cma_es(obj, x0, bounds=None, constraints=(), method='IPOP', sigma0=1., C0=None, mu=None, lam=None, lbound=np.inf,
tol=1e-6, ftol=1e-12, xtol=1e-12, stol=1e-12, maxiter=None, max_restart=5, seed=None, keep_path=False,
args=(), kwargs={}):
"""
Wrapper method for Covariance Matrix Adaptation Evolution Strategy algorithm (CMA-ES).
Parameters
----------
obj : callable
Objective function
x0 : array_like or int
If array_like `x0` is used as a start guess, if int it is the problem dimension.
bounds : 2-tuple of array_like, optional
Bounds on the solution vector, should be (array_like (n,), array_like (n,)). If None no bounds are applied.
constraints : tuple or Constraint
Tuple of class Constraints.
method : {'1+1, 'mu-lam', 'IPOP'}, optional
Which CMA-ES implementation to use.
sigma0 : float, optional
Coordinate wise standard deviation (step size).
C0 : array_like, shape (n, n), optional
Initial correlation matrix.
mu : int, optional
Number of selected candidates from sampled population.
lam : int, optional
Number of sampled candidates in the population.
lbound : float, optional
Analytical or empirical lower bound that the function value must reach for the run to be considered converged.
tol : float, optional
Tolerance related to change in fitness value over sequence of populations.
ftol : float, optional
Tolerance of the TolFun termination criteria.
xtol : float, optional
Tolerance of the TolX termination criteria.
stol : float, optional
Tolerance of the NoEffectAxis and NoEffectCoord termination criteria.
maxiter : int, optional
Maximum number of allowed iterations.
max_restart : int, optional
Maximum number of allowed restarts.
seed : int, optional
Seed of the random number generator used in generation step.
keep_path : bool, optional
Whether to save path information. Can require substantial memory.
args : tuple, optional
Additional arguments to `fun`.
kwargs : dict, optional
Additional key-word arguments to `fun`.
Returns
-------
OptimizeResult with the following fields:
x : array_like, shape (n,)
Solution vector.
success : bool,
True if algorithm converged within its optimality conditions.
status : int
Reason for algorithm termination
message : str
Description of the termination reason.
nit : int
Number of iterations used to converge.
"""
# prepare start guess and initial objective function value
if isinstance(x0, int):
x0 = np.random.randn(x0)
n = x0.size
# wrap function call with args and kwargs
f = _function(obj, args, kwargs)
_chk_callable(x0, f)
if sigma0 <= 0.:
raise ValueError('CMA-ES requires sigma0 > 0.')
# prepare constraints
if (bounds is not None) or constraints:
con = _prepare_constraints(bounds, constraints, n)
# if con.any_equal():
# raise ValueError('CMA-ES algorithms does not allow equality constraints.')
else:
con = None
# try:
#
# if method == 'mu-lam':
#
# sol, f_opt, status, nit, path = _cma_es_mu_lam(f, x0, generator, constraints=con, sigma0=sigma0, C0=C0,
# mu=mu, lam=lam, lbound=lbound, tol=tol, ftol=ftol, xtol=xtol,
# stol=stol, maxiter=maxiter, keep_path=keep_path)
#
# elif method == 'IPOP':
#
# sol, f_opt, status, nit, path = _cma_es_ipop(f, x0, generator, constraints=con, sigma0=sigma0, C0=C0, mu=mu,
# lam=lam, lbound=lbound, tol=tol, ftol=ftol, xtol=xtol, stol=stol,
# maxiter=maxiter, max_restart=max_restart, keep_path=keep_path)
#
# elif method == '1+1':
#
# if con is None:
#
# sol, f_opt, status, nit, path = _cma_es_1p1(f, x0, sigma0=sigma0, tol=tol, stol=stol,
# maxiter=maxiter, keep_path=keep_path)
#
# else:
#
# sol, f_opt, status, nit, path = _cma_es_1p1_con(f, x0, con, sigma0=sigma0, tol=tol, stol=stol,
# maxiter=maxiter, keep_path=keep_path)
#
# else:
#
# raise ValueError("`method` must be either '1+1' or 'mu-lam'.")
#
# f_opt = float(f_opt)
#
# except la.LinAlgError:
#
# f_opt = None
# sol = None
# status = -1
# nit = 0
# path = None
#
# if sol is None:
# x = None
# else:
# x = sol
#
# res = OptimizeResult(x, f_opt, sol, status=status, nit=nit, tol=tol)
# res.success = status > 0
# res.message = TERMINATION_MESSAGES[res.status]
# res.path = path
opt = CMAES(obj, x0, method, con, sigma0, C0, mu, lam, lbound, tol, ftol, xtol, stol, maxiter, max_restart, seed, keep_path)
opt.run()
res = opt.get_result()
return res
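# Hedged usage sketch of the wrapper above (sphere objective; names from this file):
#   res = cma_es(lambda x: np.sum(x ** 2), x0=np.ones(5), method='IPOP', sigma0=0.5)
#   print(res.x, res.message)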
class CMAES:
"""
Covariance Matrix Adaptation Evolution Strategy algorithm (CMA-ES).
"""
def __init__(self, obj, x0, method='IPOP', constraints=None, sigma0=1., C0=None, mu=None, lam=None, lbound=np.inf,
tol=1e-6, ftol=1e-12, xtol=1e-12, stol=1e-10, maxiter=None, max_restart=5, seed=None, keep_path=False):
"""
Initializer of the CMA-ES algorithm class.
Parameters
----------
obj : callable
Objective function
x0 : array_like, shape (n,)
Start guess
method : {'mu-lam', 'IPOP'}, optional
Which CMA-ES implementation to use.
constraints : Constraints, optional
Class Constraints.
sigma0 : float, optional
Coordinate wise standard deviation (step size).
C0 : array_like, shape (n, n), optional
Initial correlation matrix.
mu : int, optional
Number of selected candidates from sampled population.
lam : int, optional
Number of sampled candidates in the population.
lbound : float, optional
Analytical or empirical lower bound that the function value must reach for the run to be considered converged.
tol : float, optional
Tolerance related to change in fitness value over sequence of populations.
ftol : float, optional
Tolerance of the TolFun termination criteria.
xtol : float, optional
Tolerance of the TolX termination criteria.
stol : float, optional
Tolerance of the NoEffectAxis and NoEffectCoord termination criteria.
maxiter : int, optional
Maximum number of allowed iterations
max_restart : int, optional
Maximum number of allowed restarts, for IPOP only.
seed : int, optional
Seed of the random number generator used in generation step.
keep_path : bool, optional
Whether to save path information. Can require substantial memory.
"""
# problem definition
self.dim = None
self.obj = obj
self.method = method
self.constraints = constraints
# final solution variables
self.x_opt = None
self.f_opt = None
# current solution variables
self.m = np.array(x0)
self.sigma = sigma0
self.C = C0
# provided parameters ------------------------------------------------------------------------------------------
self.lam = lam # population sample size
self.mu_ori = mu # original recombination sample size
self.mu = None # recombination sample size at iteration k (may change due to infeasible samples)
self._reduced = False # if mu is updated during iteration
# derived parameters -------------------------------------------------------------------------------------------
# static
self.chiN = None # E[||N(0, I)||]
self.gamma = None # constraint violation parameter
# dynamic
self.mu_eff = None # variance - effectiveness of sum w_i x_i
self.w = None # recombination weights array
self.cc = None # time constant for cumulation of C
self.cs = None # time constant for cumulation of sigma
self.c1 = None # learning rate for rank-1 update of C
self.cmu = None # learning rate for rank-mu update of C
self.ds = None # damping for sigma
# storage variables --------------------------------------------------------------------------------------------
# static
self.yw = None # weighted recombination of random variables (stored for int. calculations)
self.pc = None # evolution path for C
self.ps = None # evolution paths for sigma
self.B = None # orthonormal eigendecomposition matrix of C
self.D = None # vector of eigenvalues of C
self.BD = None # matrix-product of B and D
self.C = None # covariance matrix C
self.invsqrtC = None # square-root of inverse covariance matrix
self.D_min = None # minimum eigenvalue
self.D_max = None # maximum eigenvalue
self.max_sigma = None # maximum sigma of current iteration
# dynamic
self.y = None # array of sampled random numbers
self.x = None # array of candidate solutions
self.f = None # vector of objective function values
self.x_all = None # store all feasible candidates (for later save)
# convergence options ------------------------------------------------------------------------------------------
# termination criteria
self.tol = tol
self.ftol = ftol
self.xtol = xtol
self.stol = stol
self.lbound = lbound
self.tolx = None
# maximum iterations
self.maxiter = maxiter # maximum iterations for a given (mu/mu_w-lam)-CMA-ES loop
self.max_redraw = 20 # maximum number of redraws allowed if sample is infeasible
self.max_restart = max_restart # maximum number of restarts for IPOP-CMA-ES
# counters
self.it = 0
self.it_total = 0
self.it_restart = 0
self.eig_it = 0
# convergence status
self.status = 1
self.converged = False
self.success = False
# random generation --------------------------------------------------------------------------------------------
self.seed = seed
self.generator = None
# optimization path --------------------------------------------------------------------------------------------
self.keep_path = keep_path
self.path = OptimizePath(keep=keep_path)
def get_result(self):
res = OptimizeResult(self.x_opt, self.f_opt, self.x_opt, status=self.status, nit=self.it_total, tol=self.tol)
res.success = self.status > 0
res.message = TERMINATION_MESSAGES[self.status]
res.path = self.path
return res
def run(self):
if self.method == 'mu-lam':
self.initialize()
self._run_mu_lam()
self.it_total = self.it
elif self.method == 'IPOP':
self._run_ipop()
else:
raise ValueError("`method` must be either 'mu-lam' or 'IPOP'.")
# if the algorithm terminated with an infeasible solution, it has diverged
if self.constraints is not None:
if np.any(self.constraints.f(self.m) < 0.):
self.status = ID_INFEASIBLE
self.converged = False
# TODO: several termination criteria will be considered as converged, reasonable?
if self.status in (ID_NO_EFFECT_AXIS, ID_NO_EFFECT_COORD):
self.status = 1
self.converged = 1
def _run_ipop(self):
"""
An IPOP-(mu/muw lambda)-CMA-ES.
References
----------
[1] <NAME>., <NAME>. (2005). A Restart CMA Evolution Strategy With Increasing Population Size
Link: http://www.cmap.polytechnique.fr/~nikolaus.hansen/cec2005ipopcmaes.pdf
"""
while self.it_restart < self.max_restart:
# call sub (mu/lam)-CMA-ES
self.initialize()
self._run_mu_lam()
self.it_total += self.it
self.it = 0
# check if the inner optimizer converged
if self.converged:
break
else:
# otherwise update population size and step-size
self.lam *= 2
if self.status == ID_TOL_X_UP:
self.sigma *= 10.
else:
self.sigma *= 2.
self.it_restart += 1
if self.it_restart == self.max_restart:
self.status = ID_MAX_RESTART
def _run_mu_lam(self):
"""
A classical (mu/mu_w lambda)-CMA-ES. Primarily based on the work of <NAME> [1], but further
improvements made for convergence checking. Constraint handling is implemented according to [2].
References
----------
[1] <NAME>. (2011). The CMA Evolution Strategy: A Tutorial
Link: http://www.cmap.polytechnique.fr/~nikolaus.hansen/cmatutorial110628.pdf
[2] <NAME>., <NAME>., <NAME>. (2015). Modified Covariance Matrix Adaptation - Evolution Strategy
for constrained optimization under uncertainty, application to rocket design.
International Journal for Simulation and Multidisciplinary Design Optimization.
"""
while self.it < self.maxiter:
# draw candidates and sort based on fitness
self._draw_candidates()
self._sort_candidates()
# handle constraint violations
self._handle_constraints()
self._adjust_constrained_candidates()
if self.y.size:
# update solution estimate and evolution path
self._recombine_solution()
self._update_evolution_paths()
self._decompose_covariance()
# check if algorithm has converged
self.converged = self._check_convergence()
if self.converged:
break
# check if algorithm should be terminated/restarted
self.status = self._check_termination_criteria()
if self.status < 1:
break
# escape flat fitness
self._escape_flat_fitness()
elif self.status < 1:
break
# save optimization path for later plotting
if self.keep_path:
self.path.append(self.m, self.f[0], self.f[0], sigma=self.sigma, candidates=self.x_all)
# revert recombination parameters to default values
if self._reduced:
self._define_parameters(self.mu_ori)
self._reduced = False
self.it += 1
# assign optimal values at the point of termination
self.x_opt = self.m
self.f_opt = self.obj(self.m)
if self.it == self.maxiter:
self.status = 0
self.success = self.status > 0
def initialize(self):
self.dim = self.m.size
# strategy parameter setting: selection
if self.maxiter is None:
self.maxiter = 1e3 * self.dim ** 2
if self.lam is None:
self.lam = int(4 + np.floor(3 * np.log(self.dim)))
else:
self.lam = int(self.lam)
if self.lam < 2:
raise ValueError('CMA-ES requires lam >= 2.')
if self.mu_ori is None:
self.mu_ori = self.lam / 2
if self.mu_ori > self.lam:
raise ValueError('CMA-ES requires mu <= lam.')
self.chiN = self.dim ** 0.5 * (1. - 1. / (4. * self.dim) + 1. / (21. * self.dim ** 2.))
self.gamma = 0.1 / (self.dim + 2.)
self.max_sigma = float(self.sigma)
self._define_parameters(self.mu_ori)
self._initialize_static_arrays()
# random sampling
if isinstance(self.seed, Generator):
self.generator = self.seed
else:
self.generator = Generator(PCG64(self.seed))
self._initialize_tolerances()
def _define_parameters(self, mu):
n = self.dim
# set recombination weights
w = np.log(mu + .5) - np.log(range(1, int(mu + 1)))
self.w = w / np.sum(w)
# set number of points to include in recombination
mu_eff = 1. / np.sum(w ** 2.)
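# NOTE: Hansen's tutorial defines mu_eff = (sum w)^2 / sum(w^2), i.e. 1/sum(w'^2)
# with the *normalized* weights self.w; with the raw log-weights used here mu_eff
# can drop below 1, which the `0. if mu_eff < 1.` guard in the ds formula below
# tolerates.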
self.mu = int(np.floor(mu))
# strategy parameter setting: adaptation
self.cc = (4. + mu_eff / n) / (n + 4. + 2. * mu_eff / n)
self.cs = (mu_eff + 2.) / (n + mu_eff + 5.)
self.c1 = 2. / ((n + 1.3) ** 2. + mu_eff)
self.cmu = np.minimum(1. - self.c1, 2. * (mu_eff - 2. + 1. / mu_eff) / ((n + 2.) ** 2. + mu_eff))
self.ds = 1. + 2. * np.maximum(0., 0. if mu_eff < 1. else np.sqrt((mu_eff - 1.) / (n + 1.)) - 1.) + self.cs
self.mu_eff = mu_eff
def _initialize_static_arrays(self):
n = self.m.size
self.ps = np.zeros((n,))
self.pc = np.zeros((n,))
if self.C is None:
self.C = np.eye(n)
from CameraClient import CameraClient, ImageType, Command, CameraIntri
import pyzed.sl as sl
import numpy as np
import open3d
from os.path import join
import sys
import argparse
import cv2
import os
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
SAVE_PATH = "/home/wangqingyu/桌面/mecheye_python_interface"
MECHEYE_IP = "169.254.108.2"
ZED_BASELINE = 0.1216
ZED_FOCAL_LENGTH = 1384.4
SAVE_FILE = True
K_ZED_LEFT = np.array([[1385.8813, 0, 1093.5342],
[0, 1386.0066, 606.4279],
[0, 0, 1]], dtype=float) # 3*3
K_MECH = np.array([[2734.9911, 0, 964.9017],
[0, 2734.7569, 622.0475],
[0, 0, 1]], dtype=float) # 3*3
R_MECH_ZED = np.array([[0.99022184, 0.01741097, -0.13840878],
[-0.02072163, 0.99953165, -0.0225139],
[0.13795202, 0.02516189, 0.990118958]], dtype=float) # 3*3
T_MECH_ZED = np.array([[36.8203540533],
[-54.5903607433],
[-23.53339511167]], dtype=float) # 3*1
def connect_to_mecheye(camera_ip):
camera = CameraClient()
camera_ip = camera_ip
if not camera.connect(camera_ip):
exit(-1)
return camera
def set_mech_exposure(exposure_mode, exposure_time):
camera.setParameter(paraName="scan2dExposureMode", value=exposure_mode)
camera.setParameter(paraName="scan2dExposureTime", value=exposure_time)
def zed_capture():
zed = sl.Camera()
# Create a InitParameters object and set configuration parameters
init_params = sl.InitParameters()
init_params.camera_resolution = sl.RESOLUTION.HD2K # Use 2K video mode
init_params.camera_fps = 30 # Set fps at 30
# Open the camera
err = zed.open(init_params)
if err != sl.ERROR_CODE.SUCCESS:
exit(1)
i = 0
image_left = sl.Mat()
image_right = sl.Mat()
runtime_parameters = sl.RuntimeParameters()
while i < 5:
# Grab an image, a RuntimeParameters object must be given to grab()
if zed.grab(runtime_parameters) == sl.ERROR_CODE.SUCCESS:
# A new image is available if grab() returns SUCCESS
zed.retrieve_image(image_left, sl.VIEW.LEFT)
zed.retrieve_image(image_right, sl.VIEW.RIGHT)
timestamp = zed.get_timestamp(sl.TIME_REFERENCE.CURRENT)
# Get the timestamp at the time the image was captured
print("Image resolution: {0} x {1} || Image timestamp: {2}\n".format(image_left.get_width(),
image_left.get_height(),
timestamp.get_milliseconds()))
i = i + 1
if SAVE_FILE:
image_left = image_left.get_data()[:, :, :3]
image_right = image_right.get_data()[:, :, :3]
cv2.imwrite(SAVE_PATH + "/uncrop_left.png", image_left)
cv2.imwrite(SAVE_PATH + "/uncrop_right.png", image_right)
zed.close()
return image_left, image_right, image_left.shape[0], image_left.shape[1]
def mech_zed_alignment(depth, mech_height, mech_width, zed_height, zed_width):
ground_truth = np.zeros(shape=(zed_height, zed_width), dtype=float)
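# The alignment loop that typically follows (a sketch of standard pinhole
# reprojection using the K/R/T constants defined above, not the exact
# continuation of this file):
#   for v in range(mech_height):
#       for u in range(mech_width):
#           z = depth[v, u]
#           if z <= 0:
#               continue
#           p_mech = z * np.linalg.inv(K_MECH) @ np.array([u, v, 1.0])
#           p_zed = R_MECH_ZED @ p_mech + T_MECH_ZED.ravel()
#           u2, v2, w2 = K_ZED_LEFT @ p_zed
#           u2, v2 = int(u2 / w2 + 0.5), int(v2 / w2 + 0.5)
#           if 0 <= v2 < zed_height and 0 <= u2 < zed_width:
#               ground_truth[v2, u2] = p_zed[2]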
# pylint: disable=F841
"""
unit test for GAM
Author: <NAME>
Created on 08/07/2015
"""
import os
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from scipy.linalg import block_diag
import pytest
from statsmodels.tools.linalg import matrix_sqrt
from statsmodels.gam.smooth_basis import (
UnivariatePolynomialSmoother, PolynomialSmoother, BSplines,
GenericSmoothers, UnivariateCubicSplines, CyclicCubicSplines)
from statsmodels.gam.generalized_additive_model import (
GLMGam, LogitGam, make_augmented_matrix, penalized_wls)
from statsmodels.gam.gam_cross_validation.gam_cross_validation import (
MultivariateGAMCV, MultivariateGAMCVPath, _split_train_test_smoothers)
from statsmodels.gam.gam_penalties import (UnivariateGamPenalty,
MultivariateGamPenalty)
from statsmodels.gam.gam_cross_validation.cross_validators import KFold
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.families.family import Gaussian
from statsmodels.genmod.generalized_linear_model import lm
sigmoid = np.vectorize(lambda x: 1.0 / (1.0 + np.exp(-x)))
def polynomial_sample_data():
"""A polynomial of degree 4
poly = ax^4 + bx^3 + cx^2 + dx + e
second der = 12ax^2 + 6bx + 2c
integral from -1 to 1 of second der^2 is
(288 a^2)/5 + 32 a c + 8 (3 b^2 + c^2)
the gradient of the integral is der
[576*a/5 + 32 * c, 48*b, 32*a + 16*c, 0, 0]
Returns
-------
poly : smoother instance
y : ndarray
generated function values, demeaned
"""
n = 10000
x = np.linspace(-1, 1, n)
y = 2 * x ** 3 - x
y -= y.mean()
degree = [4]
pol = PolynomialSmoother(x, degree)
return pol, y
def integral(params):
d, c, b, a = params
itg = (288 * a ** 2) / 5 + (32 * a * c) + 8 * (3 * b ** 2 + c ** 2)
itg /= 2
return itg
def grad(params):
d, c, b, a = params
grd = np.array([576 * a / 5 + 32 * c, 48 * b, 32 * a + 16 * c, 0])
grd = grd[::-1]
return grd / 2
def hessian(params):
hess = np.array([[576 / 5, 0, 32, 0],
[0, 48, 0, 0],
[32, 0, 16, 0],
[0, 0, 0, 0]
])
return hess / 2
def cost_function(params, pol, y, alpha):
# this should be the MSE or log likelihood value
lin_pred = np.dot(pol.basis, params)
gaussian = Gaussian()
expval = gaussian.link.inverse(lin_pred)
loglike = gaussian.loglike(y, expval)
# this is the value of the GAM penalty. For the example polynomial
itg = integral(params)
# return the cost function of the GAM for the given polynomial
return loglike - alpha * itg, loglike, itg
def test_gam_penalty():
"""
test the func method of the gam penalty
:return:
"""
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 4)
gp_score = gp.func(params)
itg = integral(params)
assert_allclose(gp_score, itg, atol=1.e-1)
def test_gam_gradient():
# test the gam gradient for the example polynomial
np.random.seed(1)
pol, y = polynomial_sample_data()
alpha = 1
smoother = pol.smoothers[0]
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=smoother)
for _ in range(10):
params = np.random.uniform(-2, 2, 4)
params = np.array([1, 1, 1, 1])
gam_grad = gp.deriv(params)
grd = grad(params)
assert_allclose(gam_grad, grd, rtol=1.e-2, atol=1.e-2)
def test_gam_hessian():
# test the deriv2 method of the gam penalty
np.random.seed(1)
pol, y = polynomial_sample_data()
univ_pol = pol.smoothers[0]
alpha = 1
gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=univ_pol)
for _ in range(10):
params = np.random.randint(-2, 2, 5)
gam_der2 = gp.deriv2(params)
hess = hessian(params)
hess = np.flipud(hess)
hess = np.fliplr(hess)
assert_allclose(gam_der2, hess, atol=1.e-13, rtol=1.e-3)
def test_approximation():
np.random.seed(1)
poly, y = polynomial_sample_data()
alpha = 1
for _ in range(10):
params = np.random.uniform(-1, 1, 4)
cost, err, itg = cost_function(params, poly, y, alpha)
glm_gam = GLMGam(y, smoother=poly, alpha=alpha)
# TODO: why do we need pen_weight=1
gam_loglike = glm_gam.loglike(params, scale=1, pen_weight=1)
assert_allclose(err - itg, cost, rtol=1e-10)
assert_allclose(gam_loglike, cost, rtol=0.1)
def test_gam_glm():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.y.values
df = [10]
degree = [3]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = np.asarray(data_from_r.y_est)
alpha = 0.1 # chosen by trial and error
glm_gam = GLMGam(y, smoother=bsplines, alpha=alpha)
res_glm_gam = glm_gam.fit(method='bfgs', max_start_irls=0,
disp=1, maxiter=10000)
y_gam0 = np.dot(bsplines.basis, res_glm_gam.params)
y_gam = np.asarray(res_glm_gam.fittedvalues)
assert_allclose(y_gam, y_gam0, rtol=1e-10)
# plt.plot(x, y_gam, '.', label='gam')
# plt.plot(x, y_mgcv, '.', label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, atol=1.e-2)
def test_gam_discrete():
cur_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(cur_dir, "results", "prediction_from_mgcv.csv")
data_from_r = pd.read_csv(file_path)
# dataset used to train the R model
x = data_from_r.x.values
y = data_from_r.ybin.values
df = [10]
degree = [5]
bsplines = BSplines(x, degree=degree, df=df, include_intercept=True)
# y_mgcv is obtained from R with the following code
# g = gam(y~s(x, k = 10, bs = "cr"), data = data, scale = 80)
y_mgcv = data_from_r.ybin_est
alpha = 0.00002
# gp = UnivariateGamPenalty(alpha=alpha, univariate_smoother=bsplines)
# lg_gam = LogitGam(y, bsplines.basis, penal=gp)
#
lg_gam = LogitGam(y, bsplines, alpha=alpha)
res_lg_gam = lg_gam.fit(maxiter=10000)
y_gam = np.dot(bsplines.basis, res_lg_gam.params)
y_gam = sigmoid(y_gam)
y_mgcv = sigmoid(y_mgcv)
# plt.plot(x, y_gam, label='gam')
# plt.plot(x, y_mgcv, label='mgcv')
# plt.plot(x, y, '.', label='y')
# plt.ylim(-0.4, 1.4)
# plt.legend()
# plt.show()
assert_allclose(y_gam, y_mgcv, rtol=1.e-10, atol=1.e-1)
def multivariate_sample_data(seed=1):
n = 1000
x1 = np.linspace(-1, 1, n)
x2 = np.linspace(-10, 10, n)
x = np.vstack([x1, x2]).T
np.random.seed(seed)
y = x1 * x1 * x1 + x2 + np.random.normal(0, 0.01, n)
from __future__ import absolute_import, division
import sys
import argparse
import numpy as np
from numpy.linalg.linalg import LinAlgError
import astropy.io.fits as pyfits
from numpy.polynomial.legendre import legval,legfit
from scipy.signal import fftconvolve
import specter.psf
from lvmspec.io import read_image
from lvmutil.log import get_logger
from lvmspec.linalg import cholesky_solve,cholesky_solve_and_invert
from lvmspec.interpolation import resample_flux
def read_psf_and_traces(psf_filename) :
"""
Reads PSF and traces in PSF fits file
Args:
psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
Returns:
psf : specter PSF object
xtrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ytrace : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log=get_logger()
psf=None
xtrace=None
ytrace=None
wavemin=None
wavemax=None
wavemin2=None
wavemax2=None
fits_file = pyfits.open(psf_filename)
try :
psftype=fits_file[0].header["PSFTYPE"]
except KeyError :
psftype=""
if psftype=="GAUSS-HERMITE" :
psf = specter.psf.GaussHermitePSF(psf_filename)
elif psftype=="SPOTGRID" :
psf = specter.psf.SpotGridPSF(psf_filename)
# now read trace coefficients
log.info("psf is a '%s'"%psftype)
if psftype == "bootcalib" :
wavemin = fits_file[0].header["WAVEMIN"]
wavemax = fits_file[0].header["WAVEMAX"]
xcoef = fits_file[0].data
ycoef = fits_file[1].data
wavemin2 = wavemin
wavemax2 = wavemax
elif "XTRACE" in fits_file :
xtrace=fits_file["XTRACE"].data
ytrace=fits_file["YTRACE"].data
wavemin=fits_file["XTRACE"].header["WAVEMIN"]
wavemax=fits_file["XTRACE"].header["WAVEMAX"]
wavemin2=fits_file["YTRACE"].header["WAVEMIN"]
wavemax2=fits_file["YTRACE"].header["WAVEMAX"]
elif psftype == "GAUSS-HERMITE" :
table=fits_file["PSF"].data
i=np.where(table["PARAM"]=="X")[0][0]
wavemin=table["WAVEMIN"][i]
wavemax=table["WAVEMAX"][i]
xtrace=table["COEFF"][i]
i=np.where(table["PARAM"]=="Y")[0][0]
ytrace=table["COEFF"][i]
wavemin2=table["WAVEMIN"][i]
wavemax2=table["WAVEMAX"][i]
if xtrace is None or ytrace is None :
raise ValueError("could not find XTRACE and YTRACE in psf file %s"%psf_filename)
if wavemin != wavemin2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMIN %f %f"%(wavemin,wavemin2))
if wavemax != wavemax2 :
raise ValueError("XTRACE and YTRACE don't have same WAVEMAX %f %f"%(wavemax,wavemax2))
if xtrace.shape[0] != ytrace.shape[0] :
raise ValueError("XTRACE and YTRACE don't have same number of fibers %d %d"%(xtrace.shape[0],ytrace.shape[0]))
fits_file.close()
return psf,xtrace,ytrace,wavemin,wavemax
def write_traces_in_psf(input_psf_filename,output_psf_filename,xcoef,ycoef,wavemin,wavemax) :
"""
Writes traces in a PSF.
Args:
input_psf_filename : Path to input fits file which has to contain XTRACE and YTRACE HDUs
output_psf_filename : Path to output fits file which has to contain XTRACE and YTRACE HDUs
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
"""
log = get_logger()
psf_fits=pyfits.open(input_psf_filename)
psftype=psf_fits[0].header["PSFTYPE"]
modified_x=False
modified_y=False
if psftype=="GAUSS-HERMITE" :
if "X" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="X")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != xcoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(xcoef.shape)))
n0=min(ishape[0],xcoef.shape[0])
n1=min(ishape[1],xcoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=xcoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_x=True
if "Y" in psf_fits["PSF"].data["PARAM"] :
i=np.where(psf_fits["PSF"].data["PARAM"]=="Y")[0][0]
ishape=psf_fits["PSF"].data["COEFF"][i].shape
if ishape != ycoef.shape :
log.warning("xcoef from file and from arg don't have same shape : %s != %s"%(str(ishape),str(ycoef.shape)))
n0=min(psf_fits["PSF"].data["COEFF"][i].shape[0],ycoef.shape[0])
n1=min(psf_fits["PSF"].data["COEFF"][i].shape[1],ycoef.shape[1])
psf_fits["PSF"].data["COEFF"][i] *= 0.
psf_fits["PSF"].data["COEFF"][i][:n0,:n1]=ycoef[:n0,:n1]
psf_fits["PSF"].data["WAVEMIN"][i]=wavemin
psf_fits["PSF"].data["WAVEMAX"][i]=wavemax
modified_y=True
if "XTRACE" in psf_fits :
psf_fits["XTRACE"].data = xcoef
psf_fits["XTRACE"].header["WAVEMIN"] = wavemin
psf_fits["XTRACE"].header["WAVEMAX"] = wavemax
modified_x=True
if "YTRACE" in psf_fits :
psf_fits["YTRACE"].data = ycoef
psf_fits["YTRACE"].header["WAVEMIN"] = wavemin
psf_fits["YTRACE"].header["WAVEMAX"] = wavemax
modified_y=True
if not modified_x :
log.error("didn't change the X coefs in the psf: I/O error")
raise IOError("didn't change the X coefs in the psf")
if not modified_y :
log.error("didn't change the Y coefs in the psf: I/O error")
raise IOError("didn't change the Y coefs in the psf")
psf_fits.writeto(output_psf_filename,clobber=True)
log.info("wrote traces and psf in %s"%output_psf_filename)
def legx(wave,wavemin,wavemax) :
"""
Reduced coordinate (range [-1,1]) for calls to legval and legfit
Args:
wave : ND np.array
wavemin : float, min. val
wavemax : float, max. val
Returns:
array of same shape as wave
"""
return 2.*(wave-wavemin)/(wavemax-wavemin)-1.
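# Example: legx(np.array([3600., 5800., 8000.]), 3600., 8000.) -> array([-1., 0., 1.])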
# beginning of routines for cross-correlation method for trace shifts
def boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7) :
"""
Fast boxcar extraction of spectra from a preprocessed image and a trace set
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : extraction boxcar width, default is 7
Returns:
flux : 2D np.array of shape (nfibers,n0=image.shape[0]), sum of pixel values per row of length=width per fiber
ivar : 2D np.array of shape (nfibers,n0), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
wave : 2D np.array of shape (nfibers,n0), determined from the traces
"""
log=get_logger()
log.info("Starting boxcar extraction...")
if fibers is None :
fibers = np.arange(xcoef.shape[0])  # one entry per fiber in the trace set
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
# Applying a mask that keeps positive value to get the Variance by inversing the inverse variance.
var=np.zeros(image.ivar.size)
ok=image.ivar.ravel()>0
var[ok] = 1./image.ivar.ravel()[ok]
var=var.reshape(image.ivar.shape)
badimage=(image.ivar==0)
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
frame_flux = np.zeros((fibers.size,n0))
frame_ivar = np.zeros((fibers.size,n0))
frame_wave = np.zeros((fibers.size,n0))
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
for f,fiber in enumerate(fibers) :
log.info("extracting fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
frame_wave[f] = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = np.floor( legval(legx(frame_wave[f], wavemin, wavemax), xcoef[fiber]) + 0.5 ).astype(int)
mask=((xx.T>=x_of_y-hw)&(xx.T<=x_of_y+hw)).T
frame_flux[f]=image.pix[mask].reshape((n0,width)).sum(-1)
tvar=var[mask].reshape((n0,width)).sum(-1)
frame_ivar[f]=(tvar>0)/(tvar+(tvar==0))
bad=(badimage[mask].reshape((n0,width)).sum(-1))>0
frame_ivar[f,bad]=0.
return frame_flux, frame_ivar, frame_wave
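# Hedged usage sketch (file paths and fiber selection are illustrative):
#   image = read_image("preproc-r0-00000001.fits")
#   psf, xcoef, ycoef, wavemin, wavemax = read_psf_and_traces("psf-r0.fits")
#   flux, ivar, wave = boxcar_extraction(xcoef, ycoef, wavemin, wavemax, image,
#                                        fibers=np.arange(20), width=7)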
def resample_boxcar_frame(frame_flux,frame_ivar,frame_wave,oversampling=2) :
"""
Resamples the spectra in a frame obtained with boxcar extraction to the same wavelength grid, with oversampling.
Uses resample_flux routine.
Args:
frame_flux : 2D np.array of shape (nfibers,nwave), sum of pixel values per row of length=width per fiber
frame_ivar : 2D np.array of shape (nfibers,nwave), ivar[f,j] = 1/( sum_[j,b:e] (1/image.ivar) ), ivar=0 if at least 1 pixel in the row has image.ivar=0 or image.mask!=0
frame_wave : 2D np.array of shape (nfibers,nwave), determined from the traces
Optional:
oversampling : int , oversampling factor , default is 2
Returns:
flux : 2D np.array of shape (nfibers,nwave*oversampling)
ivar : 2D np.array of shape (nfibers,nwave*oversampling)
frame_wave : 1D np.array of size (nwave*oversampling)
"""
log=get_logger()
log.info("resampling with oversampling")
nfibers=frame_flux.shape[0]
wave=frame_wave[nfibers//2]
dwave=np.median(np.gradient(frame_wave))/oversampling
wave=np.linspace(wave[0],wave[-1],int((wave[-1]-wave[0])/dwave))
nwave=wave.size
flux=np.zeros((nfibers,nwave))
ivar=np.zeros((nfibers,nwave))
for i in range(nfibers) :
log.info("resampling fiber #%03d"%i)
flux[i],ivar[i] = resample_flux(wave, frame_wave[i],frame_flux[i],frame_ivar[i])
return flux,ivar,wave
def compute_dy_from_spectral_cross_correlation(flux,wave,refflux,ivar=None,hw=3.,deg=2) :
"""
Measure y offsets from two spectra expected to be on the same wavelength grid.
refflux is the assumed well calibrated spectrum.
A relative flux calibration of the two spectra is done internally.
Args:
flux : 1D array of spectral flux as a function of wavelenght
wave : 1D array of wavelength (in Angstrom)
refflux : 1D array of reference spectral flux
Optional:
ivar : 1D array of inverse variance of flux
hw : half width in Angstrom of the cross-correlation chi2 scan, default=3A corresponding approximately to 5 pixels for DESI
deg : degree of polynomial fit as a function of wavelength, only used to find and mask outliers
Returns:
delta : float, measured wavelength shift in Angstrom of flux relative to refflux
sigma : float, uncertainty on delta
"""
# absorb differences of calibration (fiberflat not yet applied)
x=(wave-wave[wave.size//2])/500.
kernel=np.exp(-x**2/2)
f1=fftconvolve(flux,kernel,mode='same')
f2=fftconvolve(refflux,kernel,mode='same')
scale=f1/f2
refflux *= scale
error_floor=0.01 #A
if ivar is None :
ivar=np.ones(flux.shape)
dwave=wave[1]-wave[0]
ihw=int(hw/dwave)+1
chi2=np.zeros((2*ihw+1))
ndata=np.sum(ivar[ihw:-ihw]>0)
for i in range(2*ihw+1) :
d=i-ihw
b=ihw+d
e=-ihw+d
if e==0 :
e=wave.size
chi2[i] = np.sum(ivar[ihw:-ihw]*(flux[ihw:-ihw]-refflux[b:e])**2)
i=np.argmin(chi2)
if i<2 or i>=chi2.size-2 :
# something went wrong
delta=0.
sigma=100.
else :
# refine minimum
hh=int(0.6/dwave)+1
b=i-hh
e=i+hh+1
if b<0 :
b=0
e=b+2*hh+1
if e>2*ihw+1 :
e=2*ihw+1
b=e-(2*hh+1)
x=dwave*(np.arange(b,e)-ihw)
c=np.polyfit(x,chi2[b:e],deg)
if c[0]>0 :
delta=-c[1]/(2.*c[0])
sigma=np.sqrt(1./c[0] + error_floor**2)
if ndata>1 :
chi2pdf=(c[0]*delta**2+c[1]*delta+c[2])/(ndata+1)
if chi2pdf>1 : sigma *= np.sqrt(chi2pdf)
else :
# something else went wrong
delta=0.
sigma=100.
'''
print("dw= %f +- %f"%(delta,sigma))
if np.abs(delta)>1. :
print("chi2/ndf=%f/%d=%f"%(chi2[i],(ndata-1),chi2[i]/(ndata-1)))
import matplotlib.pyplot as plt
x=dwave*(np.arange(chi2.size)-ihw)
plt.plot(x,chi2,"o-")
pol=np.poly1d(c)
xx=np.linspace(x[b],x[e-1],20)
plt.plot(xx,pol(xx))
plt.axvline(delta)
plt.axvline(delta-sigma)
plt.axvline(delta+sigma)
plt.show()
'''
return delta,sigma
def compute_dy_from_spectral_cross_correlations_of_frame(flux, ivar, wave , xcoef, ycoef, wavemin, wavemax, reference_flux , n_wavelength_bins = 4) :
"""
Measures y offsets from a set of resampled spectra and a reference spectrum that are on the same wavelength grid.
reference_flux is the assumed well calibrated spectrum.
Calls compute_dy_from_spectral_cross_correlation per fiber
Args:
flux : 2D np.array of shape (nfibers,nwave)
ivar : 2D np.array of shape (nfibers,nwave) , inverse variance of flux
wave : 1D array of wavelength (in Angstrom) of size nwave
reference_flux : 1D array of reference spectral flux of size nwave
Optional:
n_wavelength_bins : number of bins along wavelength
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
x_for_dy=np.array([])
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
fiber_for_dy=np.array([])
wave_for_dy=np.array([])
nfibers = flux.shape[0]
for fiber in range(nfibers) :
log.info("computing dy for fiber #%03d"%fiber)
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw=np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0))
if sw<=0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(flux[fiber,ok],wave[ok],reference_flux[ok],ivar=ivar[fiber,ok],hw=3.)
block_wave = np.sum(ivar[fiber,ok]*flux[fiber,ok]*(flux[fiber,ok]>0)*wave[ok])/sw
if err > 1 :
continue
rw = legx(block_wave,wavemin,wavemax)
tx = legval(rw,xcoef[fiber])
ty = legval(rw,ycoef[fiber])
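# convert the measured wavelength shift into a CCD y shift via the local
# dispersion dy/dwave, estimated by finite difference with step eps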
eps=0.1
yp = legval(legx(block_wave+eps,wavemin,wavemax),ycoef[fiber])
dydw = (yp-ty)/eps
tdy = -dwave*dydw
tey = err*dydw
x_for_dy=np.append(x_for_dy,tx)
y_for_dy=np.append(y_for_dy,ty)
dy=np.append(dy,tdy)
ey=np.append(ey,tey)
fiber_for_dy=np.append(fiber_for_dy,fiber)
wave_for_dy=np.append(wave_for_dy,block_wave)
return x_for_dy,y_for_dy,dy,ey,fiber_for_dy,wave_for_dy
def compute_dy_using_boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers, width=7, degyy=2) :
"""
Measures y offsets (internal wavelength calibration) from a preprocessed image and a trace set using a cross-correlation of boxcar extracted spectra.
Uses boxcar_extraction , resample_boxcar_frame , compute_dy_from_spectral_cross_correlations_of_frame
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficents for each fiber to convert wavelenght to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dy : 1D array of shifts along y coordinates on CCD
ey : 1D array of uncertainties on dy
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
# boxcar extraction
boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=7)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=4)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
# measure y shifts
return compute_dy_from_spectral_cross_correlations_of_frame(flux=flux, ivar=ivar, wave=wave, xcoef=xcoef, ycoef=ycoef, wavemin=wavemin, wavemax=wavemax, reference_flux = mflux , n_wavelength_bins = degyy+4)
def compute_dx_from_cross_dispersion_profiles(xcoef,ycoef,wavemin,wavemax, image, fibers=None, width=7,deg=2) :
"""
Measure x offsets from a preprocessed image and a trace set
Args:
xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
Optional:
fibers : 1D np.array of int (default is all fibers, the first fiber is always = 0)
width : extraction boxcar width, default is 7
deg : degree of polynomial fit as a function of y, only used to find and mask outliers
Returns:
x : 1D array of x coordinates on CCD (axis=1 in numpy image array, AXIS=0 in FITS, cross-dispersion axis = fiber number direction)
y : 1D array of y coordinates on CCD (axis=0 in numpy image array, AXIS=1 in FITS, wavelength dispersion axis)
dx : 1D array of shifts along x coordinates on CCD
ex : 1D array of uncertainties on dx
fiber : 1D array of fiber ID (first fiber = 0)
wave : 1D array of wavelength
"""
log=get_logger()
log.info("Starting compute_dx_from_cross_dispersion_profiles ...")
if fibers is None :
fibers = np.arange(xcoef.shape[0])  # one entry per fiber in the trace set
log.info("wavelength range : [%f,%f]"%(wavemin,wavemax))
if image.mask is not None :
image.ivar *= (image.mask==0)
error_floor = 0.04 # pixel
# Variance based on inverse variance's size
var = np.zeros(image.ivar.shape)
# Applying a mask that keeps positive value to get the Variance by inversing the inverse variance.
n0 = image.pix.shape[0]
n1 = image.pix.shape[1]
y = np.arange(n0)
xx = np.tile(np.arange(n1),(n0,1))
hw = width//2
ncoef=ycoef.shape[1]
twave=np.linspace(wavemin, wavemax, ncoef+2)
ox=np.array([])
oy=np.array([])
odx=np.array([])
oex=np.array([])
of=np.array([])
ol=np.array([])
for f,fiber in enumerate(fibers) :
log.info("computing dx for fiber #%03d"%fiber)
y_of_wave = legval(legx(twave, wavemin, wavemax), ycoef[fiber])
coef = legfit(legx(y_of_wave, 0, n0), twave, deg=ncoef) # add one deg
twave = legval(legx(np.arange(n0).astype(float), 0, n0), coef)
x_of_y = legval(legx(twave, wavemin, wavemax), xcoef[fiber])
x_of_y_int = np.floor(x_of_y+0.5).astype(int)
dx = (xx.T-x_of_y).T
mask=((xx.T>=x_of_y_int-hw)&(xx.T<=x_of_y_int+hw)).T
ok = ((image.ivar[mask]==0).reshape((n0,width)).sum(-1)==0)
swdx = (dx[mask] * image.pix[mask] ).reshape((n0,width)).sum(-1)
swdxvar = (dx[mask]**2/(image.ivar[mask]+0.1*(image.ivar[mask]==0) )).reshape((n0,width)).sum(-1)
sw = (image.pix[mask]).reshape((n0,width)).sum(-1)
swy = sw*y
swx = sw*x_of_y
swl = sw*twave
# rebin
rebin = 200
ok = ((ok[:(n0//rebin)*rebin].reshape(n0//rebin,rebin)==0).sum(-1)==0)
sw = sw[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdx = swdx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swdxvar = swdxvar[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swx = swx[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swy = swy[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
swl = swl[:(n0//rebin)*rebin].reshape(n0//rebin,rebin).sum(-1)
'''
import matplotlib.pyplot as plt
i=np.where((sw>0.01)&(ok>0))[0]
plt.errorbar(swy[i]/sw[i],swdx[i]/sw[i],np.sqrt(swdxvar[i])/sw[i],fmt="o")
plt.show()
'''
sw[sw<0] = 0
fex = np.sqrt(swdxvar/(sw+(sw==0))**2 + error_floor**2) # error on dx, with an error floor
ok &= (fex>0)&(fex<10) # ok means no ivar=0 pixel
fex = fex[ok]
fdx = (swdx/(sw+(sw==0)))[ok]
fx = (swx/(sw+(sw==0)))[ok]
fy = (swy/(sw+(sw==0)))[ok]
fl = (swl/(sw+(sw==0)))[ok]
good_fiber=True
for loop in range(10) :
if fdx.size < deg+2 :
good_fiber=False
break
try :
c = np.polyfit(fy,fdx,deg,w=1/fex**2)
pol = np.poly1d(c)
chi2 = (fdx-pol(fy))**2/fex**2
mchi2 = np.median(chi2)
#log.info("mchi2=%f"%mchi2)
#if mchi2>1 :
# fex *= np.sqrt(mchi2)
ok = np.where(chi2<=25.*mchi2)[0]
nbad = fdx.size-ok.size
fex = fex[ok]
fdx = fdx[ok]
fx = fx[ok]
fy = fy[ok]
fl = fl[ok]
except LinAlgError :
good_fiber=False
break
if nbad==0 :
break
#print("removing %d bad measurements"%nbad)
# we return the original sample of offset values
if good_fiber :
ox = np.append(ox,fx)
oy = np.append(oy,fy)
odx = np.append(odx,fdx)
oex = np.append(oex,fex)
of = np.append(of,fiber*np.ones(fy.size))
ol = np.append(ol,fl)
return ox,oy,odx,oex,of,ol
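# Illustrative usage sketch (hypothetical inputs, not from the original
# source): the trace coefficients and wavelength bounds would typically come
# from a PSF file, and `preproc_image` is an assumed desispec preprocessed
# image object.
#
#   x, y, dx, ex, fiber, wave = compute_dx_from_cross_dispersion_profiles(
#       xcoef, ycoef, wavemin, wavemax, preproc_image, width=7, deg=2)
#
# dx[i] is then the measured cross-dispersion offset in pixels at (x[i], y[i]),
# with uncertainty ex[i], for fiber fiber[i] at wavelength wave[i].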
def shift_ycoef_using_external_spectrum(psf,xcoef,ycoef,wavemin,wavemax,image,fibers,spectrum_filename,degyy=2,width=7) :
"""
    Measure y offsets (external wavelength calibration) from a preprocessed image, a PSF + trace set using a cross-correlation of boxcar extracted spectra
    and an external well-calibrated spectrum.
    The PSF shape is used to convolve the input spectrum. It could also be used to correct for the PSF asymmetry (disabled for now).
    A relative flux calibration of the spectra is performed internally.
Args:
psf : specter PSF
        xcoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to XCCD
        ycoef : 2D np.array of shape (nfibers,ncoef) containing Legendre coefficients for each fiber to convert wavelength to YCCD
wavemin : float
wavemax : float. wavemin and wavemax are used to define a reduced variable legx(wave,wavemin,wavemax)=2*(wave-wavemin)/(wavemax-wavemin)-1
used to compute the traces, xccd=legval(legx(wave,wavemin,wavemax),xtrace[fiber])
image : DESI preprocessed image object
fibers : 1D np.array of fiber indices
        spectrum_filename : path to input spectral file (read with np.loadtxt; first column is wavelength in vacuum and Angstroms, second column is flux in arbitrary units)
Optional:
width : int, extraction boxcar width, default is 7
degyy : int, degree of polynomial fit of shifts as a function of y, used to reject outliers.
Returns:
        ycoef : 2D np.array of same shape as input, with modified Legendre coefficients for each fiber to convert wavelength to YCCD
"""
log = get_logger()
tmp=np.loadtxt(spectrum_filename).T
ref_wave=tmp[0]
ref_spectrum=tmp[1]
log.info("read reference spectrum in %s with %d entries"%(spectrum_filename,ref_wave.size))
log.info("rextract spectra with boxcar")
# boxcar extraction
boxcar_flux, boxcar_ivar, boxcar_wave = boxcar_extraction(xcoef,ycoef,wavemin,wavemax, image, fibers=fibers, width=width)
# resampling on common finer wavelength grid
flux, ivar, wave = resample_boxcar_frame(boxcar_flux, boxcar_ivar, boxcar_wave, oversampling=2)
# median flux used as internal spectral reference
mflux=np.median(flux,axis=0)
    mivar=np.median(ivar,axis=0)*flux.shape[0]*(2./np.pi) # very approximate !
# trim ref_spectrum
i=(ref_wave>=wave[0])&(ref_wave<=wave[-1])
ref_wave=ref_wave[i]
ref_spectrum=ref_spectrum[i]
# check wave is linear or make it linear
if np.abs((ref_wave[1]-ref_wave[0])-(ref_wave[-1]-ref_wave[-2]))>0.0001*(ref_wave[1]-ref_wave[0]) :
log.info("reference spectrum wavelength is not on a linear grid, resample it")
dwave = np.min(np.gradient(ref_wave))
tmp_wave = np.linspace(ref_wave[0],ref_wave[-1],int((ref_wave[-1]-ref_wave[0])/dwave))
ref_spectrum = resample_flux(tmp_wave, ref_wave , ref_spectrum)
ref_wave = tmp_wave
try :
# compute psf at most significant line of ref_spectrum
i=np.argmax(ref_spectrum)
central_wave_for_psf_evaluation = ref_wave[i]
fiber_for_psf_evaluation = (boxcar_flux.shape[0]//2)
dwave=ref_wave[i+1]-ref_wave[i]
hw=int(3./dwave)+1 # 3A half width
wave_range = ref_wave[i-hw:i+hw+1]
x,y=psf.xy(fiber_for_psf_evaluation,wave_range)
x=np.tile(x[hw]+np.arange(-hw,hw+1)*(y[-1]-y[0])/(2*hw+1),(y.size,1))
y=np.tile(y,(2*hw+1,1)).T
kernel2d=psf._value(x,y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
kernel1d=np.sum(kernel2d,axis=1)
log.info("convolve reference spectrum using PSF at fiber %d and wavelength %dA"%(fiber_for_psf_evaluation,central_wave_for_psf_evaluation))
ref_spectrum=fftconvolve(ref_spectrum,kernel1d, mode='same')
except :
log.warning("couldn't convolve reference spectrum: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
# resample input spectrum
log.info("resample convolved reference spectrum")
ref_spectrum = resample_flux(wave, ref_wave , ref_spectrum)
log.info("absorb difference of calibration")
x=(wave-wave[wave.size//2])/50.
kernel=np.exp(-x**2/2)
f1=fftconvolve(mflux,kernel,mode='same')
f2=fftconvolve(ref_spectrum,kernel,mode='same')
scale=f1/f2
ref_spectrum *= scale
log.info("fit shifts on wavelength bins")
# define bins
n_wavelength_bins = degyy+4
y_for_dy=np.array([])
dy=np.array([])
ey=np.array([])
wave_for_dy=np.array([])
for b in range(n_wavelength_bins) :
wmin=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*b
if b<n_wavelength_bins-1 :
wmax=wave[0]+((wave[-1]-wave[0])/n_wavelength_bins)*(b+1)
else :
wmax=wave[-1]
ok=(wave>=wmin)&(wave<=wmax)
sw= np.sum(mflux[ok]*(mflux[ok]>0))
if sw==0 :
continue
dwave,err = compute_dy_from_spectral_cross_correlation(mflux[ok],wave[ok],ref_spectrum[ok],ivar=mivar[ok],hw=3.)
bin_wave = np.sum(mflux[ok]*(mflux[ok]>0)*wave[ok])/sw
x,y=psf.xy(fiber_for_psf_evaluation,bin_wave)
eps=0.1
x,yp=psf.xy(fiber_for_psf_evaluation,bin_wave+eps)
dydw=(yp-y)/eps
if err*dydw<1 :
dy=np.append(dy,-dwave*dydw)
ey=np.append(ey,err*dydw)
wave_for_dy=np.append(wave_for_dy,bin_wave)
y_for_dy=np.append(y_for_dy,y)
log.info("wave = %fA , y=%d, measured dwave = %f +- %f A"%(bin_wave,y,dwave,err))
if False : # we don't need this for now
try :
log.info("correcting bias due to asymmetry of PSF")
hw=5
oversampling=4
xx=np.tile(np.arange(2*hw*oversampling+1)-hw*oversampling,(2*hw*oversampling+1,1))/float(oversampling)
yy=xx.T
x,y=psf.xy(fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,central_wave_for_psf_evaluation)
dy_asym_central = np.sum(yy*prof)/np.sum(prof)
for i in range(dy.size) :
x,y=psf.xy(fiber_for_psf_evaluation,wave_for_dy[i])
prof=psf._value(xx+x,yy+y,fiber_for_psf_evaluation,wave_for_dy[i])
dy_asym = np.sum(yy*prof)/np.sum(prof)
log.info("y=%f, measured dy=%f , bias due to PSF asymetry = %f"%(y,dy[i],dy_asym-dy_asym_central))
dy[i] -= (dy_asym-dy_asym_central)
except :
log.warning("couldn't correct for asymmetry of PSF: %s %s"%(sys.exc_info()[0],sys.exc_info()[1]))
log.info("polynomial fit of shifts and modification of PSF ycoef")
# pol fit
coef = np.polyfit(wave_for_dy,dy,degyy,w=1./ey**2)
    pol = np.poly1d(coef)
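    # Hedged completion of the remaining steps (assumed, not verbatim desispec
    # code): turn the fitted shift polynomial into a Legendre correction and
    # add it to every fiber's y-trace coefficients.
    twave = np.linspace(wavemin, wavemax, 100)
    dycoef = legfit(legx(twave, wavemin, wavemax), pol(twave),
                    deg=ycoef.shape[1] - 1)
    for fiber in range(ycoef.shape[0]):
        ycoef[fiber] += dycoef
    return ycoef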
from numpy.random import seed
import scipy.io
from keras.utils import np_utils
import numpy as np
import pickle
import scipy as sc
def createDataset_12(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.append(mat['muestras'].item(i)[18][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
def createDataset_11(path):
seed(0)
sample = []
labels = []
subject = []
ages = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
sample.append(mat['muestras'].item(i)[18][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
age = mat['muestras'].item(i)[3]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
ages.append(age)
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject), np.array(ages)
def createDataset_15(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if np.any(mat['muestras'].item(i)[23][:, 1:4]):
sample.append(mat['muestras'].item(i)[23][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
sample = np.expand_dims(sample, 1)
return sample, np.array(labels), np.array(subject)
def createDataset_07(path):
seed(0)
sample = []
labels = []
subject = []
mat = scipy.io.loadmat(path)
for i in range(mat['muestras']['Experiment_ID'].size):
if np.any(mat['muestras'].item(i)[19][:, 1:4]):
sample.append(mat['muestras'].item(i)[19][:, 1:4])
subject.append(np_utils.to_categorical(int(mat['muestras'].item(i)[2][0][-1]), 30))
label = mat['muestras'].item(i)[7]
filter_label = lambda label: 1 if label == "Fall" else 0
label = filter_label(label)
labels.append(np_utils.to_categorical(label, 2))
    sample = np.expand_dims(sample, 1)
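    # assumed return, mirroring createDataset_15 above
    return sample, np.array(labels), np.array(subject)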
import numpy as np
import os
import glob
import sys
sys.path.append(r"../../../utils")
from partImage import img2patches
IMG_PATCH_SIZE = 128
def rejectOutliers(data, m=3.5):
data = np.array(data)
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d / (mdev if mdev else 1.)
return data[s < m]
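# Worked example (illustrative): for data = [1, 2, 3, 100] the median is 2.5,
# d = [1.5, 0.5, 0.5, 97.5] and mdev = 1.0, so s = d and only the 100 fails
# s < 3.5 -- rejectOutliers([1, 2, 3, 100]) returns array([1, 2, 3]).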
def evaluateNoiseEstimationMedianMinMax(dirsIn, pathLogFile, noiseLevels):
"""
Evaluate robust (5% outliers rejected) median, min and max statistics of noise estimations.
:param dirsIn: Main directories of noise estimation results. Each directory is supposed to contain
sub-directories with noise estimation results (.npz) according to the applied noise levels (e.g., 0, 1, 2, ...).
:param pathLogFile: Path to the output log file.
:param noiseLevels: Applied ground truth noise levels when corrupting images.
:return: Null
"""
def writeLogEntry(logStream, entry):
logStream.write(entry + "\n")
print(entry)
with open(pathLogFile, "w") as log:
for dirIdx, dirPath in enumerate(dirsIn):
writeLogEntry(log, "Method: " + dirPath)
dirsInSplit = dirPath.rsplit(os.sep, 1)
baseDir, corruptionTypeDir = dirsInSplit[-2], dirsInSplit[-1]
medianByNoiseLevel = {key: [] for key in noiseLevels}
minMaxErrorsByNoiseLevel = {key: [[], []] for key in noiseLevels}
for noiseLevel in noiseLevels:
noiseLevelStr = os.path.join(corruptionTypeDir, str(noiseLevel))
resultFilPaths = glob.glob(os.path.join(baseDir, noiseLevelStr, "*.npz"))
numResultFiles = 0
writeLogEntry(log, "Noise Level: " + str(noiseLevel) + ", file count: " + str(len(resultFilPaths)) + ".")
for idx, resultPath in enumerate(resultFilPaths):
try:
noiseResultMat = np.load(resultPath)["arr_0"]
except:
continue
patches = img2patches(noiseResultMat, IMG_PATCH_SIZE, IMG_PATCH_SIZE, IMG_PATCH_SIZE, IMG_PATCH_SIZE)
if len(patches) == 0 or len(patches.shape) != 3:
print("Skip result file", resultPath, "due to an invalid patch size.")
continue
# Since the CNN estimates the noise level pixelwise, aggregate the estimations patch-wise using their median values first.
# Then, buffer min and max statistics.
patchMedians = np.median(patches, axis=(1,2))
minVal = np.min(patchMedians)
                    maxVal = np.max(patchMedians)
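                    # Hedged continuation (assumed from the accumulator
                    # structures defined above; not verbatim source):
                    medianByNoiseLevel[noiseLevel].append(np.median(patchMedians))
                    minMaxErrorsByNoiseLevel[noiseLevel][0].append(minVal)
                    minMaxErrorsByNoiseLevel[noiseLevel][1].append(maxVal)
                    numResultFiles += 1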
"""
csalt_models.py
Usage:
- import modules
Outputs:
- various
"""
import os, sys
import numpy as np
from astropy.io import fits
from vis_sample import vis_sample
from scipy.ndimage import convolve1d
from scipy.interpolate import interp1d
from vis_sample.classes import *
from simple_disk import simple_disk
import const as const
import matplotlib.pyplot as plt
def cube_parser(pars, FOV=8, Npix=128, dist=150, r_min=0, r_max=500, r0=10,
RA=240, DEC=-40, restfreq=230.538e9, Vsys=0, vel=None,
datafile=None, outfile=None):
### Generate a model disk
disk = simple_disk(pars[0], pars[1], x0=0, y0=0, dist=dist, mstar=pars[2],
r_min=r_min, r_max=r_max, r0=r0, r_l=pars[3],
z0=pars[4], zpsi=pars[5], zphi=np.inf,
Tb0=pars[6], Tbq=pars[7], Tbeps=np.inf, Tbmax=1000,
Tbmax_b=pars[8], tau0=1000, tauq=0, taueta=np.inf,
taumax=5000, dV0=pars[9], dVq=0.5*pars[7], dVmax=1000,
FOV=FOV, Npix=Npix)
### Set velocities for cube (either use the channels in an already-existing
### cube from a .FITS file, or use the provided values)
if datafile is not None:
hd = fits.open(datafile)[0].header
f0, ix, nf, df = hd['CRVAL4'], hd['CRPIX4'], hd['NAXIS4'], hd['CDELT4']
freqs = f0 + (np.arange(nf) - ix + 1) * df
vel = const.c_ * (1 - freqs / restfreq)
else:
freqs = restfreq * (1 - vel / const.c_)
# adjust for systemic velocity
vlsr = vel - Vsys
### Generate the spectral line cube
cube = disk.get_cube(vlsr)
# convert from brightness temperatures to Jy / pixel
pixel_area = (disk.cell_sky * np.pi / (180 * 3600))**2
for i in range(len(freqs)):
cube[i,:,:] *= 1e26 * pixel_area * 2 * freqs[i]**2 * \
const.k_ / const.c_**2
### Prepare the output: either into the specified .FITS file or into a
### vis_sample "SKY OBJECT".
if outfile is not None:
hdu = fits.PrimaryHDU(cube[:,::-1,:])
header = hdu.header
# basic header inputs
header['EPOCH'] = 2000.
header['EQUINOX'] = 2000.
header['LATPOLE'] = -1.436915713634E+01
header['LONPOLE'] = 180.
# spatial coordinates
header['CTYPE1'] = 'RA---SIN'
header['CUNIT1'] = 'DEG'
header['CDELT1'] = -disk.cell_sky / 3600.
header['CRPIX1'] = 0.5 * disk.Npix + 0.5
header['CRVAL1'] = RA
header['CTYPE2'] = 'DEC--SIN'
header['CUNIT2'] = 'DEG'
header['CDELT2'] = disk.cell_sky / 3600.
header['CRPIX2'] = 0.5 * disk.Npix + 0.5
header['CRVAL2'] = DEC
# frequency coordinates
header['CTYPE3'] = 'FREQ'
header['CUNIT3'] = 'Hz'
header['CRPIX3'] = 1.
header['CDELT3'] = freqs[1]-freqs[0]
header['CRVAL3'] = freqs[0]
header['SPECSYS'] = 'LSRK'
header['VELREF'] = 257
# intensity units
header['BSCALE'] = 1.
header['BZERO'] = 0.
header['BUNIT'] = 'JY/PIXEL'
header['BTYPE'] = 'Intensity'
# output FITS
hdu.writeto(outfile, overwrite=True)
return cube[:,::-1,:]
# otherwise, return a vis_sample SkyObject
else:
# adjust cube formatting
mod_data = np.rollaxis(cube[:,::-1,:], 0, 3)
# spatial coordinates
npix_ra = disk.Npix
mid_pix_ra = 0.5 * disk.Npix + 0.5
delt_ra = -disk.cell_sky / 3600
if (delt_ra < 0):
mod_data = np.fliplr(mod_data)
mod_ra = (np.arange(npix_ra) - (mid_pix_ra-0.5))*np.abs(delt_ra)*3600
npix_dec = disk.Npix
mid_pix_dec = 0.5 * disk.Npix + 0.5
delt_dec = disk.cell_sky / 3600
if (delt_dec < 0):
mod_data = np.flipud(mod_data)
mod_dec = (np.arange(npix_dec)-(mid_pix_dec-0.5))*np.abs(delt_dec)*3600
# spectral coordinates
try:
nchan_freq = len(freqs)
mid_chan_freq = freqs[0]
mid_chan = 1
delt_freq = freqs[1] - freqs[0]
mod_freqs = (np.arange(nchan_freq)-(mid_chan-1))*delt_freq + \
mid_chan_freq
except:
mod_freqs = [0]
# return a vis_sample SkyImage object
return SkyImage(mod_data, mod_ra, mod_dec, mod_freqs, None)
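# Illustrative usage sketch (all parameter values hypothetical): build a cube
# on a uniform LSRK velocity grid and get back a vis_sample SkyImage,
#
#   vel = np.linspace(-5e3, 5e3, 64)   # channel velocities in m/s
#   pars = [40., 130., 1.0, 200., 0.3, 1.0, 150., 0.5, 20., 350.]
#   sky = cube_parser(pars, FOV=8, Npix=256, dist=150, vel=vel)
#
# Passing outfile='model.fits' instead writes the cube straight to FITS.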
def vismodel_full(pars, fixed, dataset,
chpad=3, oversample=None, noise_inject=None):
### - Prepare inputs
# Parse fixed parameters
restfreq, FOV, Npix, dist, rmax = fixed
npars = len(pars)
# Spatial frequencies to lambda units
uu = dataset.um * np.mean(dataset.nu_TOPO) / const.c_
vv = dataset.vm * np.mean(dataset.nu_TOPO) / const.c_
# Pad the frequency arrays
dnu_TOPO = np.diff(dataset.nu_TOPO)[0]
nu_TOPO_s = dataset.nu_TOPO[0] + dnu_TOPO * np.arange(-chpad, 0, 1)
nu_TOPO_f = dataset.nu_TOPO[-1] + dnu_TOPO * np.arange(1, chpad+1, 1)
dataset.nu_TOPO = np.concatenate((nu_TOPO_s, dataset.nu_TOPO, nu_TOPO_f))
dnu_LSRK = np.diff(dataset.nu_LSRK, axis=1)[:,0]
nu_LSRK_s = (dataset.nu_LSRK[:,0])[:,None] + \
dnu_LSRK[:,None] * np.arange(-chpad, 0, 1)[None,:]
nu_LSRK_f = (dataset.nu_LSRK[:,-1])[:,None] + \
dnu_LSRK[:,None] * np.arange(1, chpad+1, 1)[None,:]
dataset.nu_LSRK = np.concatenate((nu_LSRK_s, dataset.nu_LSRK, nu_LSRK_f),
axis=1)
# Upsample in the spectral domain (if necessary)
if oversample is not None:
nchan = dataset.nchan + 2 * chpad
nu_TOPO = np.interp(np.arange((nchan-1) * oversample + 1),
np.arange(0, nchan * oversample, oversample),
dataset.nu_TOPO)
nch = len(nu_TOPO)
        nu_LSRK = np.empty((dataset.nstamps, nch))
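        # Assumed continuation (hedged sketch, not verbatim csalt code):
        # upsample each timestamp's LSRK frequencies onto the same finer grid.
        for itime in range(dataset.nstamps):
            nu_LSRK[itime, :] = np.interp(
                np.arange((nchan - 1) * oversample + 1),
                np.arange(0, nchan * oversample, oversample),
                dataset.nu_LSRK[itime, :])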
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2011, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
'''
================
Tree Classifier
================
Decision tree based classifier
------------------------------
'''
from __future__ import division
import numpy as np
from .classifier import normaliselabels
from .base import supervised_model
__all__ = [
'tree_learner',
'stump_learner',
]
class Leaf(object):
'''
v : value
w : weight
'''
def __init__(self, v, w):
self.v = v
self.w = w
def __repr__(self):
return 'Leaf(%s,%s)' % (self.v, self.w)
class Node(object): # This could be replaced by a namedtuple
def __init__(self, featid, featval, left, right):
self.featid = featid
self.featval = featval
self.left = left
self.right = right
def _split(features, labels, weights, criterion, subsample, R):
N,f = features.shape
if subsample is not None:
samples = np.array(R.sample(range(features.shape[1]), subsample))
features = features[:, samples]
f = subsample
best = None
best_val = float('-Inf')
for i in range(f):
domain_i = sorted(set(features[:,i]))
for d in domain_i[1:]:
cur_split = (features[:,i] < d)
if weights is not None:
value = criterion(labels[cur_split], labels[~cur_split], weights[cur_split], weights[~cur_split])
else:
value = criterion(labels[cur_split], labels[~cur_split])
if value > best_val:
best_val = value
if subsample is not None:
ti = samples[i]
else:
ti = i
best = ti,d
return best
from ._tree import set_entropy
from ._tree import information_gain as _information_gain
def information_gain(labels0, labels1, include_entropy=False):
'''
ig = information_gain(labels0, labels1, include_entropy=False)
Information Gain
See http://en.wikipedia.org/wiki/Information_gain_in_decision_trees
The function calculated here does not include the original entropy unless
you explicitly ask for it (by passing include_entropy=True)
'''
if include_entropy:
return set_entropy(np.concatenate( (labels0, labels1) )) + \
_information_gain(labels0, labels1)
return _information_gain(labels0, labels1)
def z1_loss(labels0, labels1, weights0=None, weights1=None):
'''
z = z1_loss(labels0, labels1)
z = z1_loss(labels0, labels1, weights0, weights1)
zero-one loss
'''
def _acc(labels, weights):
c = (labels.mean() > .5)
if weights is not None:
return np.dot((labels != c), weights)
return np.sum(labels != c)
return _acc(labels0, weights0) + _acc(labels1, weights1)
def neg_z1_loss(labels0, labels1, weights0=None, weights1=None):
'''
z = neg_z1_loss(labels0, labels1)
z = neg_z1_loss(labels0, labels1, weights0, weights1)
zero-one loss, with the sign reversed so it can be *maximised*.
'''
return -z1_loss(labels0,labels1,weights0,weights1)
def build_tree(features, labels, criterion, min_split=4, subsample=None, R=None, weights=None):
'''
tree = build_tree(features, labels, criterion, min_split=4, subsample=None, R=None, weights={all 1s})
Parameters
----------
features : sequence
features to use
labels : sequence
labels
criterion : function {labels} x {labels} -> float
function to measure goodness of split
min_split : integer
minimum size to split on
subsample : integer, optional
if given, then, at each step, choose
R : source of randomness, optional
See `milk.util.get_pyrandom`
weights : sequence, optional
weight of instance (default: all the same)
Returns
-------
tree : Tree
'''
assert len(features) == len(labels), 'build_tree: Nr of labels does not match nr of features'
features = np.asanyarray(features)
    labels = np.asanyarray(labels, dtype=int)  # builtin int: np.int was removed in NumPy 1.24
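    # Hedged sketch of the recursive construction (assumed, not verbatim milk
    # source): split greedily with `criterion` until fewer than `min_split`
    # examples remain, then emit a Leaf holding the label mean and the weight.
    def recurse(features, labels, weights):
        N = float(len(labels))
        if N < min_split:
            return Leaf(labels.mean(), N)
        best = _split(features, labels, weights, criterion, subsample, R)
        if best is None:
            return Leaf(labels.mean(), N)
        i, thresh = best
        left = features[:, i] < thresh
        w_left = weights[left] if weights is not None else None
        w_right = weights[~left] if weights is not None else None
        return Node(i, thresh,
                    recurse(features[left], labels[left], w_left),
                    recurse(features[~left], labels[~left], w_right))
    return recurse(features, labels, weights)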
#!/usr/bin/env python3
"""
RGBD camera sensor simulation for pybullet_ros based on pybullet.getCameraImage().
"""
import math
import numpy as np
import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
class RGBDCamera:
def __init__(self, pybullet, robot):
self._pb = pybullet
self._robot = robot
# create image msg placeholder for publication
self._image_msg = Image()
# get RGBD camera parameters from ROS param server
self._image_msg.width = rospy.get_param('~rgbd_camera/resolution/width', 640)
self._image_msg.height = rospy.get_param('~rgbd_camera/resolution/height', 480)
assert (self._image_msg.width > 5)
assert (self._image_msg.height > 5)
cam_frame_name = rospy.get_param('~rgbd_camera/frame_name', None)
if not cam_frame_name:
rospy.logerr("[RGBDCamera::init] Required parameter 'rgbd_camera/frame_name' not set, will exit now.")
rospy.signal_shutdown("[RGBDCamera::init] Required param 'rgbd_camera/frame_name' not set.")
return
# get pybullet camera link id from its name
if cam_frame_name not in self._robot.link_names:
rospy.logerr(
"[RGBDCamera::init] RGBD camera reference frame '{}' not found in URDF model, cannot continue.".format(
cam_frame_name))
rospy.logwarn("[RGBDCamera::init] Available frames are: {}.".format(self._robot.link_names))
rospy.signal_shutdown("[RGBDCamera::init] Required param 'rgbd_camera/frame_name' not set properly.")
return
self._camera_link_id = self._robot.get_link_index_by_name(cam_frame_name)
self._image_msg.header.frame_id = cam_frame_name
# create publisher
self._publisher = rospy.Publisher('rgb_image', Image, queue_size=1)
self._image_msg.encoding = rospy.get_param('~rgbd_camera/resolution/encoding', 'rgb8')
        # param names follow the message fields (the original reused the
        # 'encoding' key for all three, which looks like a copy-paste slip)
        self._image_msg.is_bigendian = rospy.get_param('~rgbd_camera/resolution/is_bigendian', 0)
        self._image_msg.step = rospy.get_param('~rgbd_camera/resolution/step', 1920)
# projection matrix
hfov = rospy.get_param('~rgbd_camera/hfov', 56.3)
vfov = rospy.get_param('~rgbd_camera/vfov', 43.7)
near_plane = rospy.get_param('~rgbd_camera/near_plane', 0.4)
far_plane = rospy.get_param('~rgbd_camera/far_plane', 8)
self._projection_matrix = self._compute_projection_matrix(hfov, vfov, near_plane, far_plane)
# use cv_bridge ros to convert cv matrix to ros format
self._image_bridge = CvBridge()
# variable used to run this plugin at a lower frequency, HACK
self._count = 0
def _compute_projection_matrix(self, hfov, vfov, near_plane, far_plane):
return self._pb.computeProjectionMatrix(
left=-math.tan(math.pi * hfov / 360.0) * near_plane,
right=math.tan(math.pi * hfov / 360.0) * near_plane,
bottom=-math.tan(math.pi * vfov / 360.0) * near_plane,
top=math.tan(math.pi * vfov / 360.0) * near_plane,
nearVal=near_plane,
farVal=far_plane)
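    # The frustum half-extents above follow from the pinhole model: at the
    # near plane, half-width = near_plane * tan(hfov/2) and
    # half-height = near_plane * tan(vfov/2). E.g. hfov=56.3 deg with
    # near_plane=0.4 m gives left/right = -/+ 0.4*tan(28.15 deg) ~ 0.214 m.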
def _extract_frame(self, camera_image):
        bgr_image = np.zeros((self._image_msg.height, self._image_msg.width, 3))
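        # Hedged completion (assumed from pybullet.getCameraImage()'s return
        # layout, i.e. (width, height, rgbaPixels, ...); not verbatim source):
        # copy the RGB channels into BGR order and drop alpha.
        rgba = np.reshape(np.array(camera_image[2]),
                          (camera_image[1], camera_image[0], 4))
        bgr_image[:, :, 0] = rgba[:, :, 2]
        bgr_image[:, :, 1] = rgba[:, :, 1]
        bgr_image[:, :, 2] = rgba[:, :, 0]
        return bgr_image.astype(np.uint8)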
from collections import defaultdict
from functools import partial
import itertools
import operator
import re
from typing import List, Optional, Sequence, Tuple, Union
import numpy as np
from pandas._libs import Timedelta, Timestamp, internals as libinternals, lib
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
find_common_type,
infer_dtype_from_scalar,
maybe_convert_objects,
maybe_promote,
)
from pandas.core.dtypes.common import (
_NS_DTYPE,
is_extension_array_dtype,
is_list_like,
is_scalar,
is_sparse,
)
from pandas.core.dtypes.concat import concat_compat
from pandas.core.dtypes.dtypes import ExtensionDtype
from pandas.core.dtypes.generic import ABCExtensionArray, ABCSeries
from pandas.core.dtypes.missing import isna
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.indexers import maybe_convert_indices
from pandas.io.formats.printing import pprint_thing
from .blocks import (
Block,
CategoricalBlock,
DatetimeTZBlock,
ExtensionBlock,
ObjectValuesExtensionBlock,
_extend_blocks,
_merge_blocks,
_safe_reshape,
get_block_type,
make_block,
)
from .concat import ( # all for concatenate_block_managers
combine_concat_plans,
concatenate_join_units,
get_mgr_concatenation_plan,
is_uniform_join_units,
)
# TODO: flexible with index=None and/or items=None
class BlockManager(PandasObject):
"""
Core internal data structure to implement DataFrame, Series, etc.
Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
lightweight blocked set of labeled data to be manipulated by the DataFrame
public API class
Attributes
----------
shape
ndim
axes
values
items
Methods
-------
set_axis(axis, new_labels)
copy(deep=True)
get_dtype_counts
get_ftype_counts
get_dtypes
get_ftypes
apply(func, axes, block_filter_fn)
get_bool_data
get_numeric_data
get_slice(slice_like, axis)
get(label)
iget(loc)
take(indexer, axis)
reindex_axis(new_labels, axis)
reindex_indexer(new_labels, indexer, axis)
delete(label)
insert(loc, label, value)
set(label, value)
Parameters
----------
Notes
-----
This is *not* a public API class
"""
__slots__ = [
"axes",
"blocks",
"_ndim",
"_shape",
"_known_consolidated",
"_is_consolidated",
"_blknos",
"_blklocs",
]
def __init__(
self,
blocks: Sequence[Block],
axes: Sequence[Index],
do_integrity_check: bool = True,
):
self.axes = [ensure_index(ax) for ax in axes]
self.blocks = tuple(blocks) # type: Tuple[Block, ...]
for block in blocks:
if self.ndim != block.ndim:
raise AssertionError(
"Number of Block dimensions ({block}) must equal "
"number of axes ({self})".format(block=block.ndim, self=self.ndim)
)
if do_integrity_check:
self._verify_integrity()
self._consolidate_check()
self._rebuild_blknos_and_blklocs()
def make_empty(self, axes=None):
""" return an empty BlockManager with the items axis of len 0 """
if axes is None:
axes = [ensure_index([])] + [ensure_index(a) for a in self.axes[1:]]
# preserve dtype if possible
if self.ndim == 1:
blocks = np.array([], dtype=self.array_dtype)
else:
blocks = []
return self.__class__(blocks, axes)
def __nonzero__(self):
return True
# Python3 compat
__bool__ = __nonzero__
@property
def shape(self):
return tuple(len(ax) for ax in self.axes)
@property
def ndim(self) -> int:
return len(self.axes)
def set_axis(self, axis, new_labels):
new_labels = ensure_index(new_labels)
old_len = len(self.axes[axis])
new_len = len(new_labels)
if new_len != old_len:
raise ValueError(
"Length mismatch: Expected axis has {old} elements, new "
"values have {new} elements".format(old=old_len, new=new_len)
)
self.axes[axis] = new_labels
def rename_axis(self, mapper, axis, copy=True, level=None):
"""
Rename one of axes.
Parameters
----------
mapper : unary callable
axis : int
copy : boolean, default True
level : int, default None
"""
obj = self.copy(deep=copy)
obj.set_axis(axis, _transform_index(self.axes[axis], mapper, level))
return obj
@property
def _is_single_block(self):
if self.ndim == 1:
return True
if len(self.blocks) != 1:
return False
blk = self.blocks[0]
return blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice == slice(
0, len(self), 1
)
def _rebuild_blknos_and_blklocs(self):
"""
Update mgr._blknos / mgr._blklocs.
"""
new_blknos = np.empty(self.shape[0], dtype=np.int64)
new_blklocs = np.empty(self.shape[0], dtype=np.int64)
new_blknos.fill(-1)
new_blklocs.fill(-1)
for blkno, blk in enumerate(self.blocks):
rl = blk.mgr_locs
new_blknos[rl.indexer] = blkno
new_blklocs[rl.indexer] = np.arange(len(rl))
if (new_blknos == -1).any():
raise AssertionError("Gaps in blk ref_locs")
self._blknos = new_blknos
self._blklocs = new_blklocs
@property
def items(self):
return self.axes[0]
def _get_counts(self, f):
""" return a dict of the counts of the function in BlockManager """
self._consolidate_inplace()
counts = dict()
for b in self.blocks:
v = f(b)
counts[v] = counts.get(v, 0) + b.shape[0]
return counts
def get_dtype_counts(self):
return self._get_counts(lambda b: b.dtype.name)
def get_ftype_counts(self):
return self._get_counts(lambda b: b.ftype)
def get_dtypes(self):
dtypes = np.array([blk.dtype for blk in self.blocks])
return algos.take_1d(dtypes, self._blknos, allow_fill=False)
def get_ftypes(self):
ftypes = np.array([blk.ftype for blk in self.blocks])
return algos.take_1d(ftypes, self._blknos, allow_fill=False)
def __getstate__(self):
block_values = [b.values for b in self.blocks]
block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
axes_array = list(self.axes)
extra_state = {
"0.14.1": {
"axes": axes_array,
"blocks": [
dict(values=b.values, mgr_locs=b.mgr_locs.indexer)
for b in self.blocks
],
}
}
# First three elements of the state are to maintain forward
# compatibility with 0.13.1.
return axes_array, block_values, block_items, extra_state
def __setstate__(self, state):
def unpickle_block(values, mgr_locs):
return make_block(values, placement=mgr_locs)
if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
state = state[3]["0.14.1"]
self.axes = [ensure_index(ax) for ax in state["axes"]]
self.blocks = tuple(
unpickle_block(b["values"], b["mgr_locs"]) for b in state["blocks"]
)
else:
# discard anything after 3rd, support beta pickling format for a
# little while longer
ax_arrays, bvalues, bitems = state[:3]
self.axes = [ensure_index(ax) for ax in ax_arrays]
if len(bitems) == 1 and self.axes[0].equals(bitems[0]):
# This is a workaround for pre-0.14.1 pickles that didn't
# support unpickling multi-block frames/panels with non-unique
# columns/items, because given a manager with items ["a", "b",
# "a"] there's no way of knowing which block's "a" is where.
#
# Single-block case can be supported under the assumption that
# block items corresponded to manager items 1-to-1.
all_mgr_locs = [slice(0, len(bitems[0]))]
else:
all_mgr_locs = [
self.axes[0].get_indexer(blk_items) for blk_items in bitems
]
self.blocks = tuple(
unpickle_block(values, mgr_locs)
for values, mgr_locs in zip(bvalues, all_mgr_locs)
)
self._post_setstate()
def _post_setstate(self):
self._is_consolidated = False
self._known_consolidated = False
self._rebuild_blknos_and_blklocs()
def __len__(self) -> int:
return len(self.items)
def __repr__(self) -> str:
output = pprint_thing(self.__class__.__name__)
for i, ax in enumerate(self.axes):
if i == 0:
output += "\nItems: {ax}".format(ax=ax)
else:
output += "\nAxis {i}: {ax}".format(i=i, ax=ax)
for block in self.blocks:
output += "\n{block}".format(block=pprint_thing(block))
return output
def _verify_integrity(self):
mgr_shape = self.shape
tot_items = sum(len(x.mgr_locs) for x in self.blocks)
for block in self.blocks:
if block._verify_integrity and block.shape[1:] != mgr_shape[1:]:
construction_error(tot_items, block.shape[1:], self.axes)
if len(self.items) != tot_items:
raise AssertionError(
"Number of manager items must equal union of "
"block items\n# manager items: {0}, # "
"tot_items: {1}".format(len(self.items), tot_items)
)
def apply(
self,
f,
axes=None,
filter=None,
do_integrity_check=False,
consolidate=True,
**kwargs,
):
"""
iterate over the blocks, collect and create a new block manager
Parameters
----------
f : the callable or function name to operate on at the block level
axes : optional (if not supplied, use self.axes)
filter : list, if supplied, only call the block if the filter is in
the block
do_integrity_check : boolean, default False. Do the block manager
integrity check
consolidate: boolean, default True. Join together blocks having same
dtype
Returns
-------
Block Manager (new object)
"""
result_blocks = []
# filter kwarg is used in replace-* family of methods
if filter is not None:
filter_locs = set(self.items.get_indexer_for(filter))
if len(filter_locs) == len(self.items):
# All items are included, as if there were no filtering
filter = None
else:
kwargs["filter"] = filter_locs
if consolidate:
self._consolidate_inplace()
if f == "where":
align_copy = True
if kwargs.get("align", True):
align_keys = ["other", "cond"]
else:
align_keys = ["cond"]
elif f == "putmask":
align_copy = False
if kwargs.get("align", True):
align_keys = ["new", "mask"]
else:
align_keys = ["mask"]
elif f == "fillna":
# fillna internally does putmask, maybe it's better to do this
# at mgr, not block level?
align_copy = False
align_keys = ["value"]
else:
align_keys = []
# TODO(EA): may interfere with ExtensionBlock.setitem for blocks
# with a .values attribute.
aligned_args = {
k: kwargs[k]
for k in align_keys
if not isinstance(kwargs[k], ABCExtensionArray)
and hasattr(kwargs[k], "values")
}
for b in self.blocks:
if filter is not None:
if not b.mgr_locs.isin(filter_locs).any():
result_blocks.append(b)
continue
if aligned_args:
b_items = self.items[b.mgr_locs.indexer]
for k, obj in aligned_args.items():
axis = obj._info_axis_number
kwargs[k] = obj.reindex(b_items, axis=axis, copy=align_copy)
applied = getattr(b, f)(**kwargs)
result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
bm = self.__class__(
result_blocks, axes or self.axes, do_integrity_check=do_integrity_check
)
bm._consolidate_inplace()
return bm
def quantile(
self,
axis=0,
consolidate=True,
transposed=False,
interpolation="linear",
qs=None,
numeric_only=None,
):
"""
Iterate over blocks applying quantile reduction.
This routine is intended for reduction type operations and
will do inference on the generated blocks.
Parameters
----------
axis: reduction axis, default 0
consolidate: boolean, default True. Join together blocks having same
dtype
transposed: boolean, default False
we are holding transposed data
interpolation : type of interpolation, default 'linear'
qs : a scalar or list of the quantiles to be computed
numeric_only : ignored
Returns
-------
Block Manager (new object)
"""
# Series dispatches to DataFrame for quantile, which allows us to
# simplify some of the code here and in the blocks
assert self.ndim >= 2
if consolidate:
self._consolidate_inplace()
def get_axe(block, qs, axes):
# Because Series dispatches to DataFrame, we will always have
# block.ndim == 2
from pandas import Float64Index
if is_list_like(qs):
ax = Float64Index(qs)
else:
ax = axes[0]
return ax
axes, blocks = [], []
for b in self.blocks:
block = b.quantile(axis=axis, qs=qs, interpolation=interpolation)
axe = get_axe(b, qs, axes=self.axes)
axes.append(axe)
blocks.append(block)
# note that some DatetimeTZ, Categorical are always ndim==1
ndim = {b.ndim for b in blocks}
assert 0 not in ndim, ndim
if 2 in ndim:
new_axes = list(self.axes)
# multiple blocks that are reduced
if len(blocks) > 1:
new_axes[1] = axes[0]
# reset the placement to the original
for b, sb in zip(blocks, self.blocks):
b.mgr_locs = sb.mgr_locs
else:
new_axes[axis] = Index(np.concatenate([ax.values for ax in axes]))
if transposed:
new_axes = new_axes[::-1]
blocks = [
b.make_block(b.values.T, placement=np.arange(b.shape[1]))
for b in blocks
]
return self.__class__(blocks, new_axes)
# single block, i.e. ndim == {1}
values = concat_compat([b.values for b in blocks])
# compute the orderings of our original data
if len(self.blocks) > 1:
indexer = np.empty(len(self.axes[0]), dtype=np.intp)
i = 0
for b in self.blocks:
for j in b.mgr_locs:
indexer[j] = i
i = i + 1
values = values.take(indexer)
return SingleBlockManager(
[make_block(values, ndim=1, placement=np.arange(len(values)))], axes[0]
)
def isna(self, func, **kwargs):
return self.apply("apply", func=func, **kwargs)
def where(self, **kwargs):
return self.apply("where", **kwargs)
def setitem(self, **kwargs):
return self.apply("setitem", **kwargs)
def putmask(self, **kwargs):
return self.apply("putmask", **kwargs)
def diff(self, **kwargs):
return self.apply("diff", **kwargs)
def interpolate(self, **kwargs):
return self.apply("interpolate", **kwargs)
def shift(self, **kwargs):
return self.apply("shift", **kwargs)
def fillna(self, **kwargs):
return self.apply("fillna", **kwargs)
def downcast(self, **kwargs):
return self.apply("downcast", **kwargs)
def astype(self, dtype, **kwargs):
return self.apply("astype", dtype=dtype, **kwargs)
def convert(self, **kwargs):
return self.apply("convert", **kwargs)
def replace(self, value, **kwargs):
assert np.ndim(value) == 0, value
return self.apply("replace", value=value, **kwargs)
def replace_list(self, src_list, dest_list, inplace=False, regex=False):
""" do a list replace """
inplace = validate_bool_kwarg(inplace, "inplace")
# figure out our mask a-priori to avoid repeated replacements
values = self.as_array()
def comp(s, regex=False):
"""
Generate a bool array by perform an equality check, or perform
an element-wise regular expression matching
"""
if isna(s):
return isna(values)
if isinstance(s, (Timedelta, Timestamp)) and getattr(s, "tz", None) is None:
return _compare_or_regex_search(
maybe_convert_objects(values), s.asm8, regex
)
return _compare_or_regex_search(values, s, regex)
masks = [comp(s, regex) for i, s in enumerate(src_list)]
result_blocks = []
src_len = len(src_list) - 1
for blk in self.blocks:
            # it's possible to get multiple result blocks here
# replace ALWAYS will return a list
rb = [blk if inplace else blk.copy()]
for i, (s, d) in enumerate(zip(src_list, dest_list)):
# TODO: assert/validate that `d` is always a scalar?
new_rb = []
for b in rb:
m = masks[i][b.mgr_locs.indexer]
convert = i == src_len
result = b._replace_coerce(
mask=m,
to_replace=s,
value=d,
inplace=inplace,
convert=convert,
regex=regex,
)
if m.any():
new_rb = _extend_blocks(result, new_rb)
else:
new_rb.append(b)
rb = new_rb
result_blocks.extend(rb)
bm = self.__class__(result_blocks, self.axes)
bm._consolidate_inplace()
return bm
def is_consolidated(self):
"""
Return True if more than one block with the same dtype
"""
if not self._known_consolidated:
self._consolidate_check()
return self._is_consolidated
def _consolidate_check(self):
ftypes = [blk.ftype for blk in self.blocks]
self._is_consolidated = len(ftypes) == len(set(ftypes))
self._known_consolidated = True
@property
def is_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return len(self.blocks) > 1
@property
def is_numeric_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return all(block.is_numeric for block in self.blocks)
@property
def is_datelike_mixed_type(self):
# Warning, consolidation needs to get checked upstairs
self._consolidate_inplace()
return any(block.is_datelike for block in self.blocks)
@property
def any_extension_types(self):
"""Whether any of the blocks in this manager are extension blocks"""
return any(block.is_extension for block in self.blocks)
@property
def is_view(self):
""" return a boolean if we are a single block and are a view """
if len(self.blocks) == 1:
return self.blocks[0].is_view
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
# and some blocks not. setting in theory is possible on the non-view
# blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
# complicated
return False
def get_bool_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_bool], copy)
def get_numeric_data(self, copy=False):
"""
Parameters
----------
copy : boolean, default False
Whether to copy the blocks
"""
self._consolidate_inplace()
return self.combine([b for b in self.blocks if b.is_numeric], copy)
def combine(self, blocks, copy=True):
""" return a new manager with the blocks """
if len(blocks) == 0:
return self.make_empty()
# FIXME: optimization potential
indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
new_blocks = []
for b in blocks:
b = b.copy(deep=copy)
b.mgr_locs = algos.take_1d(
inv_indexer, b.mgr_locs.as_array, axis=0, allow_fill=False
)
new_blocks.append(b)
axes = list(self.axes)
axes[0] = self.items.take(indexer)
return self.__class__(new_blocks, axes, do_integrity_check=False)
def get_slice(self, slobj, axis=0):
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(slobj)
else:
slicer = [slice(None)] * (axis + 1)
slicer[axis] = slobj
slicer = tuple(slicer)
new_blocks = [blk.getitem_block(slicer) for blk in self.blocks]
new_axes = list(self.axes)
new_axes[axis] = new_axes[axis][slobj]
bm = self.__class__(new_blocks, new_axes, do_integrity_check=False)
bm._consolidate_inplace()
return bm
def __contains__(self, item):
return item in self.items
@property
def nblocks(self):
return len(self.blocks)
def copy(self, deep=True):
"""
Make deep or shallow copy of BlockManager
Parameters
----------
        deep : boolean or string, default True
If False, return shallow copy (do not copy data)
If 'all', copy data and a deep copy of the index
Returns
-------
copy : BlockManager
"""
# this preserves the notion of view copying of axes
if deep:
if deep == "all":
copy = lambda ax: ax.copy(deep=True)
else:
copy = lambda ax: ax.view()
new_axes = [copy(ax) for ax in self.axes]
else:
new_axes = list(self.axes)
return self.apply("copy", axes=new_axes, deep=deep, do_integrity_check=False)
def as_array(self, transpose=False, items=None):
"""Convert the blockmanager data into an numpy array.
Parameters
----------
transpose : boolean, default False
If True, transpose the return array
items : list of strings or None
Names of block items that will be included in the returned
array. ``None`` means that all block items will be used
Returns
-------
arr : ndarray
"""
if len(self.blocks) == 0:
arr = np.empty(self.shape, dtype=float)
return arr.transpose() if transpose else arr
if items is not None:
mgr = self.reindex_axis(items, axis=0)
else:
mgr = self
if self._is_single_block and mgr.blocks[0].is_datetimetz:
# TODO(Block.get_values): Make DatetimeTZBlock.get_values
# always be object dtype. Some callers seem to want the
# DatetimeArray (previously DTI)
arr = mgr.blocks[0].get_values(dtype=object)
elif self._is_single_block or not self.is_mixed_type:
arr = np.asarray(mgr.blocks[0].get_values())
else:
arr = mgr._interleave()
return arr.transpose() if transpose else arr
def _interleave(self):
"""
Return ndarray from blocks with specified item order
Items must be contained in the blocks
"""
dtype = _interleaved_dtype(self.blocks)
# TODO: https://github.com/pandas-dev/pandas/issues/22791
# Give EAs some input on what happens here. Sparse needs this.
if is_sparse(dtype):
dtype = dtype.subtype
elif is_extension_array_dtype(dtype):
dtype = "object"
result = np.empty(self.shape, dtype=dtype)
itemmask = np.zeros(self.shape[0])
for blk in self.blocks:
rl = blk.mgr_locs
result[rl.indexer] = blk.get_values(dtype)
itemmask[rl.indexer] = 1
if not itemmask.all():
raise AssertionError("Some items were not contained in blocks")
return result
def to_dict(self, copy=True):
"""
Return a dict of str(dtype) -> BlockManager
Parameters
----------
copy : boolean, default True
Returns
-------
values : a dict of dtype -> BlockManager
Notes
-----
This consolidates based on str(dtype)
"""
self._consolidate_inplace()
bd = {}
for b in self.blocks:
bd.setdefault(str(b.dtype), []).append(b)
return {dtype: self.combine(blocks, copy=copy) for dtype, blocks in bd.items()}
def fast_xs(self, loc):
"""
get a cross sectional for a given location in the
items ; handle dups
return the result, is *could* be a view in the case of a
single block
"""
if len(self.blocks) == 1:
return self.blocks[0].iget((slice(None), loc))
items = self.items
# non-unique (GH4726)
if not items.is_unique:
result = self._interleave()
if self.ndim == 2:
result = result.T
return result[loc]
# unique
dtype = _interleaved_dtype(self.blocks)
n = len(items)
if is_extension_array_dtype(dtype):
# we'll eventually construct an ExtensionArray.
result = np.empty(n, dtype=object)
else:
result = np.empty(n, dtype=dtype)
for blk in self.blocks:
# Such assignment may incorrectly coerce NaT to None
# result[blk.mgr_locs] = blk._slice((slice(None), loc))
for i, rl in enumerate(blk.mgr_locs):
result[rl] = blk.iget((i, loc))
if is_extension_array_dtype(dtype):
result = dtype.construct_array_type()._from_sequence(result, dtype=dtype)
return result
def consolidate(self):
"""
Join together blocks having same dtype
Returns
-------
y : BlockManager
"""
if self.is_consolidated():
return self
bm = self.__class__(self.blocks, self.axes)
bm._is_consolidated = False
bm._consolidate_inplace()
return bm
def _consolidate_inplace(self):
if not self.is_consolidated():
self.blocks = tuple(_consolidate(self.blocks))
self._is_consolidated = True
self._known_consolidated = True
self._rebuild_blknos_and_blklocs()
def get(self, item):
"""
Return values for selected item (ndarray or BlockManager).
"""
if self.items.is_unique:
if not isna(item):
loc = self.items.get_loc(item)
else:
indexer = np.arange(len(self.items))[isna(self.items)]
# allow a single nan location indexer
if not is_scalar(indexer):
if len(indexer) == 1:
loc = indexer.item()
else:
raise ValueError("cannot label index with a null key")
return self.iget(loc)
else:
if isna(item):
raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(
new_axis=self.items[indexer], indexer=indexer, axis=0, allow_dups=True
)
def iget(self, i):
"""
Return the data as a SingleBlockManager if possible
Otherwise return as a ndarray
"""
block = self.blocks[self._blknos[i]]
values = block.iget(self._blklocs[i])
# shortcut for select a single-dim from a 2-dim BM
return SingleBlockManager(
[
block.make_block_same_class(
values, placement=slice(0, len(values)), ndim=1
)
],
self.axes[1],
)
def delete(self, item):
"""
Delete selected item (items if non-unique) in-place.
"""
indexer = self.items.get_loc(item)
is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
is_deleted[indexer] = True
ref_loc_offset = -is_deleted.cumsum()
is_blk_deleted = [False] * len(self.blocks)
if isinstance(indexer, int):
affected_start = indexer
else:
affected_start = is_deleted.nonzero()[0][0]
for blkno, _ in _fast_count_smallints(self._blknos[affected_start:]):
blk = self.blocks[blkno]
bml = blk.mgr_locs
blk_del = is_deleted[bml.indexer].nonzero()[0]
if len(blk_del) == len(bml):
is_blk_deleted[blkno] = True
continue
elif len(blk_del) != 0:
blk.delete(blk_del)
bml = blk.mgr_locs
blk.mgr_locs = bml.add(ref_loc_offset[bml.indexer])
# FIXME: use Index.delete as soon as it uses fastpath=True
self.axes[0] = self.items[~is_deleted]
self.blocks = tuple(
b for blkno, b in enumerate(self.blocks) if not is_blk_deleted[blkno]
)
self._shape = None
self._rebuild_blknos_and_blklocs()
def set(self, item, value):
"""
Set new item in-place. Does not consolidate. Adds new Block if not
contained in the current set of items
"""
# FIXME: refactor, clearly separate broadcasting & zip-like assignment
# can prob also fix the various if tests for sparse/categorical
value_is_extension_type = is_extension_array_dtype(value)
# categorical/sparse/datetimetz
if value_is_extension_type:
def value_getitem(placement):
return value
else:
if value.ndim == self.ndim - 1:
value = _safe_reshape(value, (1,) + value.shape)
def value_getitem(placement):
return value
else:
def value_getitem(placement):
return value[placement.indexer]
if value.shape[1:] != self.shape[1:]:
raise AssertionError(
"Shape of new values must be compatible with manager shape"
)
try:
loc = self.items.get_loc(item)
except KeyError:
# This item wasn't present, just insert at end
self.insert(len(self.items), item, value)
return
if isinstance(loc, int):
loc = [loc]
blknos = self._blknos[loc]
blklocs = self._blklocs[loc].copy()
unfit_mgr_locs = []
unfit_val_locs = []
removed_blknos = []
for blkno, val_locs in libinternals.get_blkno_placements(blknos, group=True):
blk = self.blocks[blkno]
blk_locs = blklocs[val_locs.indexer]
if blk.should_store(value):
blk.set(blk_locs, value_getitem(val_locs))
else:
unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
unfit_val_locs.append(val_locs)
# If all block items are unfit, schedule the block for removal.
if len(val_locs) == len(blk.mgr_locs):
removed_blknos.append(blkno)
else:
self._blklocs[blk.mgr_locs.indexer] = -1
blk.delete(blk_locs)
self._blklocs[blk.mgr_locs.indexer] = np.arange(len(blk))
if len(removed_blknos):
# Remove blocks & update blknos accordingly
is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
is_deleted[removed_blknos] = True
new_blknos = np.empty(self.nblocks, dtype=np.int64)
new_blknos.fill(-1)
new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
self._blknos = algos.take_1d(
new_blknos, self._blknos, axis=0, allow_fill=False
)
self.blocks = tuple(
blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
)
if unfit_val_locs:
unfit_mgr_locs = np.concatenate(unfit_mgr_locs)
unfit_count = len(unfit_mgr_locs)
new_blocks = []
if value_is_extension_type:
# This code (ab-)uses the fact that sparse blocks contain only
# one item.
new_blocks.extend(
make_block(
values=value.copy(),
ndim=self.ndim,
placement=slice(mgr_loc, mgr_loc + 1),
)
for mgr_loc in unfit_mgr_locs
)
self._blknos[unfit_mgr_locs] = np.arange(unfit_count) + len(self.blocks)
self._blklocs[unfit_mgr_locs] = 0
else:
# unfit_val_locs contains BlockPlacement objects
unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
new_blocks.append(
make_block(
values=value_getitem(unfit_val_items),
ndim=self.ndim,
placement=unfit_mgr_locs,
)
)
self._blknos[unfit_mgr_locs] = len(self.blocks)
self._blklocs[unfit_mgr_locs] = np.arange(unfit_count)
self.blocks += tuple(new_blocks)
# Newly created block's dtype may already be present.
self._known_consolidated = False
def insert(self, loc: int, item, value, allow_duplicates: bool = False):
"""
Insert item at selected position.
Parameters
----------
loc : int
item : hashable
value : array_like
allow_duplicates: bool
If False, trying to insert non-unique item will raise
"""
if not allow_duplicates and item in self.items:
# Should this be a different kind of error??
raise ValueError("cannot insert {}, already exists".format(item))
if not isinstance(loc, int):
raise TypeError("loc must be int")
# insert to the axis; this could possibly raise a TypeError
new_axis = self.items.insert(loc, item)
block = make_block(values=value, ndim=self.ndim, placement=slice(loc, loc + 1))
for blkno, count in _fast_count_smallints(self._blknos[loc:]):
blk = self.blocks[blkno]
if count == len(blk.mgr_locs):
blk.mgr_locs = blk.mgr_locs.add(1)
else:
new_mgr_locs = blk.mgr_locs.as_array.copy()
new_mgr_locs[new_mgr_locs >= loc] += 1
blk.mgr_locs = new_mgr_locs
if loc == self._blklocs.shape[0]:
# np.append is a lot faster, let's use it if we can.
self._blklocs = np.append(self._blklocs, 0)
self._blknos = np.append(self._blknos, len(self.blocks))
else:
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
self._known_consolidated = False
if len(self.blocks) > 100:
self._consolidate_inplace()
def reindex_axis(
self, new_index, axis, method=None, limit=None, fill_value=None, copy=True
):
"""
Conform block manager to new index.
"""
new_index = ensure_index(new_index)
new_index, indexer = self.axes[axis].reindex(
new_index, method=method, limit=limit
)
return self.reindex_indexer(
new_index, indexer, axis=axis, fill_value=fill_value, copy=copy
)
def reindex_indexer(
self, new_axis, indexer, axis, fill_value=None, allow_dups=False, copy=True
):
"""
Parameters
----------
new_axis : Index
indexer : ndarray of int64 or None
axis : int
fill_value : object
allow_dups : bool
pandas-indexer with -1's only.
"""
if indexer is None:
if new_axis is self.axes[axis] and not copy:
return self
result = self.copy(deep=copy)
result.axes = list(self.axes)
result.axes[axis] = new_axis
return result
self._consolidate_inplace()
# some axes don't allow reindexing with dups
if not allow_dups:
self.axes[axis]._can_reindex(indexer)
if axis >= self.ndim:
raise IndexError("Requested axis not found in manager")
if axis == 0:
new_blocks = self._slice_take_blocks_ax0(indexer, fill_tuple=(fill_value,))
else:
new_blocks = [
blk.take_nd(
indexer,
axis=axis,
fill_tuple=(
fill_value if fill_value is not None else blk.fill_value,
),
)
for blk in self.blocks
]
new_axes = list(self.axes)
new_axes[axis] = new_axis
return self.__class__(new_blocks, new_axes)
def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
"""
Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block
"""
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill
)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ("slice", "mask"):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value,)
return [
blk.take_nd(
slobj,
axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple,
)
]
if sl_type in ("slice", "mask"):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(
self._blknos, slobj, fill_value=-1, allow_fill=allow_fill
)
blklocs = algos.take_1d(
self._blklocs, slobj, fill_value=-1, allow_fill=allow_fill
)
# When filling blknos, make sure blknos is updated before appending to
# blocks list, that way new blkno is exactly len(blocks).
#
# FIXME: mgr_groupby_blknos must return mgr_locs in ascending order,
# pytables serialization will break otherwise.
blocks = []
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=True):
if blkno == -1:
# If we've got here, fill_tuple was not None.
fill_value = fill_tuple[0]
blocks.append(
self._make_na_block(placement=mgr_locs, fill_value=fill_value)
)
else:
blk = self.blocks[blkno]
# Otherwise, slicing along items axis is necessary.
if not blk._can_consolidate:
# A non-consolidatable block, it's easy, because there's
# only one item and each mgr loc is a copy of that single
# item.
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(
blk.take_nd(
blklocs[mgr_locs.indexer],
axis=0,
new_mgr_locs=mgr_locs,
fill_tuple=None,
)
)
return blocks
def _make_na_block(self, placement, fill_value=None):
# TODO: infer dtypes other than float64 from fill_value
if fill_value is None:
fill_value = np.nan
block_shape = list(self.shape)
block_shape[0] = len(placement)
dtype, fill_value = infer_dtype_from_scalar(fill_value)
block_values = np.empty(block_shape, dtype=dtype)
block_values.fill(fill_value)
return make_block(block_values, placement=placement)
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (
np.arange(indexer.start, indexer.stop, indexer.step, dtype="int64")
if isinstance(indexer, slice)
            else np.asanyarray(indexer, dtype="int64")
        )
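        # Hedged completion (assumed; follows the surrounding manager API):
        # bounds-check the indexer, then delegate to reindex_indexer.
        n = self.shape[axis]
        if convert:
            indexer = maybe_convert_indices(indexer, n)
        if verify:
            if ((indexer == -1) | (indexer >= n)).any():
                raise Exception(
                    "Indices must be nonzero and less than the axis length"
                )
        new_labels = self.axes[axis].take(indexer)
        return self.reindex_indexer(
            new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
        )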
#!/usr/bin/env python
"""
@author: <NAME>
Date Created: 9/24/2011
"""
from __future__ import division, print_function
from future.utils import iteritems, viewitems
from builtins import int, map, zip
import os
import sys
import copy
import fnmatch
import time
import argparse
import inspect
from joblib import Parallel, delayed
import shutil
import platform
import subprocess
from collections import OrderedDict
from . import vector_tools
from .helpers import random_float, overwrite_file, check_and_create_dir, _iteration_parameters
from .errors import EmptyImage, LenError, MissingRequirement, ropenError, ArrayShapeError, ArrayOffsetError, logger
from .veg_indices import veg_indices, VegIndicesEquations
try:
import deprecation
except:
logger.error('deprecation must be installed (pip install deprecation)')
raise ImportError
# GDAL
try:
from osgeo import gdal, osr
from osgeo.gdalconst import GA_ReadOnly, GA_Update
except:
logger.error(' GDAL Python must be installed')
raise ImportError
# NumPy
try:
import numpy as np
except:
logger.error(' NumPy must be installed')
raise ImportError
# Matplotlib
try:
import matplotlib as mpl
if (os.environ.get('DISPLAY', '') == '') or (platform.system() == 'Darwin'):
mpl.use('Agg')
try:
mpl.pyplot.switch_backend('agg')
except:
pass
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import ticker, colors, colorbar
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1 import make_axes_locatable
except:
logger.warning(' Matplotlib must be installed for plotting')
# Scikit-learn
try:
from sklearn.preprocessing import StandardScaler
except:
logger.warning(' Scikit-learn must be installed for pixel_stats z-scores.')
# Scikit-image
try:
from skimage import exposure
except:
logger.warning(' Scikit-image must be installed for image color balancing.')
# SciPy
try:
from scipy.stats import mode as sci_mode
from scipy.ndimage.measurements import label as lab_img
except:
logger.error(' SciPy must be installed')
raise ImportError
# Pandas
try:
import pandas as pd
except:
logger.warning(' Pandas must be installed to parse metadata')
# OpenCV
try:
import cv2
except:
logger.warning(' OpenCV must be installed to use stat functions.')
# BeautifulSoup4
try:
from bs4 import BeautifulSoup
except:
logger.warning(' BeautifulSoup4 must be installed to parse metadata')
gdal.UseExceptions()
gdal.PushErrorHandler('CPLQuietErrorHandler')
gdal.SetCacheMax(int(2.0**30.0))
DRIVER_DICT = {'.bin': 'ENVI',
'.bsq': 'ENVI',
'.dat': 'ENVI',
'.ecw': 'ECW',
'.img': 'HFA',
'.hdf': 'HDF4',
'.hdf4': 'HDF4',
'.hdf5': 'HDF5',
'.h5': 'HDF5',
'.hdr': 'ENVI',
'.jp2': 'JPEG2000',
'.kea': 'KEA',
'.mem': 'MEM',
'.nc': 'netCDF',
'.ntf': 'NITF',
'.pix': 'PCRaster',
'.hgt': 'SRTMHGT',
'.sid': 'MrSID',
'.tif': 'GTiff',
'.tiff': 'GTiff',
'.til': 'TIL',
'.vrt': 'VRT'}
FORMAT_DICT = dict((v, k) for k, v in iteritems(DRIVER_DICT))
STORAGE_DICT = {'byte': 'uint8',
'int16': 'int16',
'uint16': 'uint16',
'int32': 'int32',
'uint32': 'uint32',
'int64': 'int64',
'uint64': 'uint64',
'float32': 'float32',
'float64': 'float64'}
STORAGE_DICT_r = dict((v, k) for k, v in iteritems(STORAGE_DICT))
STORAGE_DICT_GDAL = {'unknown': gdal.GDT_Unknown,
'byte': gdal.GDT_Byte,
'uint16': gdal.GDT_UInt16,
'int16': gdal.GDT_Int16,
'uint32': gdal.GDT_UInt32,
'int32': gdal.GDT_Int32,
'float32': gdal.GDT_Float32,
'float64': gdal.GDT_Float64,
'cint16': gdal.GDT_CInt16,
'cint32': gdal.GDT_CInt32,
'cfloat32': gdal.GDT_CFloat32,
'cfloat64': gdal.GDT_CFloat64}
STORAGE_DICT_NUMPY = {'byte': np.uint8,
'int16': np.int16,
'uint16': np.uint16,
'int32': np.int32,
'uint32': np.uint32,
'int64': np.int64,
'uint64': np.uint64,
'float32': np.float32,
'float64': np.float64}
RESAMPLE_DICT = dict(average=gdal.GRA_Average,
bilinear=gdal.GRA_Bilinear,
nearest=gdal.GRA_NearestNeighbour,
cubic=gdal.GRA_Cubic)
PANSHARPEN_WEIGHTS = dict(oli_tirs=dict(bw=0.2,
gw=1.0,
rw=1.0,
iw=0.1),
etm=dict(bw=0.1,
gw=1.0,
rw=1.0,
iw=1.0))
GOOGLE_CLOUD_SENSORS = dict(oli_tirs='LC08',
etm='LE07',
tm='LT05')
SENSOR_DICT = {'landsat_tm': 'tm',
'lt4': 'tm',
'lt5': 'tm',
'tm': 'tm',
'landsat_etm_slc_off': 'etm',
'landsat_etm': 'etm',
'landsat_et': 'etm',
'landsat_etm_slc_on': 'etm',
'etm': 'etm',
'le7': 'etm',
'lt7': 'etm',
'landsat_oli_tirs': 'oli_tirs',
'oli_tirs': 'oli_tirs',
'oli': 'oli_tirs',
'tirs': 'oli_tirs',
'lc8': 'oli_tirs',
'lt8': 'oli_tirs'}
def create_memory_raster(image_info,
rows,
cols,
left,
top):
"""
Creates an in-memory raster object
Args:
image_info (object)
rows (int)
cols (int)
left (float)
top (float)
Returns:
Datasource object
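    Example:
        A minimal illustrative sketch (``i_info`` is assumed to be an open
        ``ropen`` instance and ``lyr`` an OGR layer to burn in):
        >>> target = create_memory_raster(i_info, 512, 512, i_info.left, i_info.top)
        >>> gdal.RasterizeLayer(target, [1], lyr, burn_values=[1])
        >>> mask = target.GetRasterBand(1).ReadAsArray()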
"""
# Create a raster to rasterize into.
target_ds = gdal.GetDriverByName('MEM').Create('', cols, rows, 1, gdal.GDT_Byte)
target_ds.SetGeoTransform([left, image_info.cellY, 0.0, top, 0.0, -image_info.cellY])
target_ds.SetProjection(image_info.projection)
return target_ds
def nd_to_rgb(array2reshape):
"""
Reshapes an array from nd layout to RGB
"""
if len(array2reshape.shape) != 3:
logger.error(' The array must be 3 dimensions.')
raise LenError
if array2reshape.shape[0] != 3:
logger.error(' The array must be 3 bands.')
raise ArrayShapeError
return np.ascontiguousarray(array2reshape.transpose(1, 2, 0))
def rgb_to_nd(array2reshape):
"""
Reshapes an array RGB layout to nd layout
"""
if len(array2reshape.shape) != 3:
logger.error(' The array must be 3 dimensions.')
raise LenError
if array2reshape.shape[2] != 3:
logger.error(' The array must be 3 bands.')
raise ArrayShapeError
return np.ascontiguousarray(array2reshape.transpose(2, 0, 1))
def nd_to_columns(array2reshape, layers, rows, columns):
"""
Reshapes an array from nd layout to [samples (rows*columns) x dimensions]
"""
if layers == 1:
return array2reshape.flatten()[:, np.newaxis]
else:
return array2reshape.reshape(layers, rows, columns).transpose(1, 2, 0).reshape(rows*columns, layers)
def columns_to_nd(array2reshape, layers, rows, columns):
"""
Reshapes an array from columns layout to [n layers x rows x columns]
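    Example:
        A minimal sketch of the round trip with ``nd_to_columns``:
        >>> a = np.random.rand(3, 100, 200)
        >>> X = nd_to_columns(a, 3, 100, 200)    # shape --> (20000, 3)
        >>> b = columns_to_nd(X, 3, 100, 200)    # shape --> (3, 100, 200)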
"""
if layers == 1:
return array2reshape.reshape(columns, rows).T
else:
return array2reshape.T.reshape(layers, rows, columns)
class ReadWrite(object):
def read(self,
bands2open=1,
i=0,
j=0,
rows=-1,
cols=-1,
d_type=None,
compute_index='none',
sensor='Landsat',
sort_bands2open=True,
predictions=False,
y=0.,
x=0.,
check_x=None,
check_y=None,
as_xarray=False,
xarray_dims=None,
xarray_coords=None,
**viargs):
"""
Reads a raster as an array
Args:
            bands2open (Optional[int or int list or dict]): Band position to open, list of bands to open, or a
dictionary of name-band pairs. Default is 1.
Examples:
bands2open = 1 (open band 1)
bands2open = [1,2,3] (open first three bands)
bands2open = [4,3,2] (open bands in a specific order)
*When opening bands in a specific order, be sure to set ``sort_bands2open`` as ``False``.
bands2open = -1 (open all bands)
bands2open = {'blue': 1, 'green': 2, 'nir': 4} (open bands 1, 2, and 4)
i (Optional[int]): Starting row position. Default is 0, or first row.
j (Optional[int]): Starting column position. Default is 0, or first column.
rows (Optional[int]): Number of rows to extract. Default is -1, or all rows.
cols (Optional[int]): Number of columns to extract. Default is -1, or all columns.
d_type (Optional[str]): Type of array to return. Choices are ['byte', 'int16', 'uint16',
'int32', 'uint32', 'int64', 'uint64', 'float32', 'float64']. Default is None, or gathered
from <i_info>.
compute_index (Optional[str]): A spectral index to compute. Default is 'none'.
sensor (Optional[str]): The input sensor type (used with ``compute_index``). Default is 'Landsat'.
sort_bands2open (Optional[bool]): Whether to sort ``bands2open``. Default is True.
predictions (Optional[bool]): Whether to return reshaped array for Scikit-learn formatted
predictions (i.e., samples x dimensions).
y (Optional[float]): A y index coordinate (latitude, in map units). Default is 0.
If greater than 0, overrides ``i``.
x (Optional[float]): A x index coordinate (longitude, in map units). Default is 0.
If greater than 0, overrides ``j``.
check_x (Optional[float]): Check the x offset against ``check_x``. Default is None.
check_y (Optional[float]): Check the y offset against ``check_y``. Default is None.
as_xarray (Optional[bool]): Whether to open the array as a xarray, otherwise as a Numpy array.
Default is False.
xarray_dims (Optional[list]): Dimension names for xarray. Default is None.
xarray_coords (Optional[list]): Coordinates for xarray. Default is None.
viargs (Optional[dict]): Keyword arguments passed to `veg_indices`. Default is None.
Attributes:
array (ndarray)
Returns:
``ndarray``, where shape is (rows x cols) if 1 band or (bands x rows x cols) if more than 1 band.
Examples:
>>> import mpglue as gl
>>>
            >>> i_info = gl.ropen('image.tif')
>>>
>>> # Open 1 band.
>>> array = i_info.read(bands2open=1)
>>>
>>> # Open multiple bands.
>>> array = i_info.read(bands2open=[1, 2, 3])
>>> band_1 = array[0]
>>>
>>> # Open as a dictionary of arrays.
>>> bands = i_info.read(bands2open={'blue': 1, 'red': 2, 'nir': 4})
>>> red = bands['red']
>>>
>>> # Index an image by pixel positions.
>>> array = i_info.read(i=1000, j=4000, rows=500, cols=500)
>>>
>>> # Index an image by map coordinates.
>>> array = i_info.read(y=1200000., x=4230000., rows=500, cols=500)
"""
self.i = i
self.j = j
# `self.rows` and `self.cols` are the
# image dimension info, so don't overwrite.
self.rrows = rows
self.ccols = cols
self.sort_bands2open = sort_bands2open
self.as_xarray = as_xarray
self.xarray_dims = xarray_dims
self.xarray_coords = xarray_coords
if isinstance(bands2open, dict):
if isinstance(d_type, str):
self.d_type = STORAGE_DICT_NUMPY[d_type]
else:
self.d_type = STORAGE_DICT_NUMPY[self.storage.lower()]
else:
if isinstance(d_type, str):
self.d_type = STORAGE_DICT[d_type]
else:
self.d_type = STORAGE_DICT[self.storage.lower()]
if compute_index != 'none':
bh = veg_indices.BandHandler(sensor)
bh.get_band_order()
# Overwrite the bands to open
bands2open = bh.get_band_positions(bh.wavelength_lists[compute_index.upper()])
self.d_type = 'float32'
if self.rrows == -1:
self.rrows = self.rows
else:
if self.rrows > self.rows:
self.rrows = self.rows
logger.warning(' The requested rows cannot be larger than the image rows.')
if self.ccols == -1:
self.ccols = self.cols
else:
if self.ccols > self.cols:
self.ccols = self.cols
logger.warning(' The requested columns cannot be larger than the image columns.')
# Index the image by x, y coordinates (in map units).
if (abs(y) > 0) and (abs(x) > 0):
__, __, self.j, self.i = vector_tools.get_xy_offsets(self,
x=x,
y=y,
check_position=False)
if isinstance(check_x, float) and isinstance(check_y, float):
__, __, x_offset, y_offset = vector_tools.get_xy_offsets(self,
x=check_x,
y=check_y,
check_position=False)
self.i += y_offset
self.j += x_offset
#################
# Bounds checking
#################
# Row indices
if self.i < 0:
self.i = 0
if self.i >= self.rows:
self.i = self.rows - 1
# Number of rows
self.rrows = n_rows_cols(self.i, self.rrows, self.rows)
# Column indices
if self.j < 0:
self.j = 0
if self.j >= self.cols:
self.j = self.cols - 1
# Number of columns
self.ccols = n_rows_cols(self.j, self.ccols, self.cols)
#################
# format_dict = {'byte': 'B', 'int16': 'i', 'uint16': 'I', 'float32': 'f', 'float64': 'd'}
# values = struct.unpack('%d%s' % ((rows * cols * len(bands2open)), format_dict[i_info.storage.lower()]),
# i_info.datasource.ReadRaster(yoff=i, xoff=j, xsize=cols, ysize=rows, band_list=bands2open))
if hasattr(self, 'band'):
self.array = self.band.ReadAsArray(self.j,
self.i,
self.ccols,
self.rrows).astype(self.d_type)
self.array_shape = [1, self.rrows, self.ccols]
if predictions:
self._norm2predictions(1)
else:
# Check ``bands2open`` type.
bands2open = self._check_band_list(bands2open)
# Open the array.
self._open_array(bands2open)
if predictions:
self._norm2predictions(len(bands2open))
if compute_index != 'none':
vie = VegIndicesEquations(self.array,
chunk_size=-1)
# exec('self.{} = vie.compute(compute_index.upper())'.format(compute_index.lower()))
self.array = vie.compute(compute_index.upper(),
**viargs)
self.array[np.isnan(self.array) | np.isinf(self.array)] = 0
if self.as_xarray:
self._as_xarray()
return self.array
def _as_xarray(self):
"""
Transforms the NumPy array to a xarray object
"""
try:
import xarray as xr
except:
logger.error(' Cannot import xarray')
raise ImportError
if len(self.array.shape) == 3:
n_bands = self.array.shape[0]
if not self.xarray_coords:
self.xarray_coords = dict(z=('B' + ',B'.join(list(map(str, range(1, n_bands+1))))).split(','))
if not self.xarray_dims:
self.xarray_dims = ['z', 'y', 'x']
else:
if not self.xarray_dims:
self.xarray_dims = ['y', 'x']
self.array = xr.DataArray(self.array,
coords=self.xarray_coords,
dims=self.xarray_dims)
def _open_array(self, bands2open):
"""
Opens image bands into a ndarray.
Args:
bands2open (int or list)
"""
# Open the image as a dictionary of arrays.
if isinstance(bands2open, dict):
self.array = dict()
for band_name, band_position in viewitems(bands2open):
if self.hdf_file:
self.array[band_name] = self.hdf_datasources[band_position-1].ReadAsArray(self.j,
self.i,
self.ccols,
self.rrows).astype(self.d_type)
else:
self.array[band_name] = self.datasource.GetRasterBand(band_position).ReadAsArray(self.j,
self.i,
self.ccols,
self.rrows).astype(self.d_type)
# Open the image as an array.
else:
if self.hdf_file:
self.array = np.asarray([self.hdf_datasources[band-1].ReadAsArray(self.j,
self.i,
self.ccols,
self.rrows) for band in bands2open],
dtype=self.d_type)
else:
self.array = list()
for band in bands2open:
arr = self.datasource.GetRasterBand(band).ReadAsArray(self.j,
self.i,
self.ccols,
self.rrows)
if not isinstance(arr, np.ndarray):
logger.info(type(arr))
logger.error(' Band {:d} is not a NumPy array.'.format(band))
raise TypeError
self.array.append(arr)
self.array = np.asarray(self.array, dtype=self.d_type)
self.array = self._reshape(self.array, bands2open)
def _predictions2norm(self, n_bands):
"""
Reshapes an array from predictions to nd array
Args:
n_bands (int)
"""
self.array = columns_to_nd(self.array, n_bands, self.rrows, self.ccols)
self.array_shape = [n_bands, self.rrows, self.ccols]
def _norm2predictions(self, n_bands):
"""
Reshapes an array from normal layout to Scikit-learn compatible shape (i.e., samples x dimensions)
Args:
n_bands (int)
"""
self.array = nd_to_columns(self.array, n_bands, self.rrows, self.ccols)
self.array_shape = [1, self.rrows*self.ccols, n_bands]
def _reshape(self, array2reshape, band_list):
"""
Reshapes an array into [rows X columns] or [dimensions X rows X columns].
Args:
array2reshape (ndarray)
band_list (list)
"""
if len(band_list) == 1:
array2reshape = array2reshape.reshape(self.rrows, self.ccols)
else:
array2reshape = array2reshape.reshape(len(band_list), self.rrows, self.ccols)
self.array_shape = [len(band_list), self.rrows, self.ccols]
return array2reshape
def _check_band_list(self, bands2open):
"""
Checks whether a band list is valid.
Args:
bands2open (dict, list, or int)
"""
if isinstance(bands2open, dict):
return bands2open
elif isinstance(bands2open, list):
if not bands2open:
logger.error(' A band list must be declared.\n')
raise LenError
if 0 in bands2open:
logger.error(' A band list cannot have any zeros. GDAL indexes starting at 1.\n')
raise ValueError
if not self.hdf_file:
if max(bands2open) > self.bands:
logger.error(' The requested band position cannot be greater than the image bands.\n')
raise ValueError
elif isinstance(bands2open, int):
if not self.hdf_file:
if bands2open > self.bands:
logger.error(' The requested band position cannot be greater than the image bands.\n')
raise ValueError
if bands2open == -1:
bands2open = list(range(1, self.bands+1))
else:
bands2open = [bands2open]
else:
logger.error(' The `bands2open` parameter must be a dict, list, or int.\n')
raise TypeError
if self.sort_bands2open and not isinstance(bands2open, dict):
bands2open = sorted(bands2open)
return bands2open
def write2raster(self,
out_name,
write_which='array',
o_info=None,
x=0,
y=0,
out_rst=None,
write2bands=None,
compress='deflate',
tile=False,
close_band=True,
flush_final=False,
write_chunks=False,
**kwargs):
"""
Writes an array to file.
Args:
out_name (str): The output image name.
write_which (Optional[str]): The array type to write. Choices are ['array', '<spectral indices>'].
Default is 'array'.
o_info (Optional[object]): Output image information, instance of ``ropen``.
Needed if <out_rst> not given. Default is None.
x (Optional[int]): Column starting position. Default is 0.
y (Optional[int]): Row starting position. Default is 0.
            out_rst (Optional[object]): GDAL object to write to, otherwise created. Default is None.
write2bands (Optional[int or int list]): Band positions to write to, otherwise takes the order of the input
array dimensions. Default is None.
compress (Optional[str]): Needed if <out_rst> not given. Default is 'deflate'.
tile (Optional[bool]): Needed if <out_rst> not given. Default is False.
close_band (Optional[bool]): Whether to flush the band cache. Default is True.
flush_final (Optional[bool]): Whether to flush the raster cache. Default is False.
write_chunks (Optional[bool]): Whether to write to file in <write_chunks> chunks. Default is False.
Returns:
None, writes <out_name>.
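        Example:
            A minimal sketch; assumes ``i_info`` already holds an array
            loaded with ``read``:
            >>> i_info.write2raster('output.tif', o_info=i_info.copy())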
"""
if isinstance(write_which, str):
if write_which == 'ndvi':
out_arr = self.ndvi
elif write_which == 'evi2':
out_arr = self.evi2
elif write_which == 'pca':
out_arr = self.pca_components
else:
out_arr = self.array
elif isinstance(write_which, np.ndarray):
out_arr = write_which
d_name, f_name = os.path.split(out_name)
if not os.path.isdir(d_name):
os.makedirs(d_name)
array_shape = out_arr.shape
if len(array_shape) == 2:
out_rows, out_cols = out_arr.shape
out_dims = 1
else:
out_dims, out_rows, out_cols = out_arr.shape
new_file = False
if not out_rst:
new_file = True
if kwargs:
try:
o_info.storage = kwargs['storage']
except:
pass
try:
o_info.bands = kwargs['bands']
except:
o_info.bands = out_dims
o_info.rows = out_rows
o_info.cols = out_cols
out_rst = create_raster(out_name, o_info, compress=compress, tile=tile)
# Specify a band to write to.
if isinstance(write2bands, int) or isinstance(write2bands, list):
if isinstance(write2bands, int):
write2bands = [write2bands]
for n_band in write2bands:
out_rst.get_band(n_band)
if write_chunks:
out_rst.get_chunk_size()
for i in range(0, out_rst.rows, out_rst.chunk_size):
n_rows = n_rows_cols(i, out_rst.chunk_size, out_rst.rows)
for j in range(0, out_rst.cols, out_rst.chunk_size):
n_cols = n_rows_cols(j, out_rst.chunk_size, out_rst.cols)
out_rst.write_array(out_arr[i:i+n_rows, j:j+n_cols], i=i, j=j)
else:
out_rst.write_array(out_arr, i=y, j=x)
if close_band:
out_rst.close_band()
# Write in order of the 3rd array dimension.
else:
arr_shape = out_arr.shape
if len(arr_shape) > 2:
out_bands = arr_shape[0]
for n_band in range(1, out_bands+1):
out_rst.write_array(out_arr[n_band-1], i=y, j=x, band=n_band)
if close_band:
out_rst.close_band()
else:
out_rst.write_array(out_arr, i=y, j=x, band=1)
if close_band:
out_rst.close_band()
# close the dataset if it was created or prompted by <flush_final>
if flush_final or new_file:
out_rst.close_file()
class DataChecks(object):
"""
A class for spatial and cloud checks
"""
def contains(self, iinfo):
"""
Tests whether the open image contains another image.
Args:
iinfo (object): An image instance of ``ropen`` to test.
"""
if (iinfo.left > self.left) and (iinfo.right < self.right) \
and (iinfo.top < self.top) and (iinfo.bottom > self.bottom):
return True
else:
return False
def contains_value(self, value, array=None):
"""
Tests whether a value is within the array
Args:
value (int): The value to check.
array (Optional[ndarray]): An array to check. Otherwise, check self.array. Default is None.
Returns:
Whether the array contains `value` (bool)
"""
if not isinstance(array, np.ndarray) and not hasattr(self, 'array'):
logger.exception(' No array to check')
if hasattr(self, 'array'):
return np.in1d(np.array([value]), self.array)[0]
else:
return np.in1d(np.array([value]), array)[0]
def intersects(self, iinfo):
"""
Tests whether the open image intersects another image.
Args:
iinfo (object): An image instance of ``ropen`` to test.
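        Example:
            A minimal sketch with two hypothetical rasters:
            >>> import mpglue as gl
            >>> a = gl.ropen('a.tif')
            >>> b = gl.ropen('b.tif')
            >>> a.intersects(b)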
"""
image_intersects = False
# At least within the longitude frame.
if ((iinfo.left > self.left) and (iinfo.left < self.right)) or \
((iinfo.right < self.right) and (iinfo.right > self.left)):
# Also within the latitude frame.
if ((iinfo.bottom > self.bottom) and (iinfo.bottom < self.top)) or \
((iinfo.top < self.top) and (iinfo.top > self.bottom)):
image_intersects = True
return image_intersects
def within(self, iinfo):
"""
Tests whether the open image falls within another image.
Args:
iinfo (object or dict): An image instance of ``ropen`` to test.
"""
if isinstance(iinfo, ropen):
iinfo = self._info2dict(iinfo)
if (self.left > iinfo['left']) and (self.right < iinfo['right']) \
and (self.top < iinfo['top']) and (self.bottom > iinfo['bottom']):
return True
else:
return False
def outside(self, iinfo):
"""
Tests whether the open image falls outside coordinates
Args:
iinfo (object or dict): An image instance of ``ropen`` to test.
"""
if isinstance(iinfo, dict):
iif = ImageInfo()
for k, v in viewitems(iinfo):
setattr(iif, k, v)
iinfo = iif.copy()
has_extent = False
if hasattr(self, 'left') and hasattr(self, 'right'):
has_extent = True
if self.left > iinfo.right:
return True
if self.right < iinfo.left:
return True
if hasattr(self, 'top') and hasattr(self, 'bottom'):
has_extent = True
if self.top < iinfo.bottom:
return True
if self.bottom > iinfo.top:
return True
if not has_extent:
logger.error('The `iinfo` parameter did not contain extent information.')
return False
def check_clouds(self, cloud_band=7, clear_value=0, background_value=255):
"""
Checks cloud information.
Args:
cloud_band (Optional[int]): The cloud band position. Default is 7.
clear_value (Optional[int]): The clear pixel value. Default is 0.
background_value (Optional[int]): The background pixel value. Default is 255.
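        Example:
            A minimal sketch; assumes the open image stores a cloud mask in band 7:
            >>> i_info.check_clouds(cloud_band=7, clear_value=0)
            >>> print(i_info.clear_percent)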
"""
cloud_array = self.read(bands2open=cloud_band)
clear_pixels = (cloud_array == clear_value).sum()
non_background_pixels = (cloud_array != background_value).sum()
self.clear_percent = (float(clear_pixels) / float(non_background_pixels)) * 100.
class RegisterDriver(object):
"""
Class handler for driver registration
Args:
out_name (str): The file to register.
in_memory (bool): Whether to create the file in memory.
Attributes:
out_name (str)
driver (object)
file_format (str)
"""
def __init__(self, out_name, in_memory):
gdal.AllRegister()
if not in_memory:
self._get_file_format(out_name)
self.driver = gdal.GetDriverByName(self.file_format)
else:
self.driver = gdal.GetDriverByName('MEM')
        self.driver.Register()
def _get_file_format(self, image_name):
d_name, f_name = os.path.split(image_name)
__, file_extension = os.path.splitext(f_name)
self.hdr_file = False
if os.path.isfile(os.path.join(d_name, '{}.hdr'.format(f_name))):
file_extension = '.hdr'
self.hdr_file = True
self.file_format = self._get_driver_name(file_extension)
@staticmethod
def _get_driver_name(file_extension):
if file_extension.lower() not in DRIVER_DICT:
logger.error('{} is not an image, or is not a supported raster format.'.format(file_extension))
raise TypeError
else:
return DRIVER_DICT[file_extension.lower()]
class CreateDriver(RegisterDriver):
"""
Class handler for driver creation
Args:
out_name (str): The output file name.
out_rows (int): The output number of rows.
out_cols (int): The output number of columns.
n_bands (int): The output number of bands.
storage_type (str): The output storage type.
in_memory (bool): Whether to create the file in memory.
overwrite (bool): Whether to overwrite an existing file.
parameters (str list): A list of GDAL creation parameters.
Attributes:
datasource (object)
"""
def __init__(self, out_name, out_rows, out_cols, n_bands, storage_type, in_memory, overwrite, parameters):
RegisterDriver.__init__(self, out_name, in_memory)
if overwrite and not in_memory:
if os.path.isfile(out_name):
os.remove(out_name)
# Create the output driver.
if in_memory:
self.datasource = self.driver.Create('', out_cols, out_rows, n_bands, storage_type)
else:
self.datasource = self.driver.Create(out_name, out_cols, out_rows, n_bands, storage_type, parameters)
class DatasourceInfo(object):
def datasource_info(self):
if self.datasource is None:
if hasattr(self, 'file_name'):
logger.error(' {} appears to be empty.'.format(self.file_name))
raise EmptyImage
else:
logger.error(' The datasource appears to be empty.')
raise EmptyImage
try:
self.meta_dict = self.datasource.GetMetadata_Dict()
except:
logger.error(gdal.GetLastErrorMsg())
self.meta_dict = 'none'
try:
self.storage = gdal.GetDataTypeName(self.datasource.GetRasterBand(1).DataType)
except:
self.storage = 'none'
if hasattr(self, 'file_name'):
self.directory, self.filename = os.path.split(self.file_name)
if self.hdf_file:
self.bands = len(self.hdf_datasources)
else:
self.bands = self.datasource.RasterCount
# Initiate the data checks object.
# DataChecks.__init__(self)
# Check if any of the bands are corrupted.
if hasattr(self, 'check_corrupted'):
if self.check_corrupted:
self.check_corrupted_bands()
self.projection = self.datasource.GetProjection()
self.sp_ref = osr.SpatialReference()
self.sp_ref.ImportFromWkt(self.projection)
self.proj4 = self.sp_ref.ExportToProj4()
self.color_interpretation = self.datasource.GetRasterBand(1).GetRasterColorInterpretation()
if 'PROJ' in self.projection[:4]:
if self.sp_ref.GetAttrValue('PROJCS|AUTHORITY', 1):
self.epsg = int(self.sp_ref.GetAttrValue('PROJCS|AUTHORITY', 1))
else:
self.epsg = 'none'
elif 'GEOG' in self.projection[:4]:
try:
self.epsg = int(self.sp_ref.GetAttrValue('GEOGCS|AUTHORITY', 1))
except:
logger.error(gdal.GetLastErrorMsg())
if 'WGS' in self.sp_ref.GetAttrValue('GEOGCS') and '84' in self.sp_ref.GetAttrValue('GEOGCS'):
self.epsg = 4326 # WGS 1984
else:
self.epsg = 'none'
else:
self.epsg = 'none'
# Set georeference and projection.
self.geo_transform = self.datasource.GetGeoTransform()
# adfGeoTransform[0] :: top left x
# adfGeoTransform[1] :: w-e pixel resolution
# adfGeoTransform[2] :: rotation, 0 if image is north up
# adfGeoTransform[3] :: top left y
# adfGeoTransform[4] :: rotation, 0 if image is north up
# adfGeoTransform[5] :: n-s pixel resolution
self.left = self.geo_transform[0] # get left extent
self.top = self.geo_transform[3] # get top extent
        self.cellY = self.geo_transform[1]  # cell size in the x direction (w-e resolution)
        self.cellX = self.geo_transform[5]  # cell size in the y direction (n-s resolution, negative)
self.rotation1 = self.geo_transform[2]
self.rotation2 = self.geo_transform[4]
self.rows = self.datasource.RasterYSize # get number of rows
self.cols = self.datasource.RasterXSize # get number of columns
self.center_x = self.left + ((self.cols / 2) * self.cellY)
        self.center_y = self.top - ((self.rows / 2) * abs(self.cellX))
if not self.projection:
self._get_hdr_info()
self.shape = dict(bands=self.bands,
rows='{:,d}'.format(self.rows),
columns='{:,d}'.format(self.cols),
pixels='{:,d}'.format(self.bands * self.rows * self.cols),
row_units='{:,.2f}'.format(self.rows * self.cellY),
col_units='{:,.2f}'.format(self.cols * self.cellY))
self.right = self.left + (self.cols * abs(self.cellY)) # get right extent
self.bottom = self.top - (self.rows * abs(self.cellX)) # get bottom extent
self.image_envelope = [self.left, self.right, self.bottom, self.top]
self.extent = dict(left=self.left,
right=self.right,
bottom=self.bottom,
top=self.top)
self.name = self.datasource.GetDriver().ShortName
try:
self.block_x = self.datasource.GetRasterBand(1).GetBlockSize()[0]
self.block_y = self.datasource.GetRasterBand(1).GetBlockSize()[1]
except:
logger.error(gdal.GetLastErrorMsg())
self.block_x = 'none'
self.block_y = 'none'
class FileManager(DataChecks, RegisterDriver, DatasourceInfo):
"""
Class for file handling
Args:
open2read (bool)
hdf_band (int)
check_corrupted (bool)
Attributes:
band (GDAL object)
datasource (GDAL object)
chunk_size (int)
Methods:
build_overviews
get_band
write_array
close_band
close_file
close_all
get_chunk_size
remove_overviews
Returns:
None
"""
def get_image_info(self, open2read, hdf_band, check_corrupted):
self.hdf_file = False
self.check_corrupted = check_corrupted
# HDF subdatasets given as files in `vrt_builder`.
# Find the file name in the subdataset name.
if '.hdf' in self.file_name.lower() and not self.file_name.lower().endswith('.hdf'):
stris = [stri for stri, strt in enumerate(self.file_name) if strt == ':']
self.file_name = self.file_name[stris[1]+2:stris[2]-1]
if not os.path.isfile(self.file_name):
raise IOError('\n{} does not exist.\n'.format(self.file_name))
self._get_file_format(self.file_name)
# Open input raster.
try:
if open2read:
self.datasource = gdal.Open(self.file_name, GA_ReadOnly)
self.image_mode = 'read only'
else:
self.datasource = gdal.Open(self.file_name, GA_Update)
self.image_mode = 'update'
self.file_open = True
except:
logger.error(gdal.GetLastErrorMsg())
logger.warning('\nCould not open {}.\n'.format(self.file_name))
return
if self.file_name.lower().endswith('.hdf'):
self.hdf_file = True
if self.datasource is None:
logger.warning('\n1) {} appears to be empty.\n'.format(self.file_name))
return
# self.hdf_layers = self.datasource.GetSubDatasets()
self.hdf_layers = self.datasource.GetMetadata('SUBDATASETS')
self.hdf_key_list = [k for k in list(self.hdf_layers) if '_NAME' in k]
self.hdf_name_dict = dict()
for hdf_str in self.hdf_key_list:
str_digit = hdf_str[hdf_str.find('_')+1:len(hdf_str)-hdf_str[::-1].find('_')-1]
if len(str_digit) == 1:
self.hdf_name_dict[hdf_str.replace(str_digit, '0{}'.format(str_digit))] = self.hdf_layers[hdf_str]
else:
self.hdf_name_dict[hdf_str] = self.hdf_layers[hdf_str]
self.hdf_name_list = [self.hdf_name_dict[k] for k in sorted(self.hdf_name_dict)]
# self.hdf_name_list = [self.hdf_layers[k] for k in list(self.hdf_layers) if '_NAME' in k]
self.hdf_datasources = [self._open_dataset(hdf_name, True) for hdf_name in self.hdf_name_list]
self.datasource = self.hdf_datasources[hdf_band-1]
# self.datasource = gdal.Open(self.datasource.GetSubDatasets()[hdf_band - 1][0], GA_ReadOnly)
self.datasource_info()
@staticmethod
def _open_dataset(image_name, open2read):
"""
Opens the image dataset.
Args:
image_name (str): The full path, name, and extension of the image to open.
open2read (bool): Whether to open the image in 'Read Only' mode.
Returns:
The datasource pointer.
"""
if open2read:
source_dataset = gdal.Open(image_name, GA_ReadOnly)
else:
source_dataset = gdal.Open(image_name, GA_Update)
if source_dataset is None:
logger.error('{} appears to be empty'.format(image_name))
raise EmptyImage
return source_dataset
def _get_hdr_info(self):
hdr_file = '{}.hdr'.format(self.file_name)
if not os.path.isfile(hdr_file):
return
        with open(hdr_file, 'r') as hdr_open:
            for line in hdr_open:
                if line.startswith('samples'):
                    # ENVI 'samples' is the number of columns.
                    line_parsed = line.replace('samples = ', '')
                    self.cols = int(line_parsed)
                elif line.startswith('lines'):
                    # ENVI 'lines' is the number of rows.
                    line_parsed = line.replace('lines = ', '')
                    self.rows = int(line_parsed)
elif line.startswith('map info'):
line_parsed = line.replace('map info = {', '')
line_parsed = line_parsed.replace('}', '').split(',')
self.left = float(line_parsed[3].strip())
self.top = float(line_parsed[4].strip())
self.cellY = float(line_parsed[5].strip())
self.cellX = -self.cellY
elif line.startswith('coordinate'):
self.projection = line.replace('coordinate system string = {', '')
self.projection = self.projection.replace('}\n', '')
def build_overviews(self, sampling_method='nearest', levels=None, be_quiet=False):
"""
Builds image overviews.
Args:
sampling_method (Optional[str]): The sampling method to use. Default is 'nearest'.
levels (Optional[int list]): The levels to build. Default is [2, 4, 8, 16].
            be_quiet (Optional[bool]): Whether to suppress progress messages. Default is False.
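        Example:
            A minimal sketch; assumes ``i_info`` is an open ``ropen`` instance:
            >>> i_info.build_overviews(sampling_method='average', levels=[2, 4, 8])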
"""
if not levels:
levels = [2, 4, 8, 16]
else:
levels = list(map(int, levels))
try:
if not be_quiet:
logger.info(' Building pyramid overviews ...')
self.datasource.BuildOverviews(sampling_method.upper(), overviewlist=levels)
except:
logger.error(gdal.GetLastErrorMsg())
raise ValueError('Failed to build overviews.')
def get_band(self, band_position):
"""
Loads a raster band pointer.
Args:
band_position (int): The band position to load.
"""
if not isinstance(band_position, int) or band_position < 1:
logger.error('The band position must be an integer > 0.')
raise ValueError
try:
self.band = self.datasource.GetRasterBand(band_position)
self.band_open = True
except:
logger.error(gdal.GetLastErrorMsg())
raise ValueError('Failed to load the band.')
def get_stats(self, band_position):
"""
Get band statistics.
Args:
band_position (int)
Returns:
Minimum, Maximum, Mean, Standard deviation
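        Example:
            A minimal sketch; assumes ``i_info`` is an open ``ropen`` instance:
            >>> bmin, bmax, bmu, bstd = i_info.get_stats(1)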
"""
self.get_band(band_position)
return self.band.GetStatistics(1, 1)
def check_corrupted_bands(self):
"""Checks whether corrupted bands exist."""
self.corrupted_bands = list()
for band in range(1, self.bands+1):
try:
self.datasource.GetRasterBand(band).Checksum()
if gdal.GetLastErrorType() != 0:
logger.info('\nBand {:d} of {} appears to be corrupted.\n'.format(band, self.file_name))
self.corrupted_bands.append(str(band))
except:
logger.error(gdal.GetLastErrorMsg())
logger.info('\nBand {:d} of {} appears to be corrupted.\n'.format(band, self.file_name))
self.corrupted_bands.append(str(band))
def write_array(self, array2write, i=0, j=0, band=None):
"""
Writes array to the loaded band object (``self.band`` of ``get_band``).
Args:
array2write (ndarray): The array to write.
i (Optional[int]): The starting row position to write to. Default is 0.
j (Optional[int]): The starting column position to write to. Default is 0.
band (Optional[int]): The band position to write to. Default is None. If None, an object of
``get_band`` must be open.
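        Example:
            A minimal sketch; assumes ``out_rst`` was opened writeable
            (``open2read=False``) and has at least two bands:
            >>> out_rst.write_array(np.zeros((256, 256)), i=100, j=50, band=2)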
"""
if not isinstance(array2write, np.ndarray):
logger.error(' The array must be an ndarray.')
raise ValueError
if not isinstance(i, int) or (i < 0):
logger.error(' The row index must be a positive integer.')
raise ValueError
if not isinstance(j, int) or (j < 0):
logger.error(' The column index must be a positive integer.')
raise ValueError
if isinstance(band, int):
self.get_band(band_position=band)
try:
self.band.WriteArray(array2write, j, i)
except:
logger.error(gdal.GetLastErrorMsg())
if (array2write.shape[0] > self.rows) or (array2write.shape[1] > self.cols):
logger.error('\nThe array is larger than the file size.\n')
raise ArrayShapeError
elif (i + array2write.shape[0]) > self.rows:
logger.error('\nThe starting row position + the array rows spills over.\n')
raise ArrayOffsetError
            elif (j + array2write.shape[1]) > self.cols:
logger.error('\nThe starting column position + the array columns spills over.\n')
raise ArrayOffsetError
else:
if not hasattr(self, 'band'):
logger.error('\nThe band must be set either with `get_band` or `write_array`.\n')
raise AttributeError
else:
logger.error('\nFailed to write the array to file (issue not apparent).')
def close_band(self):
"""Closes a band object"""
if hasattr(self, 'band') and self.band_open:
# try:
# self.band.SetColorInterpretation(self.color_interpretation)
# self.band.SetRasterColorInterpretation(self.color_interpretation)
# except:
# logger.warning('The band color could not be set.')
# logger.error(gdal.GetLastErrorMsg())
# pass
try:
self.band.GetStatistics(0, 1)
except:
logger.warning('The band statistics could not be calculated.')
logger.warning(gdal.GetLastErrorMsg())
try:
self.band.FlushCache()
except:
logger.warning('The band statistics could not be flushed.')
logger.warning(gdal.GetLastErrorMsg())
self.band = None
self.band_open = False
def close_file(self):
"""Closes a file object"""
if hasattr(self, 'datasource'):
if hasattr(self, 'hdf_file'):
if self.hdf_file:
if self.hdf_datasources:
for hdfd in range(0, len(self.hdf_datasources)):
if hasattr(self.hdf_datasources[hdfd], 'FlushCache'):
self.hdf_datasources[hdfd].FlushCache()
self.hdf_datasources[hdfd] = None
# try:
# hdfd.FlushCache()
# except:
#
# logger.warning('The HDF subdataset could not be flushed.')
# logger.error(gdal.GetLastErrorMsg())
#
# pass
# hdfd = None
if hasattr(self.datasource, 'FlushCache'):
try:
self.datasource.FlushCache()
except:
logger.warning('The dataset could not be flushed.')
if hasattr(self, 'output_image'):
# Unlink memory images
if self.output_image.lower().endswith('.mem'):
gdal.Unlink(self.output_image)
try:
os.remove(self.output_image)
except:
pass
self.datasource = None
self.hdf_datasources = None
self.file_open = False
def close_all(self):
"""Closes a band object and a file object"""
self.close_band()
self.close_file()
def fill(self, fill_value, band=None):
"""
Fills a band with a specified value.
Args:
fill_value (int): The value to fill.
band (Optional[int]): The band to fill. Default is None.
"""
if isinstance(band, int):
self.get_band(band_position=band)
self.band.Fill(fill_value)
def get_chunk_size(self):
"""Gets the band block size"""
try:
self.chunk_size = self.band.GetBlockSize()[0]
except:
raise IOError('\nFailed to get the block size.\n')
def remove_overviews(self):
"""Removes image overviews"""
if self.image_mode != 'update':
raise NameError('\nOpen the image in update mode (open2read=False) to remove overviews.\n')
else:
self.build_overviews(levels=[])
def calculate_stats(self, band=1):
"""
Calculates image statistics and can be used to check for empty images.
Args:
band (Optional[int])
"""
self.get_band(band_position=band)
image_metadata = self.band.GetMetadata()
use_exceptions = gdal.GetUseExceptions()
gdal.UseExceptions()
try:
image_min, image_max, image_mu, image_std = self.band.GetStatistics(False, True)
image_metadata['STATISTICS_MINIMUM'] = repr(image_min)
image_metadata['STATISTICS_MAXIMUM'] = repr(image_max)
image_metadata['STATISTICS_MEAN'] = repr(image_mu)
image_metadata['STATISTICS_STDDEV'] = repr(image_std)
image_metadata['STATISTICS_SKIPFACTORX'] = '1'
image_metadata['STATISTICS_SKIPFACTORY'] = '1'
if not use_exceptions:
gdal.DontUseExceptions()
self.band.SetMetadata(image_metadata)
return True
except:
logger.error(gdal.GetLastErrorMsg())
if not use_exceptions:
gdal.DontUseExceptions()
return False
class UpdateInfo(object):
"""A class for updating attributes"""
def update_info(self, **kwargs):
for k, v in viewitems(kwargs):
setattr(self, k, v)
class ImageInfo(UpdateInfo, ReadWrite, FileManager, DatasourceInfo):
"""An empty class for passing image information"""
def __init__(self):
pass
def copy(self):
return copy.copy(self)
class LandsatParser(object):
"""
A class to parse Landsat metadata
Args:
metadata (str)
band_order (Optional[list])
"""
def __init__(self, metadata, band_order=[]):
self.bo = copy.copy(band_order)
if metadata.endswith('MTL.txt'):
self.parse_mtl(metadata)
elif metadata.endswith('.xml'):
self.parse_xml(metadata)
else:
raise NameError('Parser type not supported')
def _cleanup(self):
if os.path.isdir(self.temp_dir):
for landsat_file in os.listdir(self.temp_dir):
os.remove(os.path.join(self.temp_dir, landsat_file))
shutil.rmtree(self.temp_dir)
def parse_mtl(self, metadata):
df = pd.read_table(metadata, header=None, sep='=')
df.rename(columns={0: 'Variable', 1: 'Value'}, inplace=True)
df['Variable'] = df['Variable'].str.strip()
df['Value'] = df['Value'].str.strip()
self.scene_id = df.loc[df['Variable'] == 'LANDSAT_SCENE_ID', 'Value'].values[0].replace('"', '').strip()
if not df.loc[df['Variable'] == 'DATE_ACQUIRED', 'Value'].empty:
self.date = df.loc[df['Variable'] == 'DATE_ACQUIRED', 'Value'].values[0].replace('"', '').strip()
else:
self.date = df.loc[df['Variable'] == 'ACQUISITION_DATE', 'Value'].values[0].replace('"', '').strip()
self.date_ = self.date.split('-')
self.year = self.date_[0]
self.month = self.date_[1]
self.day = self.date_[2]
self.sensor = df.loc[df['Variable'] == 'SENSOR_ID', 'Value'].values[0].replace('"', '').strip()
self.series = df.loc[df['Variable'] == 'SPACECRAFT_ID', 'Value'].values[0].replace('"', '').strip()
self.path = df.loc[df['Variable'] == 'WRS_PATH', 'Value'].astype(int).astype(str).values[0].strip()
if not df.loc[df['Variable'] == 'WRS_ROW', 'Value'].empty:
self.row = df.loc[df['Variable'] == 'WRS_ROW', 'Value'].astype(int).astype(str).values[0].strip()
else:
self.row = df.loc[df['Variable'] == 'STARTING_ROW', 'Value'].astype(int).astype(str).values[0].strip()
self.elev = df.loc[df['Variable'] == 'SUN_ELEVATION', 'Value'].astype(float).values[0]
self.zenith = 90. - self.elev
self.azimuth = df.loc[df['Variable'] == 'SUN_AZIMUTH', 'Value'].astype(float).values[0]
self.cloudCover = df.loc[df['Variable'] == 'CLOUD_COVER', 'Value'].astype(float).astype(str).values[0].strip()
try:
self.imgQuality = df.loc[df['Variable'] == 'IMAGE_QUALITY', 'Value'].astype(int).astype(str).values[0].strip()
except:
self.img_quality_oli = df.loc[df['Variable'] ==
'IMAGE_QUALITY_OLI', 'Value'].astype(int).astype(str).values[0].strip()
self.img_quality_tirs = df.loc[df['Variable'] ==
'IMAGE_QUALITY_TIRS', 'Value'].astype(int).astype(str).values[0].strip()
self.LMAX_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0.}
self.LMIN_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0.}
self.no_coeff = 999
# Landsat 8 radiance
self.rad_mult_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0., 10: 0., 11: 0.}
self.rad_add_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0., 10: 0., 11: 0.}
# Landsat 8 reflectance
self.refl_mult_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0.}
self.refl_add_dict = {1: 0., 2: 0., 3: 0., 4: 0., 5: 0., 6: 0., 7: 0., 8: 0., 9: 0.}
self.k1 = 0
self.k2 = 0
if self.sensor.lower() == 'oli_tirs':
if not self.bo:
self.bo = [2, 3, 4, 5, 6, 7]
else:
if not self.bo:
self.bo = [1, 2, 3, 4, 5, 7]
for bi in self.bo:
if not df.loc[df['Variable'] == 'RADIANCE_MAXIMUM_BAND_{:d}'.format(bi), 'Value'].empty:
self.LMAX_dict[bi] = df.loc[df['Variable'] == 'RADIANCE_MAXIMUM_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
self.LMIN_dict[bi] = df.loc[df['Variable'] == 'RADIANCE_MINIMUM_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
self.no_coeff = 1000
else:
self.LMAX_dict[bi] = df.loc[df['Variable'] == 'LMAX_BAND{:d}'.format(bi),
'Value'].astype(float).values[0]
self.LMIN_dict[bi] = df.loc[df['Variable'] == 'LMIN_BAND{:d}'.format(bi),
'Value'].astype(float).values[0]
self.no_coeff = 1000
if self.sensor.lower() == 'oli_tirs':
self.rad_mult_dict[bi] = df.loc[df['Variable'] == 'RADIANCE_MULT_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
self.rad_add_dict[bi] = df.loc[df['Variable'] == 'RADIANCE_ADD_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
self.refl_mult_dict[bi] = df.loc[df['Variable'] == 'REFLECTANCE_MULT_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
self.refl_add_dict[bi] = df.loc[df['Variable'] == 'REFLECTANCE_ADD_BAND_{:d}'.format(bi),
'Value'].astype(float).values[0]
# TODO: add k1 and k2 values
# self.k1 =
# self.k2 =
def parse_xml(self, metadata):
with open(metadata) as mo:
meta = mo.read()
soup = BeautifulSoup(meta)
wrs = soup.find('wrs')
try:
self.product_id = str(soup.find('product_id').text.strip())
except:
self.product_id = None
self.scene_id = soup.find('lpgs_metadata_file').text
si_index = self.scene_id.find('_')
self.scene_id = self.scene_id[:si_index].strip()
self.path = wrs['path'].strip()
self.row = wrs['row'].strip()
self.sensor = soup.find('instrument').text.strip()
self.series = soup.find('satellite').text.strip()
self.date = soup.find('acquisition_date').text.strip()
self.date_ = self.date.split('-')
self.year = self.date_[0].strip()
self.month = self.date_[1].strip()
self.day = self.date_[2].strip()
solar_angles = soup.find('solar_angles')
self.solar_zenith = float(solar_angles['zenith'].strip())
self.solar_azimuth = float(solar_angles['azimuth'].strip())
self.solar_elevation = 90. - self.solar_zenith
class SentinelParser(object):
"""A class to parse Sentinel 2 metadata"""
def parse_xml(self, metadata):
"""
Args:
metadata (str)
"""
self.complete = False
# xmltodict
try:
import xmltodict
except ImportError:
raise ImportError('xmltodict must be installed to parse Sentinel data')
if not metadata.endswith('.xml'):
logger.warning(' Parser type not supported')
return
if metadata.endswith('_report.xml'):
logger.warning(' Cannot process <report> files.')
return
with open(metadata) as xml_tree:
xml_object = xmltodict.parse(xml_tree.read())
safe_dir = os.path.split(metadata)[0]
self.level = '1C' if 'n1:Level-1C_User_Product' in list(xml_object) else '2A'
base_xml = xml_object['n1:Level-{LEVEL}_User_Product'.format(LEVEL=self.level)]
general_info = base_xml['n1:General_Info']
quality_info = base_xml['n1:Quality_Indicators_Info']
self.cloud_cover = float(quality_info['Cloud_Coverage_Assessment'])
product_info = general_info['L{LEVEL}_Product_Info'.format(LEVEL=self.level)]
self.year, self.month, self.day = product_info['GENERATION_TIME'][:10].split('-')
# self.band_list = product_info['Query_Options']['Band_List']
# self.band_list = [bn for bn in self.band_list['BAND_NAME']]
granule_list = product_info['L{LEVEL}_Product_Organisation'.format(LEVEL=self.level)]['Granule_List']
self.band_name_dict = dict()
for granule_index in range(0, len(granule_list)):
granule_key = 'Granule' if 'Granule' in granule_list[granule_index] else 'Granules'
image_key = 'IMAGE_FILE_2A' if 'IMAGE_FILE_2A' in granule_list[granule_index][granule_key] else 'IMAGE_ID_2A'
granule_identifier = granule_list[granule_index][granule_key]['@granuleIdentifier']
img_data_dir = os.path.join(safe_dir, 'GRANULE', granule_identifier, 'IMG_DATA')
qi_data_dir = os.path.join(safe_dir, 'GRANULE', granule_identifier, 'QI_DATA')
# List of image names
granule_image_list = granule_list[granule_index][granule_key][image_key]
mgrs_code = granule_image_list[0][-13:-8]
# Check if the file name has 20m.
if '20m' in granule_image_list[0]:
granule_image_list_full = list()
for granule_image in granule_image_list:
if '_CLD_' in granule_image:
granule_image_list_full.append(os.path.join(qi_data_dir, granule_image))
else:
granule_image_list_full.append(os.path.join(img_data_dir, 'R20m', granule_image))
self.band_name_dict['{MGRS}-20m'.format(MGRS=mgrs_code)] = granule_image_list_full
elif '10m' in granule_image_list[0]:
granule_image_list_full = list()
for granule_image in granule_image_list:
if '_CLD_' in granule_image:
granule_image_list_full.append(os.path.join(qi_data_dir, granule_image))
else:
granule_image_list_full.append(os.path.join(img_data_dir, 'R10m', granule_image))
self.band_name_dict['{MGRS}-10m'.format(MGRS=mgrs_code)] = granule_image_list_full
image_format = granule_list[granule_index][granule_key]['@imageFormat']
# self.granule_dict = dict()
#
# for granule in granule_list:
#
# tile = granule['Granules']
# tile_id = tile['@granuleIdentifier']
# image_ids = tile['IMAGE_ID']
#
# image_format = tile['@imageFormat']
#
# self.granule_dict[tile_id] = image_ids
self.image_ext = FORMAT_DICT[image_format]
# print self.granule_dict
# self.level = product_info['PROCESSING_LEVEL']
self.product = product_info['PRODUCT_TYPE']
self.series = product_info['Datatake']['SPACECRAFT_NAME']
self.no_data = int(general_info['L{LEVEL}_Product_Image_Characteristics'.format(LEVEL=self.level)]['Special_Values'][0]['SPECIAL_VALUE_INDEX'])
self.saturated = int(general_info['L{LEVEL}_Product_Image_Characteristics'.format(LEVEL=self.level)]['Special_Values'][1]['SPECIAL_VALUE_INDEX'])
self.complete = True
class ropen(FileManager, LandsatParser, SentinelParser, UpdateInfo, ReadWrite):
"""
Gets image information and returns a file pointer object.
Args:
file_name (Optional[str]): Image location, name, and extension. Default is 'none'.
open2read (Optional[bool]): Whether to open image as 'read only' (True) or writeable (False).
Default is True.
metadata (Optional[str]): A metadata file. Default is None.
sensor (Optional[str]): The satellite sensor to parse with ``metadata``. Default is 'Landsat'. Choices are
['Landsat', 'Sentinel2']. This is only used for inplace spectral transformations. It will not
affect the image otherwise.
hdf_band (Optional[int])
Attributes:
file_name (str)
datasource (object)
directory (str)
filename (str)
bands (int)
projection (str)
geo_transform (list)
left (float)
top (float)
right (float)
bottom (float)
cellY (float)
cellX (float)
rows (int)
cols (int)
shape (str)
name (str)
block_x (int)
block_y (int)
Returns:
None
Examples:
>>> # typical usage
>>> import mpglue as gl
>>>
        >>> i_info = gl.ropen('/some_raster.tif')
        >>> # <ropen> has its own array instance
>>> array = i_info.read() # opens band 1, all rows and columns
        >>> print(array)
>>>
>>> # use the <read> function
>>> # open specific rows and columns
        >>> array = gl.read(i_info,
>>> bands2open=[-1],
>>> i=100, j=100,
>>> rows=500, cols=500)
>>>
>>> # compute the NDVI (for Landsat-like band channels only)
>>> i_info.read(compute_index='ndvi')
        >>> print(i_info.ndvi)
        >>> print(i_info.array.shape)  # note that the image array is a 2xrowsxcolumns array
>>> # display the NDVI
>>> i_info.show('ndvi')
>>> # display band 1 of the image (band 1 of <array> is the red band)
>>> i_info.show(band=1)
>>> # write the NDVI to file
>>> i_info.write2raster('/ndvi.tif', write_which='ndvi', \
>>> o_info=i_info.copy(), storage='float32')
>>>
>>> # write an array to file
>>> array = np.random.randn(3, 1000, 1000)
>>> i_info.write2raster('/array.tif', write_which=array, \
>>> o_info=i_info.copy(), storage='float32')
>>>
>>> # create info from scratch
        >>> i_info = gl.ropen('create', left=, right=, top=, bottom=, \
>>> cellY=, cellX=, bands=, storage=, projection=, \
>>> rows=, cols=)
>>>
>>> # build overviews
        >>> i_info = gl.ropen('/some_raster.tif')
>>> i_info.build_overviews()
>>> i_info.close()
>>>
>>> # remove overviews
        >>> i_info = gl.ropen('/some_raster.tif', open2read=False)
>>> i_info.remove_overviews()
>>> i_info.close()
"""
def __init__(self,
file_name='none',
open2read=True,
metadata=None,
sensor='Landsat',
hdf_band=1,
check_corrupted=False,
**kwargs):
self.file_name = os.path.normpath(file_name)
passed = True
if file_name == 'create':
self.update_info(**kwargs)
elif file_name != 'none':
self.get_image_info(open2read, hdf_band, check_corrupted)
else:
passed = False
if isinstance(metadata, str):
self.get_metadata(metadata, sensor)
else:
if not passed:
logger.warning(' No image or metadata file was given.')
# Check open files before closing.
# atexit.register(self.close)
def get_metadata(self, metadata, sensor):
"""
Args:
metadata (str): The metadata file.
sensor (str): The satellite sensor to search. Default is 'Landsat'. Choices are ['Landsat', 'Sentinel2'].
"""
if sensor == 'Landsat':
LandsatParser.__init__(self, metadata)
elif sensor == 'Sentinel2':
SentinelParser.__init__(self, metadata)
else:
logger.error('The {} sensor is not an option.'.format(sensor))
raise NameError
def copy(self):
return copy.copy(self)
def close(self):
"""Closes the dataset"""
self.close_all()
def warp(self, output_image, epsg, resample='nearest', cell_size=0., **kwargs):
"""
Warp transforms a dataset
Args:
output_image (str): The output image.
epsg (int): The output EPSG projection code.
            resample (Optional[str]): The resampling method. Default is 'nearest'.
cell_size (Optional[float]): The output cell size. Default is 0.
kwargs:
format='GTiff', outputBounds=None (minX, minY, maxX, maxY),
outputBoundsSRS=None, targetAlignedPixels=False,
width=0, height=0, srcAlpha=False, dstAlpha=False, warpOptions=None,
errorThreshold=None, warpMemoryLimit=None,
creationOptions=None, outputType=0, workingType=0,
resampleAlg=resample_dict[resample], srcNodata=None, dstNodata=None,
multithread=False, tps=False, rpc=False, geoloc=False,
polynomialOrder=None, transformerOptions=None, cutlineDSName=None,
cutlineLayer=None, cutlineWhere=None, cutlineSQL=None,
cutlineBlend=None, cropToCutline=False, copyMetadata=True,
metadataConflictValue=None, setColorInterpretation=False,
callback=None, callback_data=None
Returns:
None, writes to `output_image'.
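        Example:
            A minimal sketch; EPSG 32618 (UTM 18N) and the 30m cell size
            are illustrative values:
            >>> i_info.warp('output_utm.tif', 32618, resample='cubic', cell_size=30.)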
"""
warp_options = gdal.WarpOptions(srcSRS=None, dstSRS='EPSG:{:d}'.format(epsg),
xRes=cell_size, yRes=cell_size,
resampleAlg=RESAMPLE_DICT[resample],
**kwargs)
out_ds = gdal.Warp(output_image, self.file_name, options=warp_options)
out_ds = None
def translate(self, output_image, cell_size=0, **kwargs):
"""
Args:
output_image (str): The output image.
cell_size (Optional[float]): The output cell size. Default is 0.
kwargs:
format='GTiff', outputType=0, bandList=None, maskBand=None, width=0, height=0,
widthPct=0.0, heightPct=0.0, xRes=0.0, yRes=0.0, creationOptions=None, srcWin=None,
projWin=None, projWinSRS=None, strict=False, unscale=False, scaleParams=None,
exponents=None, outputBounds=None, metadataOptions=None, outputSRS=None, GCPs=None,
noData=None, rgbExpand=None, stats=False, rat=True, resampleAlg=None,
callback=None, callback_data=None
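        Example:
            A minimal sketch; the 60m cell size is an illustrative value:
            >>> i_info.translate('output_60m.tif', cell_size=60.)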
"""
translate_options = gdal.TranslateOptions(xRes=cell_size, yRes=cell_size, **kwargs)
out_ds = gdal.Translate(output_image, self.file_name, options=translate_options)
out_ds = None
def hist(self,
input_array=None,
band=1,
i=0,
j=0,
rows=-1,
cols=-1,
d_type='byte',
name_dict=None,
bins=256,
**kwargs):
"""
Prints the image histogram
Args:
input_array (Optional[2d array]): An array to get the histogram from, otherwise, open the array.
band (Optional[int]): The band to get the histogram from.
i (Optional[int]): The starting row position.
j (Optional[int]): The starting column position.
rows (Optional[int]): The number of rows to take.
cols (Optional[int]): The number of columns to take.
d_type (Optional[str]): The image data type.
name_dict (Optional[dict]): A dictionary of {value: 'name'} for discrete value arrays.
bins (Optional[int]): The number of bins.
kwargs:
Other arguments passed to `numpy.histogram`.
range (Optional[tuple]): The histogram range.
normed (Optional[bool])
weights
density
Example:
>>> import mpglue as gl
>>>
>>> i_info = gl.ropen('image_name.tif')
>>>
>>> i_info.hist()
>>>
>>> # Print the histogram dictionary.
>>> print(i_info.hist_dict)
"""
if 'range' not in kwargs:
kwargs['range'] = (0, bins-1)
if isinstance(input_array, np.ndarray):
the_hist, bin_edges = np.histogram(input_array,
bins=bins,
**kwargs)
elif hasattr(self, 'array') and not isinstance(input_array, np.ndarray):
the_hist, bin_edges = np.histogram(self.array,
bins=bins,
**kwargs)
else:
the_hist, bin_edges = np.histogram(self.read(bands2open=band,
i=i,
j=j,
rows=rows,
cols=cols,
d_type=d_type),
bins=bins,
**kwargs)
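        # When the requested range starts at zero, the zero bin is excluded
        # from the sample total below (zeros presumably represent no-data).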
if kwargs['range'][0] == 0:
self.total_samples = float(the_hist[1:].sum())
else:
self.total_samples = float(the_hist.sum())
the_hist_pct = (the_hist / self.total_samples) * 100.
self.hist_dict = dict()
for i in range(0, bins):
if the_hist[i] > 0:
if isinstance(name_dict, dict):
if i not in name_dict:
label = 'unknown'
else:
label = name_dict[i]
self.hist_dict[i] = dict(value=i,
name=label,
count=the_hist[i],
perc=round(the_hist_pct[i], 4))
else:
self.hist_dict[i] = dict(value=i,
count=the_hist[i],
perc=round(the_hist_pct[i], 4))
# Sort the values, largest to smallest
self.hist_dict = OrderedDict(sorted(list(iteritems(self.hist_dict)),
key=lambda item: item[1]['count'],
reverse=True))
def pca(self, n_components=3):
"""
        Computes Principal Components Analysis
Args:
n_components (Optional[int]): The number of components to return. Default is 3.
Attributes:
pca_components (ndarray)
Returns:
None
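        Example:
            A minimal sketch; assumes three bands were loaded with ``read``:
            >>> i_info.read(bands2open=[1, 2, 3])
            >>> i_info.pca(n_components=2)
            >>> print(i_info.pca_components.shape)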
"""
# Scikit-learn
try:
from sklearn import decomposition
from sklearn.preprocessing import StandardScaler
except ImportError:
raise ImportError('Scikit-learn must be installed to run PCA')
if n_components > self.bands:
n_components = self.bands
embedder = decomposition.PCA(n_components=n_components)
dims, rs, cs = self.array.shape
x = self.array.T.reshape(rs*cs, dims)
scaler = StandardScaler().fit(x)
x = scaler.transform(x.astype(np.float32)).astype(np.float32)
x_fit = embedder.fit(x.astype(np.float32))
x_reduced = x_fit.transform(x)
self.pca_components = x_reduced.reshape(cs, rs, n_components).T
def show(self,
show_which='array',
band=1,
color_map='gist_stern',
discrete=False,
class_list=None,
out_fig=None,
dpi=300,
clip_percentiles=(2, 98),
equalize_hist=False,
equalize_adapthist=False,
gammas=None,
sigmoid=None):
"""
Displays an array
Args:
show_which (Optional[str]): Which array to display. Default is 'array'. Choices are ['array',
'evi2', 'gndvi', 'ndbai', 'ndvi', 'ndwi', 'savi'].
band (Optional[int]): The band to display. Default is 1.
color_map (Optional[str]): The colormap to use. Default is 'gist_stern'. For more colormaps, visit
http://matplotlib.org/examples/color/colormaps_reference.html.
discrete (Optional[bool]): Whether the colormap is discrete. Otherwise, continuous. Default is False.
class_list (Optional[int list]): A list of the classes to display. Default is [].
out_fig (Optional[str]): An output image to save to. Default is None.
dpi (Optional[int]): The DPI of the output figure. Default is 300.
clip_percentiles (Optional[tuple]): The lower and upper clip percentiles to rescale RGB images.
Default is (2, 98).
equalize_hist (Optional[bool]): Whether to equalize the histogram. Default is False.
equalize_adapthist (Optional[bool]): Whether to equalize the histogram using a localized approach.
Default is False.
gammas (Optional[float list]): A list of gamma corrections for each band. Default is [].
sigmoid (Optional[float list]): A list of sigmoid contrast and gain values. Default is [].
Examples:
>>> import mpglue as gl
        >>> i_info = gl.ropen('image')
>>>
>>> # Plot a discrete map with specified colors
>>> color_map = ['#000000', '#DF7401', '#AEB404', '#0B6121', '#610B0B', '#A9D0F5',
>>> '#8181F7', '#BDBDBD', '#3A2F0B', '#F2F5A9', '#5F04B4']
>>> i_info.show(color_map=color_map, discrete=True,
>>> class_list=[0,1,2,3,4,5,6,7,8,9,10])
>>>
>>> # Plot the NDVI
>>> i_info.read(compute_index='ndvi')
>>> i_info.show(show_which='ndvi')
>>>
>>> # Plot a single band array as greyscale
>>> i_info.read(bands2open=4)
>>> i_info.show(color_map='Greys')
>>>
>>> # Plot a 3-band array as RGB true color
>>> i_info.read(bands2open=[3, 2, 1], sort_bands2open=False)
>>> i_info.show(band='rgb')
Returns:
None
"""
fig = plt.figure()
ax = fig.add_subplot(111)
plt.axis('off')
if show_which == 'ndvi':
self.array[self.array != 0] += 1.1
if equalize_hist:
self.array = exposure.equalize_hist(self.array)
ip = ax.imshow(self.array)
im_min = np.percentile(self.array, clip_percentiles[0])
im_max = np.percentile(self.array, clip_percentiles[1])
elif show_which == 'evi2':
if equalize_hist:
self.array = exposure.equalize_hist(self.array)
ip = ax.imshow(self.array)
im_min = np.percentile(self.array, clip_percentiles[0])
im_max = np.percentile(self.array, clip_percentiles[1])
elif show_which == 'pca':
if equalize_hist:
self.pca_components[band-1] = exposure.equalize_hist(self.pca_components[band-1])
ip = ax.imshow(self.pca_components[band-1])
im_min = np.percentile(self.pca_components[band-1], clip_percentiles[0])
im_max = np.percentile(self.pca_components[band-1], clip_percentiles[1])
else:
if self.array_shape[0] > 1:
if band == 'rgb':
for ii, im in enumerate(self.array):
pl, pu = np.percentile(im, clip_percentiles)
self.array[ii] = exposure.rescale_intensity(im, in_range=(pl, pu), out_range=(0, 255))
if equalize_hist:
self.array[ii] = exposure.equalize_hist(im)
if equalize_adapthist:
self.array[ii] = exposure.equalize_adapthist(im, ntiles_x=4, ntiles_y=4, clip_limit=0.5)
if gammas:
self.array[ii] = exposure.adjust_gamma(im, gammas[ii])
if sigmoid:
self.array[ii] = exposure.adjust_sigmoid(im, cutoff=sigmoid[0], gain=sigmoid[1])
# ip = ax.imshow(cv2.merge([self.array[2], self.array[1], self.array[0]]))
ip = ax.imshow(np.ascontiguousarray(self.array.transpose(1, 2, 0)))
# ip = ax.imshow(np.dstack((self.array[0], self.array[1], self.array[2])), interpolation='nearest')
else:
ip = ax.imshow(self.array[band-1])
im_min = np.percentile(self.array[band-1], clip_percentiles[0])
im_max = np.percentile(self.array[band-1], clip_percentiles[1])
else:
ip = ax.imshow(self.array)
im_min = np.percentile(self.array, clip_percentiles[0])
im_max = np.percentile(self.array, clip_percentiles[1])
ip.axes.get_xaxis().set_visible(False)
ip.axes.get_yaxis().set_visible(False)
if discrete:
if isinstance(color_map, list):
color_map = colors.ListedColormap(color_map)
# color_map = colorbar.ColorbarBase(ax, cmap=color_map_)
ip.set_cmap(color_map)
elif color_map.lower() == 'random':
ip.set_cmap(colors.ListedColormap(np.random.rand(len(class_list), 3)))
else:
ip.set_cmap(_discrete_cmap(len(class_list), base_cmap=color_map))
ip.set_clim(min(class_list), max(class_list))
else:
if band != 'rgb':
ip.set_cmap(color_map)
ip.set_clim(im_min, im_max)
cbar = plt.colorbar(ip, fraction=0.046, pad=0.04, orientation='horizontal')
cbar.solids.set_edgecolor('face')
# Remove colorbar container frame
cbar.outline.set_visible(False)
# cbar.set_ticks([])
# cbar.set_ticklabels(class_list)
if band == 'rgb':
colorbar_label = 'RGB'
else:
if show_which == 'array':
colorbar_label = 'Band {:d} of {:d} bands'.format(band, self.array_shape[0])
else:
colorbar_label = show_which.upper()
cbar.ax.set_xlabel(colorbar_label)
# Remove color bar tick lines, while keeping the tick labels
cbarytks = plt.getp(cbar.ax.axes, 'xticklines')
plt.setp(cbarytks, visible=False)
if isinstance(out_fig, str):
plt.savefig(out_fig, dpi=dpi, bbox_inches='tight', pad_inches=.1, transparent=True)
else:
plt.show()
if show_which == 'ndvi':
self.array[self.array != 0] -= 1.1
plt.clf()
plt.close(fig)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
def __del__(self):
self.__exit__(None, None, None)
class PanSharpen(object):
"""
A class to pan sharpen an image
Args:
multi_image (str)
pan_image (str)
out_dir (Optional[str])
method (Optional[str])
Equation:
DNF = (P - IW * I) / (RW * R + GW * G + BW * B)
Red_out = R * DNF
Green_out = G * DNF
Blue_out = B * DNF
Infrared_out = I * DNF
pan / ((rgb[0] + rgb[1] + rgb[2] * weight) / (2 + weight))
Example:
>>> ps = PanSharpen('/multi.tif', '/pan.tif', method='brovey')
>>> ps.sharpen()
"""
def __init__(self, multi_image, pan_image, out_dir=None, method='brovey'):
self.multi_image = multi_image
self.pan_image = pan_image
self.method = method
if isinstance(out_dir, str):
self.out_dir = out_dir
else:
self.out_dir = os.path.split(self.multi_image)[0]
f_name = os.path.split(self.multi_image)[1]
self.f_base, self.f_ext = os.path.splitext(f_name)
self.multi_image_ps = os.path.join(self.out_dir, '{}_pan.tif'.format(self.f_base))
def sharpen(self, bw=.2, gw=1., rw=1., iw=.5):
self.bw = bw
self.gw = gw
self.rw = rw
self.iw = iw
# self._sharpen_gdal()
self._warp_multi()
self._sharpen()
def _sharpen_gdal(self):
with ropen(self.multi_image) as m_info:
m_bands = m_info.bands
m_info = None
logger.info('\nPan-sharpening ...\n')
if m_bands == 4:
com = 'gdal_pansharpen.py {} {} {} ' \
'-w {:f} -w {:f} -w {:f} -w {:f} -r cubic ' \
'-bitdepth 16 -threads ALL_CPUS -co TILED=YES -co COMPRESS=DEFLATE'.format(self.pan_image,
self.multi_image,
self.multi_image_ps,
self.bw,
self.gw,
self.rw,
self.iw)
else:
com = 'gdal_pansharpen.py {} {} {} ' \
'-w {:f} -w {:f} -w {:f} -r cubic ' \
'-bitdepth 16 -threads ALL_CPUS -co TILED=YES -co COMPRESS=DEFLATE'.format(self.pan_image,
self.multi_image,
self.multi_image_ps,
self.bw,
self.gw,
self.rw)
# Note: the weight order assumes a blue/green/red(/nir) band order in the
# multi-spectral image.
subprocess.call(com, shell=True)
def _do_sharpen(self, im):
try:
import numexpr as ne
except ImportError:
raise ImportError('Numexpr is needed for pan-sharpening.')
blue = im[0][0]
green = im[0][1]
red = im[0][2]
pan_array = im[1]
bw = self.bw
gw = self.gw
rw = self.rw
iw = self.iw
if im[0].shape[0] == 4:
nir = im[0][3]
if self.method == 'esri':
dnf = ne.evaluate('pan_array - ((red*.166 + green*.167 + blue*.167 + nir*.5) / (.166+.167+.167+.5))')
elif self.method == 'brovey':
# dnf = ne.evaluate('(pan_array - (iw * nir)) / ((rw * red) + (gw * green) + (bw * blue))')
dnf = ne.evaluate('pan_array / (((blue * bw) + (green * gw) + (red * rw) + (nir * iw)) / (bw + gw + rw + iw))')
# TODO
# else:
# dnf = ne.evaluate('(pan_array - iw * nir) / (rw * red + gw * green + bw * blue)')
im = im[0]
# plt.subplot(121)
# plt.imshow(im[0]+dnf)
# plt.axis('off')
# plt.subplot(122)
# plt.imshow(dnf)
# plt.axis('off')
# plt.show()
# sys.exit()
for bi in range(0, im.shape[0]):
if self.method == 'esri':
im[bi] += dnf
elif self.method == 'brovey':
im[bi] *= dnf
return im
def _sharpen(self):
with ropen(self.multi_warped) as m_info, ropen(self.pan_image) as p_info:
o_info = m_info.copy()
bp = BlockFunc(self._do_sharpen,
[m_info, p_info],
self.multi_image_ps,
o_info,
band_list=[list(range(1, m_info.bands+1)), 1],
d_types=['float32', 'float32'],
block_rows=4000,
block_cols=4000,
print_statement='\nPan sharpening using {} ...\n'.format(self.method.title()),
method=self.method)
bp.run()
m_info = None
p_info = None
def _warp_multi(self):
# Get warping info.
with ropen(self.pan_image) as p_info:
extent = p_info.extent
cell_size = p_info.cellY
p_info = None
self.multi_warped = os.path.join(self.out_dir, '{}_warped.tif'.format(self.f_base))
logger.info('Resampling to pan scale ...')
# Resample the multi-spectral bands.
warp(self.multi_image,
self.multi_warped,
cell_size=cell_size,
resample='cubic',
outputBounds=[extent['left'],
extent['bottom'],
extent['right'],
extent['top']],
multithread=True,
creationOptions=['GDAL_CACHEMAX=256',
'TILED=YES'])
def gdal_open(image2open, band):
"""
A direct file open from GDAL.
"""
driver_o = gdal.Open(image2open, GA_ReadOnly)
return driver_o, driver_o.GetRasterBand(band)
def gdal_read(image2open, band, i, j, rows, cols):
"""
A direct array read from GDAL.
"""
driver_o = gdal.Open(image2open, GA_ReadOnly)
if isinstance(band, list):
band_array = []
for bd in band:
band_object_o = driver_o.GetRasterBand(bd)
band_array.append(band_object_o.ReadAsArray(j, i, cols, rows))
band_object_o = None
driver_o = None
return np.array(band_array, dtype='float32').reshape(len(band), rows, cols)
else:
band_object_o = driver_o.GetRasterBand(band)
band_array = np.float32(band_object_o.ReadAsArray(j, i, cols, rows))
band_object_o = None
driver_o = None
return band_array
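# Illustrative `gdal_read` usage (assumes 'image.tif' exists):
# >>> block = gdal_read('image.tif', 1, 0, 0, 512, 512)          # (512, 512)
# >>> stack = gdal_read('image.tif', [1, 2, 3], 0, 0, 512, 512)  # (3, 512, 512)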
def gdal_write(band_object_w, array2write, io=0, jo=0):
band_object_w.WriteArray(array2write, jo, io)
def gdal_close_band(band_object_c):
try:
band_object_c.FlushCache()
except:
logger.error(gdal.GetLastErrorMsg())
pass
band_object_c = None
return band_object_c
def gdal_close_datasource(datasource_d):
try:
datasource_d.FlushCache()
except:
logger.error(gdal.GetLastErrorMsg())
pass
datasource_d = None
return datasource_d
def gdal_register(image_name, in_memory=False):
__, f_name = os.path.split(image_name)
__, file_extension = os.path.splitext(f_name)
if file_extension.lower() not in DRIVER_DICT:
raise TypeError('{} is not an image, or is not a supported raster format.'.format(file_extension))
else:
file_format = DRIVER_DICT[file_extension.lower()]
gdal.AllRegister()
if in_memory:
driver_r = gdal.GetDriverByName('MEM')
else:
driver_r = gdal.GetDriverByName(file_format)
driver_r.Register()
return driver_r
def gdal_create(image_name, driver_cr, out_rows, out_cols, n_bands, storage_type,
left, top, cellY, cellX, projection,
in_memory=False, overwrite=False, parameters=[]):
if overwrite:
if os.path.isfile(image_name):
os.remove(image_name)
# Create the output driver.
if in_memory:
return driver_cr.Create('', out_cols, out_rows, n_bands, storage_type)
else:
ds = driver_cr.Create(image_name, out_cols, out_rows, n_bands, storage_type, parameters)
# Set the geo-transformation.
ds.SetGeoTransform([left, cellY, 0., top, 0., cellX])
# Set the projection.
ds.SetProjection(projection)
return ds
def gdal_get_band(datasource_b, band_position):
return datasource_b.GetRasterBand(band_position)
def _parallel_blocks(out_image,
band_list,
ii,
jj,
y_offset,
x_offset,
nn_rows,
nn_cols,
left,
top,
cellY,
cellX,
projection,
**kwargs):
"""
Args:
out_image:
band_list:
ii:
jj:
y_offset:
x_offset:
n_rows:
n_cols:
**kwargs:
Returns:
"""
# out_info_tile = out_info.copy()
# out_info_tile.update_info(rows=nn_rows, cols=nn_cols,
# left=out_info.left+(jj*out_info.cellY),
# top=out_info.top-(ii*out_info.cellY))
d_name_, f_name_ = os.path.split(out_image)
f_base_, f_ext_ = os.path.splitext(f_name_)
d_name_ = os.path.join(d_name_, 'temp')
rsn = '{:f}'.format(abs(np.random.randn(1)[0]))[-4:]
out_image_tile = os.path.join(d_name_, '{}_{}{}'.format(f_base_, rsn, f_ext_))
datasource = gdal_create(out_image_tile,
driver_pp,
nn_rows,
nn_cols,
1,
STORAGE_DICT_GDAL['float32'],
left,
top,
cellY,
cellX,
projection)
band_object = gdal_get_band(datasource, 1)
# out_raster = create_raster(out_image_tile, out_info_tile)
# out_raster.get_band(1)
image_arrays = [gdal_read(image_infos_list[imi],
band_list[imi],
ii+y_offset[imi],
jj+x_offset[imi],
nn_rows,
nn_cols) for imi in range(0, len(image_infos_list))]
output = block_func(image_arrays, **kwargs)
gdal_write(band_object, output)
band_object = gdal_close_band(band_object)
datasource = gdal_close_datasource(datasource)
return out_image_tile
class BlockFunc(object):
"""
A class for block by block processing
Args:
func
image_infos (list): A list of ``ropen`` instances.
out_image (str): The output image.
out_info (object): An instance of ``ropen``.
band_list (Optional[list]): A list of band positions. Default is [].
proc_info (Optional[object]): An instance of ``ropen``. Overrides image_infos[0]. Default is None.
y_offset (Optional[list]): The row offset. Default is [0].
x_offset (Optional[list]): The column offset. Default is [0].
y_pad (Optional[int]): The row padding. Default is None.
x_pad (Optional[int]): The column padding. Default is None.
block_rows (Optional[int]): The block row chunk size. Default is 2048.
block_cols (Optional[int]): The block column chunk size. Default is 2048.
d_types (Optional[str list]): A list of read data types. Default is None.
be_quiet (Optional[bool]): Whether to be quiet and do not print progress. Default is False.
print_statement (Optional[str]): A string to print. Default is None.
out_attributes (Optional[list]): A list of output attribute names. Default is [].
write_array (Optional[bool]): Whether to write the output array to file. Default is True.
bigtiff (Optional[str]): GDAL option passed to `create_raster`. Default is 'no'. See `create_raster`
for details.
boundary_file (Optional[str]): A file to use for block intersection. Default is None.
Skip blocks that do not intersect ``boundary_file``.
mask_file (Optional[str]): A file to use for block masking. Default is None.
Recode blocks to binary 1 and 0 that intersect ``mask_file``.
n_jobs (Optional[int]): The number of blocks to process in parallel. Default is 1.
no_data_values (Optional[list]): A list of no data values for each image. Default is None.
kwargs (Optional[dict]): Function specific parameters.
Returns:
None, writes to ``out_image``.
"""
def __init__(self,
func,
image_infos,
out_image,
out_info,
band_list=None,
proc_info=None,
y_offset=None,
x_offset=None,
y_pad=None,
x_pad=None,
block_rows=2000,
block_cols=2000,
be_quiet=False,
d_types=None,
print_statement=None,
out_attributes=None,
write_array=True,
bigtiff='no',
boundary_file=None,
mask_file=None,
n_jobs=1,
close_files=True,
no_data_values=None,
overwrite=False,
**kwargs):
self.func = func
self.image_infos = image_infos
self.out_image = out_image
self.out_info = out_info
self.band_list = band_list
self.proc_info = proc_info
self.y_offset = y_offset
self.x_offset = x_offset
self.y_pad = y_pad
self.x_pad = x_pad
self.block_rows = block_rows
self.block_cols = block_cols
self.d_types = d_types
self.be_quiet = be_quiet
self.print_statement = print_statement
self.out_attributes = out_attributes
self.write_array = write_array
self.bigtiff = bigtiff
self.boundary_file = boundary_file
self.mask_file = mask_file
self.n_jobs = n_jobs
self.close_files = close_files
self.no_data_values = no_data_values
self.kwargs = kwargs
self.out_attributes_dict = dict()
if not isinstance(self.d_types, list):
self.d_types = ['byte'] * len(self.image_infos)
if not self.y_offset:
self.y_offset = [0] * len(self.image_infos)
if not self.x_offset:
self.x_offset = [0] * len(self.image_infos)
if not isinstance(self.out_image, str) and write_array:
logger.error(' The output image was not given.')
raise NameError
if overwrite:
if os.path.isfile(self.out_image):
os.remove(self.out_image)
if self.n_jobs in [0, 1]:
if not self.proc_info:
self.proc_info = self.image_infos[0]
for imi in range(0, len(self.image_infos)):
if not isinstance(self.image_infos[imi], ropen):
if not isinstance(self.image_infos[imi], GetMinExtent):
if not isinstance(self.image_infos[imi], ImageInfo):
logger.error(' The image info list should be instances of `ropen`, `GetMinExtent`, or `ImageInfo`.')
raise ropenError
if not isinstance(self.band_list, list) and isinstance(self.band_list, int):
self.band_list = [self.band_list] * len(self.image_infos)
else:
if self.band_list:
if len(self.band_list) != len(self.image_infos):
logger.error(' The band list and image info list must be the same length.')
raise LenError
else:
self.band_list = [1] * len(self.image_infos)
if isinstance(out_image, str):
if not isinstance(self.out_info, ropen):
if not isinstance(self.out_info, GetMinExtent):
logger.error(' The output image object is not a `raster_tools` instance.')
raise ropenError
if not isinstance(self.image_infos, list):
logger.error(' The image infos must be given as a list.')
raise TypeError
if not len(self.y_offset) == len(self.x_offset) == len(self.image_infos):
logger.error(' The offset lists and input image info lists must be the same length.')
raise LenError
def run(self):
global block_func, image_infos_list, driver_pp
if self.n_jobs in [0, 1]:
for imi in range(0, len(self.image_infos)):
if isinstance(self.band_list[imi], int):
self.image_infos[imi].get_band(self.band_list[imi])
self._process_blocks()
else:
block_func = self.func
image_infos_list = self.image_infos
self._get_pairs()
dn, fn = os.path.split(self.out_image)
if not dn and not os.path.isabs(fn):
dn = os.path.abspath('.')
check_and_create_dir(os.path.join(dn, 'temp'))
driver_pp = gdal_register(self.out_image)
tile_list = Parallel(n_jobs=self.n_jobs,
max_nbytes=None)(delayed(_parallel_blocks)(self.out_image,
self.band_list,
pair[0], pair[1],
self.y_offset,
self.x_offset,
pair[2], pair[3],
self.out_info.left+(pair[1]*self.out_info.cellY),
self.out_info.top-(pair[0]*self.out_info.cellY),
self.out_info.cellY,
self.out_info.cellX,
self.out_info.projection,
**self.kwargs) for pair in self.pairs)
def _get_pairs(self):
self.pairs = []
for i in range(0, self.proc_info.rows, self.block_rows):
n_rows = n_rows_cols(i, self.block_rows, self.proc_info.rows)
for j in range(0, self.proc_info.cols, self.block_cols):
n_cols = n_rows_cols(j, self.block_cols, self.proc_info.cols)
self.pairs.append((i, j, n_rows, n_cols))
def _process_blocks(self):
if self.write_array:
out_raster = create_raster(self.out_image,
self.out_info,
bigtiff=self.bigtiff)
# n_blocks = 0
# for i in range(0, self.proc_info.rows, self.block_rows):
# for j in range(0, self.proc_info.cols, self.block_cols):
# n_blocks += 1
#
# n_block = 1
if isinstance(self.print_statement, str):
logger.info(self.print_statement)
# set widget and pbar
if not self.be_quiet:
ctr, pbar = _iteration_parameters(self.proc_info.rows, self.proc_info.cols,
self.block_rows, self.block_cols)
# iterate over the images and get change pixels
for i in range(0, self.proc_info.rows, self.block_rows):
n_rows = n_rows_cols(i, self.block_rows, self.proc_info.rows)
if isinstance(self.y_pad, int):
y_pad_minus = 0 if i == 0 else self.y_pad
y_pad_plus = 0 if i + n_rows + self.y_pad > self.proc_info.rows else self.y_pad
else:
y_pad_minus = 0
y_pad_plus = 0
for j in range(0, self.proc_info.cols, self.block_cols):
n_cols = n_rows_cols(j, self.block_cols, self.proc_info.cols)
if isinstance(self.x_pad, int):
x_pad_minus = 0 if j == 0 else self.x_pad
x_pad_plus = 0 if j + n_cols + self.x_pad > self.proc_info.cols else self.x_pad
else:
x_pad_minus = 0
x_pad_plus = 0
if isinstance(self.boundary_file, str):
# Get the extent of the current block.
self.get_block_extent(i, j, n_rows, n_cols)
# Check if the block intersects the boundary file.
if not vector_tools.intersects_boundary(self.extent_dict, self.boundary_file):
continue
# if not self.be_quiet:
#
# if n_block == 1:
# print 'Blocks 1--19 of {:,d} ...'.format(n_blocks)
# elif n_block % 20 == 0:
# n_block_ = n_block + 19 if n_blocks - n_block > 20 else n_blocks
# print 'Block {:,d}--{:,d} of {:,d} ...'.format(n_block, n_block_, n_blocks)
#
# n_block += 1
image_arrays = [self.image_infos[imi].read(bands2open=self.band_list[imi],
i=i+self.y_offset[imi]-y_pad_minus,
j=j+self.x_offset[imi]-x_pad_minus,
rows=n_rows+y_pad_plus,
cols=n_cols+x_pad_plus,
d_type=self.d_types[imi])
for imi in range(0, len(self.image_infos))]
skip_block = False
# Check for no data values.
if isinstance(self.no_data_values, list):
for no_data, im_block in zip(self.no_data_values, image_arrays):
if isinstance(no_data, int) or isinstance(no_data, float):
if im_block.max() == no_data:
skip_block = True
break
if skip_block:
continue
if isinstance(self.mask_file, str):
self.get_block_extent(i, j, n_rows, n_cols)
orw = create_raster('none',
None,
in_memory=True,
rows=n_rows,
cols=n_cols,
bands=1,
projection=self.proc_info.projection,
cellY=self.proc_info.cellY,
cellX=self.proc_info.cellX,
left=self.extent_dict['UL'][0],
top=self.extent_dict['UL'][1],
storage='byte')
# Rasterize the vector at the current block.
with vector_tools.vopen(self.mask_file) as v_info:
gdal.RasterizeLayer(orw.datasource, [1], v_info.lyr, burn_values=[1])
block_array = orw.datasource.GetRasterBand(1).ReadAsArray(0, 0, n_cols, n_rows)
for imib, image_array in enumerate(image_arrays):
image_array[block_array == 0] = 0
image_arrays[imib] = image_array
v_info = None
gdal.Unlink('none')
output = self.func(image_arrays,
**self.kwargs)
if isinstance(output, tuple):
if self.write_array:
if output[0].shape[0] > 1:
for obi, obb in enumerate(output[0]):
out_raster.write_array(obb,
i=i,
j=j,
band=obi + 1)
else:
out_raster.write_array(output[0],
i=i,
j=j,
band=1)
# Get the other results.
for ri in range(1, len(output)):
# self.kwargs[self.out_attributes[ri-1]] = output[ri]
if self.out_attributes[ri-1] not in self.out_attributes_dict:
self.out_attributes_dict[self.out_attributes[ri-1]] = [output[ri]]
else:
self.out_attributes_dict[self.out_attributes[ri-1]].append(output[ri])
else:
if self.write_array:
if len(output.shape) > 2:
for obi, obb in enumerate(output):
out_raster.write_array(obb,
i=i,
j=j,
band=obi+1)
else:
out_raster.write_array(output,
i=i,
j=j,
band=1)
if not self.be_quiet:
pbar.update(ctr)
ctr += 1
if self.out_attributes_dict:
for ri in range(1, len(output)):
setattr(self, self.out_attributes[ri-1], self.out_attributes_dict[self.out_attributes[ri-1]])
if not self.be_quiet:
pbar.finish()
if isinstance(self.out_image, str):
if self.close_files:
for imi in range(0, len(self.image_infos)):
self.image_infos[imi].close()
self.out_info.close()
if self.write_array:
out_raster.close_all()
def get_block_extent(self, ii, jj, nn_rows, nn_cols):
adj_left = self.proc_info.left + (jj * self.proc_info.cellY)
adj_right = adj_left + (nn_cols * self.proc_info.cellY) + self.proc_info.cellY
adj_top = self.proc_info.top - (ii * self.proc_info.cellY)
adj_bottom = adj_top - (nn_rows * self.proc_info.cellY) - self.proc_info.cellY
self.extent_dict = {'UL': [adj_left, adj_top],
'UR': [adj_right, adj_top],
'LL': [adj_left, adj_bottom],
'LR': [adj_right, adj_bottom]}
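# A minimal `BlockFunc` usage sketch (illustrative; assumes 'in.tif' exists and
# that the block function takes the list of block arrays as its first argument):
# >>> def scale_block(image_arrays):
# >>>     return image_arrays[0] * 0.0001
# >>>
# >>> with ropen('in.tif') as i_info:
# >>>     o_info = i_info.copy()
# >>>     o_info.update_info(bands=1, storage='float32')
# >>>     bp = BlockFunc(scale_block, [i_info], 'out.tif', o_info,
# >>>                    band_list=[1], d_types=['float32'])
# >>>     bp.run()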
def _read_parallel(image, image_info, bands2open, y, x, rows2open, columns2open, n_jobs, d_type, predictions):
"""
Opens image bands into arrays using multiple processes
Args:
image (str): The image to open.
image_info (instance)
bands2open (int or int list): Band position to open or list of bands to open.
y (int): Starting row position.
x (int): Starting column position.
rows2open (int): Number of rows to extract.
columns2open (int): Number of columns to extract.
n_jobs (int): The number of jobs to run in parallel.
d_type (str): Type of array to return.
predictions (bool): Whether to return reshaped array for predictions.
Returns:
Ndarray where [rows, cols] if 1 band and [bands, rows, cols] if more than 1 band
"""
if isinstance(bands2open, list):
if max(bands2open) > image_info.bands:
raise ValueError('\nCannot open more bands than exist in the image.\n')
else:
if bands2open == -1:
bands2open = list(range(1, image_info.bands+1))
if rows2open == -1:
rows2open = image_info.rows
if columns2open == -1:
columns2open = image_info.cols
image_info.close()
band_arrays = Parallel(n_jobs=n_jobs)(delayed(gdal_read)(image,
band2open,
y,
x,
rows2open,
columns2open)
for band2open in bands2open)
if predictions:
# Check for empty images.
band_arrays = [b_ if b_.shape else np.zeros((rows2open, columns2open), dtype=d_type) for b_ in band_arrays]
return np.array(band_arrays,
dtype=d_type).reshape(len(bands2open),
rows2open,
columns2open).transpose(1, 2, 0).reshape(rows2open*columns2open,
len(bands2open))
else:
return np.array(band_arrays, dtype=d_type).reshape(len(bands2open), rows2open, columns2open)
def read(image2open=None,
i_info=None,
bands2open=1,
i=0,
j=0,
rows=-1,
cols=-1,
d_type=None,
predictions=False,
sort_bands2open=True,
y=0.,
x=0.,
n_jobs=0):
"""
Reads a raster as an array
Args:
image2open (Optional[str]): An image to open. Default is None.
i_info (Optional[object]): An instance of `ropen`. Default is None
bands2open (Optional[int list or int]): Band position to open or list of bands to open. Default is 1.
Examples:
bands2open = 1 (open band 1)
bands2open = [1,2,3] (open first three bands)
bands2open = -1 (open all bands)
i (Optional[int]): Starting row position. Default is 0, or first row.
j (Optional[int]): Starting column position. Default is 0, or first column.
rows (Optional[int]): Number of rows to extract. Default is all rows.
cols (Optional[int]): Number of columns to extract. Default is all columns.
d_type (Optional[str]): Type of array to return. Default is None, or gathered from <i_info>.
Choices are ['uint8', 'int8', 'uint16', 'uint32', 'int16', 'float32', 'float64'].
predictions (Optional[bool]): Whether to return reshaped array for predictions.
sort_bands2open (Optional[bool]): Whether to sort ``bands2open``. Default is True.
y (Optional[float]): A y index coordinate. Default is 0. If greater than 0, overrides `i`.
x (Optional[float]): A x index coordinate. Default is 0. If greater than 0, overrides `j`.
n_jobs (Optional[int]): The number of bands to open in parallel. Default is 0.
Attributes:
array (ndarray)
Returns:
Ndarray where [rows, cols] if 1 band and [bands, rows, cols] if more than 1 band
Examples:
>>> import mpglue as gl
>>>
>>> array = gl.read('image.tif')
>>>
>>> array = gl.read('image.tif', bands2open=[1, 2, 3])
>>> print(array.shape)
>>>
>>> array = gl.read('image.tif', bands2open={'green': 3, 'nir': 4})
>>> print(len(array))
>>> print(array['nir'].shape)
"""
if not isinstance(i_info, ropen) and not isinstance(image2open, str):
logger.error('Either `i_info` or `image2open` must be declared.')
raise MissingRequirement
elif isinstance(i_info, ropen) and isinstance(image2open, str):
logger.error('Choose either `i_info` or `image2open`, but not both.')
raise OverflowError
elif not isinstance(i_info, ropen) and isinstance(image2open, str):
i_info = ropen(image2open)
rrows = copy.copy(rows)
ccols = copy.copy(cols)
if rrows == -1:
rrows = i_info.rows
else:
if rrows > i_info.rows:
rrows = i_info.rows
logger.warning(' The requested rows cannot be larger than the image rows.')
if ccols == -1:
ccols = i_info.cols
else:
if ccols > i_info.cols:
ccols = i_info.cols
logger.warning(' The requested columns cannot be larger than the image columns.')
#################
# Bounds checking
#################
# Row indices
if i < 0:
i = 0
if i >= i_info.rows:
i = i_info.rows - 1
# Number of rows
rrows = n_rows_cols(i, rrows, i_info.rows)
# Column indices
if j < 0:
j = 0
if j >= i_info.cols:
j = i_info.cols - 1
# Number of columns
ccols = n_rows_cols(j, ccols, i_info.cols)
if isinstance(bands2open, list):
if len(bands2open) == 0:
raise ValueError('\nA band list must be declared.\n')
if max(bands2open) > i_info.bands:
raise ValueError('\nThe requested band position cannot be greater than the image bands.\n')
elif isinstance(bands2open, int):
if bands2open > i_info.bands:
raise ValueError('\nThe requested band position cannot be greater than the image bands.\n')
if bands2open == -1:
bands2open = list(range(1, i_info.bands+1))
else:
bands2open = [bands2open]
if sort_bands2open:
bands2open = sorted(bands2open)
# Index the image by x, y coordinates (in map units).
if abs(y) > 0:
__, __, __, i = vector_tools.get_xy_offsets(i_info, x=x, y=y)
if abs(x) > 0:
__, __, j, __ = vector_tools.get_xy_offsets(i_info, x=x, y=y)
if (n_jobs in [0, 1]) and not predictions:
kwargs = dict(bands2open=bands2open,
i=i,
j=j,
rows=rrows,
cols=ccols,
d_type=d_type,
sort_bands2open=sort_bands2open,
y=y,
x=x)
return i_info.read(**kwargs)
else:
# Convert to NumPy dtype.
if isinstance(d_type, str):
d_type = STORAGE_DICT[d_type]
else:
d_type = STORAGE_DICT[i_info.storage.lower()]
# format_dict = {'byte': 'B', 'int16': 'i', 'uint16': 'I', 'float32': 'f', 'float64': 'd'}
if n_jobs in [0, 1]:
values = np.asarray([i_info.datasource.GetRasterBand(band).ReadAsArray(j, i, ccols, rrows)
for band in bands2open], dtype=d_type)
# values = struct.unpack('%d%s' % ((rows * cols * len(bands2open)), format_dict[i_info.storage.lower()]),
# i_info.datasource.ReadRaster(yoff=i, xoff=j, xsize=cols, ysize=rows, band_list=bands2open))
if predictions:
return values.reshape(len(bands2open), rrows, ccols).transpose(1, 2, 0).reshape(rrows*ccols,
len(bands2open))
else:
if len(bands2open) == 1:
return values.reshape(rrows, ccols)
else:
return values.reshape(len(bands2open), rrows, ccols)
# only close the image if it was opened internally
# if isinstance(image2open, str):
# i_info.close()
else:
return _read_parallel(image2open, i_info, bands2open, i, j, rrows, ccols, n_jobs, d_type, predictions)
def build_vrt(file_list,
output_image,
cell_size=0.0,
return_datasource=False,
overwrite=False,
**kwargs):
"""
Build a VRT file
Args:
file_list (str list): A list of files.
output_image (str): The output image.
cell_size (Optional[float]): The output cell size. Default is 0.
return_datasource (Optional[bool]): Whether to return the raster datasource. Default is False.
overwrite (Optional[bool]): Whether to overwrite an existing VRT file. Default is False.
kwargs:
resolution=None,
outputBounds=None (minX, minY, maxX, maxY),
targetAlignedPixels=None,
separate=None,
bandList=None,
addAlpha=None,
resampleAlg=None,
outputSRS=None,
allowProjectionDifference=None,
srcNodata=None,
VRTNodata=None,
hideNodata=None,
callback=None,
callback_data=None
"""
if overwrite:
if os.path.isfile(output_image):
os.remove(output_image)
vrt_options = gdal.BuildVRTOptions(xRes=cell_size,
yRes=cell_size,
**kwargs)
out_ds = gdal.BuildVRT(output_image,
file_list,
options=vrt_options)
if return_datasource:
return out_ds
else:
out_ds = None
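# Illustrative `build_vrt` usage (assumes the input rasters exist):
# >>> build_vrt(['img_a.tif', 'img_b.tif'], 'mosaic.vrt', overwrite=True)
# >>> # Stack the inputs as separate bands instead of mosaicking:
# >>> build_vrt(['img_a.tif', 'img_b.tif'], 'stack.vrt', separate=True)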
def _merge_dicts(dict1, dict2):
dict3 = dict1.copy()
dict3.update(dict2)
return dict3
def warp(input_image,
output_image,
out_epsg=None,
out_proj=None,
in_epsg=None,
in_proj=None,
resample='nearest',
cell_size=0,
d_type=None,
return_datasource=False,
overwrite=False,
**kwargs):
"""
Warp transforms a dataset
Args:
input_image (str): The image to warp.
output_image (str): The output image.
out_epsg (Optional[int]): The output EPSG projection code.
out_proj (Optional[str]): The output proj4 projection code.
in_epsg (Optional[int]): An input EPSG code. Default is None.
in_proj (Optional[str]): An input projection string. Default is None.
resample (Optional[str]): The resampling method. Default is 'nearest'.
cell_size (Optional[float]): The output cell size. Default is 0.
d_type (Optional[str]): Data type to overwrite `outputType`. Default is None.
return_datasource (Optional[bool]): Whether to return the datasource object. Default is False.
overwrite (Optional[bool]): Whether to overwrite `out_vrt`, if it exists. Default is False.
kwargs:
format=None, outputBounds=None (minX, minY, maxX, maxY),
outputBoundsSRS=None, targetAlignedPixels=False,
width=0, height=0, srcAlpha=False, dstAlpha=False, warpOptions=None,
errorThreshold=None, warpMemoryLimit=None,
creationOptions=None, outputType=0, workingType=0,
resampleAlg=resample_dict[resample], srcNodata=None, dstNodata=None,
multithread=False, tps=False, rpc=False, geoloc=False,
polynomialOrder=None, transformerOptions=None, cutlineDSName=None,
cutlineLayer=None, cutlineWhere=None, cutlineSQL=None,
cutlineBlend=None, cropToCutline=False, copyMetadata=True,
metadataConflictValue=None, setColorInterpretation=False,
callback=None, callback_data=None
E.g.,
creationOptions=['GDAL_CACHEMAX=256', 'TILED=YES']
Returns:
None, writes to `output_image'.
Examples:
>>> from mpglue import raster_tools
>>>
>>> # Resample a subset of an image in memory
>>> warp_info = raster_tools.warp('/input_image.tif',
>>> 'memory_image.mem',
>>> resample='nearest',
>>> cell_size=10.0,
>>> return_datasource=True,
>>> outputBounds=[<left, bottom, right, top>])
>>>
>>> # Load the resampled array
>>> resampled_array = warp_info.read()
"""
if output_image.endswith('.mem'):
while True:
output_image = '{:f}'.format(abs(np.random.randn(1)[0]))[-5:] + '.mem'
if not os.path.isfile(output_image):
break
else:
d_name, f_name = os.path.split(output_image)
if not d_name and not os.path.isabs(f_name):
d_name = os.path.abspath('.')
else:
check_and_create_dir(d_name)
if isinstance(out_epsg, int):
out_proj = 'EPSG:{:d}'.format(out_epsg)
if isinstance(in_epsg, int):
in_proj = 'EPSG:{:d}'.format(in_epsg)
if cell_size == 0:
cell_size = (None, None)
else:
cell_size = (cell_size, -cell_size)
if overwrite:
if os.path.isfile(output_image):
os.remove(output_image)
if isinstance(d_type, str):
awargs = _merge_dicts(dict(srcSRS=in_proj,
dstSRS=out_proj,
xRes=cell_size[0],
yRes=cell_size[1],
outputType=STORAGE_DICT_GDAL[d_type],
resampleAlg=RESAMPLE_DICT[resample]),
kwargs)
else:
awargs = _merge_dicts(dict(srcSRS=in_proj,
dstSRS=out_proj,
xRes=cell_size[0],
yRes=cell_size[1],
resampleAlg=RESAMPLE_DICT[resample]),
kwargs)
warp_options = gdal.WarpOptions(**awargs)
try:
out_ds = gdal.Warp(output_image,
input_image,
options=warp_options)
except:
if 'outputBounds' in awargs:
logger.info(' Input image extent:')
with ropen(input_image) as info:
logger.info(info.extent)
info = None
logger.info('')
logger.info(' Requested image extent (left, bottom, right, top):')
logger.info(awargs['outputBounds'])
logger.warning(' GDAL returned an exception--check the output file, {}.'.format(output_image))
out_ds = None
if return_datasource:
if out_ds is None:
return None
i_info = ImageInfo()
i_info.update_info(datasource=out_ds,
hdf_file=False,
output_image=output_image)
i_info.datasource_info()
out_ds = None
return i_info
else:
out_ds = None
if output_image.endswith('.mem'):
gdal.Unlink(output_image)
def translate(input_image,
output_image,
cell_size=0,
d_type=None,
return_datasource=False,
overwrite=False,
**kwargs):
"""
Args:
input_image (str): The image to translate.
output_image (str): The output image.
cell_size (Optional[float]): The output cell size. Default is 0.
d_type (Optional[str]): Data type to overwrite `outputType`. Default is None.
return_datasource (Optional[bool]): Whether to return the datasource object. Default is False.
overwrite (Optional[bool]): Whether to overwrite `out_vrt`, if it exists. Default is False.
kwargs:
format='GTiff', outputType=0, bandList=None, maskBand=None, width=0, height=0,
widthPct=0.0, heightPct=0.0, xRes=0.0, yRes=0.0, creationOptions=None, srcWin=None,
projWin=None [ulx, uly, lrx, lry], projWinSRS=None, strict=False, unscale=False,
scaleParams=None [[srcmin, srcmax, dstmin, dstmax]],
exponents=None, outputBounds=None, metadataOptions=None, outputSRS=None, GCPs=None,
noData=None, rgbExpand=None, stats=False, rat=True, resampleAlg=None,
callback=None, callback_data=None
Examples:
>>> from mpglue import raster_tools
>>>
>>> raster_tools.translate('input.tif', 'output.tif',
>>> cell_size=30.,
>>> format='GTiff', d_type='byte',
>>> creationOptions=['GDAL_CACHEMAX=256', 'TILED=YES'])
"""
d_name, f_name = os.path.split(output_image)
if not d_name and not os.path.isabs(f_name):
d_name = os.path.abspath('.')
else:
check_and_create_dir(d_name)
if overwrite:
if os.path.isfile(output_image):
os.remove(output_image)
if isinstance(d_type, str):
translate_options = gdal.TranslateOptions(xRes=cell_size,
yRes=cell_size,
outputType=STORAGE_DICT_GDAL[d_type],
**kwargs)
else:
translate_options = gdal.TranslateOptions(xRes=cell_size,
yRes=cell_size,
**kwargs)
try:
out_ds = gdal.Translate(output_image, input_image, options=translate_options)
except:
logger.warning(' GDAL returned an exception--check the output file, {}.'.format(output_image))
out_ds = None
if return_datasource:
if out_ds is None:
return None
i_info = ImageInfo()
i_info.update_info(datasource=out_ds,
hdf_file=False)
i_info.datasource_info()
return i_info
else:
out_ds = None
def vis2rgb(image_array):
"""
Converts a layer x rows x columns array to RGB
"""
return image_array.transpose(1, 2, 0)
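# Example: reshape a (bands, rows, cols) array for matplotlib's imshow.
# >>> rgb = vis2rgb(array3)  # (3, rows, cols) --> (rows, cols, 3)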
class create_raster(CreateDriver, FileManager, UpdateInfo):
"""
Creates a raster driver to write to.
Args:
out_name (str): Output raster name.
o_info (object): Instance of ``ropen``.
compress (Optional[str]): The type of compression to use. Default is 'deflate'.
Choices are ['none', 'lzw', 'packbits', 'deflate'].
bigtiff (Optional[str]): How to manage large TIFF files. Default is 'no'.
Choices are ['yes', 'no', 'if_needed', 'if_safer'].
tile (Optional[bool]): Whether to tile the new image. Default is True.
project_epsg (Optional[int]): Project the new raster to an EPSG code projection.
create_tiles (Optional[int]): If positive, the image is created in separate file tiles. Default is 0.
overwrite (Optional[bool]): Whether to overwrite an existing file. Default is False.
in_memory (Optional[bool]): Whether to create the raster dataset in memory. Default is False.
Attributes:
filename (str)
rows (int)
cols (int)
bands (int)
storage (str)
Returns:
Raster driver GDAL object or list of GDAL objects (if create_tiles > 0).
"""
def __init__(self,
out_name,
o_info,
compress='deflate',
tile=True,
bigtiff='no',
project_epsg=None,
create_tiles=0,
overwrite=False,
in_memory=False,
**kwargs):
if not in_memory:
d_name, f_name = os.path.split(out_name)
f_base, f_ext = os.path.splitext(f_name)
if not d_name and not os.path.isabs(f_name):
d_name = os.path.abspath('.')
else:
check_and_create_dir(d_name)
storage_type = STORAGE_DICT_GDAL[o_info.storage.lower()] if 'storage' not in kwargs \
else STORAGE_DICT_GDAL[kwargs['storage'].lower()]
out_rows = o_info.rows if 'rows' not in kwargs else kwargs['rows']
out_cols = o_info.cols if 'cols' not in kwargs else kwargs['cols']
n_bands = o_info.bands if 'bands' not in kwargs else kwargs['bands']
projection = o_info.projection if 'projection' not in kwargs else kwargs['projection']
cellY = o_info.cellY if 'cellY' not in kwargs else kwargs['cellY']
cellX = o_info.cellX if 'cellX' not in kwargs else kwargs['cellX']
left = o_info.left if 'left' not in kwargs else kwargs['left']
top = o_info.top if 'top' not in kwargs else kwargs['top']
if tile:
tile = 'YES'
else:
tile = 'NO'
if abs(cellY) == 0:
raise ValueError('The cell y size must be greater than 0.')
if abs(cellX) == 0:
raise ValueError('The cell x size must be greater than 0.')
if cellX > 0:
cellX *= -1.
if cellY < 0:
cellY *= -1.
if out_name.lower().endswith('.img'):
if compress.upper() == 'NONE':
parameters = ['COMPRESS=NO']
else:
parameters = ['COMPRESS=YES']
elif out_name.lower().endswith('.tif'):
if compress.upper() == 'NONE':
parameters = ['TILED={}'.format(tile),
'BIGTIFF={}'.format(bigtiff.upper())]
else:
parameters = ['TILED={}'.format(tile),
'COMPRESS={}'.format(compress.upper()),
'BIGTIFF={}'.format(bigtiff.upper())]
elif (out_name.lower().endswith('.dat')) or (out_name.lower().endswith('.bin')):
parameters = ['INTERLEAVE=BSQ']
elif out_name.lower().endswith('.kea'):
parameters = ['DEFLATE=1']
else:
parameters = list()
if isinstance(project_epsg, int):
osng = osr.SpatialReference()
osng.ImportFromWkt(o_info.projection)
srs = osr.SpatialReference()
srs.ImportFromEPSG(project_epsg)
new_projection = srs.ExportToWkt()
tx = osr.CoordinateTransformation(osng, srs)
# Work out the boundaries of the new dataset in the target projection
ulx, uly, ulz = tx.TransformPoint(o_info.left, o_info.top)
lrx, lry, lrz = tx.TransformPoint(o_info.left + o_info.cellY*o_info.cols,
o_info.top + o_info.cellX*o_info.rows)
# project_rows = int((uly - lry) / o_info.cellY)
# project_cols = int((lrx - ulx) / o_info.cellY)
# Calculate the new geotransform
new_geo = [ulx, o_info.cellY, o_info.rotation1, uly, o_info.rotation2, o_info.cellX]
# out_rows = int((uly - lry) / o_info.cellY)
# out_cols = int((lrx - ulx) / o_info.cellY)
# Create driver for output image.
if create_tiles > 0:
d_name_tiles = os.path.join(d_name, '{}_tiles'.format(f_base))
if not os.path.isdir(d_name_tiles):
os.makedirs(d_name_tiles)
out_rst = {}
if out_rows >= create_tiles:
blk_size_rows = create_tiles
else:
blk_size_rows = copy.copy(out_rows)
if out_cols >= create_tiles:
blk_size_cols = create_tiles
else:
blk_size_cols = copy.copy(out_cols)
topo = copy.copy(top)
image_counter = 1
for i in range(0, out_rows, blk_size_rows):
lefto = copy.copy(left)
# Use local tile dimensions so the loop ranges are not mutated.
tile_rows = n_rows_cols(i, blk_size_rows, out_rows)
for j in range(0, out_cols, blk_size_cols):
tile_cols = n_rows_cols(j, blk_size_cols, out_cols)
out_name = os.path.join(d_name_tiles, '{}_{:d}_{:d}{}'.format(f_base, i, j, f_ext))
out_rst[image_counter] = out_name
image_counter += 1
if overwrite:
if os.path.isfile(out_name):
try:
os.remove(out_name)
except OSError:
raise OSError('\nCould not delete {}.'.format(out_name))
else:
if os.path.isfile(out_name):
logger.warning('\n{} already exists.'.format(out_name))
continue
CreateDriver.__init__(self,
out_name,
tile_rows,
tile_cols,
n_bands,
storage_type,
in_memory,
overwrite,
parameters)
# set the geo-transformation
self.datasource.SetGeoTransform([lefto, cellY, 0.0, topo, 0.0, cellX])
# set the projection
self.datasource.SetProjection(projection)
self.close_file()
lefto += (tile_cols * cellY)
topo -= (tile_rows * cellY)
else:
if not in_memory:
if overwrite:
if os.path.isfile(out_name):
try:
os.remove(out_name)
except:
logger.warning(' Could not delete {}.\nWill attempt to write over the image'.format(out_name))
else:
if os.path.isfile(out_name):
logger.warning(' {} already exists.\nWill not attempt to overwrite.'.format(out_name))
return
CreateDriver.__init__(self,
out_name,
out_rows,
out_cols,
n_bands,
storage_type,
in_memory,
overwrite,
parameters)
# FileManager.__init__(self)
# self.datasource = self.driver.Create(out_name, out_cols, out_rows, bands, storage_type, parameters)
if isinstance(project_epsg, int):
# set the geo-transformation
self.datasource.SetGeoTransform(new_geo)
# set the projection
self.datasource.SetProjection(new_projection)
# gdal.ReprojectImage(o_info.datasource, out_rst, o_info.proj, new_projection, GRA_NearestNeighbour)
else:
# Set the geo-transformation.
self.datasource.SetGeoTransform([left, cellY, 0., top, 0., cellX])
# Set the projection.
self.datasource.SetProjection(projection)
self.filename = out_name
self.rows = out_rows
self.cols = out_cols
self.bands = n_bands
self.storage = storage_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close_all()
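# A minimal `create_raster` sketch (illustrative; assumes 'template.tif' exists
# to copy the georeferencing from):
# >>> with ropen('template.tif') as i_info:
# >>>     o_info = i_info.copy()
# >>>     o_info.update_info(bands=1, storage='float32')
# >>>     with create_raster('zeros.tif', o_info) as o_rst:
# >>>         o_rst.write_array(np.zeros((o_info.rows, o_info.cols), dtype='float32'), band=1)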
# @deprecation.deprecated(deprecated_in='0.1.3',
# removed_in='0.1.5',
# current_version=__version__,
# details='Variables `x` and `y` will be replaced with `j` and `i`, respectively.')
def write2raster(out_array,
out_name,
o_info=None,
x=0,
y=0,
out_rst=None,
write2bands=None,
close_band=True,
flush_final=False,
write_chunks=False,
**kwargs):
"""
Writes an ndarray to file.
Args:
out_array (ndarray): The array to write to file.
out_name (str): The output image name.
o_info (Optional[object]): Output image information. Needed if ``out_rst`` not given. Default is None.
x (Optional[int]): Column starting position. Default is 0.
y (Optional[int]): Row starting position. Default is 0.
out_rst (Optional[object]): GDAL object to write to, otherwise created. Default is None.
write2bands (Optional[int or int list]): Band positions to write to, otherwise takes the order of the input
array dimensions. Default is None.
close_band (Optional[bool]): Whether to flush the band cache. Default is True.
flush_final (Optional[bool]): Whether to flush the raster cache. Default is False.
write_chunks (Optional[bool]): Whether to write to file in chunks (using the raster chunk size). Default is False.
kwargs (Optional[dict]): Arguments passed to `create_raster`.
Returns:
None, writes <out_name>.
Examples:
>>> # Example
>>> from mpglue import raster_tools
>>> i_info = raster_tools.ropen('/in_raster.tif')
>>>
>>> out_array = np.random.randn(3, 100, 100).astype(np.float32)
>>>
>>> raster_tools.write2raster(out_array,
>>> '/out_name.tif',
>>> o_info=i_info.copy())
"""
# Get the output information.
d_name, f_name = os.path.split(out_name)
if not d_name and not os.path.isabs(f_name):
d_name = os.path.abspath('.')
else:
check_and_create_dir(d_name)
array_shape = out_array.shape
if len(array_shape) > 3:
logger.error('The array shape should be 2d or 3d.')
raise ArrayShapeError
if len(array_shape) == 2:
out_rows, out_cols = out_array.shape
out_dims = 1
else:
out_dims, out_rows, out_cols = out_array.shape
new_file = False
# Does the file need to be created?
if not out_rst:
if not isinstance(o_info, ropen):
if not isinstance(o_info, ImageInfo):
logger.error('The output information must be set.')
raise ropenError
new_file = True
o_info.update_info(bands=out_dims,
rows=out_rows,
cols=out_cols)
if kwargs:
out_rst = create_raster(out_name, o_info, **kwargs)
else:
out_rst = create_raster(out_name, o_info)
##########################
# pack the data to binary
##########################
# format_dict = {'byte': 'B', 'int16': 'i', 'uint16': 'I', 'float32': 'f', 'float64': 'd'}
# specifiy a band to write to
if isinstance(write2bands, int) or isinstance(write2bands, list):
if isinstance(write2bands, int):
write2bands = [write2bands]
for n_band in write2bands:
out_rst.get_band(n_band)
if write_chunks:
out_rst.get_chunk_size()
for i in range(0, out_rst.rows, out_rst.chunk_size):
n_rows = n_rows_cols(i, out_rst.chunk_size, out_rst.rows)
for j in range(0, out_rst.cols, out_rst.chunk_size):
n_cols = n_rows_cols(j, out_rst.chunk_size, out_rst.cols)
out_rst.write_array(out_array[i:i+n_rows, j:j+n_cols], i=i, j=j)
else:
out_rst.write_array(out_array, i=y, j=x)
if close_band:
out_rst.close_band()
else:
if out_dims >= 2:
for n_band in range(1, out_dims+1):
out_rst.write_array(out_array[n_band-1], i=y, j=x, band=n_band)
if close_band:
out_rst.close_band()
else:
out_rst.write_array(out_array, i=y, j=x, band=1)
if close_band:
out_rst.close_band()
# close the dataset if it was created or prompted by <flush_final>
if flush_final or new_file:
out_rst.close_file()
out_rst = None
class GetMinExtent(UpdateInfo):
"""
Args:
info1 (ropen or GetMinExtent object)
info2 (ropen or GetMinExtent object)
Attributes:
Inherits from ``info1``.
"""
def __init__(self, info1, info2):
if not isinstance(info1, ropen):
if not isinstance(info1, GetMinExtent):
if not isinstance(info1, ImageInfo):
raise TypeError('The first info argument must be an instance of ropen, GetMinExtent, or ImageInfo.')
if not isinstance(info2, ropen):
if not isinstance(info2, GetMinExtent):
if not isinstance(info2, ImageInfo):
if not isinstance(info2, vector_tools.vopen):
raise TypeError('The second info argument must be an instance of ropen, vopen, GetMinExtent, or ImageInfo.')
# Pass the image info properties.
attributes = inspect.getmembers(info1, lambda ia: not (inspect.isroutine(ia)))
attributes = [ia for ia in attributes if not (ia[0].startswith('__') and ia[0].endswith('__'))]
for attribute in attributes:
setattr(self, attribute[0], attribute[1])
self.get_overlap_info(info2)
def copy(self):
return copy.copy(self)
def close(self):
pass
def get_overlap_info(self, info2):
self.left = np.maximum(self.left, info2.left)
self.right = np.minimum(self.right, info2.right)
self.top = np.minimum(self.top, info2.top)
self.bottom = np.maximum(self.bottom, info2.bottom)
if (self.left < 0) and (self.right < 0) or (self.left >= 0) and (self.right >= 0):
self.cols = int(abs(abs(self.right) - abs(self.left)) / self.cellY)
elif (self.left < 0) and (self.right >= 0):
self.cols = int(abs(abs(self.right) + abs(self.left)) / self.cellY)
if (self.top < 0) and (self.bottom < 0) or (self.top >= 0) and (self.bottom >= 0):
self.rows = int(abs(abs(self.top) - abs(self.bottom)) / self.cellY)
elif (self.top >= 0) and (self.bottom < 0):
self.rows = int(abs(abs(self.top) + abs(self.bottom)) / self.cellY)
# Rounded dimensions for aligning pixels.
left_max = np.minimum(self.left, info2.left)
top_max = np.maximum(self.top, info2.top)
if (left_max < 0) and (self.left < 0):
n_col_pixels = int((abs(left_max) - abs(self.left)) / self.cellY)
self.left_rounded = left_max + (n_col_pixels * self.cellY)
elif (left_max >= 0) and (self.left >= 0):
n_col_pixels = int((abs(left_max) - abs(self.left)) / self.cellY)
self.left_rounded = left_max + (n_col_pixels * self.cellY)
elif (left_max < 0) and (self.left >= 0):
n_col_pixels1 = int(abs(left_max) / self.cellY)
n_col_pixels2 = int(abs(self.left) / self.cellY)
self.left_rounded = left_max + (n_col_pixels1 * self.cellY) + (n_col_pixels2 * self.cellY)
if (top_max >= 0) and (self.top >= 0):
n_row_pixels = int((abs(top_max) - abs(self.top)) / self.cellY)
self.top_rounded = top_max - (n_row_pixels * self.cellY)
elif (top_max < 0) and (self.top < 0):
n_row_pixels = int((abs(top_max) - abs(self.top)) / self.cellY)
self.top_rounded = top_max - (n_row_pixels * self.cellY)
elif (top_max >= 0) and (self.top < 0):
n_row_pixels1 = int(abs(top_max) / self.cellY)
n_row_pixels2 = int(abs(self.top) / self.cellY)
self.top_rounded = top_max - (n_row_pixels1 * self.cellY) - (n_row_pixels2 * self.cellY)
if (self.left_rounded < 0) and (self.right < 0):
n_col_pixels_r = int((abs(self.left_rounded) - abs(self.right)) / self.cellY)
self.right_rounded = self.left_rounded + (n_col_pixels_r * self.cellY)
elif (self.left_rounded >= 0) and (self.right >= 0):
n_col_pixels_r = int((abs(self.left_rounded) - abs(self.right)) / self.cellY)
self.right_rounded = self.left_rounded + (n_col_pixels_r * self.cellY)
elif (self.left_rounded < 0) and (self.right >= 0):
n_col_pixels_r1 = int(abs(self.left_rounded) / self.cellY)
n_col_pixels_r2 = int(abs(self.right) / self.cellY)
self.right_rounded = self.left_rounded + (n_col_pixels_r1 * self.cellY) + (n_col_pixels_r2 * self.cellY)
if (self.top_rounded < 0) and (self.bottom < 0):
n_row_pixels_r = int((abs(self.top_rounded) - abs(self.bottom)) / self.cellY)
self.bottom_rounded = self.top_rounded - (n_row_pixels_r * self.cellY)
elif (self.top_rounded >= 0) and (self.bottom >= 0):
n_row_pixels_r = int((abs(self.top_rounded) - abs(self.bottom)) / self.cellY)
self.bottom_rounded = self.top_rounded - (n_row_pixels_r * self.cellY)
elif (self.top_rounded >= 0) and (self.bottom < 0):
n_row_pixels_r1 = int(abs(self.top_rounded) / self.cellY)
n_row_pixels_r2 = int(abs(self.bottom) / self.cellY)
self.bottom_rounded = self.top_rounded - (n_row_pixels_r1 * self.cellY) - (n_row_pixels_r2 * self.cellY)
def get_min_extent(image1, image2):
"""
Finds the minimum extent of two rasters
Args:
image1 (dict or object): The first image. If a ``dict``, {left: <left>, right: <right>,
top: <top>, bottom: <bottom>}.
image2 (dict or object): The second image. If a ``dict``, {left: <left>, right: <right>,
top: <top>, bottom: <bottom>}.
Returns:
List as [left, right, top, bottom].
"""
if isinstance(image1, dict):
left1 = image1['left']
top1 = image1['top']
right1 = image1['right']
bottom1 = image1['bottom']
else:
left1 = image1.left
top1 = image1.top
right1 = image1.right
bottom1 = image1.bottom
if isinstance(image2, dict):
left2 = image2['left']
top2 = image2['top']
right2 = image2['right']
bottom2 = image2['bottom']
else:
left2 = image2.left
top2 = image2.top
right2 = image2.right
bottom2 = image2.bottom
left = np.maximum(left1, left2)
right = np.minimum(right1, right2)
top = np.minimum(top1, top2)
bottom = np.maximum(bottom1, bottom2)
return left, right, top, bottom
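# Worked example for `get_min_extent` with dictionary inputs:
# >>> e1 = dict(left=0., right=100., top=100., bottom=0.)
# >>> e2 = dict(left=50., right=150., top=80., bottom=-20.)
# >>> get_min_extent(e1, e2)  # --> (50.0, 100.0, 80.0, 0.0)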
def get_min_extent_list(image_list):
lefto = image_list[0].left
righto = image_list[0].right
topo = image_list[0].top
bottomo = image_list[0].bottom
cell_size = image_list[0].cellY
for img in image_list[1:]:
lefto, righto, topo, bottomo = \
get_min_extent(dict(left=lefto, right=righto, top=topo, bottom=bottomo),
dict(left=img.left, right=img.right, top=img.top, bottom=img.bottom))
# Check for East/West, positive/negative dividing line.
if (righto >= 0) and (lefto <= 0):
cs = int((abs(lefto) + righto) / cell_size)
else:
cs = int(abs(abs(righto) - abs(lefto)) / cell_size)
if (topo >= 0) and (bottomo <= 0):
rs = int((abs(bottomo) + topo) / cell_size)
else:
rs = int(abs(abs(topo) - abs(bottomo)) / cell_size)
return [lefto, topo, righto, bottomo, -cell_size, cell_size, rs, cs]
def get_new_dimensions(image_info, kernel_size):
"""
Gets new [output] image dimensions based on kernel size used in processing.
Args:
image_info (object)
kernel_size (int)
Returns:
The updated ``image_info`` object with new rows, columns, and cell sizes.
"""
image_info.rows = int(np.ceil(float(image_info.rows) / float(kernel_size)))
image_info.cols = int(np.ceil(float(image_info.cols) / float(kernel_size)))
image_info.cellY = float(kernel_size) * float(image_info.cellY)
image_info.cellX = float(kernel_size) * float(image_info.cellX)
return image_info
def n_rows_cols(pixel_index, block_size, rows_cols):
"""
Adjusts block size for the end of image rows and columns.
Args:
pixel_index (int): The current pixel row or column index.
block_size (int): The image block size.
rows_cols (int): The total number of rows or columns in the image.
Example:
>>> n_rows = 5000
>>> block_size = 1024
>>> i = 4050
>>> adjusted_block_size = n_rows_cols(i, block_size, n_rows)
Returns:
Adjusted block size as int.
"""
return block_size if (pixel_index + block_size) < rows_cols else rows_cols - pixel_index
def n_i_j(pixel_index, offset):
"""
Args:
pixel_index (int): Current pixel index.
offset (int): The offset to subtract from the pixel index.
Returns:
int
"""
if pixel_index - offset < 0:
samp_out = 0
else:
samp_out = pixel_index - offset
return samp_out
def block_dimensions(image_rows, image_cols, row_block_size=1024, col_block_size=1024):
"""
Args:
image_rows (int): The number of image rows.
image_cols (int): The number of image columns.
row_block_size (Optional[int]): Default is 1024.
col_block_size (Optional[int]): Default is 1024.
Returns:
Row dimensions, Column dimensions
"""
# set the block dimensions
if image_rows >= row_block_size:
row_blocks = row_block_size
else:
row_blocks = copy.copy(image_rows)
if image_cols >= col_block_size:
col_blocks = col_block_size
else:
col_blocks = copy.copy(image_cols)
return row_blocks, col_blocks
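# Example: block sizes are clamped to the image dimensions.
# >>> block_dimensions(600, 5000)  # --> (600, 1024)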
def stats_func(im,
ignore_value=None,
stat=None,
stats_functions=None,
set_below=None,
set_above=None,
set_common=None,
no_data_value=None):
im = im[0][:]
if isinstance(ignore_value, int):
stat = 'nan{}'.format(stat)
im[im == ignore_value] = np.nan
if stat in stats_functions:
out_array = stats_functions[stat](im, axis=0)
elif stat == 'nancv':
out_array = stats_functions['nanstd'](im, axis=0)
out_array /= stats_functions['nanmean'](im, axis=0)
elif stat == 'nanmode':
out_array = sci_mode(im, axis=0, nan_policy='omit')
elif stat == 'cv':
out_array = im.std(axis=0)
out_array /= im.mean(axis=0)
elif stat == 'min':
out_array = im.min(axis=0)
elif stat == 'max':
out_array = im.max(axis=0)
elif stat == 'mean':
out_array = im.mean(axis=0)
elif stat == 'var':
out_array = im.var(axis=0)
elif stat == 'std':
out_array = im.std(axis=0)
elif stat == 'sum':
out_array = im.sum(axis=0)
elif stat == 'zscore':
dims, rows, cols = im.shape
scaler = StandardScaler(with_mean=True, with_std=True)
out_array = columns_to_nd(scaler.fit_transform(nd_to_columns(im, dims, rows, cols)), dims, rows, cols)
# Filter values.
if isinstance(set_below, int):
out_array[out_array < set_below] = no_data_value
if isinstance(set_above, int):
if set_common:
# Mask unwanted to 1 above threshold.
out_array[out_array > set_above] = set_above + 1
# Invert the array values.
__, out_array = cv2.threshold(np.uint8(out_array), 0, 1, cv2.THRESH_BINARY_INV)
# Add the common value among all bands.
out_array *= np.uint8(im[0])
else:
out_array[out_array > set_above] = no_data_value
# Reset no data pixels
out_array[np.isnan(out_array) | np.isinf(out_array)] = no_data_value
return out_array
# -*- coding: utf-8 -*-
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from decorator import decorator
# Moving Average
def MA(ds, n):
ma = pd.Series(ds.rolling(n).mean(), name='MA_' + str(n))
return ma
# difference between short MA and long MA
def diffMA(ds, l=60, s=5):
"""
ds: dataset is pandas data series
"""
ma_l = ds.rolling(l, min_periods=l).mean()
ma_s = ds.rolling(s, min_periods=s).mean()
return (ma_s/ma_l)-1
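# Illustrative `diffMA` usage (positive values mean the short MA is above the
# long MA):
# >>> import numpy as np
# >>> import pandas as pd
# >>> prices = pd.Series(np.linspace(100.0, 120.0, 300))
# >>> signal = diffMA(prices, l=60, s=5)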
# Linear Regression
import statsmodels.formula.api as smf
def liner_regression(x,y):
model = smf.OLS(y,x)
results = model.fit()
b = results.params
R = results.rsquared
pvalue = results.pvalues
t='Y=%0.4fX --- R2=%0.2f%% --- p-value=%0.4f' %(b[0], R*100, pvalue[0])
return b,t
# slope of MA
def slopeMA(ds, m=60, dw=5):
ma = ds.rolling(m, min_periods=1).mean()
slope = ma.copy()
x = np.arange(1,dw+1)/100.0
for t in range(dw,len(slope)):
y = ma[t-dw+1:t+1] / ma[t-dw+1:t+1].mean() - 1
slope[t], _ = liner_regression(x,y)
return slope
# garch
def addGARCH(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_garch(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical var
def addVAR(ds, hln=200):
ts = 100*ds.to_returns().dropna()
hts = ts[:hln].values
var = []
# rolling estimate var
while (len(hts)<len(ts)):
f_var, _ = forecast_var_from_constant_mean(hts[-hln:])
var.append(f_var)
hts = np.append(hts, ts.iloc[len(hts)])
#print(max(var), min(var))
var = np.append(np.zeros([len(ds)-len(var),1]), var)
return var
# historical cov
def addCOV(ds1, ds2, hln=200):
ts1 = ds1.to_returns().dropna().values
ts2 = ds2.to_returns().dropna().values
cov = []
#cov.append(np.nan) # add 1 when dropna at prices->returns
for t in range(hln):
cov.append(np.nan)
for t in range(hln, len(ts1)+1):
f_cov = np.cov(ts1[t-hln:t], ts2[t-hln:t])
cov.append(f_cov[0][1]*10000)
return cov
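# Illustrative `addCOV` usage (assumes `p1` and `p2` are pandas Series with an
# ffn-style `to_returns()` method; the first `hln` entries are NaN):
# >>> cov = addCOV(p1, p2, hln=200)
# >>> len(cov) == len(p1)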
# Seek Best Garch Model
import statsmodels.tsa.api as smt
def seek_garch_model(TS):
"""
TS (numpy array): The return series of a price series.
# Seek Best GARCH Model
res_tup = seek_garch_model(ts)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
# Using student T distribution usually provides better fit
am = arch_model(ts, p=p_, o=o_, q=q_, dist='StudentsT')
res = am.fit(update_freq=5, disp='off')
fig = res.plot(annualize='D')
print(res.summary())
ts_plot(res.resid, lags=30)
"""
best_aic = np.inf
best_order = None
best_mdl = None
pq_rng = range(5) # [0,1,2,3,4]
d_rng = range(2) # [0,1]
for i in pq_rng:
for d in d_rng:
for j in pq_rng:
try:
tmp_mdl = smt.ARIMA(TS, order=(i,d,j)).fit(
method='mle', trend='nc'
)
tmp_aic = tmp_mdl.aic
if tmp_aic < best_aic:
best_aic = tmp_aic
best_order = (i, d, j)
best_mdl = tmp_mdl
                except Exception:
                    continue
print('aic: {:6.5f} | order: {}'.format(best_aic, best_order))
return best_aic, best_order, best_mdl
#under arch model scheme
@decorator
def forecast_var(model_est_var, *args, **kwargs):
"""
Use historical data (0 to t) to forecast variance at t+1
via the model (defined in arch)
Args:
* args[0]: returns (numpy array or array): Returns for security.
Returns:
forecast variance: float
residuals: array
"""
if len(args)<1:
raise Exception("Not Enough Parameters")
m = model_est_var(*args, **kwargs)
res = m.fit(update_freq=5, disp='off')
return res.forecast().variance.values[-1][0], res.resid
from arch.univariate import ConstantMean
@forecast_var
def forecast_var_from_constant_mean(returns):
"""
returns is historical returns
"""
return ConstantMean(returns)
from arch import arch_model
@forecast_var
def forecast_var_from_garch(returns):
"""
returns is historical returns
"""
return arch_model(returns, vol='Garch', p=1, o=0, q=1, dist='Normal')
@forecast_var
def forecast_var_from_best(returns):
"""
returns is historical returns
"""
from pyetf.algos import seek_garch_model
from arch import arch_model
res_tup = seek_garch_model(returns)
order = res_tup[1]
p_ = order[0]
o_ = order[1]
q_ = order[2]
return arch_model(returns, p=p_, o=o_, q=q_, dist='StudentsT')
# future mean and var
def future_mean_var(p, negative=False):
"""
p is numpy and prices series in future m dates
negative is True: calculate if p(t) < p(0)
negative is False: calculate all p(t)
"""
m = len(p)
dr = []
if negative:
for d in range(1,m):
if p[d]<p[0]:
dr.append((p[d]/p[0])**(1/d)-1)
if len(dr) == 0:
dr.append(0.)
else:
for d in range(1,m):
dr.append((p[d]/p[0])**(1/d)-1)
mean = np.mean(dr)
var = np.var(dr)
return mean, var
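# Sanity check (added): a steadily rising price series should give a positive
# mean per-day growth rate and a near-zero variance.
if __name__ == "__main__":
    _m, _v = future_mean_var(np.linspace(100.0, 105.0, 30))
    print(_m > 0, _v < 1e-4)  # True True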
# future mean and var
def future_covar(p1, p2=None):
"""
p1 and p2 are numpy and prices series in future fm(30) dates
+ historical hm(200-fm) dates
p1 = p2: calculate var
"""
r1 = np.diff(p1)/p1[0:len(p1)-1]
if p2 is None:
return np.var(r1)
else:
        r2 = np.diff(p2)/p2[0:len(p2)-1]
return np.cov(r1, r2)
# under keras model scheme
def strucutre_keras_model(train_model, addFeatures, addTarget, prices, prices_two=None, model_path="\\keras_model\\"):
"""
* prices: pandas series (or dataframe) with date index and prices
* function will save model estimated by keras
to a h5 file named 'est_var(_ticker_).h5'
* load model
from keras.models import load_model
model_load = load_model('est_var(_ticker_).h5')
"""
# 1. Data Process
if prices_two is None:
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
# 1.2 process data
x_dataset, y_dataset = processData(addFeatures, addTarget, dataset)
else:
dataset, model_filename = initData_two(prices, prices_two, model_path)
x_dataset, y_dataset = processData_two(addFeatures, addTarget, dataset)
# 1.3 split train set and test set
x_train, y_train, x_test, y_test = splitDataset(x_dataset, y_dataset)
# 1.4 shuttle train set
x_train, y_train = shuffleDataset(x_train, y_train)
# 2. Build Model
# 2.1 setup model
# 2.2 train model
model = train_model(x_train, y_train)
# 2.3 save model
model.save(model_filename)
# 3 evaluation
trainScore = model.evaluate(x_train, y_train)
testScore = model.evaluate(x_test, y_test)
print(f"Train Score Loss: {trainScore[0]:0.4f}")
print(f"Test Score Loss: {testScore[0]:0.4f}")
# 4. Plot Results
plt.figure(figsize=(10, 8))
#plt.plot(y_dataset)
#plt.plot(y_predict)
plt.plot(y_test)
plt.plot(model.predict(x_test))
plt.show()
from keras.models import load_model
def load_keras_model(prices, model_path="\\keras_model\\"):
# 1. Data Process
# 1.1 initial data
dataset, model_filename = initData(prices, model_path)
model = load_model(model_filename)
return dataset, model
# structure X and Y from dataset
def buildXY(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.drop(columns='y').values
y_dataset = dataset['y'].values
dataX, dataY = [], []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
dataY.append(y_dataset[t+m-1])
return np.array(dataX), np.array(dataY)
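# Shape check (added): with pastDays=30, a 40-row frame yields 11 windows of
# 30 rows each and one target per window.
if __name__ == "__main__":
    _df = pd.DataFrame({'f': range(40), 'y': range(40)})
    _X, _Y = buildXY(_df, pastDays=30)
    print(_X.shape, _Y.shape)  # (11, 30, 1) (11,)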
# structure X from dataset to forecast
def buildX(dataset, pastDays=30):
"""
Result -> numpy
"""
m = pastDays
x_dataset = dataset.values
dataX = []
for t in range(0, len(dataset)-m+1):
dataX.append(x_dataset[t:(t+m)])
return np.array(dataX)
# normalize dataset
from sklearn.preprocessing import MinMaxScaler
def normalise_windows(window_data):
scaler = MinMaxScaler(feature_range=(0, 1))
normalised_data = []
for window in window_data:
normalised_window = scaler.fit_transform(window)
normalised_data.append(normalised_window)
return normalised_data
# split dataset to train and test
def splitDataset(x_dataset, y_dataset, train_size_ratio=0.6):
train_size = int(len(x_dataset) * train_size_ratio)
x_train, x_test = x_dataset[0:train_size], x_dataset[train_size:len(x_dataset)]
y_train, y_test = y_dataset[0:train_size], y_dataset[train_size:len(y_dataset)]
return np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)
# random train dataset
def shuffleDataset(x, y):
np.random.seed(10)
randomList = np.arange(x.shape[0])
np.random.shuffle(randomList)
return x[randomList], y[randomList]
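# Example (added): the same fixed permutation is applied to x and y, so the
# (x, y) pairs stay aligned after shuffling.
if __name__ == "__main__":
    _x = np.arange(10).reshape(5, 2)
    _y = np.arange(5)
    _xs, _ys = shuffleDataset(_x, _y)
    print(all(_xs[i, 0] // 2 == _ys[i] for i in range(5)))  # True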
# initial Data and model name
def initData(prices, model_path, model_name='est_var'):
if isinstance(prices, pd.core.series.Series):
e = prices.name
dataset = pd.DataFrame(prices)
else:
e = prices.columns[0]
dataset = prices.copy()
print(f"{e}")
dataset = dataset.rename({e:'price'}, axis=1)
model_path = os.getcwd() + model_path
model_filename = model_path + model_name + '(' + e + ').h5'
return dataset, model_filename
# initial Data and model name
def initData_two(prices_one, prices_two, model_path, model_name='est_cov'):
if isinstance(prices_one, pd.core.series.Series):
e1 = prices_one.name
dataset = pd.DataFrame(prices_one)
else:
e1 = prices_one.columns[0]
dataset = prices_one.copy()
dataset = dataset.rename({e1:'price_one'}, axis=1)
if isinstance(prices_two, pd.core.series.Series):
e2 = prices_two.name
dataset[e2] = pd.DataFrame(prices_two)
else:
e2 = prices_two.columns[0]
        dataset[e2] = prices_two[e2]
dataset = dataset.rename({e2:'price_two'}, axis=1)
print(f"{e1} {e2}")
model_path = os.getcwd() + model_path
model_filename = model_path + model_name + '(' + e1+'_'+e2 + ').h5'
return dataset, model_filename
# process data: add features and add Y
def processData(addFeatures, addTarget, dataset):
# 1.2 add features to X
dataset = addFeatures(dataset)
# 1.3 add targets to Y
dataset = addTarget(dataset)
# 1.4 structure train and test data
dataset = dataset.drop(columns='price')
x_dataset, y_dataset = buildXY(dataset)
# 1.5 normalization
#x_dataset = normalise_windows(x_dataset)
return x_dataset, y_dataset
# process data: add features and add Y
def processData_two(addFeatures, addTarget, dataset, pastDays=30):
# 1.2 add features to X
dataset = addFeatures(dataset)
# 1.3 add targets to Y
dataset = addTarget(dataset)
# 1.4 structure train and test data
dataset = dataset.dropna()
dataset = dataset.drop(columns='price_one')
dataset = dataset.drop(columns='price_two')
#print(dataset.head())
#print(dataset.tail())
x_dataset, y_dataset = buildXY(dataset, pastDays)
# 1.5 normalization
#x_dataset = normalise_windows(x_dataset)
return x_dataset, y_dataset
# lstm var
from time import process_time
def forecast_var_from_lstm(addFeatures, prices, model_path="\\keras_model\\"):
"""
Prices is one asset's price data, in either DataFrame or Pandas Series
"""
# Initializing Data and Load Model
start_time = process_time()
dataset, model = load_keras_model(prices)
print(f"load data and model: {process_time()-start_time:0.4f}s")
start_time = process_time()
dataset = addFeatures(dataset)
x_dataset = dataset.drop(columns='price')
x_dataset = buildX(x_dataset)
print(f"process dataset: {process_time()-start_time:0.4f}s")
start_time = process_time()
f_var = np.append(np.zeros([len(prices)-len(x_dataset),1]), model.predict( | np.array(x_dataset) | numpy.array |
#TODO: create an error-plot class
import pyaudio
import numpy as np
import matplotlib.pyplot as plt
import time
import queue
import keyboard
import enum
from scipy.io.wavfile import write
import os
#class used to set the direction of shift or amplitude
class Direction(enum.Enum):
Right = 1.0
Left = -1.0
#class to cancel the frequencies
class AntiSoundController:
def __init__(self,sample_size,sampling_rate,amount_frequencies_to_cancel):
#list of frequencies to cancel
self.sampling_rate = sampling_rate
self.sample_size = sample_size
self.amount_frequencies_to_cancel = amount_frequencies_to_cancel
#filled after init
self.frequencies_to_cancel = None
#variable to declare init
self.init_done = False
#queue with data of pyaudio
self.input_queue = queue.Queue()
#queue with data for pyaudio
self.output_queue = queue.Queue()
#time when to change the amplitude
self.time_till_change_amplitude = 0
#sample of one second of pyaudio
self.microphone_samples_one_second = np.array([])
        #cost calculator: the energy of the targeted frequencies is the cost to minimize
self.costcalculator = CostCalculator(sampling_rate)
#list of sinus generators, shift correcters and amplitude correcters one per frequency to cancel
self.sinus_generators = [SinusGenerator(sample_size,sampling_rate) for i in range(self.amount_frequencies_to_cancel)]
self.amplitude_correcters = [Correcter(0.005,sampling_rate,self.costcalculator) for i in range(self.amount_frequencies_to_cancel)]
#pyaudio object to deal with pyaudio
self.pyaudio_object = pyaudio.PyAudio()
#stream from pyaudio to interact with speakers and microphone
self.stream = self.pyaudio_object.open(channels=1,
frames_per_buffer= sample_size,
rate=sampling_rate,
format=pyaudio.paFloat32,
output=True,
input=True,
output_device_index=3,
input_device_index=1,
stream_callback=self.callback)
#callback function used by pyaudio to get new data
def callback(self, in_data, frame_count, time_info, status):
#data to input queue
self.input_queue.put(in_data)
#data from output queue to pyaudio
output = self.output_queue.get()
return (output, pyaudio.paContinue)
def append_numpy_to_file(self,one_second):
with open('test.npy', 'ab') as f:
np.save(f, one_second)
def write_numpy_to_wav(self):
with open('test.npy', 'rb') as f:
fsz = os.fstat(f.fileno()).st_size
out = np.load(f)
while f.tell() < fsz:
out = np.concatenate([out,np.load(f)])
write("recorded_18-05-2021.wav",self.sampling_rate, out.astype(np.float32))
#function to start the stream
def start_stream(self):
self.stream.start_stream()
#init function to get the main frequencies from the sound
def get_main_frequencies(self):
if not self.input_queue.empty():
#get the inputdata as float 32
microphone_wave = self.input_queue.get()
microphone_wave = np.frombuffer(microphone_wave, dtype=np.float32)
#create 1 second of microphone_waves
self.microphone_samples_one_second = np.concatenate([self.microphone_samples_one_second, microphone_wave])
            #once we have ten seconds of samples, analyse them and get the main frequencies
if len(self.microphone_samples_one_second) >= self.sampling_rate*10:
#self.microphone_samples_one_second = self.microphone_samples_one_second[:self.sampling_rate]
frequencies,energy = self.costcalculator.calculate_frequencies_energy(self.microphone_samples_one_second)
top_indices = self.costcalculator.get_top_N_indices_energy(energy,AMOUNT_FREQUENCIES_TO_CANCEL)
frequencies_to_cancel = frequencies[top_indices]
self.shift_correcters = []
for frequency in frequencies_to_cancel:
self.shift_correcters.append(Correcter(0.0001, self.sampling_rate,self.costcalculator))
self.microphone_samples_one_second = np.array([])
self.input_queue.queue.clear()
self.output_queue.queue.clear()
self.init_done = True
return frequencies_to_cancel
#play a silent sound to let the stream play and input sound
if self.output_queue.qsize()<5:
silent_wave = np.zeros(self.sample_size).astype(np.float32)
self.output_queue.put(silent_wave.tobytes())
else:
time.sleep(0.01)
#main function cancel the main frequencies by changing shift and amplitude
def cancel_main_frequencies(self):
#cancel program and show error
if keyboard.is_pressed('a'):
self.plot(ERROR_LIST, TOTAL_ERROR_LIST)
self.write_numpy_to_wav()
return
#we have got input data from pyaudio
if not self.input_queue.empty():
#get the inputdata as float 32
microphone_wave = self.input_queue.get()
microphone_wave = np.frombuffer(microphone_wave, dtype=np.float32)
#create 1 second of microphone_waves
self.microphone_samples_one_second = np.concatenate([self.microphone_samples_one_second, microphone_wave])
#when we really have one second update shift and amplitude
if len(self.microphone_samples_one_second) >= self.sampling_rate:
#self.microphone_samples_one_second = self.microphone_samples_one_second[:self.sampling_rate]
#for every frequency update shift
self.append_numpy_to_file(self.microphone_samples_one_second)
total_error = np.sum(np.square(self.microphone_samples_one_second))
TOTAL_ERROR_LIST.append(total_error)
for index, frequency in enumerate(self.frequencies_to_cancel):
error, shift = self.shift_correcters[index].get_corrected_value(self.microphone_samples_one_second, frequency)
ERROR_LIST[index].append(error)
self.sinus_generators[index].change_shift(shift)
#update amplitude only once every 30 seconds
if self.time_till_change_amplitude <= 30:
self.time_till_change_amplitude += 1
else:
#for every frequency update amplitude(double for maybe swap logic)
for index, frequency in enumerate(self.frequencies_to_cancel):
error, amplitude = self.amplitude_correcters[index].get_corrected_value(self.microphone_samples_one_second,frequency)
self.sinus_generators[index].change_amplitude(amplitude)
#reset time to 10 to get the shift right
self.time_till_change_amplitude = 10
                #we have parsed one second of data, now clear the buffer and wait until new data arrives
self.microphone_samples_one_second = np.array([])
        #when there are fewer than 5 samples in our output queue, put in new samples with the right amplitude and shift
if self.output_queue.qsize()<5:
#print(self.output_queue.qsize())
#create a summed wave
summed_wave = np.zeros(self.sample_size).astype(np.float32)
#sum all frequencies with right amplitude and shift
for index, frequency in enumerate(self.frequencies_to_cancel):
summed_wave += self.sinus_generators[index].get_sample(frequency).astype(np.float32)
#append to output queue so pyaudio can callback for it
self.output_queue.put(summed_wave.tobytes())
else:
time.sleep(0.01)
#main logic program
def main(self):
while self.stream.is_active():
if self.init_done == False:
if os.path.exists("test.npy"):
os.remove("test.npy")
self.frequencies_to_cancel = self.get_main_frequencies()
else:
self.cancel_main_frequencies()
#plot error of every frequency
def plot(self, error,total_error):
fig, axs = plt.subplots(2)
fig.suptitle('Error')
for e in error:
axs[0].plot(e)
axs[1].plot(total_error)
plt.show()
#close stream
def stop_stream(self):
self.stream.stop_stream()
self.stream.close()
self.pyaudio_object.terminate()
#class to calculate the cost of frequencies
class CostCalculator:
def __init__(self, sampling_rate):
self.sampling_rate = sampling_rate
#get all frequencies with their energy
def calculate_frequencies_energy(self, microphone_samples_one_second):
frequencies = np.fft.rfftfreq(microphone_samples_one_second.size, d=1/self.sampling_rate)
energy = | np.fft.rfft(microphone_samples_one_second) | numpy.fft.rfft |
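# --- Illustrative sketch (added; standalone, not part of the original dataset row) ---
# The np.fft.rfftfreq / np.fft.rfft pair used in CostCalculator: recover the
# frequency of a pure tone from one second of samples.
import numpy as np

_sr = 8000
_t = np.arange(_sr) / _sr
_tone = np.sin(2 * np.pi * 440 * _t)
_freqs = np.fft.rfftfreq(_tone.size, d=1 / _sr)
_energy = np.abs(np.fft.rfft(_tone))
print(_freqs[np.argmax(_energy)])  # 440.0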
import numpy as np
import unittest
import pytest
from pysph.base.particle_array import ParticleArray
import pysph.tools.mesh_tools as G
from pysph.base.utils import get_particle_array
# Data of a unit length cube
def cube_data():
points = np.array([[0., 0., 0.],
[0., 1., 0.],
[1., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 1.]])
x_cube, y_cube, z_cube = points.T
cells = np.array([[0, 1, 2],
[0, 2, 3],
[0, 4, 5],
[0, 5, 1],
[0, 3, 6],
[0, 6, 4],
[4, 6, 7],
[4, 7, 5],
[3, 2, 7],
[3, 7, 6],
[1, 5, 7],
[1, 7, 2]])
normals = np.array([[0., 0., -1.],
[0., 0., -1.],
[-1., 0., 0.],
[-1., 0., 0.],
[0., -1., 0.],
[0., -1., 0.],
[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 1., 0.]])
vectors = np.zeros((len(cells), 3, 3))
for i, cell in enumerate(cells):
idx1, idx2, idx3 = cell
vector = np.array([[x_cube[idx1], y_cube[idx1], z_cube[idx1]],
[x_cube[idx2], y_cube[idx2], z_cube[idx2]],
[x_cube[idx3], y_cube[idx3], z_cube[idx3]]])
vectors[i] = vector
return x_cube, y_cube, z_cube, cells, normals, vectors
class TestGeometry(unittest.TestCase):
def test_in_triangle(self):
assert(G._in_triangle(0.5, 0.5, 0.0, 0.0, 1.5, 0.0, 0.0, 1.5) is True)
assert(G._in_triangle(1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0) is False)
def test_interp_2d(self):
# Check interpolation between two points on line y=x
dx = 0.1
r = G._interp_2d(np.array([0., 0.]), np.array([1., 1.]), dx)
# Check if all points satisfy y=x
np.testing.assert_array_almost_equal(
r[:, 0] - r[:, 1], np.zeros(r.shape[0]))
# Check if distance between consecutive points is lesser than dx
np.testing.assert_array_less(np.linalg.norm(r[1:] - r[0:-1], axis=1),
np.ones(r.shape[0] - 1) * dx)
def test_fill_triangle(self):
triangle = np.array([[0., 0., 0.],
[1., 0., 0.],
[0., 1., 0.]])
dx_triangle = 0.1
x, y, z = G._fill_triangle(triangle, dx_triangle)
EPS = np.finfo(float).eps
np.testing.assert_array_less(-x, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-y, np.zeros(x.shape[0]) + EPS)
np.testing.assert_array_less(-(x + y), np.ones(x.shape[0]) + EPS)
np.testing.assert_almost_equal(z, np.zeros(x.shape[0]))
def test_fill_triangle_throws_zero_area_triangle_exception(self):
self.assertRaises(G.ZeroAreaTriangleException, G._fill_triangle,
np.zeros((3, 3)), 0.5)
def test_fill_triangle_throws_polygon_mesh_error(self):
self.assertRaises(G.PolygonMeshError, G._fill_triangle,
| np.zeros((4, 3)) | numpy.zeros |
import os
import numpy as np
import warnings
import SimpleITK as sitk
import cv2
from utils.imageutils import pad_resize, resize_by_spacing
__author__ = '<NAME>'
def read_dicom_series(dicom_folder, correct_ct_val=0):
"""Read a DICOM series from a folder as a volume.
:param dicom_folder: a folder containing files of a volume.
:param correct_ct_val: if not 0, the given value is subtracted from the image intensity if necessary.
:return: a SimpleITK image.
"""
# Read a series as a volume
file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(dicom_folder)
reader = sitk.ImageSeriesReader()
reader.SetFileNames(file_names)
image = reader.Execute()
# Correct CT intensity value if required
if correct_ct_val != 0:
# Check conflict
first_slice = sitk.ReadImage(file_names[0])
if first_slice.HasMetaDataKey('0008|0060') and first_slice.GetMetaData('0008|0060') != 'CT':
raise Exception('correct_ct_val != 0 for non-CT image.')
# Correct CT intensity value
minmax = sitk.MinimumMaximumImageFilter()
minmax.Execute(image)
if minmax.GetMinimum() >= 0:
image -= correct_ct_val
return image
def read_image(image_path, gray_scale=True, correct_ct_val=0, check_direction=True):
"""Reads an image by its file extension.
:param image_path: an image file or a DICOM series folder.
:param gray_scale: if True, a gray-scale image is returned.
:param correct_ct_val: if not 0, the given value is subtracted from the image intensity if necessary.
:param check_direction: if True, assert direction matrix == identity matrix for SimpleITK images.
:return: a 2D (height, width) or 3D (depth, height, width) numpy array.
"""
# DICOM series
if os.path.isdir(image_path):
image = read_dicom_series(image_path, correct_ct_val=correct_ct_val)
# Single file
elif any(image_path.endswith(ext) for ext in ['.nii', '.nii.gz', '.dcm']):
image = sitk.ReadImage(image_path)
# Correct CT intensity value if required
if correct_ct_val != 0:
# Check conflict
if image.HasMetaDataKey('0008|0060') and image.GetMetaData('0008|0060') != 'CT':
raise Exception('correct_ct_val != 0 for non-CT image.')
# Correct CT intensity value
minmax = sitk.MinimumMaximumImageFilter()
minmax.Execute(image)
if minmax.GetMinimum() >= 0:
image -= correct_ct_val
elif any(image_path.endswith(ext) for ext in ['.npy', '.npz']):
image = np.load(image_path)
if image_path.endswith('.npz'):
image = image[image.files[0]]
else:
image = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
# image = sitk.ReadImage(image_path)
# image = sitk.GetArrayFromImage(image)
if gray_scale and image.ndim == 3:
image = image.mean(axis=-1).astype(image.dtype)
# elif not gray_scale and image.ndim == 2:
# image = np.expand_dims(image, -1).repeat(3, axis=-1)
if isinstance(image, sitk.Image):
# Check direction
if check_direction:
direction = image.GetDirection()
if tuple(np.eye(image.GetDimension()).flatten()) != direction:
raise Exception('Non-identity direction: ', direction)
# Change to 2D if necessary
if image.GetDimension() == 3 and image.GetDepth() == 1:
image = sitk.Extract(image, image.GetSize()[:2] + (0,), | np.zeros(3, np.int) | numpy.zeros |
import time
from pathlib import Path
import numpy as np
from py_diff_pd.env.env_base import EnvBase
from py_diff_pd.common.common import create_folder, ndarray
from py_diff_pd.common.quad_mesh import generate_rectangle_mesh
from py_diff_pd.common.display import display_quad_mesh, export_gif
from py_diff_pd.core.py_diff_pd_core import QuadMesh2d, QuadDeformable, StdRealVector
class HopperEnv2d(EnvBase):
def __init__(self, seed, folder, options):
EnvBase.__init__(self, folder)
np.random.seed(seed)
create_folder(folder, exist_ok=True)
refinement = options['refinement'] if 'refinement' in options else 2
youngs_modulus = options['youngs_modulus'] if 'youngs_modulus' in options else 4e5
poissons_ratio = options['poissons_ratio'] if 'poissons_ratio' in options else 0.45
actuator_parameters = options['actuator_parameters'] if 'actuator_parameters' in options else ndarray([5., 5.])
state_force_parameters = options['state_force_parameters'] if 'state_force_parameters' in options else ndarray([0.0, -9.81])
# Mesh parameters.
cell_nums = (2 * refinement, 4 * refinement)
node_nums = (cell_nums[0] + 1, cell_nums[1] + 1)
dx = 0.03 / refinement
origin = (0, dx)
bin_file_name = str(folder / 'mesh.bin')
generate_rectangle_mesh(cell_nums, dx, origin, bin_file_name)
mesh = QuadMesh2d()
mesh.Initialize(bin_file_name)
        # FEM parameters (youngs_modulus and poissons_ratio come from the options above).
la = youngs_modulus * poissons_ratio / ((1 + poissons_ratio) * (1 - 2 * poissons_ratio))
mu = youngs_modulus / (2 * (1 + poissons_ratio))
density = 1e3
deformable = QuadDeformable()
deformable.Initialize(bin_file_name, density, 'none', youngs_modulus, poissons_ratio)
# External force.
deformable.AddStateForce('gravity', state_force_parameters)
# Elasticity.
deformable.AddPdEnergy('corotated', [2 * mu,], [])
deformable.AddPdEnergy('volume', [la,], [])
# Actuation.
left_muscle_indices = []
right_muscle_indices = []
for j in range(cell_nums[1]):
left_muscle_indices.append(0 * cell_nums[1] + j)
right_muscle_indices.append((2 * refinement - 1) * cell_nums[1] + j)
actuator_stiffness = self._actuator_parameter_to_stiffness(actuator_parameters)
deformable.AddActuation(actuator_stiffness[0], [0.0, 1.0], left_muscle_indices)
deformable.AddActuation(actuator_stiffness[1], [0.0, 1.0], right_muscle_indices)
# Collision.
friction_node_idx = []
for i in range(node_nums[0]):
friction_node_idx.append(i * node_nums[1])
deformable.SetFrictionalBoundary('planar', [0.0, 1.0, 0.0], friction_node_idx)
# Initial conditions.
dofs = deformable.dofs()
# Perturb q0 a bit to avoid singular gradients in SVD.
q0 = ndarray(mesh.py_vertices()) + np.random.uniform(low=-2 * dx * 0.01, high=2 * dx * 0.01, size=dofs)
# 5 body lengths per second at 45 deg from the horizontal
v0_mult = 5 * dx * cell_nums[0]
v0 = | np.ones(dofs) | numpy.ones |
import numpy as np
import math
import scipy
import scipy.stats
def compute_rdp_data_dependent_gnmax(logq, sigma, orders):
"""
Computes data-dependent RDP guarantees for the GNMax mechanism.
This is the bound D_\lambda(M(D) || M(D')) from Theorem 6 (equation 2),
PATE 2018 (Appendix A).
Bounds RDP from above of GNMax given an upper bound on q.
Args:
logq: a union bound on log(Pr[outcome != argmax]) for the GNMax
mechanism.
sigma: std of the Gaussian noise in the GNMax mechanism.
orders: an array-like list of RDP orders.
Returns:
A numpy array of upper bounds on RDP for all orders.
Raises:
ValueError: if the inputs are invalid.
"""
if logq > 0 or sigma < 0 or np.isscalar(orders) or np.any(orders <= 1):
raise ValueError(
"'logq' must be non-positive, 'sigma' must be non-negative, "
"'orders' must be array-like, and all elements in 'orders' must be "
"greater than 1!")
if np.isneginf(logq): # deterministic mechanism with sigma == 0
        return np.full_like(orders, 0., dtype=float)
variance = sigma ** 2
orders = np.array(orders)
rdp_eps = orders / variance # data-independent bound as baseline
# Two different higher orders computed according to Proposition 10.
# See Appendix A in PATE 2018.
# rdp_order2 = sigma * math.sqrt(-logq)
rdp_order2 = math.sqrt(variance * -logq)
rdp_order1 = rdp_order2 + 1
# Filter out entries to which data-dependent bound does not apply.
mask = np.logical_and(rdp_order1 > orders, rdp_order2 > 1)
# Corresponding RDP guarantees for the two higher orders.
    # The GNMax mechanism satisfies:
# (order = \lambda, eps = \lambda / sigma^2)-RDP.
rdp_eps1 = rdp_order1 / variance
rdp_eps2 = rdp_order2 / variance
log_a2 = (rdp_order2 - 1) * rdp_eps2
# Make sure that logq lies in the increasing range and that A is positive.
if (np.any(mask) and -logq > rdp_eps2 and logq <= log_a2 - rdp_order2 *
(math.log(1 + 1 / (rdp_order1 - 1)) + math.log(
1 + 1 / (rdp_order2 - 1)))):
# Use log1p(x) = log(1 + x) to avoid catastrophic cancellations when x ~ 0.
log1mq = _log1mexp(logq) # log1mq = log(1-q)
log_a = (orders - 1) * (
log1mq - _log1mexp((logq + rdp_eps2) * (1 - 1 / rdp_order2)))
log_b = (orders - 1) * (rdp_eps1 - logq / (rdp_order1 - 1))
# Use logaddexp(x, y) = log(e^x + e^y) to avoid overflow for large x, y.
log_s = np.logaddexp(log1mq + log_a, logq + log_b)
# Values of q close to 1 could result in a looser bound, so minimum
# between the data dependent bound and the data independent bound
# rdp_esp = orders / variance is taken.
rdp_eps[mask] = np.minimum(rdp_eps, log_s / (orders - 1))[mask]
assert np.all(rdp_eps >= 0)
return rdp_eps
def compute_logq_gnmax(votes, sigma):
"""
Computes an upper bound on log(Pr[outcome != argmax]) for the GNMax mechanism.
Implementation of Proposition 7 from PATE 2018 paper.
Args:
votes: a 1-D numpy array of raw ensemble votes for a given query.
sigma: std of the Gaussian noise in the GNMax mechanism.
Returns:
A scalar upper bound on log(Pr[outcome != argmax]) where log denotes natural logarithm.
"""
num_classes = len(votes)
variance = sigma ** 2
idx_max = np.argmax(votes)
votes_gap = votes[idx_max] - votes
votes_gap = votes_gap[np.arange(num_classes) != idx_max] # exclude argmax
# Upper bound log(q) via a union bound rather than a more precise
# calculation.
logq = _logsumexp(
scipy.stats.norm.logsf(votes_gap, scale=math.sqrt(2 * variance)))
return min(logq,
math.log(1 - (1 / num_classes))) # another obvious upper bound
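# Worked example (added): the per-runner-up tail terms that feed the union
# bound above, for votes [180, 10, 5, 5] and sigma = 40. np.logaddexp.reduce
# plays the role of the _logsumexp helper defined further below.
if __name__ == "__main__":
    _gaps = np.array([170.0, 175.0, 175.0])
    _terms = scipy.stats.norm.logsf(_gaps, scale=math.sqrt(2 * 40.0 ** 2))
    print(np.logaddexp.reduce(_terms))  # union-bound log(q), roughly -5.7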
def compute_rdp_data_dependent_gnmax_no_upper_bound(logq, sigma, orders):
"""
If the data dependent bound applies, then use it even though its higher than
the data independent bound. In this case, we are interested in estimating
the privacy budget solely on the data and are not optimizing its value to be
as small as possible.
Computes data-dependent RDP guarantees for the GNMax mechanism.
This is the bound D_\lambda(M(D) || M(D')) from Theorem 6 (equation 2),
PATE 2018 (Appendix A).
Bounds RDP from above of GNMax given an upper bound on q.
Args:
logq: a union bound on log(Pr[outcome != argmax]) for the GNMax
mechanism.
sigma: std of the Gaussian noise in the GNMax mechanism.
orders: an array-like list of RDP orders.
Returns:
A numpy array of upper bounds on RDP for all orders.
Raises:
ValueError: if the inputs are invalid.
"""
if logq > 0 or sigma < 0 or np.isscalar(orders) or np.any(orders <= 1):
raise ValueError(
"'logq' must be non-positive, 'sigma' must be non-negative, "
"'orders' must be array-like, and all elements in 'orders' must be "
"greater than 1!")
if np.isneginf(logq): # deterministic mechanism with sigma == 0
        return np.full_like(orders, 0., dtype=float)
variance = sigma ** 2
orders = np.array(orders)
rdp_eps = orders / variance # data-independent bound as baseline
# Two different higher orders computed according to Proposition 10.
# See Appendix A in PATE 2018.
# rdp_order2 = sigma * math.sqrt(-logq)
rdp_order2 = math.sqrt(variance * -logq)
rdp_order1 = rdp_order2 + 1
# Filter out entries to which data-dependent bound does not apply.
mask = np.logical_and(rdp_order1 > orders, rdp_order2 > 1)
# Corresponding RDP guarantees for the two higher orders.
    # The GNMax mechanism satisfies:
# (order = \lambda, eps = \lambda / sigma^2)-RDP.
rdp_eps1 = rdp_order1 / variance
rdp_eps2 = rdp_order2 / variance
log_a2 = (rdp_order2 - 1) * rdp_eps2
# Make sure that logq lies in the increasing range and that A is positive.
if (np.any(mask) and -logq > rdp_eps2 and logq <= log_a2 - rdp_order2 *
(math.log(1 + 1 / (rdp_order1 - 1)) + math.log(
1 + 1 / (rdp_order2 - 1)))):
# Use log1p(x) = log(1 + x) to avoid catastrophic cancellations when x ~ 0.
log1mq = _log1mexp(logq) # log1mq = log(1-q)
log_a = (orders - 1) * (
log1mq - _log1mexp(
(logq + rdp_eps2) * (1 - 1 / rdp_order2)))
log_b = (orders - 1) * (rdp_eps1 - logq / (rdp_order1 - 1))
# Use logaddexp(x, y) = log(e^x + e^y) to avoid overflow for large x, y.
log_s = np.logaddexp(log1mq + log_a, logq + log_b)
# Do not apply the minimum between the data independent and data
# dependent bound - but limit the computation to data dependent bound
# only!
rdp_eps[mask] = (log_s / (orders - 1))[mask]
assert np.all(rdp_eps >= 0)
return rdp_eps
def rdp_to_dp(orders, rdp_eps, delta):
"""
Conversion from (lambda, eps)-RDP to conventional (eps, delta)-DP.
Papernot 2018, Theorem 5. (From RDP to DP)
Args:
orders: an array-like list of RDP orders.
rdp_eps: an array-like list of RDP guarantees (of the same length as
orders).
delta: target delta (a scalar).
Returns:
A pair of (dp_eps, optimal_order).
"""
assert not np.isscalar(orders) and not np.isscalar(rdp_eps) and len(
orders) == len(
rdp_eps), "'orders' and 'rdp_eps' must be array-like and of the same length!"
dp_eps = np.array(rdp_eps) - math.log(delta) / (np.array(orders) - 1)
idx_opt = np.argmin(dp_eps)
return dp_eps[idx_opt], orders[idx_opt]
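# Example (added): convert the data-independent GNMax bound
# (eps = order / sigma^2) to (eps, delta)-DP and report the optimal order.
if __name__ == "__main__":
    _orders = np.arange(2, 200)
    print(rdp_to_dp(_orders, _orders / (40.0 ** 2), delta=1e-5))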
def _logsumexp(x):
"""
Sum in the log space.
An addition operation in the standard linear-scale becomes the
LSE (log-sum-exp) in log-scale.
Args:
x: array-like.
Returns:
A scalar.
"""
x = | np.array(x) | numpy.array |
'''
The contents of this file are focused on the Data class, which is used for storage of
imported data from N-body output files.
'''
#TODO: split functions should fix id values? Maybe not, depends on what the behavior is supposed to do
# this could probably be added into update
#TODO: UNIT TESTS
#===============================================================================
# IMPORTS
#===============================================================================
import numpy as np
import coords as co
import astropy
from astropy.coordinates import SkyCoord
import astropy.units as u
import random
import galpy
import unittest
import os
import mwahpy_glob
import flags
import output_handler
#===============================================================================
# DATA CLASS
#===============================================================================
#AttrDict is used as a helper class in Data to allow referencing attributes
#as dict keys and vice-versa.
#this is probably a bad way to implement this but it works, and it's better than
#making Data inherit from dict, which was the other solution I was able to
#strum up
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class Data():
def __init__(self, id_val=[], x=[], y=[], z=[], l=[], b=[], r=[], vx=[], vy=[], vz=[], mass=[], vlos=[], centerOfMass=[0, 0, 0], centerOfMomentum=[0, 0, 0], pot_offset=0, *args, **kwargs):
#all this is typically specified by the readOutput function in output_handler
#readOutput is the preferred way to input data to this data structure
#but if you're really feeling adventurous you can always do it yourself
#-----------------------------------------------------------------------
# HOUSEKEEPING
#-----------------------------------------------------------------------
#this data structure allows access to a dictionary like an attribute, i.e.
# d = AttrDict({'a':1, 'b':2})
# d['a'] == d.a //returns True
#it seems like magic but it works by accessing the way python handles attributes
#within each class. There are pros and cons, the most notable thing is
#that it creates a memory leak in python <3.2.3
ad = AttrDict() #this is a private helper class that allows for the
#desired indexing behavior
self.__dict__ = ad.__dict__
#-----------------------------------------------------------------------
self.id = np.array(id_val)
self.x = np.array(x)
self.y = np.array(y)
self.z = np.array(z)
self.l = np.array(l)
self.b = np.array(b)
self.dist = np.array(r)
self.vx = np.array(vx)
self.vy = np.array(vy)
self.vz = np.array(vz)
self.mass = np.array(mass)
self.vlos = np.array(vlos)
#NOTE: Any time you update the position data, you have to update
# the center of mass (and same for velocity and COMomentum)
# This is done automatically if flags.updateData is on
self.centerOfMass = centerOfMass
self.centerOfMomentum = centerOfMomentum
self.msol = self.mass * mwahpy_glob.structToSol
#ICRS information
c = SkyCoord(l=self.l*u.degree, b=self.b*u.degree, frame='galactic')
c_trans = c.transform_to('icrs')
self.ra = c_trans.ra.degree
self.dec = c_trans.dec.degree
self.rv, self.pmra, self.pmdec = co.getrvpm(self.ra, self.dec, self.dist, self.vx, self.vy, self.vz)
self.pmtot = (self.pmra**2 + self.pmdec**2)**0.5
#4.848e-6 is arcsec->rad, 3.086e16 is kpc->km, and 3.156e7 is sidereal yr -> seconds
self.vtan = 4.74*self.dist*self.pmtot #eq. to self.r*np.tan(self.pmtot*4.848e-6) * 3.086e16 / 3.156e7
#angular momentum information
self.lx = self.y * self.vz - self.z * self.vy
self.ly = self.x * self.vz - self.z * self.vx
self.lz = self.x * self.vy - self.y * self.vx
self.lperp = (self.lx**2 + self.ly**2)**0.5
self.ltot = (self.lx**2 + self.ly**2 + self.lz**2)**0.5
#galactocentric information
self.r = (self.x**2 + self.y**2 + self.z**2)**0.5
self.vgsr = self.vlos + 10.1*np.cos(self.b*np.pi/180)*np.cos(self.l*np.pi/180) + 224*np.cos(self.b*np.pi/180)* | np.sin(self.l*np.pi/180) | numpy.sin |
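# --- Illustrative sketch (added; standalone, not part of the original dataset row) ---
# The galactic -> ICRS transform used in Data.__init__, for one direction on the sky:
import astropy.units as u
from astropy.coordinates import SkyCoord

_c = SkyCoord(l=90.0 * u.degree, b=0.0 * u.degree, frame='galactic').transform_to('icrs')
print(_c.ra.degree, _c.dec.degree)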
import pandas as pd
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath, utils
import json
import os
import importlib
from hydroDL.master import basinFull
from hydroDL.app.waterQuality import WRTDS
import warnings
# warnings.simplefilter('error')
dataName = 'G200N'
# with warnings.catch_warnings():
# warnings.simplefilter('ignore', category=RuntimeWarning)
# DF = dbBasin.DataFrameBasin(dataName)
DF = dbBasin.DataFrameBasin(dataName)
codeLst = usgs.newC
trainLst = ['rmR20', 'rmL20', 'rmRT20', 'rmYr5', 'B10']
testLst = ['pkR20', 'pkL20', 'pkRT20', 'pkYr5', 'A10']
trainSet = 'rmR20'
testSet = 'pkR20'
# trainSet = 'B10'
# testSet = 'A10'
labelLst = ['QFPRT2C', 'QFRT2C', 'QFPT2C', 'FPRT2C']
nL = len(labelLst)
yLst = list()
for label in labelLst:
outName = '{}-{}-{}'.format(dataName, label, trainSet)
yP, ycP = basinFull.testModel(
outName, DF=DF, testSet=testSet, ep=500)
    yOut = np.empty(yP.shape)
for k, code in enumerate(codeLst):
m = DF.g[:, DF.varG.index(code+'-M')]
s = DF.g[:, DF.varG.index(code+'-S')]
yOut[:, :, k] = yP[:, :, k]*s+m
yLst.append(yOut)
# WRTDS
# yW = WRTDS.testWRTDS(dataName, trainSet, testSet, codeLst)
dirRoot = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-dbBasin')
fileName = '{}-{}-{}'.format(dataName, trainSet, 'all')
yW = np.load(os.path.join(dirRoot, fileName)+'.npz')['arr_0']
code = '00945'
indC = codeLst.index(code)
# correlation matrix
d1 = dbBasin.DataModelBasin(DF, subset=trainSet, varY=codeLst)
d2 = dbBasin.DataModelBasin(DF, subset=testSet, varY=codeLst)
siteNoLst = DF.siteNoLst
matW = np.full([len(siteNoLst), 4], np.nan)
matLst = [np.full([len(siteNoLst), 4], np.nan) for x in labelLst]
for indS, siteNo in enumerate(siteNoLst):
n1 = np.sum(~np.isnan(d1.Y[:, indS, indC]), axis=0)
n2 = np.sum(~np.isnan(d2.Y[:, indS, indC]), axis=0)
if n1 >= 160 and n2 >= 40:
statW = utils.stat.calStat(yW[:, indS, indC], d2.Y[:, indS, indC])
matW[indS, :] = list(statW.values())
for k in range(nL):
yL = yLst[k]
statL = utils.stat.calStat(
yL[:, indS, indC], d2.Y[:, indS, indC])
matLst[k][indS, :] = list(statL.values())
dfCrd = gageII.readData(
varLst=['LAT_GAGE', 'LNG_GAGE'], siteNoLst=siteNoLst)
lat = dfCrd['LAT_GAGE'].values
lon = dfCrd['LNG_GAGE'].values
figM, axM = plt.subplots(nL+1, 1, figsize=(8, 6))
for k, label in enumerate(labelLst):
axplot.mapPoint(axM[k], lat, lon, matLst[k][:, -1], s=12)
axplot.mapPoint(axM[-1], lat, lon, matW[:, -1], s=12)
figM.show()
code = '00955'
indC = codeLst.index(code)
indS = 0
figP, axP = plt.subplots(1, 1, figsize=(12, 3))
dataTS = [y[:, indS, indC] for y in yLst[:3]] + \
[DF.c[:, indS, indC]]
# dataTS = [yLst[2][:, indS, indC], yLst[1][:, indS, indC]] + \
# [yW[:, indS, indC]]+[DF.c[:, indS, indC]]
axplot.plotTS(axP, DF.t, dataTS, cLst='bcgk')
figP.show()
def funcMap():
nM = len(plotVar)
figM, axM = plt.subplots(nM, 1, figsize=(8, 6))
axM = | np.array([axM]) | numpy.array |
"""
Draw Figures - Chapter 4
This script generates all of the figures that appear in Chapter 4 of the textbook.
Ported from MATLAB Code
<NAME>
24 March 2021
"""
import utils
from utils.unit_conversions import lin_to_db, db_to_lin
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
import numpy as np
import scipy as sp
from scipy import stats
from scipy import fftpack
import seaborn as sns
import detector
def make_all_figures(close_figs=False):
"""
Call all the figure generators for this chapter
    :param close_figs: Boolean flag. If True, closes all figures after generating
        them (useful for batch scripting). Default: False.
:return: List of figure handles
"""
# Initializes colorSet - Mx3 RGB vector for successive plot lines
colors = plt.get_cmap("tab10")
    # Reset the random number generator, to ensure reproducibility
rng = np.random.default_rng(0)
# Find the output directory
prefix = utils.init_output_dir('chapter4')
# Activate seaborn for prettier plots
sns.set()
# Generate all figures
fig1a = make_figure_1a(prefix)
fig1b = make_figure_1b(prefix, rng)
fig2a = make_figure_2a(prefix, rng)
fig2b = make_figure_2b(prefix, rng)
fig3 = make_figure_3(prefix)
fig5 = make_figure_5(prefix, colors)
fig6 = make_figure_6(prefix, rng, colors)
fig7 = make_figure_7(prefix)
fig8 = make_figure_8(prefix, colors)
figs = [fig1a, fig1b, fig2a, fig2b, fig3, fig5, fig6, fig7, fig8]
if close_figs:
for fig in figs:
plt.close(fig)
return None
else:
plt.show()
return figs
def make_figure_1a(prefix=None):
"""
Figure 1a - Alternating Sine Waves
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:return: figure handle
"""
# Sine wave
num_points = 1024 # Sample points
y_chip = np.exp(1j*(np.pi/2+2*np.pi*np.arange(num_points)/num_points))
# String together multiple periods
code = np.array([0, 1, 1, 0, 1])
symbol = np.exp(1j*np.pi*code)
y_full = np.ravel(np.expand_dims(y_chip, axis=0)*np.expand_dims(symbol, axis=1))
# x axis
t_vec = np.arange(np.size(y_full))
fig1a = plt.figure()
plt.plot(t_vec, np.real(y_full), color='k', linewidth=0.5)
plt.plot(t_vec, np.zeros_like(t_vec), color='k', linewidth=0.5)
for idx, bit in enumerate(code):
plt.text(num_points/2 + num_points*idx-1, 1.5, '{}'.format(bit))
plt.plot(num_points*idx*np.array([1, 1]), np.array([-1, 2]), color='k', linestyle=':')
# Annotation
plt.annotate(s='', xy=(2*num_points, 1.1), xytext=(3*num_points, 1.1), arrowprops=dict(arrowstyle='<->'))
plt.text(2.35*num_points, 1.25, r'$T_{chip}$')
# Turn off the axes
ax = plt.gca()
ax.axis('off')
# Save figure
if prefix is not None:
plt.savefig(prefix + 'fig1a.svg')
plt.savefig(prefix + 'fig1a.png')
return fig1a
def make_figure_1b(prefix=None, rng=None):
"""
Figure 1b - Figure 1b, Bandwidth
Ported from MATLAB Code
<NAME>
24 March 2021
:param prefix: output directory to place generated figure
:param rng: random number generator
:return: figure handle
"""
if rng is None:
rng = np.random.default_rng()
num_samples = 16 # Number of samples per cycle
num_code_bits = 128 # Length of transmit code in bits
y_chip = np.exp(1j*(np.pi/2+2*np.pi*np.arange(num_samples)/num_samples))
num_code_samples = num_code_bits*num_samples
lo = np.exp(1j*2*np.pi*np.arange(num_code_samples)*4/num_samples) # 4 samples/cycle
num_monte_carlo = 100
spectral_average = np.zeros_like(lo)
for ii in range(num_monte_carlo):
# Generate a random code
code = rng.integers(low=0, high=2, size=(num_code_bits, 1)) # with random integers, the interval is [low, high)
symbol = np.exp(1j*np.pi*code)
# Random starting phase
starting_phase = np.exp(1j*rng.uniform(low=0, high=2*np.pi))
# Generate full transmit signal at the intermediate frequency (IF) of y_chip
signal_if = np.ravel(starting_phase*symbol*y_chip)
# Mix with the local oscillator (lo) to get the radio frequency (RF) sample
signal_rf = signal_if*lo
# Take the fourier transform
        spectral_average += np.absolute(fftpack.fft(np.real(signal_rf)))
# Normalize, and use an fftshift to put the center frequency in the middle of the vector
spectral_average = fftpack.fftshift(spectral_average)/np.max(np.absolute(spectral_average))
fig1b = plt.figure()
plt.plot(np.linspace(start=-1, stop=1, num=num_code_samples),
2*lin_to_db( | np.absolute(spectral_average) | numpy.absolute |
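# --- Illustrative sketch (added; standalone, not part of the original dataset row) ---
# The normalize-then-fftshift step above, on a single random BPSK burst; the
# 10*log10 scaling mirrors the lin_to_db helper assumed from the utils module.
import numpy as np
from scipy import fftpack

_rng = np.random.default_rng(0)
_sig = np.repeat(np.exp(1j * np.pi * _rng.integers(0, 2, 128)), 16).real
_spec = fftpack.fftshift(np.abs(np.fft.fft(_sig)))
_spec /= _spec.max()
print(2 * 10 * np.log10(_spec.max()))  # 0.0 dB at the peak after normalization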
#!/usr/bin/env python
"""
Multi-Resolution (2) Binary Classification with underlying Gaussian Distributions
<NAME> & <NAME>,
Electrical and Computer Engineering Dept.,
University of Maryland
"""
#%% Import Modules
import pickle
import numpy as np
# import pandas as pd
import matplotlib.pyplot as plt
# from scipy.stats import multivariate_normal
# plt.ioff() # turn off interactive mode: only show figures with plt.show()
plt.close('all')
#%%
save_file = 'data.pkl'
# Gaussian Sampling?
gauss_sampling = True
# gauss_sampling = False
# Number of samples per Gaussian
ns = 120
test_ratio = 20.0/100
# Gaussian Density 2D, Symmetric
def gauss_dens(c,s,X,Y):
# pos = np.empty(X.shape + (2,))
# pos[:, :, 0] = X
# pos[:, :, 1] = Y
# dens = multivariate_normal.pdf(pos, mean=c, cov=np.diag((s,s)))
    # s is the standard deviation of the symmetric Gaussian
    dens = 1/(2*np.pi*s**2) * np.exp(-((X-c[0])**2 + (Y-c[1])**2)/(2.0*s**2))
return dens
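# Sanity check (added): the peak of the symmetric 2D Gaussian sits at its
# center with height 1/(2*pi*s**2), i.e. ~0.159 for s = 1.
print(gauss_dens([0.0, 0.0], 1.0, 0.0, 0.0))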
b = 3
c = 0
centers = [[b-1.4,b+0.4],[b+2.2,b+1.0],[b-0.4+c,b-1.4],[b+1.4+c,b-0.4],[b+0.4+c,b+2.0]]
sigmas = [0.75, 0.5, 0.35, 0.75, 0.5]
sigmas = [0.35, 0.25, 0.15, 0.35, 0.25]
sigmas = [0.20, 0.15, 0.10, 0.20, 0.15]
# Contour alpha
aa = 0.05
# samples alpha
aaa = 0.4
np.random.seed(0)
# np.random.seed(13)
#%% Sample Data Set
data = []
labels = []
if gauss_sampling:
# class o
cx = np.array(centers[0])
sx = sigmas[0]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class o
cx = np.array(centers[1])
sx = sigmas[1]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class o
cx = np.array(centers[2])
sx = sigmas[2]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(0)
# class x
cx = np.array(centers[3])
sx = sigmas[3]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(1)
# class x
cx = np.array(centers[4])
sx = sigmas[4]
for i in range(ns):
data.append( np.random.multivariate_normal(cx, [[sx,0],[0,sx]]) )
labels.append(1)
else:
def arc_point(c, r, theta):
c = np.array(c)
d = np.array([r*np.cos(theta), r*np.sin(theta)])
return c + d
# class o
cx = np.array(centers[0])
for r in [0.5,1]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(0)
# class o
cx = np.array(centers[1])
for r in [0.3,0.5]:
for theta in np.arange(0,2*np.pi,np.pi/5):
data.append(arc_point(cx,r,theta))
labels.append(0)
# class o
cx = | np.array(centers[2]) | numpy.array |
import os
import errno
import h5py
import numpy as np
import scipy
import logging
from shutil import rmtree
from typing import *
from loompy import LoomConnection
class LoomTiles(object):
#############
# DEEP ZOOM #
#############
__slots__ = [
'ds',
'_maxes',
'_mins'
]
def __init__(self, ds: LoomConnection) -> None:
self.ds = ds
self._maxes = None # type: np.ndarray
self._mins = None
def maxes(self) -> Any:
if self._maxes is None:
# colormax = np.percentile(data, 99, axis=1) + 0.1
# minFloat = np.finfo(float).eps;
# def percentileMap(data):
# return np.percentile(data, 99, axis=1) + minFloat;
# Prefer using numpy's built-in method for finding the
# max values faster
# self._maxes = self.ds.map([max], 0)[0]
logging.info('calculating & caching max values')
rows = self.ds.shape[0]
_maxes = np.zeros(rows)
ix = 0
while ix < rows:
rows_per_chunk = min(rows - ix, 64)
chunk = self.ds[ix:ix + rows_per_chunk, :]
_maxes[ix:ix + rows_per_chunk] = np.nanmax(chunk, axis=1)
ix += rows_per_chunk
print('.', end='', flush=True)
self._maxes = _maxes
print(' done\n\n')
return self._maxes
def mins(self) -> Any:
if self._mins is None:
# self._mins = self.ds.map([min], 0)[0]
logging.info('calculating & caching min values')
rows = self.ds.shape[0]
_mins = np.zeros(rows)
ix = 0
while ix < rows:
rows_per_chunk = min(rows - ix, 64)
chunk = self.ds[ix:ix + rows_per_chunk, :]
_mins[ix:ix + rows_per_chunk] = np.nanmin(chunk, axis=1)
ix += rows_per_chunk
print('.', end='', flush=True)
self._mins = _mins
print(' done\n\n')
return self._mins
def prepare_heatmap(self, truncate: bool = False) -> None:
tile_dir = "%s.tiles/" % (self.ds.filename)
if os.path.isdir(tile_dir):
            logging.info(" Previous tile folder found at %s", tile_dir)
if truncate:
logging.info(" Truncate set, removing old tile folder")
rmtree(tile_dir)
else:
logging.info(" Call prepare_heatmap(truncate=True) to overwrite")
return
self.maxes()
self.mins()
logging.info(' Generating and saving tiles')
self.dz_get_zoom_tile(0, 0, 8, truncate)
print(" done\n\n")
def dz_zoom_range(self) -> Tuple[int, int, int]:
"""
Determine the zoom limits for this file.
Returns:
            Tuple (min_zoom, mid_zoom, max_zoom) of integer zoom levels; mid_zoom is the level at which one pixel corresponds to one data value.
"""
return (8, int(max(np.ceil(np.log2(self.ds.shape)))), int(max(np.ceil(np.log2(self.ds.shape))) + 8))
def dz_dimensions(self) -> Tuple[int, int]:
"""
Determine the total size of the deep zoom image.
Returns:
Tuple (x,y) of integers
"""
(y, x) = np.divide(self.ds.shape, 256) * 256 * pow(2, 8)
return (x, y)
def dz_tile_to_image(self, x: int, y: int, z: int, tile: Any) -> Any:
# Crop outside matrix dimensions
(zmin, zmid, zmax) = self.dz_zoom_range()
(max_x, max_y) = (int(pow(2, z - zmid) * self.ds.shape[1]) - x * 256, int(pow(2, z - zmid) * self.ds.shape[0]) - y * 256)
if max_x < 0:
max_x = -1
if max_y < 0:
max_y = -1
if max_x < 255:
tile[:, max_x + 1:256] = 255
if max_y < 255:
tile[max_y + 1:256, :] = 255
return scipy.misc.toimage(tile, cmin=0, cmax=255, pal=_viridis)
def dz_save_tile(self, x: int, y: int, z: int, tile: Any, truncate: bool = False) -> Any:
(zmin, zmid, zmax) = self.dz_zoom_range()
if (
z < zmin or z > zmid or
x < 0 or y < 0 or
x * 256 * 2**(zmid - z) > self.ds.shape[1] or
y * 256 * 2**(zmid - z) > self.ds.shape[0]
):
# logging.info("Trying to save out of bound tile: x: %02d y: %02d z: %02d" % (x, y, z))
return
tile_dir = '%s.tiles/z%02d/' % (self.ds.filename, z)
tile_path = '%sx%03d_y%03d.png' % (tile_dir, x, y)
# make sure the tile directory exists
# we use a try/error approach so that we
# don't have to worry about race conditions
# (if another process creates the same
# directory we just catch the exception)
try:
os.makedirs(tile_dir, exist_ok=True)
except OSError as exception:
# if the error was that the directory already
# exists, ignore it, since that is expected.
if exception.errno != errno.EEXIST:
raise
if os.path.isfile(tile_path):
if truncate:
# remove old file
os.remove(tile_path)
else:
# load old file instead of generating new image
return scipy.misc.imread(tile_path, mode='P')
img = self.dz_tile_to_image(x, y, z, tile)
# save to file
with open(tile_path, 'wb') as img_io:
# logging.info("saving %s" % tile_path)
print('.', end='', flush=True)
img.save(img_io, 'PNG', compress_level=4)
return img
def dz_merge_tile(self, tl: Any, tr: Any, bl: Any, br: Any) -> Any:
temp = np.empty((512, 512), dtype='float32')
temp[0:256, 0:256] = tl
temp[0:256, 256:512] = tr
temp[256:512, 0:256] = bl
temp[256:512, 256:512] = br
# various strategies of aggregating values for
# zoomed out tiles, each with their own trade-offs
# Pick top-left of four pixels
# fastest, large systematic bias,
# does not preserve structure very well
# return temp[0::2, 0::2]
# Average of four
# biased towards whatever bias the value distribution has
# (typically towards zero)
# Preserves structures better
# temp2 = temp[0::2, 0::2]
# temp2 += temp[1::2, 0::2]
# temp2 += temp[0::2, 1::2]
# temp2 += temp[1::2, 1::2]
# temp2 *= 0.25
# return temp2
# Max value
# Makes everything too bright,
# completely destroys noise profile
# tl = temp[0::2, 0::2]
# tr = temp[1::2, 0::2]
# bl = temp[0::2, 1::2]
# br = temp[1::2, 1::2]
# tl = np.fmax(tl, tr, out=tl)
# bl = np.fmax(bl, br, out=bl)
# np.fmax(tl, bl, out=tl)
# return tl
# Max value per column, average per row
# an almost happy medium of the previous two,
# still introduces too much brightness per zoom level
# tl = temp[0::2, 0::2]
# tr = temp[1::2, 0::2]
# bl = temp[0::2, 1::2]
# br = temp[1::2, 1::2]
# tl = np.fmax(tl, tr, out=tl)
# bl = np.fmax(bl, br, out=bl)
# tl += bl
# tl *= 0.5
# return tl
# Max-biased value per column, average per row
# Looks like a good trade-off, introduces
# a little brightness, but not much
# could be tweaked with different weights
tl = temp[0::2, 0::2]
tr = temp[1::2, 0::2]
bl = temp[0::2, 1::2]
br = temp[1::2, 1::2]
# this is a weighed average, with the higher value 3:1
tmax = np.fmax(tl, tr)
tmax += tmax
tmax += tl
tmax += tr
bmax = np.fmax(bl, br)
bmax += bmax
bmax += bl
bmax += br
tmax += bmax
tmax *= 0.125
return tmax
# Returns a submatrix scaled to 0-255 range
def dz_get_zoom_tile(self, x: int, y: int, z: int, truncate: bool = False) -> Any:
"""
Create a 256x256 pixel matrix corresponding to the tile at x,y and z.
Args:
x (int): Horizontal tile index (0 is left-most)
y (int): Vertical tile index (0 is top-most)
z (int): Zoom level (8 is 'middle' where pixels correspond to data values)
Returns:
Numpy ndarray of shape (256,256)
"""
# logging.debug("Computing tile at x=%i y=%i z=%i" % (x,y,z))
(zmin, zmid, zmax) = self.dz_zoom_range()
if z < zmin:
raise ValueError("z cannot be less than %s" % zmin)
if z > zmax:
raise ValueError("z cannot be greater than %s" % zmax)
if x < 0:
raise ValueError("x cannot be less than zero")
if y < 0:
raise ValueError("y cannot be less than zero")
if x * 256 * 2**(zmid - z) > self.ds.shape[1] or y * 256 * 2**(zmid - z) > self.ds.shape[0]:
return np.zeros((256, 256), dtype='float32')
if z == zmid:
tile = self.ds._file['matrix'][y * 256:y * 256 + 256, x * 256:x * 256 + 256]
# Pad if needed to make it 256x256
if tile.shape[0] < 256 or tile.shape[1] < 256:
tile = np.pad(tile, ((0, 256 - tile.shape[0]), (0, 256 - tile.shape[1])), 'constant', constant_values=0)
# Rescale
maxes = self.maxes()[y * 256:y * 256 + 256]
mins = self.mins()[y * 256:y * 256 + 256]
if maxes.shape[0] < 256:
maxes = np.pad(maxes, (0, 256 - maxes.shape[0]), 'constant', constant_values=0)
mins = np.pad(mins, (0, 256 - mins.shape[0]), 'constant', constant_values=0)
# Human readable version of code below:
# We add one because we want a log2 curve,
# but keep zero values equal to zero, and
# log2(0 + 1) = 0.
#
# p = np.log2(1 + tile.transpose() - mins)
# q = np.log2(1 + maxes - mins)*255
# tile = (p/q).transpose()
mins = mins - 1
maxes = maxes - mins
# avoid allocating new arrays as much as we can
np.log2(maxes, maxes)
# replace zero with smallest non-zero positive number
# to avoid complaints about dividing by zero later
maxes[maxes == 0] = | np.nextafter(0, 1) | numpy.nextafter |
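# --- Illustrative sketch (added; standalone, not part of the original dataset row) ---
# The 3:1 max-biased average from dz_merge_tile, applied to one 2x2 block:
tl, tr, bl, br = 0.0, 4.0, 0.0, 0.0
top = 2 * max(tl, tr) + tl + tr
bot = 2 * max(bl, br) + bl + br
print((top + bot) * 0.125)  # 1.5 -- brighter than the plain mean (1.0)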
"""
Created on Tue Dec 29 19:42:32 2020
@author: jessm
This file formats and plots the .h5 and fields files from the MEDIS simulation.
There is still unresloved ringing in the fields plot.
"""
import os
import matplotlib.pyplot as plt
import numpy as np
import tables as pt
from matplotlib.colors import LogNorm, SymLogNorm
from skimage.util import img_as_ubyte
from skimage.util import invert
from skimage import color
from skimage import io
from numpy import inf
#import optics as opx
#c=os.chdir('C:/Users/jessm/PycharmProjects')
#print(c)
import sys
sys.path.append("C:/Users/jessm/OneDrive/Documents/Coding/proper_v3.2.3_python_3.x")
def open_obs_sequence_hdf5(obs_seq_file='hyper.h5'):
"""opens existing obs sequence .h5 file and returns it"""
# hdf5_path = "my_data.hdf5"
read_hdf5_file = pt.open_file(obs_seq_file, mode='r')
# Here we slice [:] all the data back into memory, then operate on it
obs_sequence = read_hdf5_file.root.data[:]
# hdf5_clusters = read_hdf5_file.root.clusters[:]
read_hdf5_file.close()
return obs_sequence
def cpx_to_intensity(data_in):
"""
converts complex data to units of intensity
WARNING: if you sum the data sequence over object or wavelength with simple case of np.sum(), must be done AFTER
converting to intensity, else results are invalid
"""
return np.abs(data_in)**2
def crop_center(img):
y,x = img.shape
if img.shape[0]<img.shape[1]:
cropx=img.shape[0]
startx = x//2-(cropx//2)
return img[:,startx:startx+cropx]
elif img.shape[1]<img.shape[0]:
cropy=img.shape[1]
starty = y//2-(cropy//2)
return img[starty:starty+cropy,:]
else :
        print("image is already square")
return img
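# Shape check (added): crop_center trims the longer axis down to the shorter
# one, returning a square array.
print(crop_center(np.zeros((4, 6))).shape)  # (4, 4)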
"""you have to put in the path to your own file"""
rebinned = open_obs_sequence_hdf5('C:/Users/jessm/.spyder-py3/MEDIS_spy/rebinned_cube5e8.h5')
#C:\Users\jessm\.spyder-py3\MEDIS_spy\
savename='np_rebinned5e8'
"""looking at fields"""
fields0 = open_obs_sequence_hdf5('C:/Users/jessm/.spyder-py3/MEDIS_spy/fields5e8.h5')
fields=fields0.astype(float)
#h5 file=complex image
#'timesteps', 'save planes', 'wavelengths', 'astronomical bodies', 'x', 'y'
print("Fields shape", fields.shape)
focal_sun=rebinned[0,-1,:,:]
focal_planet=fields[0,-1,:,:,:,:]
print("focal planet shape", focal_planet.shape)
print("rebinned cube shape", rebinned.shape)
#FOR REBINNED CUBE
#-no object or plane axis
#-rectangle
"""plotting fields"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
#cax = ax.imshow(np.sum(focal_planet, axis=(0,1)), vmin=1e-9, vmax=1e-4, origin='lower', norm=SymLogNorm(1e-10))
cax = ax.imshow(np.sum(cpx_to_intensity(focal_planet), axis=(0,1)), origin='lower', norm=LogNorm(vmin=1e-7, vmax=1e-3), cmap = "YlGnBu_r")
plt.title("Star and Planet Broadband - Unresolved Ringing")
plt.xlabel("X Coordinates")
plt.ylabel("Y Coordinates")
cb = plt.colorbar(cax)
plt.show()
"""cropping rebinned cube into cube"""
#print(crop_rebinned.shape)
rebinsum= np.sum(rebinned, axis=(0,1))
print("this is before cropping \n rebinned sum =", rebinsum.shape)
rebinsum=crop_center(rebinsum)
print("this is after cropping \n rebinned sum =", rebinsum.shape)
"""plotting lognorm rebinned cube"""
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(111)
#cax=ax.imshow(np.sum(rebinned))
cax = ax.imshow(rebinsum, origin='lower', norm=SymLogNorm(1e-10,vmin=1e-1, base=np.e), cmap = "YlGnBu_r")
#SymLogNorm values were hand selected
#Symlognorm only uses positive values, but thats ok because we only have positive values
plt.title("Rebinned Cube")
plt.xlabel("X Coordinates")
plt.ylabel("Y Coordinates")
cb = plt.colorbar(cax)
plt.show()
"""log normalizing from 0 to 1"""
x=np.log(rebinsum)
print("max", np.amax(x), "min", | np.amin(x) | numpy.amin |
from service_objects import services
import numpy as np
import pandas as pd
from django.db import connection
import datetime
from front.models import Match, Match_Stats, Player, Tourney, Tourney_Level, Surface
class IngestMatchesService(services.Service):
def process(self):
cursor = connection.cursor()
errors = ''
total_matches_updated = 0
total_matches_inserted = 0
tourneys = {}
surfaces = {}
tourney_levels = {}
players = {}
for year in range(1990, 2021):
csv_file = pd.read_csv('https://raw.githubusercontent.com/JeffSackmann/tennis_atp/master/atp_matches_' + str(year) + '.csv', header=1, names=self.getColumns())
for row in csv_file.itertuples():
created_at = datetime.datetime.now()
updated_at = datetime.datetime.now()
#try:
id = str(row.tourney_id) + '-' + str(row.match_num)
match = Match.objects.filter(id=id)
if (not match):
match = Match()
match.id = id
match.year = row.tourney_id.split('-')[0]
match.match_num = row.match_num
match.result = row.score
match.best_of = row.best_of
match.minutes = None if np.isnan(row.minutes) else row.minutes
match.round = row.round
if not tourneys.get(str(row.tourney_id)):
tourney = Tourney.objects.filter(id=row.tourney_id)
if (not tourney):
tourney = Tourney()
tourney.id = row.tourney_id
tourney.name = row.tourney_name
tourney.date = datetime.datetime.strptime(str(int(row.tourney_date)), '%Y%m%d').date()
tourney.created_at = created_at
tourney.updated_at = updated_at
if not surfaces.get(str(row.surface)):
surfaces[str(row.surface)] = self.getSurface(str(row.surface))
tourney.surface = surfaces[str(row.surface)]
if not tourney_levels.get(str(row.tourney_level)):
tourney_levels[str(row.tourney_level)] = self.getTourneyLevel(str(row.tourney_level))
tourney.tourney_level = tourney_levels[str(row.tourney_level)]
tourney.created_at = created_at
tourney.updated_at = updated_at
tourney.save()
else:
tourney = tourney[0]
tourneys[str(row.tourney_id)] = tourney
match.tourney = tourneys[str(row.tourney_id)]
match.created_at = created_at
match.updated_at = updated_at
match.save()
total_matches_inserted += 1
else:
match[0].year = row.tourney_id.split('-')[0]
match[0].save()
total_matches_updated += 1
match = match[0]
match_stats_id = str(row.tourney_id) + '-' + str(row.match_num) + '-' + str(row.winner_id)
match_stats = Match_Stats.objects.filter(id=match_stats_id)
if (not match_stats):
seed = row.winner_seed
if pd.isnull(row.winner_seed) or not str(row.winner_seed).isnumeric():
seed = None
match_stats = Match_Stats()
match_stats.id = match_stats_id
match_stats.type = ""
match_stats.seed = seed
match_stats.aces = None if np.isnan(row.w_ace) else row.w_ace
match_stats.double_faults = None if np.isnan(row.w_df) else row.w_df
"""
Programming Project 12
<NAME>
15863
"""
import cmath
from cmath import sin, cos, exp
from math import sqrt, pi, asin
from random import random
import numpy as np
N = 3
psi = np.ndarray((2 ** N, 1), complex) # N-qubit register
comp_state = 0
for i in range(2 ** N):
if i == comp_state:
psi[i] = 1
else:
psi[i] = 0
psi_state_0 = np.copy(psi)
j = cmath.sqrt(-1)
error_prob_percent = 10
error = asin(sqrt(error_prob_percent / 100))
H = 1 / sqrt(2) * np.array([[1, 1], [1, -1]]) # Hadamard gate
I = np.eye(2, dtype=int)
EX = np.array(([cos(error), -j * sin(error)], [-j * sin(error), cos(error)]), complex)
EZ = np.array(([exp(-j * error), 0], [0, exp(j * error)]), complex)
X = np.array(([0, 1], [1, 0]), int)
Z = np.array(([1, 0], [0, -1]), int)
def kd(i, j):
if i == j:
return 1
else:
return 0
C_NOT = np.zeros((4, 4), int)
C_NOT[0][0] = 1
C_NOT[1][1] = 1
C_NOT[2][3] = 1
C_NOT[3][2] = 1
def C_NOTn(a, b):
C_NOTn = np.ndarray((2 ** N, 2 ** N))
bits = 2 ** N
extra = []
for r in range(0, N):
extra.append(int(r))
extra.remove(a - 1)
extra.remove(b - 1)
for i in range(bits):
for j in range(bits):
final = 1
ii = str(format(i, '0{}b'.format(N)))
jj = str(format(j, '0{}b'.format(N)))
index1 = np.zeros((N), str)
index2 = np.zeros((N), str)
for k in range(N):
index1[k] = ii[k]
index2[k] = jj[k]
qp = int(str(index1[a - 1] + index1[b - 1]), 2)
q_p_ = int(str(index2[a - 1] + index2[b - 1]), 2)
for m in extra:
final = final * kd(index1[m], index2[m])
C_NOTn[i][j] = C_NOT[qp][q_p_] * final
return C_NOTn
def multi_gate(gate, n):
right = N - n
left = N - right - 1
if right > 0:
RIGHT = np.eye(2 ** right, dtype=int)
if left > 0:
LEFT = np.eye(2 ** left, dtype=int)
if right == 0:
return np.kron(LEFT, gate)
if left == 0:
return np.kron(gate, RIGHT)
return (np.kron(LEFT, np.kron(gate, RIGHT)))
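# Shape sanity check (assumes the module-level N == 3): embedding a one-qubit
# gate at position n always yields a 2**N x 2**N operator, e.g.
# multi_gate(X, 2) == kron(I, kron(X, I)).
print("multi_gate(X, 2) shape:", multi_gate(X, 2).shape)  # (8, 8)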
def R(th): # Phase shift gate
return np.array([[1, 0], [0, cmath.exp(th * j)]], complex)
T = R(pi / 4)
S = R(pi / 2)
T_dag = R(-pi / 4)
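# Sanity check on the phase gates: two T gates compose to an S gate,
# since R(pi/4) @ R(pi/4) == R(pi/2).
print("T @ T == S:", np.allclose(T @ T, S))  # True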
def Toffoli(a, b, c):
return C_NOTn(a, b) @ multi_gate(H, c) @ multi_gate(S, a) @ C_NOTn(b, c) @ C_NOTn(c, a) @ multi_gate(T_dag,
a) @ multi_gate(
T_dag, b) @ multi_gate(T, c) @ C_NOTn(b, a) @ multi_gate(T_dag, a) @ C_NOTn(b, c) @ C_NOTn(c, a) @ C_NOTn(a,
b) @ multi_gate(
T_dag, a) @ multi_gate(T, b) @ multi_gate(T, c) @ multi_gate(H, c)
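# A cheap consistency check of the decomposition above (N == 3 assumed): it is
# a product of unitaries, so the assembled operator must itself be unitary.
_TOF = Toffoli(3, 2, 1)
print("Toffoli(3, 2, 1) unitary:", np.allclose(_TOF @ _TOF.conj().T, np.eye(2 ** N)))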
repeat = 100
result = np.ndarray((2, repeat), np.ndarray)
ques = 2
for k in range(repeat):
PSI = []
psi = np.copy(psi_state_0)
'''
Replace I below with an error gate to inject errors; the qubit the error acts on can also be changed.
The code corrects a single bit-flip error,
does not correct a phase-flip error,
and fails to correct two bit-flip errors, depending on where they occur.
'''
# for project 12 circuit (b)
PSI.append(multi_gate(I, 3) @ psi)
psi = np.copy(psi_state_0)
# for project 12 circuit (d)
H_corrected = C_NOTn(1, 2) @ C_NOTn(1, 3) @ Toffoli(3, 2, 1) @ multi_gate(H, 1) @ Toffoli(3, 2, 1) @ C_NOTn(1,
3) @ C_NOTn(
1, 2)
PSI.append(multi_gate(I, 2) @ H_corrected @ multi_gate(I, 2) @ H_corrected @ multi_gate(I, 3) @ psi)
for n in range(ques):
r = random()
q = 0
for i in range(2 ** N):
q = q + abs(PSI[n][i]) ** 2
if r < q:
result[n][k] = (i)
break
for n in range(ques):
result_freq = np.zeros((2 ** N), int)
if n == 0:
print('\nfor project 12 circuit (b)')
else:
print('\nfor project 12 circuit (d)')
for i in range(2 ** N):
result_freq[i] = np.count_nonzero(result[n][:] == i)
#!/usr/bin/env python3
import scipy
import os
import argparse
import numpy as np
from skimage.restoration import unwrap_phase
from scipy import io
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='''Unwrap phase using <NAME>, <NAME>, <NAME>.
Lalor, and <NAME>, "Fast two-dimensional phase-unwrapping
algorithm based on sorting by reliability following a noncontinuous
path," Appl. Opt. 41, 7437-7444 (2002).
''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'file',
type=argparse.FileType('rb'),
help='Wrapped phase, either in a MATLAB or an ASCII file.')
parser.add_argument(
'--debug', action='store_true', help='Plot phases.')
parser.add_argument(
'--quiet', action='store_true', help='No console output.')
args = parser.parse_args()
dout = dict()
matfile = io.loadmat(args.file)
phiw = matfile['phiw']
apmask = matfile['apmask'].astype(bool)
masked = np.ma.masked_array(phiw, np.invert(apmask))
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import csv
from planner.helperFunctions import euclidianDistance
def cmToInches(cm):
return cm / 2.54
class TrajectoryUtils:
def __init__(self, trajectoryDataFile=None, tentacleLength=0.1):
if trajectoryDataFile is not None:
trajectoryData = np.loadtxt(trajectoryDataFile, delimiter=",", skiprows=1)
self.timestamps = np.array([row[0] for row in trajectoryData])
self.positions = np.array([row[1:4] for row in trajectoryData])
self.velocities = np.array([row[4:7] for row in trajectoryData])
self.accelerations = np.array([row[7:10] for row in trajectoryData])
self.yaws = np.array([row[10] for row in trajectoryData])
self.omegas = np.array([row[11:14] for row in trajectoryData])
else:
self.timestamps = np.array([])
self.positions = np.array([])
self.velocities = np.array([])
self.accelerations = np.array([])
self.yaws = np.array([])
self.omegas = np.array([])
self.tentacleLength = tentacleLength
def appendEvent(self, timestamp, position, yaw=0, velocity=[0, 0, 0], acceleration=[0, 0, 0], omega=[0, 0, 0]):
self.timestamps = np.append(self.timestamps, timestamp)
# Update position
old_pos = self.positions
new_pos = np.array([position])
self.positions = new_pos if len(old_pos) == 0 else np.append(old_pos, new_pos, axis=0)
# Update velocities
old_vel = self.velocities
new_vel = np.array([velocity])
self.velocities = new_vel if len(old_vel) == 0 else np.append(old_vel, new_vel, axis=0)
# Update accelerations
old_acc = self.accelerations
new_acc = np.array([acceleration])
self.accelerations = new_acc if len(old_acc) == 0 else np.append(old_acc, new_acc, axis=0)
self.yaws = np.append(self.yaws, yaw)
# Update omegas
old_omegas = self.omegas
new_omegas = np.array([omega])
self.omegas = new_omegas if len(old_omegas) == 0 else np.append(old_omegas, new_omegas, axis=0)
def appendDroneEvent(self, timestamp, drone):
# print(f"Curr: {drone.position()}\n")
self.appendEvent(
timestamp=timestamp,
position=drone.position(),
velocity=drone.velocity(),
acceleration=drone.acceleration(),
yaw=drone.yaw(),
omega=drone.setState.omega
)
def saveTrajectoryToCsv(self, fileName):
with open(fileName, 'w', encoding='UTF8') as f:
writer = csv.writer(f)
header = [
"timestamp",
"pos_x", "pos_y", "pos_z",
"x_vel", "y_vel", "z_vel",
"x_acc", "y_acc", "z_acc",
"yaw",
"roll_vel", "pitch_vel", "yaw_vel"
]
writer.writerow(header)
numEvents = len(self.timestamps)
for i in range(numEvents):
timestamp = self.timestamps[i]
pos = self.positions[i]
pos_x, pos_y, pos_z = pos[0], pos[1], pos[2]
x_vel, y_vel, z_vel = self.velocities[i][0], self.velocities[i][1], self.velocities[i][2]
x_acc, y_acc, z_acc = self.accelerations[i][0], self.accelerations[i][1], self.accelerations[i][2]
yaw = self.yaws[i]
omega = self.omegas[i]
roll_vel, pitch_vel, yaw_vel = omega[0], omega[1], omega[2]
row = [ timestamp,
pos_x, pos_y, pos_z,
x_vel, y_vel, z_vel,
x_acc, y_acc, z_acc,
yaw,
roll_vel, pitch_vel, yaw_vel
]
writer.writerow(row)
def compareWithOtherTrajectory(self, otherTrajectory):
trajectoryDiff = TrajectoryUtils()
trajectoryDiff.timestamps = self.timestamps
trajectoryDiff.positions = self.positions - otherTrajectory.positions
trajectoryDiff.velocities = self.velocities - otherTrajectory.velocities
trajectoryDiff.accelerations = self.accelerations - otherTrajectory.accelerations
trajectoryDiff.yaws = self.yaws - otherTrajectory.yaws
trajectoryDiff.omegas = self.omegas - otherTrajectory.omegas
font = {'size': 30}
matplotlib.rc('font', **font)
fig, axs = plt.subplots(3)
# --- PLOT 1 ---
axs[0].plot(trajectoryDiff.timestamps, trajectoryDiff.velocities[:, 0])
axs[0].plot(trajectoryDiff.timestamps, trajectoryDiff.velocities[:, 1])
axs[0].plot(trajectoryDiff.timestamps, trajectoryDiff.velocities[:, 2])
legend0 = axs[0].legend(('x', 'y', 'z'), loc=(
0.02, 0.7), prop={'size': 30})
for legendObj in legend0.legendHandles:
legendObj.set_linewidth(4.2)
axs[0].set(
xlabel='time (s)', ylabel='velocity (m/s)',
title='Velocity diff between desired trajectory and executed trajectory'
)
axs[0].grid()
# --- PLOT 2 ---
axs[1].plot(trajectoryDiff.timestamps, trajectoryDiff.positions[:, 0])
axs[1].plot(trajectoryDiff.timestamps, trajectoryDiff.positions[:, 1])
axs[1].plot(trajectoryDiff.timestamps, trajectoryDiff.positions[:, 2])
legend1 = axs[1].legend(('x', 'y', 'z'), loc=(
0.02, 0.7), prop={'size': 30})
for legendObj in legend1.legendHandles:
legendObj.set_linewidth(4.2)
axs[1].set(
xlabel='time (s)', ylabel='distance (m)',
title='Position diff between desired trajectory and executed trajectory'
)
axs[1].grid()
# --- PLOT 3 ---
axs[2].plot(trajectoryDiff.timestamps, trajectoryDiff.accelerations[:, 0])
axs[2].plot(trajectoryDiff.timestamps, trajectoryDiff.accelerations[:, 1])
axs[2].plot(trajectoryDiff.timestamps, trajectoryDiff.accelerations[:, 2])
legend1 = axs[2].legend(('x', 'y', 'z'), loc=(
0.02, 0.7), prop={'size': 30})
for legendObj in legend1.legendHandles:
legendObj.set_linewidth(4.2)
axs[2].set(
xlabel='time (s)', ylabel='acceleration (m/s²)',
title='Acceleration diff between desired trajectory and executed trajectory'
)
# axs[2].grid()
# --------------
ratio = 30 / 30
width_in_inches = 65
w = cmToInches(width_in_inches)
h = cmToInches(width_in_inches / ratio)
fig.set_size_inches(w, h)
filename = "diffBetweenTrajectoryAndLoggedTrajectory"
fig.savefig(f"plots/{filename}.png", format='png', dpi=72)
font = {'size': 12}
matplotlib.rc('font', **font)
trajectoryDiff.saveTrajectoryToCsv(f"csvs/{filename}.csv")
class Obstacle:
def __init__(self, x, y, radius):
self.x = x
self.y = y
self.radius = radius
class Position:
def __init__(self, x, y):
self.x = x
self.y = y
def euclidianDistanceToCurrentPosition(self, otherPos):
return euclidianDistance(self.x, self.y, otherPos.x, otherPos.y)
def reachedCurrentPosition(self, otherPos, currentPlanningState):
distance = self.euclidianDistanceToCurrentPosition(otherPos)
# print(f"D#{currentPlanningState.depth} | Distance = {round(distance, 2)}m")
return distance < 0.05
def collidedWithObstacle(self, obstacle):
distanceToObstacleCenter = self.euclidianDistanceToCurrentPosition(obstacle)
haveCollided = distanceToObstacleCenter <= obstacle.radius
return haveCollided
class PlanningState:
def __init__(self, parent, depth, position, yaw):
self.parentState = parent
self.depth = depth
self.position = position
self.yaw = yaw
self.tried = {
"left": False,
"straight": False,
"right": False
}
self.child = None
class LatticePath:
def __init__(self, x, y, direction):
self.x = x
self.y = y
self.direction = direction
self.tentacleLength = euclidianDistance(0, 0, x[-1], y[-1])
startPos = np.array([0, 0])
goalPos = np.array([x[-1], y[-1]])
self.hypotenuse = np.linalg.norm(startPos - goalPos)
if direction == "left":
self.angle = np.arccos(x[-1] / self.hypotenuse)
elif direction == "straight":
self.angle = 0
elif direction == "right":
self.angle = - np.arccos(x[-1] / self.hypotenuse)
# print(f"({self.direction[0]}) Angle = {np.rad2deg(self.angle)}°")
def getNewPos(self, currentYaw, currentPos):
endAngle = currentYaw + self.angle
return Position(currentPos.x + self.hypotenuse * np.cos(endAngle), currentPos.y + self.hypotenuse * | np.sin(endAngle) | numpy.sin |
'''
=========================================================================
Author: <NAME>
Company:
Filename:
Last modifed: 06.04.2017 by <NAME>
Description: Functional test
=========================================================================
=========================================================================
'''
import numpy as np
import ext3DLBPpy
print("=============================================")
test = "RD_LBP_P92g_R1"
array = np.array([
    [[165,226,124],[215,96,200],[138,102,175]],
    [[59,241,247],[27,52,111],[209,123,210]],
    [[147,113,7],[18,118,196],[253,90,98]]
], dtype=int)
import gzip
import numpy as np
import math
import time
import pandas as pd
start = time.time()
total_class = 10
cols = 28
rows = 28
total_px = cols * rows
dt = np.dtype(np.uint8).newbyteorder(">")
def loadMNIST(data_file, label_file):
f_ti = open(data_file, "rb")
f_tl = open(label_file, "rb")
_ = f_ti.read(4) # magic_number(4 bytes)
img_num = int.from_bytes(f_ti.read(4), "big")
rows = int.from_bytes(f_ti.read(4), "big")
cols = int.from_bytes(f_ti.read(4), "big")
_ = f_tl.read(8) # magic_number(4 bytes), item number(4 bytes)
img_pixels = np.zeros((img_num, rows * cols), dtype=int)
img_label = np.zeros(img_num, dtype=int)
for n in range(img_num):
pixels = f_ti.read(rows * cols)
img_pixels[n] = np.frombuffer(pixels, dtype=dt)
img_label[n] = int.from_bytes(f_tl.read(1), "big")
f_ti.close()
f_tl.close()
return img_pixels, img_label
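# Intended usage -- the file names below are assumptions; the raw (un-gzipped)
# MNIST IDX files must be present in the working directory:
# train_px, train_lb = loadMNIST("train-images.idx3-ubyte", "train-labels.idx1-ubyte")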
def printNumber(print_str, guess, labels=np.arange(10)):
"""
Create randomly centered, tiled projections to be used in light cones.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
import numpy as np
from yt.funcs import \
mylog
from yt.units.yt_array import \
uconcatenate
from yt.visualization.fixed_resolution import \
FixedResolutionBuffer
from yt.utilities.parallel_tools.parallel_analysis_interface import \
parallel_blocking_call
@parallel_blocking_call
def _light_cone_projection(my_slice, field, pixels, weight_field=None,
save_image=False, field_cuts=None):
"Create a single projection to be added into the light cone stack."
# We are just saving the projection object, so only the projection axis
# needs to be considered since the lateral shifting and tiling occurs after
# the projection object is made.
# Likewise, only the box_depth_fraction needs to be considered.
mylog.info("Making projection at z = %f from %s." % \
(my_slice["redshift"], my_slice["filename"]))
region_center = [0.5 * (my_slice["object"].domain_right_edge[q] +
my_slice["object"].domain_left_edge[q]) \
for q in range(my_slice["object"].dimensionality)]
# 1. The Depth Problem
# Use coordinate field cut in line of sight to cut projection to proper depth.
if field_cuts is None:
these_field_cuts = []
else:
these_field_cuts = field_cuts.copy()
if (my_slice["box_depth_fraction"] < 1):
axis = ("x", "y", "z")[my_slice["projection_axis"]]
depthLeft = \
my_slice["projection_center"][my_slice["projection_axis"]] \
- 0.5 * my_slice["box_depth_fraction"]
depthRight = \
my_slice["projection_center"][my_slice["projection_axis"]] \
+ 0.5 * my_slice["box_depth_fraction"]
if (depthLeft < 0):
cut_mask = (
"((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
" (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
"((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
" (obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
(axis, axis, axis, axis, depthRight,
axis, axis, (depthLeft+1), axis, axis)
elif (depthRight > 1):
cut_mask = (
"((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= 0) & "
"(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= %f)) | "
"((obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
"(obj['index', '%s'] - 0.5*obj['index', 'd%s'] <= 1))") % \
(axis, axis, axis, axis, (depthRight-1),
axis, axis, depthLeft, axis, axis)
else:
cut_mask = (
"(obj['index', '%s'] + 0.5*obj['index', 'd%s'] >= %f) & "
"(obj['index', '%s'] - 0.5*obj['index', '%s'] <= %f)") % \
(axis, axis, depthLeft, axis, axis, depthRight)
these_field_cuts.append(cut_mask)
data_source = my_slice["object"].all_data()
cut_region = data_source.cut_region(these_field_cuts)
# Make projection.
proj = my_slice["object"].proj(field, my_slice["projection_axis"],
weight_field=weight_field, center=region_center,
data_source=cut_region)
proj_field = proj.field[0]
if weight_field is not None:
proj_weight_field = proj.weight_field
# Do a projection of the weight field so we can keep it.
weight_proj = my_slice["object"].proj(
proj_weight_field, my_slice["projection_axis"],
weight_field=None, center=region_center,
data_source=cut_region)
del data_source, cut_region
# 2. The Tile Problem
# Tile projection to specified width.
# Original projection data.
original_px = proj["px"].in_units("code_length").copy()
original_py = proj["py"].in_units("code_length").copy()
original_pdx = proj["pdx"].in_units("code_length").copy()
original_pdy = proj["pdy"].in_units("code_length").copy()
original_field = proj[proj_field].copy()
if weight_field is not None:
original_weight_field = weight_proj[proj_weight_field].copy()
for my_field in ["px", "py", "pdx", "pdy", proj_field]:
proj.field_data[my_field] = [proj.field_data[my_field]]
if weight_field is not None:
weight_proj.field_data[proj_weight_field] = \
[weight_proj.field_data[proj_weight_field]]
# Copy original into offset positions to make tiles.
for x in range(int(np.ceil(my_slice["box_width_fraction"]))):
x = my_slice["object"].quan(x, "code_length")
for y in range(int(np.ceil(my_slice["box_width_fraction"]))):
"""Compute qubit vs toffoli for THC LCU"""
from math import pi
import numpy as np
import itertools
import matplotlib.pyplot as plt
from numpy.lib.scimath import arccos, arcsin # want version that has analytic continuation to cplx
from chemftr.utils import QR, QI
def qubit_vs_toffoli(lam, dE, eps, n, chi, beta, M, algorithm='half', verbose=False):
"""
Args:
lam (float) - the lambda-value for the Hamiltonian
dE (float) - allowable error in phase estimation. usually 0.001
eps (float) - allowable error for synthesis (dE/(10 * lam)) usually
n (int) - number of spin orbitals.
chi (int) - number of bits of precision for state prep
beta (int) - number of bits of precision for rotations
M (int) - THC rank or r_{Thc}
algorithm (str) - 'half', where half of the phasing angles are loaded at a time
'full', where angles loaded from QROM to perform phasing operations are
all loaded at the same time
Note: In 'Even more efficient quantum...' paper (arXiv:2011.03494),
'half' corresponds to Fig 11, while 'full' corresponds to Fig 12.
verbose (bool) - do additional printing of intermediates?
"""
# only valid algorithms accepted
assert algorithm in ['half', 'full']
# (*The number of iterations for the phase estimation.*)
iters = np.ceil(pi * lam / (dE * 2))
# (*The number of bits used for each register.*)
nM = np.ceil(np.log2(M + 1))
# (*This is the number of distinct items of data we need to output, see Eq. (28).*)
d = M * (M + 1) / 2 + n / 2
# (*The number of bits used for the contiguous register.*)
nc=np.ceil(np.log2(d))
# (*The output size is 2*Log[M] for the alt values, χ for the keep value, and 2 for the two sign bits.*)
m=2*nM+2+chi
# QR[L_,M_]:= np.ceil(MinValue[{L/2^k+M*(2^k-1),k>=0},k∈Integers]] (*This gives the minimum cost for a QROM over L values of size M.*)
# QRa[L_,M_]:=ArgMin[{L/2^k+M*(2^k-1),k>=0},k∈Integers] (*Gives the optimal k.*)
# QI[L_]:= np.ceil(MinValue[{L/2^k+2^k,k>=0},k∈Integers]] (*This gives the minimum cost for an inverse QROM over L values.*)
# QIa[L_]:= np.ceil(ArgMin[{L/2^k+2^k,k>=0},k∈Integers]] (*This gives the minimum cost for an inverse QROM over L values.*)
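# A numpy-only sketch of the QROM cost formula quoted above (the actual QR/QI
# implementations are imported from chemftr.utils; this is illustration only):
def _qrom_cost_sketch(L, M):
    ks = np.arange(0, 30)
    costs = L / 2.0 ** ks + M * (2.0 ** ks - 1)
    return int(ks[np.argmin(costs)]), int(np.ceil(np.min(costs)))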
# (*The next block of code finds the optimal number of bits to use for the rotation angle for the amplitude a
# mplification taking into account the probability of failure and the cost of the rotations.*)
oh = np.zeros(20, dtype=float)
for p in range(1, 20 + 1):
cos_term = arccos(np.power(2, nM) / np.sqrt(d) / 2)
# print(cos_term)
v = np.round(np.power(2, p) / (2 * pi) * cos_term)
asin_term = arcsin(np.cos(v*2*pi/np.power(2,p)) * np.sqrt(d) / np.power(2, nM))
sin_term = np.sin(3 * asin_term)**2
oh[p-1] = (20_000 * (1 / sin_term - 1) + 4 * p).real
br= np.argmin(oh) + 1 #(*Here br is the number of bits used in the rotation.*)
# (*Next are the costs for the state preparation.*)
cp1 = 2 * (10 * nM + 2 * br - 9)
# (*There is cost 10*Log[M] for preparing the equal superposition over the input registers. This is the costing from above Eq. (29).*)
cp2 = 2 * (nM ** 2 + nM - 1) # (*This is the cost of computing the contiguous register and inverting it. This is with a sophisticated scheme adding together triplets of bits. This is the cost of step 2 in the list on page 14.*)
cp3 = QR(d, m)[1] + QI(d)[1] # (*This is the cost of the QROM for the state preparation and its inverse.*)
cp4 = 2 * chi # (*The cost for the inequality test.*)
cp5 = 4 * nM # ( *The cost 2*nM for the controlled swaps.*)
cp6 = 2 * nM + 3 # (*Then there is a cost of nM+1 for swapping the μ and ν registers, where the +3 is because we need to control on two registers, and control swap of the spin registers.*)
CPCP = cp1 + cp2 + cp3 + cp4 + cp5 + cp6 # (*The total cost in Eq. (33).*)
# (*Next are the costs for the select operation.*)
cs1 = 2 * n # (*This is the cost of swapping based on the spin register. These costs are from the list on page 15, and this is steps 1 and 7.*)
k1 = 2 ** QI(M + n / 2)[0]
cs2a = M + n / 2 - 2 + np.ceil(M / k1) + np.ceil(n / 2 / k1) + k1
# (*The QROM for the rotation angles the first time. Here M+n/2-2 is the cost for generating them, and the second part is the cost for inverting them with advanced QROM.*)
cs2b = M - 2 + QI(M)[1] # (*The QROM for the rotation angles the second time. Here the cost M-2 is for generating the angles the second time, and QI[M] is for inverting the QROM. Steps 2 and 6.*)
cs3 = 4 * n * (beta - 2) # (*The cost of the rotations steps 3 and 5.*)
cs4 = 1 # (*Cost for extra part in making the Z doubly controlled step 4.*)
CS = cs1 + cs2a + cs2b + cs3 + cs4 # (*The total select cost in Eq. (43).*)
costref = 2 * nM + chi + 3 # (*The cost given slightly above Eq. (44) is 2*nM+5. That is a typo and it should have the aleph (equivalent to χ here) like at the top of the column. Here we have +3 in this line, +1 in the next line and +1 for cs4, to give the same total.*)
cost = CPCP + CS + costref + 1
# (*Next are qubit costs.*)
ac1 = 2 * np.ceil(np.log2(iters + 1)) - 1
ac2 = n
ac3 = 2 * nM
ac47 = 5
ac8 = beta
ac9 = nc
kt = 2 ** QR(d, m)[0]
ac10 = m * kt + np.ceil(np.log2(d / kt))
ac11 = chi # (*This is for the equal superposition state to perform the inequality test with the keep register.*)
ac12 = 1 # (*The qubit to control the swap of the μ and ν registers.*)
aca = ac1 + ac2 + ac3 + ac47 + ac8 + ac9 + ac11 + ac12
ac13 = beta * n / 2 # (*This is the data needed for the rotations.*)
ac14 = beta - 2 # (*These are the ancillas needed for adding into the phase gradient state.*)
acc = ac13 + ac14 + m # (*These are the temporary ancillas in between erasing the first QROM ancillas and inverting that QROM. The +m is the for output of the first QROM.*)
if verbose:
print("Total Toffoli cost ", cost*iters) # (*The total Toffoli cost.*)
print("Ancilla for first QROM ", aca+ac10) # (*This is the ancillas needed up to the point we are computing the first QROM.*)
print("Actual ancilla ... ", np.max([aca+ac10,aca+acc])) # (*This is the actual ancilla cost if we need more ancillas in between.*)
print("Spacetime volume ", np.max([aca+ac10,aca+acc])*cost) # (*Spacetime volume.*)
#TODO: Clean up and re-organize the logic a bit ... this is more or less a direct port from Mathematica
# (*First are the numbers of qubits that must be kept throughout the computation. See page 18.*)
if algorithm == 'half':
ac1 = np.ceil(np.log2(iters + 1)) # (*The qubits used as the control registers for the phase estimation, that must be kept the whole way through. If we used independent controls each time that would increase the Toffoli cost by np.ceil(np.log2iters+1]]-3, while saving np.ceil(np.log2iters+1]]-1 qubits.*)
elif algorithm == 'full':
ac1 = 2 * np.ceil(np.log2(iters + 1)) - 1 # (*The qubits used as the control registers for the phase estimation, that must be kept the whole way through. If we used independent controls each time that would increase the Toffoli cost by np.ceil(np.log2iters+1]]-3, while saving np.ceil(np.log2iters+1]]-1 qubits.*)
ac2 = n # (*The system qubits that must always be included.*)
ac3 = 2 * nM # (*The μ and ν registers, that must be kept because they are control registers that aren't fully erased and must be reflected on.*)
ac4512 = 4 # (*These are the qubits for the spin in the control state as well as the qubit that is rotated for the preparation of the equal superposition state, AND the qubit that is used to control . None of these are fully inversely prepared.*)
ac8 = beta # (*The qubits for the phase gradient state.*)
ac11 = chi # (*This is for the equal superposition state to perform the inequality test with the keep register. It must be kept around and reflected upon.*)
perm = ac1 + ac2 + ac3 + ac4512 + ac8 + ac11 # (*The total number of permanent qubits.*)
# (*In preparing the equal superposition state there are 6 temporary qubits used in the rotation of the ancilla. There are another three that are needed for the temporary results of inequality tests. By far the largest number, however, come from keeping the temporary ancillas from the inequality tests. That should be 3*nM+nN-4. There are an other two qubits in output at the end that will be kept until this step is undone.*)
nN = np.ceil(np.log2(n / 2))
qu1 = perm + 4 * nM - 1 # (*This is the maximum number of qubits used while preparing the equal superposition state.*)
# (*To explain the number of temporary ancillas, we have nM+1 to perform the inequality test on mu and nu with out-of-place addition. We have another nM-2 for the equality test. Then we can do the inequality tests on mu and nu with constants (temporarily) overwriting these variables, and keeping nM-1 qubits on each. Then there are another 2 temporary qubits used for the reflection. That gives 4*nM-1 total.*)
tof1 = 10 * nM + 2 * br - 9 # (*This is the number of Toffolis during this step.*)
perm = perm + 2 # (*This is increasing the running number of permanent ancillas by 2 for the ν=M+1 flag qubit and the success flag qubit.*)
qu2 = perm + nM**2 + nM # (*The number of temporary qubits used in this computation is the the same as the number of Toffolis plus one.*)
tof2 = nM**2 + nM - 1 # (*The Toffoli cost of computing the contiguous register.*)
perm = perm + nc # (*The running number of qubits is increased by the number needed for the contiguous register.*)
if algorithm == 'half':
kt = 16 # (*Here I'm setting the k-value for the QROM by hand instead of choosing the optimal one for Toffolis.*)
elif algorithm == 'full':
kt = 32 # (*Here I'm setting the k-value for the QROM by hand instead of choosing the optimal one for Toffolis.*)
qu3 = perm + m * kt + np.ceil(np.log2(d / kt)) # (*This is the number of qubits needed during the QROM.*)
tof3 = np.ceil(d / kt) + m * (kt - 1) # (*The number of Toffolis for the QROM.*)
perm = perm + m # (*The number of ancillas used increases by the actual output size of the QROM.*)
qu4 = perm + chi # (*The number of ancilla qubits used for the subtraction for the inequality test.
# We can use one of the qubits from the registers that are subtracted as the flag qubit so we don't need an extra flag qubit.*)
tof4 = chi # (*The number of Toffolis needed for the inequality test. The number of permanent ancillas is unchanged.*)
qu5 = perm # (*We don't need any extra ancillas for the controlled swaps.*)
tof5 = 2 * nM # (*We are swapping pairs of registers of size nM*)
qu6 = perm # (*One extra ancilla for the controlled swap of mu and nu because it is controlled on two qubits.*)
tof6 = nM + 1 # (*One more Toffoli for the double controls.*)
qu7 = perm # (*Swapping based on the spin register.*)
tof7 = n / 2
if algorithm == 'half':
qu8 = perm + nM + beta * n / 4 # (*We use these temporary ancillas for the first QROM for the rotation angles.*)
elif algorithm == 'full':
qu8 = perm + nM + beta * n / 2 # (*We use these temporary ancillas for the first QROM for the rotation angles.*)
tof8 = M + n / 2 - 2 # (*The cost of outputting the rotation angles including those for the one-electron part.*)
if algorithm == 'half':
perm = perm + beta * n / 4 # (*We are now need the output rotation angles, though we don't need the temporary qubits from the unary iteration.*)
elif algorithm == 'full':
perm = perm + beta * n / 2 # (*We are now need the output rotation angles, though we don't need the temporary qubits from the unary iteration.*)
qu9 = perm + (beta - 2) # (*We need a few temporary registers for adding into the phase gradient register.*)
if algorithm == 'half':
tof9 = n * (beta - 2) / 2 # (*The cost of the rotations.*)
qu10 = np.array([-j * beta for j in range(int(n / 4))]) + perm + beta - 2 # Table[-j*beta,{j,0,n/4-1}]+perm+(beta-2) # (*Make a list where we keep subtracting the data qubits that can be erased.*)
tof10 = np.array([2 * (beta - 2) for j in range(int(n / 4))]) # Table[2*(beta-2),{j,0,n/4-1}] # (*The cost of the rotations.*)
perm = perm - beta * n / 4 # (*We've erased the data.*)
elif algorithm == 'full':
tof9 = n * (beta - 2) # (*The cost of the rotations.*)
qu10 = np.array([-j * beta for j in range(int(n / 2))]) + perm + beta - 2 # Table[-j*beta,{j,0,n/2-1}]+perm+(beta-2) # (*Make a list where we keep subtracting the data qubits that can be erased.*)
tof10 = np.array([2 * (beta - 2) for j in range(int(n / 2))]) # Table[2*(beta-2),{j,0,n/2-1}] # (*The cost of the rotations.*)
perm = perm - beta * n / 2 # (*We've erased the data.*)
k1 = 2 ** QI(M + n / 2)[0] # (*Find the k for the phase fixup for the erasure of the rotations.*)
qu11 = perm + k1 + np.ceil(np.log2(M / k1)) # (*The temporary qubits used. The data qubits were already erased, so don't change perm.*)
tof11 = np.ceil(M / k1) + np.ceil(n / 2 / k1) + k1
qu12 = perm # (*Swapping based on the spin register.*)
tof12 = n / 2
qu12a = perm
tof12a = 1 # (*Swapping the spin registers.*)
qu13 = perm # (*Swapping based on the spin register.*)
tof13 = n / 2
if algorithm == 'half':
qu14 = perm + nM - 1 + beta * n / 4 # (*We use these temporary ancillas for the second QROM for the rotation angles.*)
perm = perm + beta * n / 4
elif algorithm == 'full':
qu14 = perm + nM - 1 + beta * n / 2 # (*We use these temporary ancillas for the second QROM for the rotation angles.*)
perm = perm + beta * n / 2
tof14 = M - 2
qu15 = perm + (beta - 2) # (*We need a few temporary registers for adding into the phase gradient register.*)
if algorithm == 'half':
tof15 = n * (beta - 2) / 2 # (*The cost of the rotations.*)
elif algorithm == 'full':
tof15 = n * (beta - 2) # (*The cost of the rotations.*)
qu16 = perm # (*Just one Toffoli to do the controlled Z1.*)
tof16 = 1
if algorithm == 'half':
qu17 = np.array([-j * beta for j in range(int(n / 4))]) + perm + beta - 2 # Table[-j*beta,{j,0,n/4-1}]+perm+(beta-2) # (*Make a list where we keep subtracting the data qubits that can be erased.*)
tof17 = np.array([2 * (beta - 2) for j in range(int(n / 4))]) # Table[2*(beta-2),{j,0,n/4-1}] # (*The cost of the rotations.*)
perm = perm - beta * n / 4 # (*We've erased the data.*)
elif algorithm == 'full':
qu17 = np.array([-j * beta for j in range(int(n / 2))]) + perm + beta - 2 # Table[-j*beta,{j,0,n/2-1}]+perm+(beta-2) # (*Make a list where we keep subtracting the data qubits that can be erased.*)
tof17 = np.array([2 * (beta - 2) for j in range(int(n / 2))]) # Table[2*(beta-2),{j,0,n/2-1}] # (*The cost of the rotations.*)
perm = perm - beta * n / 2 # (*We've erased the data.*)
k1 = 2 ** QI(M)[0] # (*Find the k for the phase fixup for the erasure of the rotations.*)
qu18 = perm + k1 + np.ceil(np.log2(M / k1)) # (*The temporary qubits used. The data qubits were already erased, so don't change perm.*)
tof18 = np.ceil(M / k1) + k1
qu19 = perm # (*Swapping based on the spin register.*)
tof19 = n / 2
qu20 = perm + 1 # (*One extra ancilla for the controlled swap of mu and nu because it is controlled on two qubits.*)
tof20 = nM + 1 # (*One extra Toffoli, because we are controlling on two qubits.*)
qu21 = perm # (*We don't need any extra ancillas for the controlled swaps.*)
tof21 = 2 * nM # (*We are swapping pairs of registers of size nM*)
qu22 = perm + chi # (*The number of ancilla qubits used for the subtraction for the inequality test.
# We can use one of the qubits from the registers that are subtracted as the flag qubit so we don't need an extra flag qubit.*)
tof22 = chi # (*The number of Toffolis needed for inverting the inequality test. The number of permanent ancillas is unchanged.*)
perm = perm - m # (*We can erase the data for the QROM for inverting the state preparation, then do the phase fixup.*)
kt=2**QI(d)[0]
qu23 = perm + kt + np.ceil(np.log2(d / kt)) # (*This is the number of qubits needed during the QROM.*)
tof23 = np.ceil(d / kt) + kt # (*The number of Toffolis for the QROM.*)
qu24 = perm - nc + nM**2 + nM # (*The number of temporary qubits used in this computation is the same as the number of Toffolis plus one. We are erasing the contiguous register as we go so can subtract nc.*)
tof24 = nM**2 + nM - 1 # (*The Toffoli cost of computing the contiguous register.*)
perm = perm - nc # (*The contiguous register has now been deleted.*)
qu25 = perm + 4 * nM - 1 # (*This is the maximum number of qubits used while preparing the equal superposition state.*)
tof25 = 10 * nM + 2 * br - 9 # (*This is the number of Toffolis during this step.*)
perm = perm - 2 # (*This is increasing the running number of permanent ancillas by 2 for the ν=M+1 flag qubit and the success flag qubit.*)
if algorithm == 'half':
qu26 = perm + costref + np.ceil(np.log2(iters + 1)) # (*We need some ancillas to perform a reflection on multiple qubits. We are including one more Toffoli to make it controlled.*)
tof26 = costref + np.ceil(np.log2(iters + 1))
elif algorithm == 'full':
qu26 = perm + costref # (*We need some ancillas to perform a reflection on multiple qubits. We are including one more Toffoli to make it controlled.*)
tof26 = costref
# FIXME: are qu27 and tof27 present in the improved algorithm?
qu27 = perm # (*Iterate the control register.*)
tof27 = 1
# Labels
sm = 'small element'
pq = 'preparation QROM'
rq = 'rotation QROM'
ri = 'R$^{\dag}$'
ro = 'R'
iq = 'inverse QROM'
color_dict = {sm: '#435CE8',pq:'#E83935',rq:'#F59236', ri:'#E3D246', ro:'#36B83E',iq:'#E83935'}
if algorithm == 'half':
tgates = np.hstack((np.array([tof1, tof2, tof3, tof4, tof5, tof6, tof7, tof8, tof9, tof8, tof9, tof9, tof8]),
                    tof10,
                    np.array([tof11, tof12, tof12a, tof13, tof14, tof15, tof14, tof15, tof16, tof15, tof14])))
import argparse
from design_search import RobotDesignEnv, make_graph, build_normalized_robot, presimulate, simulate
import mcts
import numpy as np
import os
import pyrobotdesign as rd
import random
import tasks
import time
class CameraTracker(object):
def __init__(self, viewer, sim, robot_idx):
self.viewer = viewer
self.sim = sim
self.robot_idx = robot_idx
self.reset()
def update(self, time_step):
lower = np.zeros(3)
upper = np.zeros(3)
self.sim.get_robot_world_aabb(self.robot_idx, lower, upper)
# Update camera position to track the robot smoothly
target_pos = 0.5 * (lower + upper)
camera_pos = self.viewer.camera_params.position.copy()
camera_pos += 5.0 * time_step * (target_pos - camera_pos)
self.viewer.camera_params.position = camera_pos
def reset(self):
lower = np.zeros(3)
upper = np.zeros(3)
self.sim.get_robot_world_aabb(self.robot_idx, lower, upper)
self.viewer.camera_params.position = 0.5 * (lower + upper)
def run_trajectory(sim, robot_idx, input_sequence, task, step_callback):
step_callback(0)
for j in range(input_sequence.shape[1]):
for k in range(task.interval):
step_idx = j * task.interval + k
sim.set_joint_targets(robot_idx, input_sequence[:,j].reshape(-1, 1))
task.add_noise(sim, step_idx)
sim.step()
step_callback(step_idx + 1)
def view_trajectory(sim, robot_idx, input_sequence, task):
record_step_indices = set()
sim.save_state()
viewer = rd.GLFWViewer()
# Get robot bounds
lower = np.zeros(3)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 13:50:20 2020
@author: joaovitor
"""
import time
import multiprocessing as mp
import numpy as np
from scipy import signal as ss
from matplotlib import pyplot as plt
#import sounddevice as sd
# 1. Variables and algebra
fs = 44100  # sampling rate [Hz]
dt = 1/fs  # time interval between samples [s]
nsamp = 2**9  # total number of samples [-]
tlen = (nsamp - 1) / fs  # signal duration [s]
print(f"{fs=} {dt=}\n{nsamp=} {tlen=}")
b = 0  # number of semitones [-]
c = 343  # speed of sound propagation in air [m/s]
f = 440 * 2**(b/12)  # frequency [Hz]
T = 1/f  # period [s]
l = c / f  # wavelength [m]
print(f"{c=} {f=}\n{T=} {l=}")
omega = 2 * np.pi * f  # angular frequency [rad/s]
k = 2 * np.pi / l  # wavenumber [rad/m]
kl = 1/l
print (f"{omega=} {k=} {kl=}")
nmark = 2 * nsamp  # number of marks in space [-]
dx = 1 / nmark  # spacing between the marks [m]
print(f"{nmark=} {dx=}")
xm = np.linspace(0, 1 - dx, nmark)  # [m] marks in space
ts = np.linspace(0, tlen, nsamp)  # [s] time instants
cosx = np.exp(k * xm * 1j)
cost = np.exp(-omega * ts * 1j)
"""
Explain what is being computed in lines 59 and 60:
cosx is the spatial phasor exp(jkx) sampled at the positions xm, and cost is the temporal phasor exp(-jωt) sampled at the instants ts.
"""
print (f"{xm.shape=} {cosx.shape=}")
print (f"{ts.shape=} {cost.shape=}")
fig1, axs = plt.subplots(2, 1)
xlines = []
xlines.append(*axs[0].plot(xm, np.real(cosx)))
xlines.append(*axs[0].plot(xm, np.imag(cosx)))
xlines.append(axs[0].hlines(np.abs(cosx)/(2**0.5), xm[0], xm[-1]))
tlines = []
tlines.append(*axs[1].plot(ts, np.real(cost)))
tlines.append(*axs[1].plot(ts, np.imag(cost)))
tlines.append(axs[1].hlines(np.abs(cost)/(2**0.5), ts[0], ts[-1]))
axs[0].set_title("Gráfico de Re{exp(jkx)} variando no tempo")
axs[0].set_xlabel("Posição [m]")
axs[0].set_ylabel("Amplitude [-]")
axs[1].set_title("Gráfico de Re{exp(-jωt)} variando no espaço")
axs[1].set_xlabel("Tempo [s]")
axs[1].set_ylabel("Amplitude [-]")
fig1.tight_layout()
fig1.canvas.draw()
fig1.show()
def loop(fig1, axs, xlines, tlines, cosx, cost, nsamp):
from time import time
tstart = time()
for n in range(nsamp):
extn = cosx * cost[n]
exnt = cosx[2*n] * cost
xlines[0].set_ydata(np.real(extn))
xlines[1].set_ydata(np.imag(extn))
tlines[0].set_ydata(np.real(exnt))
tlines[1].set_ydata(np.imag(exnt))
axs[0].draw_artist(axs[0].patch)
[axs[0].draw_artist(xline) for xline in xlines]
axs[1].draw_artist(axs[1].patch)
[axs[1].draw_artist(tline) for tline in tlines]
fig1.canvas.update()
fig1.canvas.flush_events()
tend = time()
return tend - tstart
tloop = loop(fig1, axs, xlines, tlines, cosx, cost, nsamp)
print(f"{tloop=:.6f}")
def rms(x: np.ndarray):
return (x**2).mean(axis=0)**0.5
def dB(x: np.ndarray):
return 20*np.log10(rms(x))
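# Sanity check: a full-scale cosine sampled over whole periods has an rms of
# 1/sqrt(2), i.e. about -3.01 dB.
print(f"{dB(np.cos(2 * np.pi * np.arange(64) / 64)):.2f} dB")  # -> -3.01 dB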
print(f"{rms(np.real(cosx))=:.3f} {dB(np.real(cosx))=:.1f}")
print(f"{rms(np.real(cost))=:.3f} {dB(np.real(cost))=:.1f}")
cos_xt = np.zeros((xm.size, ts.size), dtype='complex64')
print (f"{cos_xt.shape=}")
print(f"começo do cálculo de exp(jkx - jωt)")
tstart = time.time()
for n, x in enumerate(xm):
cos_xt[n, :] = np.exp(1j*k*x - 1j*omega*ts)
import numpy as np
from scipy.interpolate import LinearNDInterpolator, interp1d, griddata
from scipy.spatial import cKDTree
from numpy import matlib
import discretize
from discretize import TensorMesh
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import ticker
import warnings
from ....data import Data
from .. import resistivity as dc
from ....utils import (
closestPoints,
mkvc,
surface2ind_topo,
model_builder,
define_plane_from_points,
)
from ....utils.io_utils import (
read_dcip2d_ubc,
write_dcip2d_ubc,
read_dcip3d_ubc,
write_dcip3d_ubc,
)
from ....utils.plot_utils import plot_1d_layer_model
from ....utils.code_utils import deprecate_method
try:
import plotly.graph_objects as grapho
has_plotly = True
except:
has_plotly = False
DATA_TYPES = {
"apparent resistivity": [
"apparent resistivity",
"appresistivity",
"apparentresistivity",
"apparent-resistivity",
"apparent_resistivity",
"appres",
],
"apparent conductivity": [
"apparent conductivity",
"appconductivity",
"apparentconductivity",
"apparent-conductivity",
"apparent_conductivity",
"appcon",
],
"apparent chargeability": [
"apparent chargeability",
"appchargeability",
"apparentchargeability",
"apparent-chargeability",
"apparent_chargeability",
],
"potential": ["potential", "potentials", "volt", "V", "voltages", "voltage"],
}
SPACE_TYPES = {
"half space": ["half space", "half-space", "half_space", "halfspace", "half"],
"whole space": ["whole space", "whole-space", "whole_space", "wholespace", "whole"],
}
#######################################################################
# SURVEY GEOMETRY
#######################################################################
def electrode_separations(survey_object, electrode_pair="all", **kwargs):
"""
Calculate horizontal separation between specific or all electrodes.
Input:
survey_object : SimPEG.electromagnetics.static.survey.Survey
A DC or IP survey object
electrode_pair : str or list of str
A string or list of strings from the following {'all', 'AB', 'MN', 'AM', 'AN', 'BM', 'BN}
Output:
list of np.ndarray
For each electrode pair specified, the electrode distance is returned
in a list.
"""
if "survey_type" in kwargs:
warnings.warn(
"The survey_type is no longer necessary to calculate electrode separations. "
"Feel free to remove it from the call. This option will be removed in SimPEG 0.16.0",
FutureWarning,
)
if not isinstance(electrode_pair, list):
if electrode_pair.lower() == "all":
electrode_pair = ["AB", "MN", "AM", "AN", "BM", "BN"]
elif isinstance(electrode_pair, str):
electrode_pair = [electrode_pair.upper()]
else:
raise TypeError(
"electrode_pair must be either a string, list of strings, or an "
"ndarray containing the electrode separation distances you would "
"like to calculate not {}".format(type(electrode_pair))
)
elecSepDict = {}
AB = []
MN = []
AM = []
AN = []
BM = []
BN = []
for src in survey_object.source_list:
# pole or dipole source
if isinstance(src.location, list):
a_loc = src.location[0]
b_loc = src.location[1]
else:
a_loc = src.location
b_loc = np.inf * np.ones_like(src.location)
for rx in src.receiver_list:
# pole or dipole receiver
if isinstance(rx.locations, list):
M = rx.locations[0]
N = rx.locations[1]
else:
M = rx.locations
N = -np.inf * np.ones_like(rx.locations)
n_rx = np.shape(M)[0]
A = matlib.repmat(a_loc, n_rx, 1)
B = matlib.repmat(b_loc, n_rx, 1)
# Compute distances
AB.append(np.sqrt(np.sum((A - B) ** 2.0, axis=1)))
import numpy as np
import scipy
import multiprocessing
from pydoc import locate
from copy import deepcopy, copy
from joblib import Parallel, delayed
from bokeh.layouts import gridplot
from statsmodels.stats.weightstats import ttest_ind
from bokeh.models import HoverTool, Slope, Span
from bokeh.plotting import ColumnDataSource, figure
from scipy.stats import ttest_1samp
from sklearn.model_selection import StratifiedKFold
from tqdm import tqdm
from ..utils import binary_metrics
class permutation_test():
def __init__(self, model, params, X, Y, nperm=100, folds=5):
self.model = locate(model.__name__)
self.params = params
self.skf = StratifiedKFold(n_splits=folds)
self.folds = folds
self.X = X
self.Y = Y
self.nperm = nperm
self.n_cores = multiprocessing.cpu_count()
def _calc_original(self):
skf = self.skf
X = self.X
Y = self.Y
model = self.model(**self.params)
trainidx = []
testidx = []
for train, test in skf.split(X, Y):
trainidx.append(train)
testidx.append(test)
# Calculate binary_metrics for stats_full
y_pred_test = model.train(X, Y)
#y_pred_full = model.test(X)
stats_full = binary_metrics(Y, y_pred_test)
# if seed is set, make sure it's none
if 'seed' in self.params:
self.params['seed'] = None
model = self.model(**self.params)
# Calculate binary_metrics for stats_cv
y_pred_cv = [None] * len(Y)
for j in range(len(trainidx)):
X_train = X[trainidx[j], :]
Y_train = Y[trainidx[j]]
X_test = X[testidx[j], :]
model.train(X_train, Y_train)
y_pred = model.test(X_test)
for (idx, val) in zip(testidx[j], y_pred):
y_pred_cv[idx] = val.tolist()
stats_cv = binary_metrics(Y, y_pred_cv)
self.stats_original = [stats_full, stats_cv]
def _calc_perm(self):
stats = Parallel(n_jobs=self.n_cores)(delayed(self._calc_perm_loop)(i) for i in tqdm(range(self.nperm)))
self.stats_perm = stats
def _calc_perm_loop(self, i):
skf = self.skf
X = self.X
Y = self.Y
folds = self.folds
model_i = self.model(**self.params)
# Shuffle
Y_shuff = Y.copy()
np.random.shuffle(Y_shuff)
# Model and calculate full binary_metrics
model_i.train(X, Y_shuff)
y_pred_full = model_i.test(X)
stats_full = binary_metrics(Y_shuff, y_pred_full)
# Get train and test idx using Stratified KFold for Y_shuff
skf_nperm = StratifiedKFold(n_splits=folds)
trainidx_nperm = []
testidx_nperm = []
for train, test in skf_nperm.split(X, Y_shuff):
trainidx_nperm.append(train)
testidx_nperm.append(test)
# Model and calculate cv binary_metrics
y_pred_cv = [None] * len(Y_shuff)
for j in range(len(trainidx_nperm)):
X_train = X[trainidx_nperm[j], :]
Y_train = Y_shuff[trainidx_nperm[j]]
X_test = X[testidx_nperm[j], :]
model_i.train(X_train, Y_train)
y_pred = model_i.test(X_test)
for (idx, val) in zip(testidx_nperm[j], y_pred):
y_pred_cv[idx] = val.tolist()
stats_cv = binary_metrics(Y_shuff, y_pred_cv)
corr = abs(np.corrcoef(Y_shuff, Y)[0, 1])
stats_comb = [stats_full, stats_cv, corr]
return stats_comb
def run(self):
self._calc_original()
self._calc_perm()
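# Example usage sketch (model class, params, X and Y are placeholders):
#   pt = permutation_test(SomeModel, {'n_components': 2}, X, Y, nperm=100, folds=5)
#   pt.run()
#   pt.plot(metric="r2q2")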
def plot(self, metric="r2q2", hide_pval=True, grid_line=False, legend=True):
# Choose metric to plot
metric_title = np.array(["ACCURACY", "AIC", "AUC", "BIC", "F1-SCORE", "PRECISION", "R²", "SENSITIVITY", "SPECIFICITY", "SSE"])
metric_list = np.array(["acc", "aic", "auc", "bic", "f1score", "prec", "r2q2", "sens", "spec", "sse"])
metric_idx = np.where(metric_list == metric)[0][0]
mname = metric_title[metric_idx]
stats = []
stats.append([self.stats_original[0][mname], self.stats_original[1][mname], 1])
for i in self.stats_perm:
stats.append([i[0][mname], i[1][mname], i[2]])
self.stats = stats
if metric == "r2q2":
full_text = "R²"
cv_text = "Q²"
else:
full_text = mname + "full"
cv_text = mname + "cv"
# Split data for plotting (corr, r2, q2)
stats_r2 = []
stats_q2 = []
stats_corr = []
for i in range(len(stats)):
stats_r2.append(stats[i][0])
stats_q2.append(stats[i][1])
stats_corr.append(stats[i][2])
# Calculate gradient, and y-intercept for plot 1
r2gradient = (stats_r2[0] - np.mean(stats_r2[1:])) / (1 - np.mean(stats_corr[1:]))
q2gradient = (stats_q2[0] - np.mean(stats_q2[1:])) / (1 - np.mean(stats_corr[1:]))
"""
Module work provides classes to compute and analyse active work and active work
autocorrelations and correlations with order parameter.
"""
import numpy as np
from collections import OrderedDict
from operator import itemgetter
from coll_dyn_activem.read import Dat
from coll_dyn_activem.maths import Distribution, mean_sterr,\
linspace, logspace, CurveFit
class ActiveWork(Dat):
"""
Compute and analyse active work from simulation data.
(see https://yketa.github.io/DAMTP_MSC_2019_Wiki/#Active%20Brownian%20particles)
"""
def __init__(self, filename, workPart='all', skip=1, corruption=None):
"""
Loads file.
Parameters
----------
filename : string
Name of input data file.
workPart : string
Part of the active work to consider in computations:
* 'all': active work,
* 'force': force part of the active work,
* 'orientation': orientation part of the active work,
* 'noise': noise part of the active work.
(default: 'all')
NOTE: This can be changed at any time by calling self._setWorkPart.
skip : int
Skip the `skip' first computed values of the active work in the
following calculations. (default: 1)
NOTE: This can be changed at any time by setting self.skip.
corruption : str or None
Pass corruption test for given file type (see
coll_dyn_activem.read.Dat). (default: None)
NOTE: if corruption == None, then the file has to pass corruption
tests.
"""
super().__init__(filename, loadWork=True, corruption=corruption) # initialise with super class
self.workDict = { # hash table of active work parts
'all': self.activeWork,
'force': self.activeWorkForce,
'orientation': self.activeWorkOri,
'noise':
self.activeWork - self.activeWorkForce - self.activeWorkOri}
self._setWorkPart(workPart)
self.skip = skip # skip the `skip' first measurements of the active work in the analysis
def nWork(self, n, int_max=None):
"""
Returns normalised rate of active work averaged on packs of size `n' of
consecutive individual active works.
NOTE: Individual active work refers to the normalised rate of active
work on self.dumpPeriod*self.framesWork consecutive frames and
stored as element of self.workArray.
Parameters
----------
n : int
Size of packs on which to average active work.
int_max : int or None
Maximum number of packs consider. (default: None)
NOTE: If int_max == None, then take the maximum number of packs.
int_max cannot exceed the maximum number of nonoverlapping
packs.
Returns
-------
workAveraged : float numpy array
    Array of computed active works.
"""
workAveraged = []
for i in self._time0(n, int_max=int_max):
    workAveraged += [self.workArray[i:i + n].mean()]
return np.array(workAveraged)
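# A numpy-only sketch of the same pack averaging over nonoverlapping packs
# (in nWork above, self._time0 supplies the actual starting indices):
#   w = np.arange(12.0)
#   w[:w.size - w.size % 4].reshape(-1, 4).mean(axis=1)  # one mean per pack of 4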
#!/usr/bin/env python
"""
@author: cdeline
bifacial_radiance.py - module to develop radiance bifacial scenes, including gendaylit and gencumulativesky
7/5/2016 - test script based on G173_journal_height
5/1/2017 - standalone module
Pre-requisites:
This software is written for Python >3.6 leveraging many Anaconda tools (e.g. pandas, numpy, etc)
*RADIANCE software should be installed from https://github.com/NREL/Radiance/releases
*If you want to use gencumulativesky, move 'gencumulativesky.exe' from
'bifacial_radiance\data' into your RADIANCE source directory.
*If using a Windows machine you should download the Jaloxa executables at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml#Download
* Installation of bifacial_radiance from the repo:
1. Clone the repo
2. Navigate to the directory using the command prompt
3. run `pip install -e . `
Overview:
Bifacial_radiance includes several helper functions to make it easier to evaluate
different PV system orientations for rear bifacial irradiance.
Note that this is simply an optical model - identifying available rear irradiance under different conditions.
For a detailed demonstration example, look at the .ipnyb notebook in \docs\
There are two solar resource modes in bifacial_radiance: `gendaylit` uses hour-by-hour solar
resource descriptions using the Perez diffuse tilted plane model.
`gencumulativesky` is an annual average solar resource that combines hourly
Perez skies into one single solar source, and computes an annual average.
bifacial_radiance includes five object-oriented classes:
RadianceObj: top level class to work on radiance objects, keep track of filenames,
sky values, PV module type etc.
GroundObj: details for the ground surface and reflectance
SceneObj: scene information including array configuration (row spacing, clearance or hub height)
MetObj: meteorological data from EPW (energyplus) file.
Future work: include other file support including TMY files
AnalysisObj: Analysis class for plotting and reporting
"""
import logging
logging.basicConfig()
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
import os, datetime
from subprocess import Popen, PIPE # replacement for os.system()
import pandas as pd
import numpy as np
import warnings
#from input import *
# Mutual parameters across all processes
#daydate=sys.argv[1]
global DATA_PATH # path to data files including module.json. Global context
#DATA_PATH = os.path.abspath(pkg_resources.resource_filename('bifacial_radiance', 'data/') )
DATA_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'data'))
def _findme(lst, a): #find string match in a list. script from stackexchange
return [i for i, x in enumerate(lst) if x == a]
def _missingKeyWarning(dictype, missingkey, newvalue): # prints warnings
if type(newvalue) is bool:
valueunit = ''
else:
valueunit = 'm'
print("Warning: {} Dictionary Parameters passed, but {} is missing. ".format(dictype, missingkey))
print("Setting it to default value of {} {} to continue\n".format(newvalue, valueunit))
def _normRGB(r, g, b): #normalize by each color for human vision sensitivity
return r*0.216+g*0.7152+b*0.0722
def _popen(cmd, data_in, data_out=PIPE):
"""
Helper function subprocess.popen replaces os.system
- gives better input/output process control
usage: pass <data_in> to process <cmd> and return results
based on rgbeimage.py (<NAME> 2010)
"""
if type(cmd) == str:
cmd = str(cmd) # gets rid of unicode oddities
shell=True
else:
shell=False
p = Popen(cmd, bufsize=-1, stdin=PIPE, stdout=data_out, stderr=PIPE, shell=shell) #shell=True required for Linux? quick fix, but may be security concern
data, err = p.communicate(data_in)
#if err:
# return 'message: '+err.strip()
#if data:
# return data. in Python3 this is returned as `bytes` and needs to be decoded
if err:
if data:
returntuple = (data.decode('latin1'), 'message: '+err.decode('latin1').strip())
else:
returntuple = (None, 'message: '+err.decode('latin1').strip())
else:
if data:
returntuple = (data.decode('latin1'), None) #Py3 requires decoding
else:
returntuple = (None, None)
return returntuple
def _interactive_load(title=None):
# Tkinter file picker
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring window into foreground
return filedialog.askopenfilename(parent=root, title=title) #initialdir = data_dir
def _interactive_directory(title=None):
# Tkinter directory picker. Now Py3.6 compliant!
import tkinter
from tkinter import filedialog
root = tkinter.Tk()
root.withdraw() #Start interactive file input
root.attributes("-topmost", True) #Bring to front
return filedialog.askdirectory(parent=root, title=title)
def _modDict(originaldict, moddict, relative=False):
'''
Compares keys in originaldict with moddict and updates values of
originaldict to moddict if existing.
Parameters
----------
originaldict : dictionary
Original dictionary calculated, for example frontscan or backscan dictionaries.
    moddict : dictionary
        Modified dictionary, for example modscan['xstart'] = 0 to change position of x.
    relative : Bool
        If passing modscanfront and modscanback to modify dictionaries of positions,
        this sets whether the values passed to be updated are relative or absolute.
        Default is absolute value (relative=False)
Returns
-------
originaldict : dictionary
Updated original dictionary with values from moddict.
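    Example
    -------
    A minimal sketch (dictionaries are illustrative)::
        frontscan = {'xstart': 2.0, 'ystart': 0.0}
        _modDict(frontscan, {'xstart': 0.5})                 # xstart -> 0.5
        _modDict(frontscan, {'xstart': 0.5}, relative=True)  # xstart -> 2.5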
'''
newdict = originaldict.copy()
for key in moddict:
try:
if relative:
newdict[key] = moddict[key] + newdict[key]
else:
newdict[key] = moddict[key]
        except KeyError:
            print("Wrong key '{}' in modified dictionary".format(key))
return newdict
def _heightCasesSwitcher(sceneDict, preferred='hub_height', nonpreferred='clearance_height'):
"""
Parameters
----------
sceneDict : dictionary
Dictionary that might contain more than one way of defining height for
the array: `clearance_height`, `hub_height`, `height`*
* height deprecated from sceneDict. This function helps choose
* which definition to use.
    preferred : str, optional
        When sceneDict has hub_height and clearance_height, or it only has height,
        it will leave only the preferred option. The default is 'hub_height'.
    nonpreferred : str, optional
        When sceneDict has hub_height and clearance_height,
        it will delete this nonpreferred option. The default is 'clearance_height'.
    Returns
    -------
    sceneDict : dictionary
        Dictionary now containing the appropriate definition for system height.
    use_clearanceheight : Bool
        Helper variable to specify if the dictionary has only clearance_height,
        for use inside `makeScene1axis`. Will get deprecated once that internal
        function is streamlined.
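    Example
    -------
    A minimal sketch with a hypothetical sceneDict carrying both height keys::
        sceneDict = {'hub_height': 1.5, 'clearance_height': 0.8, 'tilt': 20}
        sceneDict, use_ch = _heightCasesSwitcher(sceneDict)
        # keeps 'hub_height' (the preferred key), deletes 'clearance_height';
        # use_ch is False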
"""
# TODO: When we update to python 3.9.0, this could be a Switch Cases (Structural Pattern Matching):
heightCases = '_'
if 'height' in sceneDict:
heightCases = heightCases+'height__'
if 'clearance_height' in sceneDict:
heightCases = heightCases+'clearance_height__'
if 'hub_height' in sceneDict:
heightCases = heightCases+'hub_height__'
use_clearanceheight = False
# CASES:
if heightCases == '_height__':
print("sceneDict Warning: 'height' is being deprecated. "+
"Renaming as "+preferred)
sceneDict[preferred]=sceneDict['height']
del sceneDict['height']
elif heightCases == '_clearance_height__':
#print("Using clearance_height.")
use_clearanceheight = True
elif heightCases == '_hub_height__':
#print("Using hub_height.'")
pass
elif heightCases == '_height__clearance_height__':
print("sceneDict Warning: 'clearance_height and 'height' "+
"(deprecated) are being passed. removing 'height' "+
"from sceneDict for this tracking routine")
del sceneDict['height']
use_clearanceheight = True
elif heightCases == '_height__hub_height__':
print("sceneDict Warning: 'height' is being deprecated. Using 'hub_height'")
del sceneDict['height']
elif heightCases == '_height__clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height', 'clearance_height'"+
", and 'height' are being passed. Removing 'height'"+
" (deprecated) and "+ nonpreferred+ ", using "+preferred)
del sceneDict[nonpreferred]
elif heightCases == '_clearance_height__hub_height__':
print("sceneDict Warning: 'hub_height' and 'clearance_height'"+
" are being passed. Using "+preferred+
" and removing "+ nonpreferred)
del sceneDict[nonpreferred]
else:
print ("sceneDict Error! no argument in sceneDict found "+
"for 'hub_height', 'height' nor 'clearance_height'. "+
"Exiting routine.")
return sceneDict, use_clearanceheight
def _is_leap_and_29Feb(s):  # Boolean mask: True for Feb. 29 entries in leap years
return (s.index.year % 4 == 0) & \
((s.index.year % 100 != 0) | (s.index.year % 400 == 0)) & \
(s.index.month == 2) & (s.index.day == 29)
def _subhourlydatatoGencumskyformat(gencumskydata, label='right'):
# Subroutine to resample, pad, remove leap year and get data in the
# 8760 hourly format
# for saving the temporary files for gencumsky in _saveTempTMY and
# _makeTrackerCSV
#Resample to hourly. Gencumsky wants right-labeled data.
gencumskydata = gencumskydata.resample('60T', closed='right', label='right').mean()
if label == 'left': #switch from left to right labeled by adding an hour
gencumskydata.index = gencumskydata.index + pd.to_timedelta('1H')
# Padding
tzinfo = gencumskydata.index.tzinfo
padstart = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0],1,1,1,0 ) ).tz_localize(tzinfo)
padend = pd.to_datetime('%s-%s-%s %s:%s' % (gencumskydata.index.year[0]+1,1,1,0,0) ).tz_localize(tzinfo)
gencumskydata.iloc[0] = 0 # set first datapt to zero to forward fill w zeros
gencumskydata.iloc[-1] = 0 # set last datapt to zero to forward fill w zeros
# check if index exists. I'm sure there is a way to do this backwards.
if any(gencumskydata.index.isin([padstart])):
print("Data starts on Jan. 01")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padstart]))
gencumskydata=pd.concat([gencumskydata,pd.DataFrame(index=[padstart])])
if any(gencumskydata.index.isin([padend])):
print("Data ends on Dec. 31st")
else:
#gencumskydata=gencumskydata.append(pd.DataFrame(index=[padend]))
gencumskydata=pd.concat([gencumskydata, pd.DataFrame(index=[padend])])
gencumskydata.loc[padstart]=0
gencumskydata.loc[padend]=0
gencumskydata=gencumskydata.sort_index()
# Fill empty timestamps with zeros
gencumskydata = gencumskydata.resample('60T').asfreq().fillna(0)
# Mask leap year
leapmask = ~(_is_leap_and_29Feb(gencumskydata))
gencumskydata = gencumskydata[leapmask]
if (gencumskydata.index.year[-1] == gencumskydata.index.year[-2]+1) and len(gencumskydata)>8760:
gencumskydata = gencumskydata[:-1]
return gencumskydata
# end _subhourlydatatoGencumskyformat
class RadianceObj:
"""
The RadianceObj top level class is used to work on radiance objects,
keep track of filenames, sky values, PV module configuration, etc.
Parameters
----------
name : text to append to output files
filelist : list of Radiance files to create oconv
nowstr : current date/time string
path : working directory with Radiance materials and objects
Methods
-------
__init__ : initialize the object
_setPath : change the working directory
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, name=None, path=None, hpc=False):
'''
initialize RadianceObj with path of Radiance materials and objects,
as well as a basename to append to
Parameters
----------
name: string, append temporary and output files with this value
path: location of Radiance materials and objects
hpc: Keeps track if User is running simulation on HPC so some file
reading routines try reading a bit longer and some writing
routines (makeModule) that overwrite themselves are inactivated.
Returns
-------
none
'''
self.metdata = {} # data from epw met file
self.data = {} # data stored at each timestep
self.path = "" # path of working directory
self.name = "" # basename to append
#self.filelist = [] # list of files to include in the oconv
self.materialfiles = [] # material files for oconv
self.skyfiles = [] # skyfiles for oconv
self.radfiles = [] # scene rad files for oconv
self.octfile = [] #octfile name for analysis
self.Wm2Front = 0 # cumulative tabulation of front W/m2
self.Wm2Back = 0 # cumulative tabulation of rear W/m2
self.backRatio = 0 # ratio of rear / front Wm2
self.nMods = None # number of modules per row
self.nRows = None # number of rows per scene
self.hpc = hpc # HPC simulation is being run. Some read/write functions are modified
now = datetime.datetime.now()
self.nowstr = str(now.date())+'_'+str(now.hour)+str(now.minute)+str(now.second)
# DEFAULTS
if name is None:
self.name = self.nowstr # set default filename for output files
else:
self.name = name
self.basename = name # add backwards compatibility for prior versions
#self.__name__ = self.name #optional info
#self.__str__ = self.__name__ #optional info
if path is None:
self._setPath(os.getcwd())
else:
self._setPath(path)
# load files in the /materials/ directory
self.materialfiles = self.returnMaterialFiles('materials')
def _setPath(self, path):
"""
setPath - move path and working directory
"""
self.path = os.path.abspath(path)
print('path = '+ path)
try:
os.chdir(self.path)
except OSError as exc:
            LOGGER.error("Path doesn't exist: %s" % (path))
LOGGER.exception(exc)
raise(exc)
# check for path in the new Radiance directory:
def _checkPath(path): # create the file structure if it doesn't exist
if not os.path.exists(path):
os.makedirs(path)
print('Making path: '+path)
_checkPath('images'); _checkPath('objects')
_checkPath('results'); _checkPath('skies'); _checkPath('EPWs')
# if materials directory doesn't exist, populate it with ground.rad
# figure out where pip installed support files.
from shutil import copy2
if not os.path.exists('materials'): #copy ground.rad to /materials
os.makedirs('materials')
print('Making path: materials')
copy2(os.path.join(DATA_PATH, 'ground.rad'), 'materials')
# if views directory doesn't exist, create it with two default views - side.vp and front.vp
if not os.path.exists('views'):
os.makedirs('views')
with open(os.path.join('views', 'side.vp'), 'w') as f:
f.write('rvu -vtv -vp -10 1.5 3 -vd 1.581 0 -0.519234 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
with open(os.path.join('views', 'front.vp'), 'w') as f:
f.write('rvu -vtv -vp 0 -3 5 -vd 0 0.894427 -0.894427 '+
'-vu 0 0 1 -vh 45 -vv 45 -vo 0 -va 0 -vs 0 -vl 0')
def getfilelist(self):
"""
Return concat of matfiles, radfiles and skyfiles
"""
return self.materialfiles + self.skyfiles + self.radfiles
def save(self, savefile=None):
"""
Pickle the radiance object for further use.
Very basic operation - not much use right now.
Parameters
----------
savefile : str
Optional savefile name, with .pickle extension.
Otherwise default to save.pickle
"""
import pickle
if savefile is None:
savefile = 'save.pickle'
with open(savefile, 'wb') as f:
pickle.dump(self, f)
print('Saved to file {}'.format(savefile))
#def setHPC(self, hpc=True):
# self.hpc = hpc
def addMaterial(self, material, Rrefl, Grefl, Brefl, materialtype='plastic',
specularity=0, roughness=0, material_file=None, comment=None, rewrite=True):
"""
Function to add a material in Radiance format.
Parameters
----------
        material : str
            Name of the material to add, e.g. 'grass'.
        Rrefl : str
            Reflectivity for first wavelength, or 'R' bin.
        Grefl : str
            Reflectivity for second wavelength, or 'G' bin.
        Brefl : str
            Reflectivity for third wavelength, or 'B' bin.
        materialtype : str, optional
            Type of material. The default is 'plastic'. Others can be mirror,
            trans, etc. See RADIANCE documentation.
        specularity : str, optional
            Ratio of reflection that is specular and not diffuse. The default is 0.
        roughness : str, optional
            This is the microscopic surface roughness: the more jagged the
            facets are, the rougher it is and more blurry reflections will appear.
        material_file : str, optional
            Material file to write to. The default is None, which writes to
            ``materials/ground.rad``.
        comment : str, optional
            Optional comment line written above the material definition.
            The default is None.
        rewrite : str, optional
            Whether to overwrite the material if it already exists in the
            material file. The default is True.
Returns
-------
None. Just adds the material to the material_file specified or the
default in ``materials\ground.rad``.
References:
See examples of documentation for more materialtype details.
http://www.jaloxa.eu/resources/radiance/documentation/docs/radiance_tutorial.pdf page 10
Also, you can use https://www.jaloxa.eu/resources/radiance/colour_picker.shtml
to have a sense of how the material would look with the RGB values as
well as specularity and roughness.
To understand more on reflectivity, specularity and roughness values
https://thinkmoult.com/radiance-specularity-and-roughness-value-examples.html
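        Example
        -------
        A minimal sketch (material name and RGB reflectivities are
        illustrative; assumes a RadianceObj named ``demo``)::
            demo.addMaterial(material='grass', Rrefl=0.18, Grefl=0.25,
                             Brefl=0.13, comment='green grass')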
"""
if material_file is None:
material_file = 'ground.rad'
matfile = os.path.join('materials', material_file)
with open(matfile, 'r') as fp:
buffer = fp.readlines()
# search buffer for material matching requested addition
found = False
        for i in buffer:
            if materialtype in i and material in i:
                loc = buffer.index(i)
                found = True
                break
if found:
if rewrite:
print('Material exists, overwriting...\n')
if comment is None:
pre = loc - 1
else:
pre = loc - 2
# commit buffer without material match
with open(matfile, 'w') as fp:
for i in buffer[0:pre]:
fp.write(i)
for i in buffer[loc+4:]:
fp.write(i)
if (found and rewrite) or (not found):
# append -- This will create the file if it doesn't exist
file_object = open(matfile, 'a')
file_object.write("\n\n")
if comment is not None:
file_object.write("#{}".format(comment))
file_object.write("\nvoid {} {}".format(materialtype, material))
if materialtype == 'glass':
file_object.write("\n0\n0\n3 {} {} {}".format(Rrefl, Grefl, Brefl))
else:
file_object.write("\n0\n0\n5 {} {} {} {} {}".format(Rrefl, Grefl, Brefl, specularity, roughness))
file_object.close()
print('Added material {} to file {}'.format(material, material_file))
if (found and not rewrite):
print('Material already exists\n')
def exportTrackerDict(self, trackerdict=None,
savefile=None, reindex=None):
"""
Use :py:func:`~bifacial_radiance.load._exportTrackerDict` to save a
TrackerDict output as a csv file.
Parameters
----------
trackerdict
The tracker dictionary to save
savefile : str
path to .csv save file location
reindex : bool
True saves the trackerdict in TMY format, including rows for hours
where there is no sun/irradiance results (empty)
"""
import bifacial_radiance.load
if trackerdict is None:
trackerdict = self.trackerdict
if savefile is None:
savefile = _interactive_load(title='Select a .csv file to save to')
if reindex is None:
if self.cumulativesky is True:
# don't re-index for cumulativesky,
# which has angles for index
reindex = False
else:
reindex = True
if self.cumulativesky is True and reindex is True:
# don't re-index for cumulativesky,
# which has angles for index
print ("\n Warning: For cumulativesky simulations, exporting the "
"TrackerDict requires reindex = False. Setting reindex = "
"False and proceeding")
reindex = False
bifacial_radiance.load._exportTrackerDict(trackerdict,
savefile,
reindex)
def loadtrackerdict(self, trackerdict=None, fileprefix=None):
"""
Use :py:class:`bifacial_radiance.load._loadtrackerdict`
to browse the results directory and load back any results saved in there.
Parameters
----------
trackerdict
fileprefix : str
"""
from bifacial_radiance.load import loadTrackerDict
if trackerdict is None:
trackerdict = self.trackerdict
(trackerdict, totaldict) = loadTrackerDict(trackerdict, fileprefix)
self.Wm2Front = totaldict['Wm2Front']
self.Wm2Back = totaldict['Wm2Back']
def returnOctFiles(self):
"""
Return files in the root directory with `.oct` extension
Returns
-------
oct_files : list
List of .oct files
"""
oct_files = [f for f in os.listdir(self.path) if f.endswith('.oct')]
#self.oct_files = oct_files
return oct_files
def returnMaterialFiles(self, material_path=None):
"""
Return files in the Materials directory with .rad extension
appends materials files to the oconv file list
Parameters
----------
material_path : str
Optional parameter to point to a specific materials directory.
otherwise /materials/ is default
Returns
-------
material_files : list
List of .rad files
"""
if material_path is None:
material_path = 'materials'
material_files = [f for f in os.listdir(os.path.join(self.path,
material_path)) if f.endswith('.rad')]
materialfilelist = [os.path.join(material_path, f) for f in material_files]
self.materialfiles = materialfilelist
return materialfilelist
def setGround(self, material=None, material_file=None):
"""
Use GroundObj constructor class and return a ground object
Parameters
------------
material : numeric or str
If number between 0 and 1 is passed, albedo input is assumed and assigned.
If string is passed with the name of the material desired. e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
self.ground : tuple
self.ground.normval : numeric
Normalized color value
self.ground.ReflAvg : numeric
Average reflectance
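        Example
        -------
        A minimal sketch (albedo value is illustrative; assumes a RadianceObj
        named ``demo``)::
            demo.setGround(0.62)        # constant albedo
            demo.setGround('litesoil')  # material looked up in ground.rad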
"""
if material is None:
try:
if self.metdata.albedo is not None:
material = self.metdata.albedo
print(" Assigned Albedo from metdata.albedo")
except:
pass
self.ground = GroundObj(material, material_file)
def getEPW(self, lat=None, lon=None, GetAll=False):
"""
Subroutine to download nearest epw files to latitude and longitude provided,
into the directory \EPWs\
based on github/aahoo.
.. warning::
            verify=false is required to operate within NREL's network.
            To avoid annoying warnings, InsecureRequestWarning is disabled.
            Currently this function is not working within NREL's network. Annoying!
Parameters
----------
        lat : decimal
            Latitude value used to find closest EPW file.
lon : decimal
Longitude value to find closest EPW file.
GetAll : boolean
Download all available files. Note that no epw file will be loaded into memory
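        Example
        -------
        A minimal sketch (coordinates are illustrative; assumes a RadianceObj
        named ``demo``)::
            epwfile = demo.getEPW(lat=37.5, lon=-77.6)  # saved into /EPWs/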
"""
import requests, re
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
hdr = {'User-Agent' : "Magic Browser",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
}
path_to_save = 'EPWs' # create a directory and write the name of directory here
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
def _returnEPWnames():
''' return a dataframe with the name, lat, lon, url of available files'''
r = requests.get('https://github.com/NREL/EnergyPlus/raw/develop/weather/master.geojson', verify=False)
data = r.json() #metadata for available files
#download lat/lon and url details for each .epw file into a dataframe
df = pd.DataFrame({'url':[], 'lat':[], 'lon':[], 'name':[]})
for location in data['features']:
match = re.search(r'href=[\'"]?([^\'" >]+)', location['properties']['epw'])
if match:
url = match.group(1)
name = url[url.rfind('/') + 1:]
lontemp = location['geometry']['coordinates'][0]
lattemp = location['geometry']['coordinates'][1]
dftemp = pd.DataFrame({'url':[url], 'lat':[lattemp], 'lon':[lontemp], 'name':[name]})
#df = df.append(dftemp, ignore_index=True)
df = pd.concat([df, dftemp], ignore_index=True)
return df
def _findClosestEPW(lat, lon, df):
#locate the record with the nearest lat/lon
errorvec = np.sqrt(np.square(df.lat - lat) + np.square(df.lon - lon))
index = errorvec.idxmin()
url = df['url'][index]
name = df['name'][index]
return url, name
def _downloadEPWfile(url, path_to_save, name):
r = requests.get(url, verify=False, headers=hdr)
if r.ok:
filename = os.path.join(path_to_save, name)
# py2 and 3 compatible: binary write, encode text first
with open(filename, 'wb') as f:
f.write(r.text.encode('ascii', 'ignore'))
print(' ... OK!')
else:
print(' connection error status code: %s' %(r.status_code))
r.raise_for_status()
# Get the list of EPW filenames and lat/lon
df = _returnEPWnames()
# find the closest EPW file to the given lat/lon
if (lat is not None) & (lon is not None) & (GetAll is False):
url, name = _findClosestEPW(lat, lon, df)
# download the EPW file to the local drive.
print('Getting weather file: ' + name)
_downloadEPWfile(url, path_to_save, name)
self.epwfile = os.path.join('EPWs', name)
elif GetAll is True:
if input('Downloading ALL EPW files available. OK? [y/n]') == 'y':
# get all of the EPW files
for index, row in df.iterrows():
print('Getting weather file: ' + row['name'])
_downloadEPWfile(row['url'], path_to_save, row['name'])
self.epwfile = None
else:
print('Nothing returned. Proper usage: epwfile = getEPW(lat,lon)')
self.epwfile = None
return self.epwfile
def readWeatherFile(self, weatherFile=None, starttime=None,
endtime=None, label=None, source=None,
coerce_year=None, tz_convert_val=None):
"""
Read either a EPW or a TMY file, calls the functions
:py:class:`~bifacial_radiance.readTMY` or
:py:class:`~bifacial_radiance.readEPW`
        according to the weather file extension.
Parameters
----------
weatherFile : str
File containing the weather information. EPW, TMY or solargis accepted.
starttime : str
Limited start time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
endtime : str
Limited end time option in 'YYYY-mm-dd_HHMM' or 'mm_dd_HH' format
daydate : str DEPRECATED
For single day in 'MM/DD' or MM_DD format. Now use starttime and
endtime set to the same date.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
source : str
            To help identify different types of .csv files. If None, a
            TMY3-style formatted file is assumed. Current options: 'TMY3',
            'solargis', 'EPW'
coerce_year : int
Year to coerce weather data to in YYYY format, ie 2021.
If more than one year of data in the weather file, year is NOT coerced.
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
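        Example
        -------
        A minimal sketch (date limits are illustrative; assumes a RadianceObj
        named ``demo`` and the ``epwfile`` returned by getEPW())::
            metdata = demo.readWeatherFile(weatherFile=epwfile,
                                           starttime='2021-06-01_0100',
                                           endtime='2021-06-30_2300',
                                           coerce_year=2021)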
"""
#from datetime import datetime
import warnings
if weatherFile is None:
if hasattr(self,'epwfile'):
weatherFile = self.epwfile
else:
try:
weatherFile = _interactive_load('Select EPW or TMY3 climate file')
except:
                raise Exception('Interactive load failed. Tkinter not supported '+
                                'on this system. Try installing X-Quartz and reloading')
if coerce_year is not None:
coerce_year = int(coerce_year)
if str(coerce_year).__len__() != 4:
warnings.warn('Incorrect coerce_year. Setting to None')
coerce_year = None
def _parseTimes(t, hour, coerce_year):
'''
parse time input t which could be string mm_dd_HH or YYYY-mm-dd_HHMM
or datetime.datetime object. Return pd.datetime object. Define
hour as hour input if not passed directly.
'''
import re
if type(t) == str:
try:
tsplit = re.split('-|_| ', t)
#mm_dd format
if tsplit.__len__() == 2 and t.__len__() == 5:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
tsplit.append(str(hour).rjust(2,'0')+'00')
#mm_dd_hh or YYYY_mm_dd format
elif tsplit.__len__() == 3 :
if tsplit[0].__len__() == 2:
if coerce_year is None:
coerce_year = 2021 #default year.
tsplit.insert(0,str(coerce_year))
elif tsplit[0].__len__() == 4:
tsplit.append(str(hour).rjust(2,'0')+'00')
#YYYY-mm-dd_HHMM format
if tsplit.__len__() == 4 and tsplit[0].__len__() == 4:
t_out = pd.to_datetime(''.join(tsplit).ljust(12,'0') )
else:
raise Exception(f'incorrect time string passed {t}.'
'Valid options: mm_dd, mm_dd_HH, '
'mm_dd_HHMM, YYYY-mm-dd_HHMM')
except Exception as e:
# Error for incorrect string passed:
raise(e)
else: #datetime or timestamp
try:
t_out = pd.to_datetime(t)
except pd.errors.ParserError:
print('incorrect time object passed. Valid options: '
'string or datetime.datetime or pd.timeIndex. You '
f'passed {type(t)}.')
return t_out, coerce_year
# end _parseTimes
def _tz_convert(metdata, metadata, tz_convert_val):
"""
            convert metdata to a different local timezone. Particularly for
SolarGIS weather files which are returned in UTC by default.
----------
tz_convert_val : int
Convert timezone to this fixed value, following ISO standard
(negative values indicating West of UTC.)
Returns: metdata, metadata
"""
import pytz
if (type(tz_convert_val) == int) | (type(tz_convert_val) == float):
metadata['TZ'] = tz_convert_val
metdata = metdata.tz_convert(pytz.FixedOffset(tz_convert_val*60))
return metdata, metadata
# end _tz_convert
if source is None:
if weatherFile[-3:].lower() == 'epw':
source = 'EPW'
else:
                print('Warning: CSV file passed for input. Assuming it is '+
                      'TMY3-style format')
                source = 'TMY3'
            if label is None:
                label = 'right' # EPW and TMY are by default right-labeled.
if source.lower() == 'solargis':
if label is None:
label = 'center'
metdata, metadata = self._readSOLARGIS(weatherFile, label=label)
if source.lower() =='epw':
metdata, metadata = self._readEPW(weatherFile, label=label)
if source.lower() =='tmy3':
metdata, metadata = self._readTMY(weatherFile, label=label)
metdata, metadata = _tz_convert(metdata, metadata, tz_convert_val)
tzinfo = metdata.index.tzinfo
tempMetDatatitle = 'metdata_temp.csv'
# Parse the start and endtime strings.
if starttime is not None:
starttime, coerce_year = _parseTimes(starttime, 1, coerce_year)
starttime = starttime.tz_localize(tzinfo)
if endtime is not None:
endtime, coerce_year = _parseTimes(endtime, 23, coerce_year)
endtime = endtime.tz_localize(tzinfo)
'''
#TODO: do we really need this check?
if coerce_year is not None and starttime is not None:
if coerce_year != starttime.year or coerce_year != endtime.year:
print("Warning: Coerce year does not match requested sampled "+
"date(s)'s years. Setting Coerce year to None.")
coerce_year = None
'''
tmydata_trunc = self._saveTempTMY(metdata, filename=tempMetDatatitle,
starttime=starttime, endtime=endtime,
coerce_year=coerce_year,
label=label)
if tmydata_trunc.__len__() > 0:
self.metdata = MetObj(tmydata_trunc, metadata, label = label)
else:
self.metdata = None
raise Exception('Weather file returned zero points for the '
'starttime / endtime provided')
return self.metdata
def _saveTempTMY(self, tmydata, filename=None, starttime=None, endtime=None,
coerce_year=None, label=None):
'''
private function to save part or all of tmydata into /EPWs/ for use
in gencumsky -G mode and return truncated tmydata. Gencumsky 8760
starts with Jan 1, 1AM and ends Dec 31, 2400
starttime: tz-localized pd.TimeIndex
endtime: tz-localized pd.TimeIndex
returns: tmydata_truncated : subset of tmydata based on start & end
'''
if filename is None:
filename = 'temp.csv'
gencumskydata = None
gencumdict = None
if len(tmydata) == 8760:
print("8760 line in WeatherFile. Assuming this is a standard hourly"+
" WeatherFile for the year for purposes of saving Gencumulativesky"+
" temporary weather files in EPW folder.")
if coerce_year is None and starttime is not None:
coerce_year = starttime.year
# SILVANA: If user doesn't pass starttime, and doesn't select
# coerce_year, then do we really need to coerce it?
elif coerce_year is None:
coerce_year = 2021
print(f"Coercing year to {coerce_year}")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# Correcting last index to next year.
tmydata.index.values[-1] = tmydata.index[-1] + pd.DateOffset(year=(coerce_year+1))
# FilterDates
filterdates = None
            if starttime is not None and endtime is not None:
                filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
else:
if len(tmydata.index.year.unique()) == 1:
if coerce_year:
# TODO: check why subhourly data still has 0 entries on the next day on _readTMY3
                    # in the meantime, let's make Silvana's life easy by just deleting 0 entries
                    tmydata = tmydata[~(tmydata.index.hour == 0)]
print(f"Coercing year to {coerce_year}")
# TODO: this coercing shows a python warning. Turn it off or find another method? bleh.
tmydata.index.values[:] = tmydata.index[:] + pd.DateOffset(year=(coerce_year))
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata[~filterdates] = 0
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
if coerce_year:
print("More than 1 year of data identified. Can't do coercing")
# Check if years are consecutive
l = list(tmydata.index.year.unique())
if l != list(range(min(l), max(l)+1)):
print("Years are not consecutive. Won't be able to use Gencumsky"+
" because who knows what's going on with this data.")
else:
print("Years are consecutive. For Gencumsky, make sure to select"+
" which yearly temporary weather file you want to use"+
" else they will all get accumulated to same hour/day")
# FilterDates
filterdates = None
if starttime is not None and endtime is not None:
filterdates = (tmydata.index >= starttime) & (tmydata.index <= endtime)
else:
if starttime is not None:
filterdates = (tmydata.index >= starttime)
if endtime is not None:
filterdates = (tmydata.index <= endtime)
if filterdates is not None:
print("Filtering dates")
tmydata = tmydata[filterdates] # Reducing years potentially
                # Checking if filtering reduced to just 1 year to do usual saving.
if len(tmydata.index.year.unique()) == 1:
gencumskydata = tmydata.copy()
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
else:
gencumdict = [g for n, g in tmydata.groupby(pd.Grouper(freq='Y'))]
for ii in range(0, len(gencumdict)):
gencumskydata = gencumdict[ii]
gencumskydata = _subhourlydatatoGencumskyformat(gencumskydata,
label=label)
gencumdict[ii] = gencumskydata
gencumskydata = None # clearing so that the dictionary style can be activated.
# Let's save files in EPWs folder for Gencumsky
if gencumskydata is not None:
csvfile = os.path.join('EPWs', filename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile = csvfile
if gencumdict is not None:
self.gencumsky_metfile = []
for ii in range (0, len(gencumdict)):
gencumskydata = gencumdict[ii]
newfilename = filename.split('.')[0]+'_year_'+str(ii)+'.csv'
csvfile = os.path.join('EPWs', newfilename)
print('Saving file {}, # points: {}'.format(csvfile, gencumskydata.__len__()))
gencumskydata.to_csv(csvfile, index=False, header=False, sep=' ', columns=['GHI','DHI'])
self.gencumsky_metfile.append(csvfile)
return tmydata
def _readTMY(self, tmyfile=None, label = 'right', coerce_year=None):
'''
        use pvlib to read in a tmy3 file.
        Note: pvlib 0.7 does not currently support sub-hourly files. Until
        then, use _convertTMYdate() to create the index
Parameters
------------
tmyfile : str
Filename of tmy3 to be read with pvlib.tmy.readtmy3
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce to. Default is 2021.
Returns
-------
metdata - MetObj collected from TMY3 file
'''
def _convertTMYdate(data, meta):
''' requires pvlib 0.8, updated to handle subhourly timestamps '''
# get the date column as a pd.Series of numpy datetime64
data_ymd = pd.to_datetime(data['Date (MM/DD/YYYY)'])
            # shift the time column so that midnight is 00:00 instead of 24:00
shifted_hour = data['Time (HH:MM)'].str[:2].astype(int) % 24
minute = data['Time (HH:MM)'].str[3:].astype(int)
            # shift the dates at midnight so they correspond to the next day
data_ymd[shifted_hour == 0] += datetime.timedelta(days=1)
# NOTE: as of pandas>=0.24 the pd.Series.array has a month attribute, but
# in pandas-0.18.1, only DatetimeIndex has month, but indices are immutable
# so we need to continue to work with the panda series of dates `data_ymd`
data_index = pd.DatetimeIndex(data_ymd)
# use indices to check for a leap day and advance it to March 1st
leapday = (data_index.month == 2) & (data_index.day == 29)
data_ymd[leapday] += datetime.timedelta(days=1)
# shifted_hour is a pd.Series, so use pd.to_timedelta to get a pd.Series of
# timedeltas
# NOTE: as of pvlib-0.6.3, min req is pandas-0.18.1, so pd.to_timedelta
# unit must be in (D,h,m,s,ms,us,ns), but pandas>=0.24 allows unit='hour'
data.index = (data_ymd + pd.to_timedelta(shifted_hour, unit='h') +
pd.to_timedelta(minute, unit='min') )
data = data.tz_localize(int(meta['TZ'] * 3600))
return data
import pvlib
#(tmydata, metadata) = pvlib.tmy.readtmy3(filename=tmyfile) #pvlib<=0.6
(tmydata, metadata) = pvlib.iotools.tmy.read_tmy3(filename=tmyfile,
coerce_year=coerce_year)
try:
tmydata = _convertTMYdate(tmydata, metadata)
except KeyError:
print('PVLib >= 0.8.0 is required for sub-hourly data input')
return tmydata, metadata
def _readEPW(self, epwfile=None, label = 'right', coerce_year=None):
"""
Uses readepw from pvlib>0.6.1 but un-do -1hr offset and
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Parameters
------------
epwfile : str
Direction and filename of the epwfile. If None, opens an interactive
loading window.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval, for purposes of calculating
sunposition. For example, TMY3 data is right-labeled, so 11 AM data
represents data from 10 to 11, and sun position is calculated
at 10:30 AM. Currently SAM and PVSyst use left-labeled interval
data and NSRDB uses centered.
coerce_year : int
Year to coerce data to.
"""
import pvlib
#import re
'''
NOTE: In PVLib > 0.6.1 the new epw.read_epw() function reads in time
with a default -1 hour offset. This is reflected in our existing
workflow.
'''
#(tmydata, metadata) = readepw(epwfile) #
(tmydata, metadata) = pvlib.iotools.epw.read_epw(epwfile,
coerce_year=coerce_year) #pvlib>0.6.1
#pvlib uses -1hr offset that needs to be un-done. Why did they do this?
tmydata.index = tmydata.index+pd.Timedelta(hours=1)
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
tmydata.rename(columns={'dni':'DNI',
'dhi':'DHI',
'temp_air':'DryBulb',
'wind_speed':'Wspd',
'ghi':'GHI',
'albedo':'Alb'
}, inplace=True)
return tmydata, metadata
def _readSOLARGIS(self, filename=None, label='center'):
"""
Read solarGIS data file which is timestamped in UTC.
rename columns to match TMY3: DNI, DHI, GHI, DryBulb, Wspd
Timezone is always returned as UTC. Use tz_convert in readWeatherFile
to manually convert to local time
Parameters
------------
filename : str
filename of the solarGIS file.
label : str
'left', 'right', or 'center'. For data that is averaged, defines if
the timestamp refers to the left edge, the right edge, or the
center of the averaging interval. SolarGis default style is center,
unless user requests a right label.
"""
# file format: anything with # preceding is in the header
header = []; lat = None; lon = None; elev = None; name = None
with open(filename, 'r') as result:
for line in result:
if line.startswith('#'):
header.append(line)
if line.startswith('#Latitude:'):
lat = line[11:]
if line.startswith('#Longitude:'):
lon = line[12:]
if line.startswith('#Elevation:'):
elev = line[12:17]
if line.startswith('#Site name:'):
name = line[12:-1]
else:
break
metadata = {'latitude':float(lat),
'longitude':float(lon),
'altitude':float(elev),
'Name':name,
'TZ':0.0}
# read in remainder of data
data = pd.read_csv(filename,skiprows=header.__len__(), delimiter=';')
# rename different field parameters to match output from
# pvlib.tmy.readtmy: DNI, DHI, DryBulb, Wspd
data.rename(columns={'DIF':'DHI',
'TEMP':'DryBulb',
'WS':'Wspd',
}, inplace=True)
        # Generate index from Date (DD.MM.YYYY) and Time
data.index = pd.to_datetime(data.Date + ' ' + data.Time,
dayfirst=True, utc=True,
infer_datetime_format = True)
return data, metadata
def getSingleTimestampTrackerAngle(self, metdata, timeindex, gcr=None,
azimuth=180, axis_tilt=0,
limit_angle=45, backtrack=True):
"""
Helper function to calculate a tracker's angle for use with the
fixed tilt routines of bifacial_radiance. It calculates tracker angle for
sun position at the timeindex passed (no left or right time offset,
label = 'center')
Parameters
----------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
timeindex : int
Index between 0 to 8760 indicating hour to simulate.
        gcr : float
            Ground coverage ratio used for backtracking calculation. Default 1.0/3.0
azimuth : float or int
Orientation axis of tracker torque tube. Default North-South (180 deg)
        axis_tilt : float or int
            Default 0. Axis tilt -- not implemented in sensor locations, so
            changing it has no effect in this release.
limit_angle : float or int
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
backtrack : boolean
Whether backtracking is enabled (default = True)
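        Example
        -------
        A minimal sketch (timeindex and gcr are illustrative; assumes metdata
        was loaded on ``demo`` with readWeatherFile())::
            theta = demo.getSingleTimestampTrackerAngle(demo.metdata,
                                                        timeindex=37, gcr=0.33)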
"""
'''
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
timestamp = metdata.datetime[timeindex]
'''
import pvlib
solpos = metdata.solpos.iloc[timeindex]
sunzen = float(solpos.apparent_zenith)
sunaz = float(solpos.azimuth) # not substracting the 180
trackingdata = pvlib.tracking.singleaxis(sunzen, sunaz,
axis_tilt, azimuth,
limit_angle, backtrack, gcr)
tracker_theta = float(np.round(trackingdata['tracker_theta'],2))
tracker_theta = tracker_theta*-1 # bifacial_radiance uses East (morning) theta as positive
return tracker_theta
def gendaylit(self, timeindex, metdata=None, debug=False):
"""
        Sets and returns sky information using gendaylit.
        Uses PVLib to calculate the sun position angles, which are passed
        directly to gendaylit instead of letting Radiance calculate the sun
        position internally from date, time and location.
Parameters
----------
timeindex : int
Index from 0 to ~4000 of the MetObj (daylight hours only)
metdata : ``MetObj``
MetObj object with list of dni, dhi, ghi and location
debug : bool
Flag to print output of sky DHI and DNI
Returns
-------
skyname : str
Sets as a self.skyname and returns filename of sky in /skies/ directory.
If errors exist, such as DNI = 0 or sun below horizon, this skyname is None
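        Example
        -------
        A minimal sketch (timeindex is illustrative; assumes setGround() and
        readWeatherFile() were already run on a RadianceObj named ``demo``)::
            skyname = demo.gendaylit(timeindex=12)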
"""
import warnings
if metdata is None:
try:
metdata = self.metdata
except:
print('usage: pass metdata, or run after running ' +
'readWeatherfile() ')
return
ground = self.ground
locName = metdata.city
dni = metdata.dni[timeindex]
dhi = metdata.dhi[timeindex]
ghi = metdata.ghi[timeindex]
elev = metdata.elevation
lat = metdata.latitude
lon = metdata.longitude
# Assign Albedos
try:
if ground.ReflAvg.shape == metdata.dni.shape:
groundindex = timeindex
elif self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
warnings.warn("Shape of ground Albedos and TMY data do not match.")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
if debug is True:
print('Sky generated with Gendaylit, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
print("Datetime TimeIndex", metdata.datetime[timeindex])
#Time conversion to correct format and offset.
#datetime = metdata.sunrisesetdata['corrected_timestamp'][timeindex]
#Don't need any of this any more. Already sunrise/sunset corrected and offset by appropriate interval
# get solar position zenith and azimuth based on site metadata
#solpos = pvlib.irradiance.solarposition.get_solarposition(datetimetz,lat,lon,elev)
solpos = metdata.solpos.iloc[timeindex]
sunalt = float(solpos.elevation)
        # Radiance expects azimuth South = 0, PVlib gives South = 180. Must subtract 180 to match.
sunaz = float(solpos.azimuth)-180.0
sky_path = 'skies'
if dhi <= 0:
self.skyfiles = [None]
return None
# We should already be filtering for elevation >0. But just in case...
if sunalt <= 0:
sunalt = np.arcsin((ghi-dhi)/(dni+.001))*180/np.pi # reverse engineer elevation from ghi, dhi, dni
print('Warning: negative sun elevation at '+
'{}. '.format(metdata.datetime[timeindex])+
'Re-calculated elevation: {:0.2}'.format(sunalt))
        # Note: the -W and -O1 options are used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# location name: " + str(locName) + " LAT: " + str(lat)
+" LON: " + str(lon) + " Elev: " + str(elev) + "\n"
"# Sun position calculated w. PVLib\n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
ground._makeGroundString(index=groundindex, cumulativesky=False)
time = metdata.datetime[timeindex]
#filename = str(time)[2:-9].replace('-','_').replace(' ','_').replace(':','_')
filename = time.strftime('%Y-%m-%d_%H%M')
skyname = os.path.join(sky_path,"sky2_%s_%s_%s.rad" %(lat, lon, filename))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def gendaylit2manual(self, dni, dhi, sunalt, sunaz):
"""
Sets and returns sky information using gendaylit.
Uses user-provided data for sun position and irradiance.
.. warning::
This generates the sky at the sun altitude&azimuth provided, make
sure it is the right position relative to how the weather data got
created and read (i.e. label right, left or center).
Parameters
------------
dni: int or float
Direct Normal Irradiance (DNI) value, in W/m^2
dhi : int or float
Diffuse Horizontal Irradiance (DHI) value, in W/m^2
sunalt : int or float
Sun altitude (degrees)
sunaz : int or float
Sun azimuth (degrees)
Returns
-------
skyname : string
Filename of sky in /skies/ directory
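        Example
        -------
        A minimal sketch (irradiance and sun angles are illustrative; assumes
        setGround() was already run on a RadianceObj named ``demo``)::
            skyname = demo.gendaylit2manual(dni=700, dhi=100,
                                            sunalt=67.2, sunaz=-15)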
"""
print('Sky generated with Gendaylit 2 MANUAL, with DNI: %0.1f, DHI: %0.1f' % (dni, dhi))
sky_path = 'skies'
if sunalt <= 0 or dhi <= 0:
self.skyfiles = [None]
return None
# Assign Albedos
try:
if self.ground.ReflAvg.shape[0] == 1: # just 1 entry
groundindex = 0
else:
print("Ambiguous albedo entry, Set albedo to single value "
"in setGround()")
return
except:
print('usage: make sure to run setGround() before gendaylit()')
return
# Note: -W and -O1 are used to create full spectrum analysis in units of Wm-2
#" -L %s %s -g %s \n" %(dni/.0079, dhi/.0079, self.ground.ReflAvg) + \
skyStr = ("# start of sky definition for daylighting studies\n" + \
"# Manual inputs of DNI, DHI, SunAlt and SunAZ into Gendaylit used \n" + \
"!gendaylit -ang %s %s" %(sunalt, sunaz)) + \
" -W %s %s -g %s -O 1 \n" %(dni, dhi, self.ground.ReflAvg[groundindex]) + \
"skyfunc glow sky_mat\n0\n0\n4 1 1 1 0\n" + \
"\nsky_mat source sky\n0\n0\n4 0 0 1 180\n" + \
self.ground._makeGroundString(index=groundindex, cumulativesky=False)
skyname = os.path.join(sky_path, "sky2_%s.rad" %(self.name))
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]
return skyname
def genCumSky(self, gencumsky_metfile=None, savefile=None):
"""
Generate Skydome using gencumsky.
.. warning::
gencumulativesky.exe is required to be installed,
which is not a standard radiance distribution.
You can find the program in the bifacial_radiance distribution directory
in \Lib\site-packages\bifacial_radiance\data
Use :func:`readWeatherFile(filename, starttime='YYYY-mm-dd_HHMM', endtime='YYYY-mm-dd_HHMM')`
to limit gencumsky simulations instead.
Parameters
------------
        gencumsky_metfile : str
            Filename with path to the temporary meteorological file, usually
            created in the EPWs folder. This csv file has no headers, no index,
            and two space-separated columns with values for GHI and DHI for each
            hour in the year, and MUST be 8760 entries long, otherwise
            gencumulativesky.exe fails.
savefile : string
If savefile is None, defaults to "cumulative"
Returns
--------
skyname : str
Filename of the .rad file containing cumulativesky info
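        Example
        -------
        A minimal sketch (assumes readWeatherFile() and setGround() were
        already run on a RadianceObj named ``demo``, so the temporary
        gencumsky weather file exists)::
            skyname = demo.genCumSky()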
"""
# TODO: error checking and auto-install of gencumulativesky.exe
# TODO: add check if readWeatherfile has not be done
# TODO: check if it fails if gcc module has been loaded? (common hpc issue)
#import datetime
if gencumsky_metfile is None:
gencumsky_metfile = self.gencumsky_metfile
if isinstance(gencumsky_metfile, str):
print("Loaded ", gencumsky_metfile)
if isinstance(gencumsky_metfile, list):
print("There are more than 1 year of gencumsky temporal weather file saved."+
"You can pass which file you want with gencumsky_metfile input. Since "+
"No year was selected, defaulting to using the first year of the list")
gencumsky_metfile = gencumsky_metfile[0]
print("Loaded ", gencumsky_metfile)
if savefile is None:
savefile = "cumulative"
sky_path = 'skies'
lat = self.metdata.latitude
lon = self.metdata.longitude
timeZone = self.metdata.timezone
'''
cmd = "gencumulativesky +s1 -h 0 -a %s -o %s -m %s %s " %(lat, lon, float(timeZone)*15, filetype) +\
"-time %s %s -date %s %s %s %s %s" % (startdt.hour, enddt.hour+1,
startdt.month, startdt.day,
enddt.month, enddt.day,
gencumsky_metfile)
'''
cmd = (f"gencumulativesky +s1 -h 0 -a {lat} -o {lon} -m "
f"{float(timeZone)*15} -G {gencumsky_metfile}" )
with open(savefile+".cal","w") as f:
_,err = _popen(cmd, None, f)
if err is not None:
print(err)
# Assign Albedos
try:
groundstring = self.ground._makeGroundString(cumulativesky=True)
        except:
            raise Exception('Error: ground reflection not defined. '
                            'Run RadianceObj.setGround() first')
skyStr = "#Cumulative Sky Definition\n" +\
"void brightfunc skyfunc\n" + \
"2 skybright " + "%s.cal\n" % (savefile) + \
"0\n" + \
"0\n" + \
"\nskyfunc glow sky_glow\n" + \
"0\n" + \
"0\n" + \
"4 1 1 1 0\n" + \
"\nsky_glow source sky\n" + \
"0\n" + \
"0\n" + \
"4 0 0 1 180\n" + \
groundstring
skyname = os.path.join(sky_path, savefile+".rad")
skyFile = open(skyname, 'w')
skyFile.write(skyStr)
skyFile.close()
self.skyfiles = [skyname]#, 'SunFile.rad' ]
return skyname
def set1axis(self, metdata=None, azimuth=180, limit_angle=45,
angledelta=5, backtrack=True, gcr=1.0 / 3, cumulativesky=True,
fixed_tilt_angle=None, useMeasuredTrackerAngle=False,
axis_azimuth=None):
"""
Set up geometry for 1-axis tracking. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met data
matches the tracking angle. Returns 'trackerdict' which has keys equal to
either the tracker angles (gencumsky workflow) or timestamps (gendaylit hourly
workflow)
Parameters
------------
metdata : :py:class:`~bifacial_radiance.MetObj`
Meterological object to set up geometry. Usually set automatically by
`bifacial_radiance` after running :py:class:`bifacial_radiance.readepw`.
Default = self.metdata
azimuth : numeric
Orientation axis of tracker torque tube. Default North-South (180 deg).
For fixed-tilt configuration, input is fixed azimuth (180 is south)
limit_angle : numeric
Limit angle (+/-) of the 1-axis tracker in degrees. Default 45
angledelta : numeric
Degree of rotation increment to parse irradiance bins. Default 5 degrees.
(0.4 % error for DNI). Other options: 4 (.25%), 2.5 (0.1%).
Note: the smaller the angledelta, the more simulations must be run.
backtrack : bool
Whether backtracking is enabled (default = True)
        gcr : float
            Ground coverage ratio used for backtracking calculation. Default 1.0/3.0
        cumulativesky : bool
            [True] Whether individual csv files are
            created with constant tilt angle for the cumulativesky approach.
            If False, the gendaylit tracking approach must be used.
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and axis_azimuth as the tilt and azimuth
useMeasuredTrackerAngle: Bool
If True, and data for tracker angles has been passed by being included
in the WeatherFile object (column name 'Tracker Angle (degrees)'),
then tracker angles will be set to these values instead of being calculated.
NOTE that the value for azimuth passed to set1axis must be surface
azimuth in the morning and not the axis_azimuth
(i.e. for a N-S HSAT, azimuth = 90).
axis_azimuth : numeric
DEPRECATED. returns deprecation warning. Pass the tracker
axis_azimuth through to azimuth input instead.
Returns
-------
trackerdict : dictionary
Keys represent tracker tilt angles (gencumsky) or timestamps (gendaylit)
and list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
- or -
trackerdict[time]['tracker_theta';'surf_azm';'surf_tilt']
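        Example
        -------
        A minimal sketch (parameter values are illustrative; assumes metdata
        was loaded on ``demo`` with readWeatherFile())::
            trackerdict = demo.set1axis(limit_angle=45, backtrack=True,
                                        gcr=0.33, cumulativesky=False)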
"""
# Documentation check:
# Removed Internal variables
# -------
# metdata.solpos dataframe with solar position data
# metdata.surface_azimuth list of tracker azimuth data
# metdata.surface_tilt list of tracker surface tilt data
# metdata.tracker_theta list of tracker tilt angle
import warnings
if metdata == None:
metdata = self.metdata
if metdata == {}:
raise Exception("metdata doesnt exist yet. "+
"Run RadianceObj.readWeatherFile() ")
if axis_azimuth:
azimuth = axis_azimuth
warnings.warn("axis_azimuth is deprecated in set1axis; use azimuth "
"input instead.", DeprecationWarning)
#backtrack = True # include backtracking support in later version
#gcr = 1.0/3.0 # default value - not used if backtrack = False.
# get 1-axis tracker angles for this location, rounded to nearest 'angledelta'
trackerdict = metdata._set1axis(cumulativesky=cumulativesky,
azimuth=azimuth,
limit_angle=limit_angle,
angledelta=angledelta,
backtrack=backtrack,
gcr=gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle
)
self.trackerdict = trackerdict
self.cumulativesky = cumulativesky
return trackerdict
def gendaylit1axis(self, metdata=None, trackerdict=None, startdate=None,
enddate=None, debug=False):
"""
1-axis tracking implementation of gendaylit.
Creates multiple sky files, one for each time of day.
Parameters
------------
metdata
MetObj output from readWeatherFile. Needs to have
RadianceObj.set1axis() run on it first.
startdate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
enddate : str
DEPRECATED, does not do anything now.
Recommended to downselect metdata when reading Weather File.
trackerdict : dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
Returns
-------
Updated trackerdict dictionary
Dictionary with keys for tracker tilt angles (gencumsky) or timestamps (gendaylit)
with the additional dictionary value ['skyfile'] added
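        Example
        -------
        A minimal sketch (assumes set1axis(cumulativesky=False) was already
        run on a RadianceObj named ``demo``)::
            trackerdict = demo.gendaylit1axis()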
"""
if metdata is None:
metdata = self.metdata
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if startdate is not None or enddate is not None:
print("Deprecation Warning: gendyalit1axis no longer downselects"+
" entries by stardate and enddate. Downselect your data"+
" when loading with readWeatherFile")
return
try:
metdata.tracker_theta # this may not exist
except AttributeError:
print("metdata.tracker_theta doesn't exist. Run RadianceObj.set1axis() first")
if debug is False:
print('Creating ~%d skyfiles. '%(len(trackerdict.keys())))
count = 0 # counter to get number of skyfiles created, just for giggles
trackerdict2={}
for i in range(0, len(trackerdict.keys())):
try:
time = metdata.datetime[i]
except IndexError: #out of range error
break #
#filename = str(time)[5:-12].replace('-','_').replace(' ','_')
filename = time.strftime('%Y-%m-%d_%H%M')
self.name = filename
#check for GHI > 0
#if metdata.ghi[i] > 0:
if (metdata.ghi[i] > 0) & (~np.isnan(metdata.tracker_theta[i])):
skyfile = self.gendaylit(metdata=metdata,timeindex=i, debug=debug)
# trackerdict2 reduces the dict to only the range specified.
trackerdict2[filename] = trackerdict[filename]
trackerdict2[filename]['skyfile'] = skyfile
count +=1
print('Created {} skyfiles in /skies/'.format(count))
self.trackerdict = trackerdict2
return trackerdict2
def genCumSky1axis(self, trackerdict=None):
"""
1-axis tracking implementation of gencumulativesky.
Creates multiple .cal files and .rad files, one for each tracker angle.
Use :func:`readWeatherFile` to limit gencumsky simulations
Parameters
------------
trackerdict : dictionary
Trackerdict generated as output by RadianceObj.set1axis()
Returns
-------
trackerdict : dictionary
Trackerdict dictionary with new entry trackerdict.skyfile
Appends 'skyfile' to the 1-axis dict with the location of the sky .radfile
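        Example
        -------
        A minimal sketch (assumes set1axis(cumulativesky=True) was already
        run on a RadianceObj named ``demo``)::
            trackerdict = demo.genCumSky1axis()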
"""
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
for theta in sorted(trackerdict):
# call gencumulativesky with a new .cal and .rad name
csvfile = trackerdict[theta]['csvfile']
savefile = '1axis_%s'%(theta) #prefix for .cal file and skies\*.rad file
skyfile = self.genCumSky(gencumsky_metfile=csvfile, savefile=savefile)
trackerdict[theta]['skyfile'] = skyfile
print('Created skyfile %s'%(skyfile))
# delete default skyfile (not strictly necessary)
self.skyfiles = None
self.trackerdict = trackerdict
return trackerdict
def makeOct(self, filelist=None, octname=None):
"""
Combine everything together into a .oct file
Parameters
----------
filelist : list
Files to include. otherwise takes self.filelist
octname : str
filename (without .oct extension)
Returns
-------
octname : str
filename of .oct file in root directory including extension
err : str
Error message returned from oconv (if any)
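        Example
        -------
        A minimal sketch (assumes sky, scene and material files already exist
        on a RadianceObj named ``demo``)::
            octfile = demo.makeOct()  # combines getfilelist() into name.oct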
"""
if filelist is None:
filelist = self.getfilelist()
if octname is None:
octname = self.name
debug = False
        #JSS. With the way the break is handled now, this will wait the full
        # 10 seconds for every hour whose sky file was not generated.
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
for file in filelist:
if debug:
print("HPC Checking for file %s" % (file))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
                # skyfile may be saved as 'None', which makes this check fail!
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
                    if time_counter > time_to_wait:
                        print("File not found: %s" % (file))
                        break
#os.system('oconv '+ ' '.join(filelist) + ' > %s.oct' % (octname))
if None in filelist: # are we missing any files? abort!
print('Missing files, skipping...')
self.octfile = None
return None
#cmd = 'oconv ' + ' '.join(filelist)
filelist.insert(0,'oconv')
with open('%s.oct' % (octname), "w") as f:
_,err = _popen(filelist, None, f)
#TODO: exception handling for no sun up
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
if err[0:7] == 'message':
warnings.warn(err[9:], Warning)
#use rvu to see if everything looks good.
# use cmd for this since it locks out the terminal.
#'rvu -vf views\side.vp -e .01 monopanel_test.oct'
print("Created %s.oct" % (octname))
self.octfile = '%s.oct' % (octname)
return '%s.oct' % (octname)
def makeOct1axis(self, trackerdict=None, singleindex=None, customname=None):
"""
Combine files listed in trackerdict into multiple .oct files
Parameters
------------
trackerdict
Output from :py:class:`~bifacial_radiance.RadianceObj.makeScene1axis`
singleindex : str
Single index for trackerdict to run makeOct1axis in single-value mode,
format 'YYYY-MM-DD_HHMM'.
customname : str
Custom text string added to the end of the OCT file name.
Returns
-------
trackerdict
Append 'octfile' to the 1-axis dict with the location of the scene .octfile
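        Example
        -------
        A minimal sketch (the index value is illustrative; assumes
        makeScene1axis() was already run on a RadianceObj named ``demo``)::
            trackerdict = demo.makeOct1axis(singleindex='2021-06-17_1300')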
"""
if customname is None:
customname = ''
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # loop through all values in the tracker dictionary
indexlist = trackerdict.keys()
else: # just loop through one single index in tracker dictionary
indexlist = [singleindex]
print('\nMaking {} octfiles in root directory.'.format(indexlist.__len__()))
for index in sorted(indexlist): # run through either entire key list of trackerdict, or just a single value
try:
filelist = self.materialfiles + [trackerdict[index]['skyfile'], trackerdict[index]['radfile']]
octname = '1axis_%s%s'%(index, customname)
trackerdict[index]['octfile'] = self.makeOct(filelist, octname)
except KeyError as e:
print('Trackerdict key error: {}'.format(e))
return trackerdict
def makeModule(self, name=None, x=None, y=None, z=None, modulefile=None,
text=None, customtext='', xgap=0.01, ygap=0.0,
zgap=0.1, numpanels=1, rewriteModulefile=True,
glass=False, modulematerial=None, bifi=1, **kwargs):
"""
pass module generation details into ModuleObj(). See ModuleObj()
docstring for more details
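        Example
        -------
        A minimal sketch (module name and x, y dimensions in meters are
        illustrative; assumes a RadianceObj named ``demo``)::
            module = demo.makeModule(name='test-module', x=1, y=2)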
"""
from bifacial_radiance import ModuleObj
if name is None:
print("usage: makeModule(name,x,y,z, modulefile = '\objects\*.rad', "+
" zgap = 0.1 (module offset)"+
"numpanels = 1 (# of panels in portrait), ygap = 0.05 "+
"(slope distance between panels when arrayed), "+
"rewriteModulefile = True (or False), bifi = 1")
print("You can also override module_type info by passing 'text'"+
"variable, or add on at the end for racking details with "+
"'customtext'. See function definition for more details")
print("Optional: tubeParams={} (torque tube details including "
"diameter (torque tube dia. in meters), tubetype='Round' "
"(or 'square', 'hex'), material='Metal_Grey' (or 'black')"
", axisofrotation=True (does scene rotate around tube)")
print("Optional: cellModule={} (create cell-level module by "+
" passing in dictionary with keys 'numcellsx'6 (#cells in "+
"X-dir.), 'numcellsy', 'xcell' (cell size in X-dir. in meters),"+
"'ycell', 'xcellgap' (spacing between cells in X-dir.), 'ycellgap'")
print("Optional: omegaParams={} (create the support structure omega by "+
"passing in dictionary with keys 'omega_material' (the material of "+
"omega), 'mod_overlap'(the length of the module adjacent piece of"+
" omega that overlaps with the module),'x_omega1', 'y_omega' (ideally same"+
" for all the parts of omega),'z_omega1', 'x_omega2' (X-dir length of the"+
" vertical piece), 'x_omega3', z_omega3")
return
"""
# TODO: check for deprecated torquetube and axisofrotationTorqueTube in
kwargs.
"""
if 'tubeParams' in kwargs:
tubeParams = kwargs.pop('tubeParams')
else:
tubeParams = None
if 'torquetube' in kwargs:
torquetube = kwargs.pop('torquetube')
print("\nWarning: boolean input `torquetube` passed into makeModule"
". Starting in v0.4.0 this boolean parameter is deprecated."
" Use module.addTorquetube() with `visible` parameter instead.")
if tubeParams:
tubeParams['visible'] = torquetube
elif (tubeParams is None) & (torquetube is True):
tubeParams = {'visible':True} # create default TT
if 'axisofrotationTorqueTube' in kwargs:
axisofrotation = kwargs.pop('axisofrotationTorqueTube')
print("\nWarning: input boolean `axisofrotationTorqueTube` passed "
"into makeModule. Starting in v0.4.0 this boolean parameter is"
" deprecated. Use module.addTorquetube() with `axisofrotation`"
"parameter instead.")
            if tubeParams: #this kwarg only does something if there's a TT.
tubeParams['axisofrotation'] = axisofrotation
if self.hpc: # trigger HPC simulation in ModuleObj
kwargs['hpc']=True
self.module = ModuleObj(name=name, x=x, y=y, z=z, bifi=bifi, modulefile=modulefile,
text=text, customtext=customtext, xgap=xgap, ygap=ygap,
zgap=zgap, numpanels=numpanels,
rewriteModulefile=rewriteModulefile, glass=glass,
modulematerial=modulematerial, tubeParams=tubeParams,
**kwargs)
return self.module
def makeCustomObject(self, name=None, text=None):
"""
Function for development and experimenting with extraneous objects in the scene.
This function creates a `name.rad` textfile in the objects folder
with whatever text that is passed to it.
It is up to the user to pass the correct radiance format.
For example, to create a box at coordinates 0,0 (with its bottom surface
on the plane z=0):
.. code-block:
name = 'box'
text='! genbox black PVmodule 0.5 0.5 0.5 | xform -t -0.25 -0.25 0'
Parameters
----------
name : str
String input to name the module type
text : str
Text used in the radfile to generate the module
"""
customradfile = os.path.join('objects', '%s.rad'%(name)) # update in 0.2.3 to shorten radnames
# py2 and 3 compatible: binary write, encode text first
with open(customradfile, 'wb') as f:
f.write(text.encode('ascii'))
print("\nCustom Object Name", customradfile)
self.customradfile = customradfile
return customradfile
def printModules(self):
# print available module types from ModuleObj
from bifacial_radiance import ModuleObj
modulenames = ModuleObj().readModule()
print('Available module names: {}'.format([str(x) for x in modulenames]))
return modulenames
def makeScene(self, module=None, sceneDict=None, radname=None,
moduletype=None):
"""
Create a SceneObj which contains details of the PV system configuration including
tilt, row pitch, height, nMods per row, nRows in the system...
Parameters
----------
module : str or ModuleObj
String name of module created with makeModule()
sceneDict : dictionary
Dictionary with keys: `tilt`, `clearance_height`*, `pitch`,
`azimuth`, `nMods`, `nRows`, `hub_height`*, `height`*
            * height deprecated from sceneDict. For makeScene (fixed systems)
              if passed it is assumed it refers to clearance_height.
`clearance_height` recommended for fixed_tracking systems.
`hub_height` can also be passed as a possibility.
radname : str
Gives a custom name to the scene file. Useful when parallelizing.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
-------
SceneObj
'scene' with configuration details
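
        Examples
        --------
        Illustrative sketch; assumes a RadianceObj ``demo`` and a module named
        'test-module' created earlier with makeModule:

        >>> sceneDict = {'tilt': 10, 'pitch': 3, 'clearance_height': 0.2,
        ...              'azimuth': 180, 'nMods': 20, 'nRows': 7}
        >>> scene = demo.makeScene(module='test-module', sceneDict=sceneDict)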
"""
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('makeScene(module, sceneDict, nMods, nRows). '+\
'Available moduletypes: ' )
self.printModules() #print available module types
return
self.scene = SceneObj(module)
self.scene.hpc = self.hpc #pass HPC mode from parent
if sceneDict is None:
print('makeScene(moduletype, sceneDict, nMods, nRows). '+\
'sceneDict inputs: .tilt .clearance_height .pitch .azimuth')
return self.scene
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
# Fixed tilt routine
# Preferred: clearance_height,
# If only height is passed, it is assumed to be clearance_height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='clearance_height',
nonpreferred='hub_height')
self.nMods = sceneDict['nMods']
self.nRows = sceneDict['nRows']
self.sceneRAD = self.scene._makeSceneNxR(sceneDict=sceneDict,
radname=radname)
if 'appendRadfile' not in sceneDict:
appendRadfile = False
else:
appendRadfile = sceneDict['appendRadfile']
if appendRadfile:
debug = False
try:
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDED!")
except:
#TODO: Manage situation where radfile was created with
#appendRadfile to False first..
self.radfiles=[]
self.radfiles.append(self.sceneRAD)
if debug:
print( "Radfile APPENDAGE created!")
else:
self.radfiles = [self.sceneRAD]
return self.scene
def appendtoScene(self, radfile=None, customObject=None, text=''):
"""
Appends to the `Scene radfile` in folder `\objects` the text command in Radiance
lingo created by the user.
Useful when using addCustomObject to the scene.
Parameters
----------
radfile: str
Directory and name of where .rad scene file is stored
customObject : str
Directory and name of custom object .rad file is stored
text : str
Command to be appended to the radfile. Do not leave empty spaces
at the end.
Returns
-------
Nothing, the radfile must already be created and assigned when running this.
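
        Example (illustrative sketch; assumes ``scene`` from makeScene and a
        ``customObject`` path returned by makeCustomObject):

        >>> demo.appendtoScene(radfile=scene.radfiles, customObject=customObject,
        ...                    text='!xform -rz 0')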
"""
#TODO: Add a custom name and replace radfile name
# py2 and 3 compatible: binary write, encode text first
text2 = '\n' + text + ' ' + customObject
debug = False
if debug:
print (text2)
with open(radfile, 'a+') as f:
f.write(text2)
def makeScene1axis(self, trackerdict=None, module=None, sceneDict=None,
cumulativesky=None, moduletype=None):
"""
Creates a SceneObj for each tracking angle which contains details of the PV
system configuration including row pitch, hub_height, nMods per row, nRows in the system...
Parameters
------------
trackerdict
Output from GenCumSky1axis
module : str or ModuleObj
Name or ModuleObj created with makeModule()
sceneDict :
Dictionary with keys:`tilt`, `hub_height`, `pitch`, `azimuth`
cumulativesky : bool
Defines if sky will be generated with cumulativesky or gendaylit.
moduletype: DEPRECATED. use the `module` kwarg instead.
Returns
--------
trackerdict
Append the following keys
'radfile'
directory where .rad scene file is stored
'scene'
SceneObj for each tracker theta
'clearance_height'
Calculated ground clearance based on
`hub height`, `tilt` angle and overall collector width `sceney`
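
        Examples
        --------
        Illustrative sketch; assumes a RadianceObj ``demo`` with a trackerdict
        from set1axis and a module named 'test-module':

        >>> sceneDict = {'hub_height': 1.5, 'pitch': 5.7, 'azimuth': 180,
        ...              'nMods': 20, 'nRows': 7}
        >>> trackerdict = demo.makeScene1axis(module='test-module',
        ...                                   sceneDict=sceneDict)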
"""
import math
if sceneDict is None:
print('usage: makeScene1axis(module, sceneDict, nMods, nRows).'+
'sceneDict inputs: .hub_height .azimuth .nMods .nRows'+
'and .pitch or .gcr')
return
# If no nRows or nMods assigned on deprecated variable or dictionary,
# assign default.
if 'nRows' not in sceneDict:
sceneDict['nRows'] = 7
if 'nMods' not in sceneDict:
sceneDict['nMods'] = 20
if trackerdict is None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if cumulativesky is None:
try:
# see if cumulativesky = False was set earlier,
# e.g. in RadianceObj.set1axis
cumulativesky = self.cumulativesky
except AttributeError:
# default cumulativesky = true to maintain backward compatibility.
cumulativesky = True
if moduletype is not None:
module = moduletype
print("Warning: input `moduletype` is deprecated. Use kwarg "
"`module` instead")
if module is None:
try:
module = self.module
print(f'Using last saved module, name: {module.name}')
except AttributeError:
print('usage: makeScene1axis(trackerdict, module, '+
'sceneDict, nMods, nRows). ')
self.printModules() #print available module types
return
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
# 1axis routine
# Preferred hub_height
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight:
simplefix = 0
            hubheight = sceneDict['clearance_height'] # Not really, but this is
            # the fastest way to make it work with the simplefix below: the
            # actual clearance height gets calculated, and simplefix=0 sets the
            # cosine correction to 0.  TODO: CLEAN THIS UP.
else:
#the hub height is the tracker height at center of rotation.
hubheight = sceneDict['hub_height']
simplefix = 1
if cumulativesky is True: # cumulativesky workflow
print('\nMaking .rad files for cumulativesky 1-axis workflow')
for theta in trackerdict:
scene = SceneObj(module)
if trackerdict[theta]['surf_azm'] >= 180:
trackerdict[theta]['surf_azm'] = trackerdict[theta]['surf_azm']-180
trackerdict[theta]['surf_tilt'] = trackerdict[theta]['surf_tilt']*-1
radname = '1axis%s_'%(theta,)
# Calculating clearance height for this theta.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
# Calculate the ground clearance height based on the hub height. Add abs(theta) to avoid negative tilt angle errors
trackerdict[theta]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed, not pitch
sceneDict2 = {'tilt':trackerdict[theta]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height':trackerdict[theta]['clearance_height'],
'azimuth':trackerdict[theta]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[theta]['radfile'] = radfile
trackerdict[theta]['scene'] = scene
            print('{} Radfiles created in /objects/'.format(len(trackerdict)))
else: #gendaylit workflow
print('\nMaking ~%s .rad files for gendaylit 1-axis workflow (this takes a minute..)' % (len(trackerdict)))
count = 0
for time in trackerdict:
scene = SceneObj(module)
if trackerdict[time]['surf_azm'] >= 180:
trackerdict[time]['surf_azm'] = trackerdict[time]['surf_azm']-180
trackerdict[time]['surf_tilt'] = trackerdict[time]['surf_tilt']*-1
theta = trackerdict[time]['theta']
radname = '1axis%s_'%(time,)
# Calculating clearance height for this time.
height = hubheight - simplefix*0.5* math.sin(abs(theta) * math.pi / 180) \
* scene.module.sceney + scene.module.offsetfromaxis \
* math.sin(abs(theta)*math.pi/180)
if trackerdict[time]['ghi'] > 0:
trackerdict[time]['clearance_height'] = height
try:
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'pitch':sceneDict['pitch'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
except KeyError:
#maybe gcr is passed instead of pitch
sceneDict2 = {'tilt':trackerdict[time]['surf_tilt'],
'gcr':sceneDict['gcr'],
'clearance_height': trackerdict[time]['clearance_height'],
'azimuth':trackerdict[time]['surf_azm'],
'nMods': sceneDict['nMods'],
'nRows': sceneDict['nRows'],
'modulez': scene.module.z}
radfile = scene._makeSceneNxR(sceneDict=sceneDict2,
radname=radname)
trackerdict[time]['radfile'] = radfile
trackerdict[time]['scene'] = scene
count+=1
print('{} Radfiles created in /objects/'.format(count))
self.trackerdict = trackerdict
self.nMods = sceneDict['nMods'] #assign nMods and nRows to RadianceObj
self.nRows = sceneDict['nRows']
self.hub_height = hubheight
return trackerdict
def analysis1axis(self, trackerdict=None, singleindex=None, accuracy='low',
customname=None, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
modscanfront = None, modscanback = None, relative=False,
debug=False ):
"""
Loop through trackerdict and runs linescans for each scene and scan in there.
Parameters
----------------
trackerdict
singleindex : str
For single-index mode, just the one index we want to run (new in 0.2.3).
            Example format '2021-06-14_1230' for 2021 June 14th 12:30 pm
accuracy : str
'low' or 'high', resolution option used during _irrPlot and rtrace
customname : str
Custom text string to be added to the file name for the results .CSV files
modWanted : int
Module to be sampled. Index starts at 1.
rowWanted : int
Row to be sampled. Index starts at 1. (row 1)
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the length, the side perpendicular
to the collector width (CW) of the module(s) for the back side of the module.
If multiple values are passed, first value represents number of
front sensors, second value is number of back sensors.
modscanfront : dict
dictionary with one or more of the following key: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
modscanback : dict
dictionary with one or more of the following key: xstart, ystart, zstart,
xinc, yinc, zinc, Nx, Ny, Nz, orient. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage.
        relative : Bool
            if passing modscanfront and modscanback to modify dictionaries of
            positions, this sets whether the values passed are relative or
            absolute. Default is absolute value (relative=False)
debug : Bool
Activates internal printing of the function to help debugging.
Returns
-------
trackerdict with new keys:
'AnalysisObj' : analysis object for this tracker theta
'Wm2Front' : list of front Wm-2 irradiances, len=sensorsy_back
'Wm2Back' : list of rear Wm-2 irradiances, len=sensorsy_back
'backRatio' : list of rear irradiance ratios, len=sensorsy_back
RadianceObj with new appended values:
'Wm2Front' : np Array with front irradiance cumulative
'Wm2Back' : np Array with rear irradiance cumulative
'backRatio' : np Array with rear irradiance ratios
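
        Examples
        --------
        Illustrative sketch; assumes a RadianceObj ``demo`` whose octfiles were
        already created by makeOct1axis:

        >>> trackerdict = demo.analysis1axis(sensorsy=9)
        >>> demo.Wm2Front, demo.Wm2Back  # cumulative front / rear irradiance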
"""
import warnings
if customname is None:
customname = ''
if trackerdict == None:
try:
trackerdict = self.trackerdict
except AttributeError:
print('No trackerdict value passed or available in self')
if singleindex is None: # run over all values in trackerdict
trackerkeys = sorted(trackerdict.keys())
else: # run in single index mode.
trackerkeys = [singleindex]
if modWanted == None:
modWanted = round(self.nMods / 1.99)
if rowWanted == None:
rowWanted = round(self.nRows / 1.99)
frontWm2 = 0 # container for tracking front irradiance across module chord. Dynamically size based on first analysis run
backWm2 = 0 # container for tracking rear irradiance across module chord.
for index in trackerkeys: # either full list of trackerdict keys, or single index
name = '1axis_%s%s'%(index,customname)
octfile = trackerdict[index]['octfile']
scene = trackerdict[index]['scene']
if octfile is None:
continue # don't run analysis if the octfile is none
try: # look for missing data
analysis = AnalysisObj(octfile,name)
frontscanind, backscanind = analysis.moduleAnalysis(scene=scene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
analysis.analysis(octfile=octfile,name=name,frontscan=frontscanind,backscan=backscanind,accuracy=accuracy)
trackerdict[index]['AnalysisObj'] = analysis
except Exception as e: # problem with file. TODO: only catch specific error types here.
warnings.warn('Index: {}. Problem with file. Error: {}. Skipping'.format(index,e), Warning)
                continue
#combine cumulative front and back irradiance for each tracker angle
try: #on error, trackerdict[index] is returned empty
trackerdict[index]['Wm2Front'] = analysis.Wm2Front
trackerdict[index]['Wm2Back'] = analysis.Wm2Back
trackerdict[index]['backRatio'] = analysis.backRatio
except AttributeError as e: # no key Wm2Front.
warnings.warn('Index: {}. Trackerdict key not found: {}. Skipping'.format(index,e), Warning)
                continue
if np.sum(frontWm2) == 0: # define frontWm2 the first time through
frontWm2 = np.array(analysis.Wm2Front)
backWm2 = np.array(analysis.Wm2Back)
else:
frontWm2 += np.array(analysis.Wm2Front)
backWm2 += np.array(analysis.Wm2Back)
print('Index: {}. Wm2Front: {}. Wm2Back: {}'.format(index,
np.mean(analysis.Wm2Front), np.mean(analysis.Wm2Back)))
if np.sum(self.Wm2Front) == 0:
self.Wm2Front = frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back = backWm2
else:
self.Wm2Front += frontWm2 # these are accumulated over all indices passed in.
self.Wm2Back += backWm2
self.backRatio = np.mean(backWm2)/np.mean(frontWm2+.001)
# Save compiled results using _saveresults
if singleindex is None:
print ("Saving a cumulative-results file in the main simulation folder." +
"This adds up by sensor location the irradiance over all hours " +
"or configurations considered." +
"\nWarning: This file saving routine does not clean results, so "+
"if your setup has ygaps, or 2+modules or torque tubes, doing "+
"a deeper cleaning and working with the individual results "+
"files in the results folder is highly suggested.")
cumfilename = 'cumulative_results_%s.csv'%(customname)
if self.cumulativesky is True:
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
temptrackerdict = trackerdict[list(trackerdict)[0]]['AnalysisObj']
#temptrackerdict = trackerdict[0.0]['AnalysisObj']
frontcum ['x'] = temptrackerdict.x
frontcum ['y'] = temptrackerdict.y
frontcum ['z'] = temptrackerdict.z
frontcum ['mattype'] = temptrackerdict.mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = temptrackerdict.x
                rearcum ['y'] = temptrackerdict.y
rearcum ['z'] = temptrackerdict.rearZ
rearcum ['mattype'] = temptrackerdict.rearMat
rearcum ['Wm2'] = self.Wm2Back
cumanalysisobj = AnalysisObj()
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
else: # trackerkeys are day/hour/min, and there's no easy way to find a
# tilt of 0, so making a fake linepoint object for tilt 0
# and then saving.
try:
cumscene = trackerdict[trackerkeys[0]]['scene']
cumscene.sceneDict['tilt']=0
cumscene.sceneDict['clearance_height'] = self.hub_height
cumanalysisobj = AnalysisObj()
frontscancum, backscancum = cumanalysisobj.moduleAnalysis(scene=cumscene, modWanted=modWanted,
rowWanted=rowWanted,
sensorsy=sensorsy,
sensorsx=sensorsx,
modscanfront=modscanfront, modscanback=modscanback,
relative=relative, debug=debug)
x,y,z = cumanalysisobj._linePtsArray(frontscancum)
x,y,rearz = cumanalysisobj._linePtsArray(backscancum)
frontcum = pd.DataFrame()
rearcum = pd.DataFrame()
frontcum ['x'] = x
frontcum ['y'] = y
frontcum ['z'] = z
frontcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].mattype
frontcum ['Wm2'] = self.Wm2Front
rearcum ['x'] = x
rearcum ['y'] = y
rearcum ['z'] = rearz
rearcum ['mattype'] = trackerdict[trackerkeys[0]]['AnalysisObj'].rearMat
rearcum ['Wm2'] = self.Wm2Back
print ("\nSaving Cumulative results" )
cumanalysisobj._saveResultsCumulative(frontcum, rearcum, savefile=cumfilename)
except:
print("Not able to save a cumulative result for this simulation.")
return trackerdict
# End RadianceObj definition
class GroundObj:
"""
Class to set and return details for the ground surface materials and reflectance.
If 1 albedo value is passed, it is used as default.
    If 3 albedo values are passed, they are assigned to each of the three
    wavelength placeholders (RGB).
    If material type is known, it is used to get reflectance info;
    if material type isn't known, the list of known material names is returned.
Parameters
------------
materialOrAlbedo : numeric or str
        If a number between 0 and 1 is passed, albedo input is assumed and assigned.
        If a string is passed with the name of the material desired, e.g. 'litesoil',
properties are searched in `material_file`.
Default Material names to choose from: litesoil, concrete, white_EPDM,
beigeroof, beigeroof_lite, beigeroof_heavy, black, asphalt
material_file : str
Filename of the material information. Default `ground.rad`
Returns
-------
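
    Examples
    --------
    Illustrative sketch of the accepted inputs:

    >>> ground = GroundObj(0.62)                # single broadband albedo
    >>> ground = GroundObj([0.25, 0.30, 0.35])  # one albedo per RGB channel
    >>> ground = GroundObj('litesoil')          # named material in ground.rad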
"""
def __init__(self, materialOrAlbedo=None, material_file=None):
import warnings
from numbers import Number
self.normval = None
self.ReflAvg = None
self.Rrefl = None
self.Grefl = None
self.Brefl = None
self.ground_type = 'custom'
if material_file is None:
material_file = 'ground.rad'
self.material_file = material_file
if materialOrAlbedo is None: # Case where it's none.
            print('\nInput albedo 0-1, or string from ground.printGroundMaterials().'
                  '\nAlternatively, run setGround after readWeatherData() '
                  'and setGround will read metdata.albedo if available')
return
if isinstance(materialOrAlbedo, str) :
self.ground_type = materialOrAlbedo
# Return the RGB albedo for material ground_type
materialOrAlbedo = self.printGroundMaterials(self.ground_type)
# Check for double and int.
if isinstance(materialOrAlbedo, Number):
materialOrAlbedo = np.array([[materialOrAlbedo,
materialOrAlbedo, materialOrAlbedo]])
if isinstance(materialOrAlbedo, list):
materialOrAlbedo = np.asarray(materialOrAlbedo)
# By this point, materialOrAlbedo should be a np.ndarray:
if isinstance(materialOrAlbedo, np.ndarray):
if materialOrAlbedo.ndim == 0:
# numpy array of one single value, i.e. np.array(0.62)
# after this if, np.array([0.62])
materialOrAlbedo = materialOrAlbedo.reshape([1])
if materialOrAlbedo.ndim == 1:
# If np.array is ([0.62]), this repeats it so at the end it's
# np.array ([0.62, 0.62, 0.62])
materialOrAlbedo = np.repeat(np.array([materialOrAlbedo]),
3, axis=1).reshape(
len(materialOrAlbedo),3)
if (materialOrAlbedo.ndim == 2) & (materialOrAlbedo.shape[1] > 3):
warnings.warn("Radiance only raytraces 3 wavelengths at "
"a time. Trimming albedo np.array input to "
"3 wavelengths.")
materialOrAlbedo = materialOrAlbedo[:,0:3]
# By this point we should have np.array of dim=2 and shape[1] = 3.
# Check for invalid values
if (materialOrAlbedo > 1).any() or (materialOrAlbedo < 0).any():
print('Warning: albedo values greater than 1 or less than 0. '
'Constraining to [0..1]')
materialOrAlbedo = materialOrAlbedo.clip(min=0, max=1)
try:
self.Rrefl = materialOrAlbedo[:,0]
self.Grefl = materialOrAlbedo[:,1]
self.Brefl = materialOrAlbedo[:,2]
self.normval = _normRGB(materialOrAlbedo[:,0],materialOrAlbedo[:,1],
materialOrAlbedo[:,2])
self.ReflAvg = np.round(np.mean(materialOrAlbedo, axis=1),4)
print(f'Loading albedo, {self.ReflAvg.__len__()} value(s), '
f'{self._nonzeromean(self.ReflAvg):0.3f} avg\n'
f'{self.ReflAvg[self.ReflAvg != 0].__len__()} nonzero albedo values.')
except IndexError as e:
print('albedo.shape should be 3 column (N x 3)')
raise e
def printGroundMaterials(self, materialString=None):
"""
printGroundMaterials(materialString=None)
input: None or materialString. If None, return list of acceptable
material types from ground.rad. If valid string, return RGB albedo
of the material type selected.
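
        Example (illustrative sketch):

        >>> ground.printGroundMaterials()            # list of material names
        >>> ground.printGroundMaterials('litesoil')  # 1x3 RGB albedo array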
"""
import warnings
material_path = 'materials'
f = open(os.path.join(material_path, self.material_file))
keys = [] #list of material key names
Rreflall = []; Greflall=[]; Breflall=[] #RGB material reflectance
temp = f.read().split()
f.close()
#return indices for 'plastic' definition
index = _findme(temp,'plastic')
for i in index:
keys.append(temp[i+1])# after plastic comes the material name
Rreflall.append(float(temp[i+5]))#RGB reflectance comes a few more down the list
Greflall.append(float(temp[i+6]))
Breflall.append(float(temp[i+7]))
if materialString is not None:
try:
index = _findme(keys,materialString)[0]
except IndexError:
warnings.warn('Error - materialString not in '
f'{self.material_file}: {materialString}')
return(np.array([[Rreflall[index], Greflall[index], Breflall[index]]]))
else:
return(keys)
def _nonzeromean(self, val):
''' array mean excluding zero. return zero if everything's zero'''
tempmean = np.nanmean(val)
if tempmean > 0:
tempmean = np.nanmean(val[val !=0])
return tempmean
def _makeGroundString(self, index=0, cumulativesky=False):
'''
create string with ground reflectance parameters for use in
gendaylit and gencumsky.
Parameters
-----------
index : integer
Index of time for time-series albedo. Default 0
cumulativesky: Boolean
If true, set albedo to average of time series values.
Returns
-------
groundstring: text with albedo details to append to sky.rad in
gendaylit
'''
try:
if cumulativesky is True:
Rrefl = self._nonzeromean(self.Rrefl)
Grefl = self._nonzeromean(self.Grefl)
Brefl = self._nonzeromean(self.Brefl)
normval = _normRGB(Rrefl, Grefl, Brefl)
else:
Rrefl = self.Rrefl[index]
Grefl = self.Grefl[index]
Brefl = self.Brefl[index]
normval = _normRGB(Rrefl, Grefl, Brefl)
# Check for all zero albedo case
if normval == 0:
normval = 1
groundstring = ( f'\nskyfunc glow ground_glow\n0\n0\n4 '
f'{Rrefl/normval} {Grefl/normval} {Brefl/normval} 0\n'
'\nground_glow source ground\n0\n0\n4 0 0 -1 180\n'
f'\nvoid plastic {self.ground_type}\n0\n0\n5 '
f'{Rrefl:0.3f} {Grefl:0.3f} {Brefl:0.3f} 0 0\n'
f"\n{self.ground_type} ring groundplane\n"
'0\n0\n8\n0 0 -.01\n0 0 1\n0 100' )
except IndexError as err:
print(f'Index {index} passed to albedo with only '
f'{self.Rrefl.__len__()} values.' )
raise err
return groundstring
class SceneObj:
'''
scene information including PV module type, bifaciality, array info
pv module orientation defaults: Azimuth = 180 (south)
pv module origin: z = 0 bottom of frame. y = 0 lower edge of frame.
x = 0 vertical centerline of module
scene includes module details (x,y,bifi, sceney (collector_width), scenex)
'''
def __repr__(self):
return str(self.__dict__)
def __init__(self, module=None):
''' initialize SceneObj
'''
from bifacial_radiance import ModuleObj
# should sceneDict be initialized here? This is set in _makeSceneNxR
if module is None:
return
elif type(module) == str:
self.module = ModuleObj(name=module)
elif type(module) == ModuleObj: # try moduleObj
self.module = module
#self.moduleDict = self.module.getDataDict()
#self.scenex = self.module.scenex
#self.sceney = self.module.sceney
#self.offsetfromaxis = self.moduleDict['offsetfromaxis']
#TODO: get rid of these 4 values
self.modulefile = self.module.modulefile
self.hpc = False #default False. Set True by makeScene after sceneobj created.
def _makeSceneNxR(self, modulename=None, sceneDict=None, radname=None):
"""
Arrange module defined in :py:class:`bifacial_radiance.SceneObj` into a N x R array.
Returns a :py:class:`bifacial_radiance.SceneObj` which contains details
of the PV system configuration including `tilt`, `row pitch`, `hub_height`
or `clearance_height`, `nMod`s per row, `nRows` in the system.
The returned scene has (0,0) coordinates centered at the module at the
center of the array. For 5 rows, that is row 3, for 4 rows, that is
row 2 also (rounds down). For 5 modules in the row, that is module 3,
for 4 modules in the row, that is module 2 also (rounds down)
Parameters
------------
modulename: str
Name of module created with :py:class:`~bifacial_radiance.RadianceObj.makeModule`.
sceneDict : dictionary
Dictionary of scene parameters.
clearance_height : numeric
(meters).
pitch : numeric
Separation between rows
tilt : numeric
Valid input ranges -90 to 90 degrees
azimuth : numeric
A value denoting the compass direction along which the
axis of rotation lies. Measured in decimal degrees East
of North. [0 to 180) possible.
nMods : int
Number of modules per row (default = 20)
nRows : int
Number of rows in system (default = 7)
radname : str
String for name for radfile.
Returns
-------
radfile : str
Filename of .RAD scene in /objects/
scene : :py:class:`~bifacial_radiance.SceneObj `
Returns a `SceneObject` 'scene' with configuration details
"""
if modulename is None:
modulename = self.module.name
if sceneDict is None:
            print('makeScene(modulename, sceneDict, nMods, nRows). sceneDict'
                  ' inputs: .tilt .azimuth .nMods .nRows'
                  ' AND .pitch or .gcr ; AND .hub_height or .clearance_height')
if 'orientation' in sceneDict:
raise Exception('\n\n ERROR: Orientation format has been '
'deprecated since version 0.2.4. If you want to flip your '
'modules, on makeModule switch the x and y values.\n\n')
if 'azimuth' not in sceneDict:
sceneDict['azimuth'] = 180
if 'axis_tilt' not in sceneDict:
sceneDict['axis_tilt'] = 0
if 'originx' not in sceneDict:
sceneDict['originx'] = 0
if 'originy' not in sceneDict:
sceneDict['originy'] = 0
if radname is None:
radname = str(self.module.name).strip().replace(' ', '_')
# loading variables
tilt = sceneDict['tilt']
azimuth = sceneDict['azimuth']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
axis_tilt = sceneDict['axis_tilt']
originx = sceneDict ['originx']
originy = sceneDict['originy']
# hub_height, clearance_height and height logic.
# this routine uses hub_height to move the panels up so it's important
        # to have a value for that, either obtaining it from clearance_height
# (if coming from makeScene) or from hub_height itself.
# it is assumed that if no clearance_height or hub_height is passed,
# hub_height = height.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict, preferred='hub_height',
nonpreferred='clearance_height')
if use_clearanceheight :
hubheight = sceneDict['clearance_height'] + 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney - self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
title_clearance_height = sceneDict['clearance_height']
else:
hubheight = sceneDict['hub_height']
# this calculates clearance_height, used for the title
title_clearance_height = sceneDict['hub_height'] - 0.5* np.sin(abs(tilt) * np.pi / 180) \
* self.module.sceney + self.module.offsetfromaxis*np.sin(abs(tilt)*np.pi/180)
try:
if sceneDict['pitch'] >0:
pitch = sceneDict['pitch']
else:
raise Exception('default to gcr')
except:
if 'gcr' in sceneDict:
pitch = np.round(self.module.sceney/sceneDict['gcr'],3)
else:
raise Exception('No valid `pitch` or `gcr` in sceneDict')
''' INITIALIZE VARIABLES '''
text = '!xform '
text += '-rx %s -t %s %s %s ' %(tilt, 0, 0, hubheight)
# create nMods-element array along x, nRows along y. 1cm module gap.
text += '-a %s -t %s 0 0 -a %s -t 0 %s 0 ' %(nMods, self.module.scenex, nRows, pitch)
# azimuth rotation of the entire shebang. Select the row to scan here based on y-translation.
# Modifying so center row is centered in the array. (i.e. 3 rows, row 2. 4 rows, row 2 too)
# Since the array is already centered on row 1, module 1, we need to increment by Nrows/2-1 and Nmods/2-1
text += (f'-i 1 -t {-self.module.scenex*(round(nMods/1.999)*1.0-1)} '
f'{-pitch*(round(nRows / 1.999)*1.0-1)} 0 -rz {180-azimuth} '
f'-t {originx} {originy} 0 ' )
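        # Illustrative sketch of the assembled command for hypothetical values
        # (tilt=10, hubheight=2.3, scenex=1.01, pitch=5.7, 20 mods x 7 rows,
        # azimuth=180, origin 0,0):
        # '!xform -rx 10 -t 0 0 2.3 -a 20 -t 1.01 0 0 -a 7 -t 0 5.7 0 '
        # '-i 1 -t -9.09 -17.1 0 -rz 0 -t 0 0 0 <modulefile>'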
#axis tilt only working for N-S trackers
if axis_tilt != 0 and azimuth == 90:
print("Axis_Tilt is still under development. The scene will be "
"created with the proper axis tilt, and the tracking angle"
"will consider the axis_tilt, but the sensors for the "
"analysis might not fall in the correct surfaces unless you"
" manually position them for this version. Sorry! :D ")
text += (f'-rx {axis_tilt} -t 0 0 %s ' %(
self.module.scenex*(round(nMods/1.99)*1.0-1)*np.sin(
axis_tilt * np.pi/180) ) )
filename = (f'{radname}_C_{title_clearance_height:0.5f}_rtr_{pitch:0.5f}_tilt_{tilt:0.5f}_'
f'{nMods}modsx{nRows}rows_origin{originx},{originy}.rad' )
if self.hpc:
text += f'"{os.path.join(os.getcwd(), self.modulefile)}"'
radfile = os.path.join(os.getcwd(), 'objects', filename)
else:
text += os.path.join(self.modulefile)
radfile = os.path.join('objects',filename )
# py2 and 3 compatible: binary write, encode text first
with open(radfile, 'wb') as f:
f.write(text.encode('ascii'))
self.gcr = self.module.sceney / pitch
self.text = text
self.radfiles = radfile
self.sceneDict = sceneDict
# self.hub_height = hubheight
return radfile
def showScene(self):
"""
Method to call objview on the scene included in self
"""
cmd = 'objview %s %s' % (os.path.join('materials', 'ground.rad'),
self.radfiles)
print('Rendering scene. This may take a moment...')
_,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml'
' into your RADIANCE binaries path')
return
# end of SceneObj
class MetObj:
"""
Meteorological data from EPW file.
Initialize the MetObj from tmy data already read in.
Parameters
-----------
tmydata : DataFrame
TMY3 output from :py:class:`~bifacial_radiance.RadianceObj.readTMY` or
from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
metadata : Dictionary
Metadata output from output from :py:class:`~bifacial_radiance.RadianceObj.readTMY``
or from :py:class:`~bifacial_radiance.RadianceObj.readEPW`.
    label : str
'left', 'right', or 'center'. For data that is averaged, defines if the
timestamp refers to the left edge, the right edge, or the center of the
averaging interval, for purposes of calculating sunposition. For
example, TMY3 data is right-labeled, so 11 AM data represents data from
10 to 11, and sun position should be calculated at 10:30 AM. Currently
SAM and PVSyst use left-labeled interval data and NSRDB uses centered.
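
    Examples
    --------
    A minimal sketch of the sun-position shift applied to right-labeled hourly
    data: the 11:00 stamp is evaluated at 10:30.

    >>> import pandas as pd
    >>> pd.Timestamp('2021-06-17 11:00') - pd.Timedelta(minutes=30)
    Timestamp('2021-06-17 10:30:00')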
"""
def __init__(self, tmydata, metadata, label = 'right'):
import pytz
import pvlib
#import numpy as np
#First prune all GHI = 0 timepoints. New as of 0.4.0
# TODO: is this a good idea? This changes default behavior...
tmydata = tmydata[tmydata.GHI > 0]
# location data. so far needed:
# latitude, longitude, elevation, timezone, city
self.latitude = metadata['latitude']; lat=self.latitude
self.longitude = metadata['longitude']; lon=self.longitude
self.elevation = metadata['altitude']; elev=self.elevation
self.timezone = metadata['TZ']
try:
self.city = metadata['Name'] # readepw version
except KeyError:
self.city = metadata['city'] # pvlib version
        #self.location.state_province_region = metadata['State'] # unnecessary
self.datetime = tmydata.index.tolist() # this is tz-aware.
self.ghi = np.array(tmydata.GHI)
self.dhi = np.array(tmydata.DHI)
self.dni = np.array(tmydata.DNI)
try:
self.albedo = np.array(tmydata.Alb)
except AttributeError: # no TMY albedo data
self.albedo = None
# Try and retrieve dewpoint and pressure
try:
self.dewpoint = np.array(tmydata['temp_dew'])
except KeyError:
self.dewpoint = None
try:
self.pressure = np.array(tmydata['atmospheric_pressure'])
except KeyError:
self.pressure = None
try:
self.temp_air = np.array(tmydata['temp_air'])
except KeyError:
self.temp_air = None
try:
self.wind_speed = np.array(tmydata['wind_speed'])
except KeyError:
self.wind_speed = None
# Try and retrieve TrackerAngle
try:
self.meastracker_angle = np.array(tmydata['Tracker Angle (degrees)'])
except KeyError:
self.meastracker_angle= None
#v0.2.5: initialize MetObj with solpos, sunrise/set and corrected time
datetimetz = pd.DatetimeIndex(self.datetime)
try: # make sure the data is tz-localized.
datetimetz = datetimetz.tz_localize(pytz.FixedOffset(self.timezone*60))# use pytz.FixedOffset (in minutes)
except TypeError: # data is tz-localized already. Just put it in local time.
datetimetz = datetimetz.tz_convert(pytz.FixedOffset(self.timezone*60))
#check for data interval. default 1h.
try:
interval = datetimetz[1]-datetimetz[0]
except IndexError:
interval = pd.Timedelta('1h') # ISSUE: if 1 datapoint is passed, are we sure it's hourly data?
print ("WARNING: TMY interval was unable to be defined, so setting it to 1h.")
# TODO: Refactor this into a subfunction. first calculate minutedelta
# based on label and interval (-30, 0, +30, +7.5 etc) then correct all.
if label.lower() == 'center':
print("Calculating Sun position for center labeled data, at exact timestamp in input Weather File")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['corrected_timestamp'] = datetimetz
else:
if interval== pd.Timedelta('1h'):
if label.lower() == 'right':
print("Calculating Sun position for Metdata that is right-labeled ",
"with a delta of -30 mins. i.e. 12 is 11:30 sunpos")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['minutedelta']= int(interval.seconds/2/60) # default sun angle 30 minutes before timestamp
# vector update of minutedelta at sunrise
sunrisemask = sunup.index.hour-1==sunup['sunrise'].dt.hour
sunup['minutedelta'].mask(sunrisemask,np.floor((60-(sunup['sunrise'].dt.minute))/2),inplace=True)
# vector update of minutedelta at sunset
sunsetmask = sunup.index.hour-1==sunup['sunset'].dt.hour
sunup['minutedelta'].mask(sunsetmask,np.floor((60-(sunup['sunset'].dt.minute))/2),inplace=True)
# save corrected timestamp
sunup['corrected_timestamp'] = sunup.index-pd.to_timedelta(sunup['minutedelta'], unit='m')
elif label.lower() == 'left':
print("Calculating Sun position for Metdata that is left-labeled ",
"with a delta of +30 mins. i.e. 12 is 12:30 sunpos.")
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon)
sunup['minutedelta']= int(interval.seconds/2/60) # default sun angle 30 minutes after timestamp
# vector update of minutedelta at sunrise
sunrisemask = sunup.index.hour==sunup['sunrise'].dt.hour
sunup['minutedelta'].mask(sunrisemask,np.ceil((60+sunup['sunrise'].dt.minute)/2),inplace=True)
# vector update of minutedelta at sunset
sunsetmask = sunup.index.hour==sunup['sunset'].dt.hour
sunup['minutedelta'].mask(sunsetmask,np.ceil((60+sunup['sunset'].dt.minute)/2),inplace=True)
# save corrected timestamp
sunup['corrected_timestamp'] = sunup.index+pd.to_timedelta(sunup['minutedelta'], unit='m')
else: raise ValueError('Error: invalid weather label passed. Valid inputs: right, left or center')
else:
minutedelta = int(interval.seconds/2/60)
print("Interval in weather data is less than 1 hr, calculating"
f" Sun position with a delta of -{minutedelta} minutes.")
print("If you want no delta for sunposition, use "
"readWeatherFile( label='center').")
#datetimetz=datetimetz-pd.Timedelta(minutes = minutedelta) # This doesn't check for Sunrise or Sunset
#sunup= pvlib.irradiance.solarposition.get_sun_rise_set_transit(datetimetz, lat, lon) # deprecated in pvlib 0.6.1
sunup= pvlib.irradiance.solarposition.sun_rise_set_transit_spa(datetimetz, lat, lon) #new for pvlib >= 0.6.1
sunup['corrected_timestamp'] = sunup.index-pd.Timedelta(minutes = minutedelta)
self.solpos = pvlib.irradiance.solarposition.get_solarposition(sunup['corrected_timestamp'],lat,lon,elev)
self.sunrisesetdata=sunup
self.label = label
def _set1axis(self, azimuth=180, limit_angle=45, angledelta=None,
backtrack=True, gcr=1.0/3.0, cumulativesky=True,
fixed_tilt_angle=None, axis_tilt=0, useMeasuredTrackerAngle=False):
"""
Set up geometry for 1-axis tracking cumulativesky. Solpos data
already stored in `metdata.solpos`. Pull in tracking angle details from
pvlib, create multiple 8760 metdata sub-files where datetime of met
data matches the tracking angle.
Parameters
------------
cumulativesky : bool
Whether individual csv files are created
with constant tilt angle for the cumulativesky approach.
if false, the gendaylit tracking approach must be used.
azimuth : numerical
orientation axis of tracker torque tube. Default North-South (180 deg)
For fixed tilt simulations this is the orientation azimuth
limit_angle : numerical
+/- limit angle of the 1-axis tracker in degrees. Default 45
angledelta : numerical
Degree of rotation increment to parse irradiance bins.
Default 5 degrees (0.4 % error for DNI).
Other options: 4 (.25%), 2.5 (0.1%).
(the smaller the angledelta, the more simulations)
backtrack : bool
Whether backtracking is enabled (default = True)
        gcr : float
            Ground coverage ratio for calculating backtracking. Default 1.0/3.0
        axis_tilt : float
            Tilt of the axis. While it can be considered for the tracking calculation,
            the scene geometry creation of the trackers does not support tilted
            axis trackers yet (but it can be done manually-ish; see Tutorials).
fixed_tilt_angle : numeric
If passed, this changes to a fixed tilt simulation where each hour
uses fixed_tilt_angle and azimuth as the tilt and azimuth
Returns
-------
trackerdict : dictionary
Keys for tracker tilt angles and
list of csv metfile, and datetimes at that angle
trackerdict[angle]['csvfile';'surf_azm';'surf_tilt';'UTCtime']
metdata.solpos : dataframe
Dataframe with output from pvlib solar position for each timestep
metdata.sunrisesetdata :
Pandas dataframe with sunrise, sunset and adjusted time data.
metdata.tracker_theta : list
Tracker tilt angle from pvlib for each timestep
metdata.surface_tilt : list
Tracker surface tilt angle from pvlib for each timestep
metdata.surface_azimuth : list
Tracker surface azimuth angle from pvlib for each timestep
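
        Examples
        --------
        Illustrative sketch; assumes a MetObj ``metdata`` from readWeatherFile
        (this method is normally invoked through RadianceObj.set1axis):

        >>> trackerdict = metdata._set1axis(azimuth=180, limit_angle=45,
        ...                                 backtrack=True, gcr=0.33,
        ...                                 cumulativesky=False)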
"""
#axis_tilt = 0 # only support 0 tilt trackers for now
self.cumulativesky = cumulativesky # track whether we're using cumulativesky or gendaylit
if (cumulativesky is True) & (angledelta is None):
angledelta = 5 # round angle to 5 degrees for cumulativesky
# get 1-axis tracker angles for this location,
# round to nearest 'angledelta'
if self.meastracker_angle is not None and useMeasuredTrackerAngle is True:
print("Tracking Data: Reading from provided Tracker Angles")
elif self.meastracker_angle is None and useMeasuredTrackerAngle is True:
useMeasuredTrackerAngle = False
print("Warning: Using Measured Tracker Angles was specified but DATA"+
" for trackers has not yet been assigned. "+
" Assign it by making it a column on your Weatherdata File "+
"named 'Tracker Angle (degrees)' and run ReadWeatherFile again")
trackingdata = self._getTrackingAngles(azimuth,
limit_angle,
angledelta,
axis_tilt = axis_tilt,
backtrack = backtrack,
gcr = gcr,
fixed_tilt_angle=fixed_tilt_angle,
useMeasuredTrackerAngle=useMeasuredTrackerAngle)
# get list of unique rounded tracker angles
theta_list = trackingdata.dropna()['theta_round'].unique()
if cumulativesky is True:
# create a separate metfile for each unique tracker theta angle.
# return dict of filenames and details
trackerdict = self._makeTrackerCSV(theta_list,trackingdata)
else:
# trackerdict uses timestamp as keys. return azimuth
# and tilt for each timestamp
#times = [str(i)[5:-12].replace('-','_').replace(' ','_') for i in self.datetime]
times = [i.strftime('%Y-%m-%d_%H%M') for i in self.datetime]
#trackerdict = dict.fromkeys(times)
trackerdict = {}
for i,time in enumerate(times) :
# remove NaN tracker theta from trackerdict
if (self.ghi[i] > 0) & (~np.isnan(self.tracker_theta[i])):
trackerdict[time] = {
'surf_azm':self.surface_azimuth[i],
'surf_tilt':self.surface_tilt[i],
'theta':self.tracker_theta[i],
'ghi':self.ghi[i],
'dhi':self.dhi[i]
}
return trackerdict
def _getTrackingAngles(self, azimuth=180, limit_angle=45,
angledelta=None, axis_tilt=0, backtrack=True,
gcr = 1.0/3.0, fixed_tilt_angle=None,
useMeasuredTrackerAngle=False):
'''
Helper subroutine to return 1-axis tracker tilt and azimuth data.
Parameters
----------
same as pvlib.tracking.singleaxis, plus:
angledelta : degrees
Angle to round tracker_theta to. This is for
cumulativesky simulations. Other input options: None (no
rounding of tracker angle)
fixed_tilt_angle : (Optional) degrees
This changes to a fixed tilt simulation where each hour uses
fixed_tilt_angle and azimuth as the tilt and azimuth
Returns
-------
DataFrame with the following columns:
* tracker_theta: The rotation angle of the tracker.
tracker_theta = 0 is horizontal, and positive rotation angles
are clockwise.
* aoi: The angle-of-incidence of direct irradiance onto the
rotated panel surface.
* surface_tilt: The angle between the panel surface and the earth
surface, accounting for panel rotation.
* surface_azimuth: The azimuth of the rotated panel, determined by
projecting the vector normal to the panel's surface to the
earth's surface.
* 'theta_round' : tracker_theta rounded to the nearest 'angledelta'
If no angledelta is specified, it is rounded to the nearest degree.
'''
import pvlib
import warnings
from pvlib.irradiance import aoi
#import numpy as np
#import pandas as pd
solpos = self.solpos
#New as of 0.3.2: pass fixed_tilt_angle and switches to FIXED TILT mode
if fixed_tilt_angle is not None:
# system with fixed tilt = fixed_tilt_angle
surface_tilt=fixed_tilt_angle
surface_azimuth=azimuth
# trackingdata keys: 'tracker_theta', 'aoi', 'surface_azimuth', 'surface_tilt'
trackingdata = pd.DataFrame({'tracker_theta':fixed_tilt_angle,
'aoi':aoi(surface_tilt, surface_azimuth,
solpos['zenith'],
solpos['azimuth']),
'surface_azimuth':azimuth,
'surface_tilt':fixed_tilt_angle})
elif useMeasuredTrackerAngle:
# tracked system
surface_tilt=self.meastracker_angle
surface_azimuth=azimuth
trackingdata = pd.DataFrame({'tracker_theta':self.meastracker_angle,
'aoi':aoi(surface_tilt, surface_azimuth,
solpos['zenith'],
solpos['azimuth']),
'surface_azimuth':azimuth,
'surface_tilt':abs(self.meastracker_angle)})
else:
# get 1-axis tracker tracker_theta, surface_tilt and surface_azimuth
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=RuntimeWarning)
trackingdata = pvlib.tracking.singleaxis(solpos['zenith'],
solpos['azimuth'],
axis_tilt,
azimuth,
limit_angle,
backtrack,
gcr)
# save tracker tilt information to metdata.tracker_theta,
# metdata.surface_tilt and metdata.surface_azimuth
self.tracker_theta = np.round(trackingdata['tracker_theta'],2).tolist()
self.surface_tilt = np.round(trackingdata['surface_tilt'],2).tolist()
self.surface_azimuth = np.round(trackingdata['surface_azimuth'],2).tolist()
# undo the timestamp offset put in by solpos.
#trackingdata.index = trackingdata.index + pd.Timedelta(minutes = 30)
# It may not be exactly 30 minutes any more...
trackingdata.index = self.sunrisesetdata.index #this has the original time data in it
# round tracker_theta to increments of angledelta for use in cumulativesky
def _roundArbitrary(x, base=angledelta):
# round to nearest 'base' value.
# mask NaN's to avoid rounding error message
return base * (x/float(base)).round()
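        # Illustrative sketch: _roundArbitrary(pd.Series([13.2, -27.6]), base=5)
        # returns [15.0, -30.0] -- each angle snaps to the nearest 5-degree bin.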
if angledelta == 0:
raise ZeroDivisionError('Angledelta = 0. Use None instead')
elif angledelta is None: # don't round theta
trackingdata['theta_round'] = trackingdata['tracker_theta']
else: # round theta
trackingdata['theta_round'] = \
_roundArbitrary(trackingdata['tracker_theta'], angledelta)
return trackingdata
def _makeTrackerCSV(self, theta_list, trackingdata):
'''
Create multiple new irradiance csv files with data for each unique
rounded tracker angle. Return a dictionary with the new csv filenames
and other details, Used for cumulativesky tracking
Parameters
-----------
theta_list : array
Array of unique tracker angle values
trackingdata : Pandas
Pandas Series with hourly tracker angles from
:pvlib.tracking.singleaxis
Returns
--------
trackerdict : dictionary
keys: *theta_round tracker angle (default: -45 to +45 in
5 degree increments).
sub-array keys:
*datetime: array of datetime strings in this group of angles
*count: number of datapoints in this group of angles
*surf_azm: tracker surface azimuth during this group of angles
*surf_tilt: tilt angle average during this group of angles
*csvfile: name of csv met data file saved in /EPWs/
'''
dt = pd.to_datetime(self.datetime)
trackerdict = dict.fromkeys(theta_list)
for theta in sorted(trackerdict):
trackerdict[theta] = {}
csvfile = os.path.join('EPWs', '1axis_{}.csv'.format(theta))
tempdata = trackingdata[trackingdata['theta_round'] == theta]
#Set up trackerdict output for each value of theta
trackerdict[theta]['csvfile'] = csvfile
trackerdict[theta]['surf_azm'] = tempdata['surface_azimuth'].median()
trackerdict[theta]['surf_tilt'] = abs(theta)
datetimetemp = tempdata.index.strftime('%Y-%m-%d %H:%M:%S') #local time
trackerdict[theta]['datetime'] = datetimetemp
trackerdict[theta]['count'] = datetimetemp.__len__()
#Create new temp csv file with zero values for all times not equal to datetimetemp
# write 8760 2-column csv: GHI,DHI
ghi_temp = []
dhi_temp = []
for g, d, time in zip(self.ghi, self.dhi,
dt.strftime('%Y-%m-%d %H:%M:%S')):
# is this time included in a particular theta_round angle?
if time in datetimetemp:
ghi_temp.append(g)
dhi_temp.append(d)
else:
# mask out irradiance at this time, since it
# belongs to a different bin
ghi_temp.append(0.0)
dhi_temp.append(0.0)
# save in 2-column GHI,DHI format for gencumulativesky -G
savedata = pd.DataFrame({'GHI':ghi_temp, 'DHI':dhi_temp},
index = self.datetime).tz_localize(None)
# Fill partial year. Requires 2021 measurement year.
savedata = _subhourlydatatoGencumskyformat(savedata,
label=self.label)
print('Saving file {}, # points: {}'.format(
trackerdict[theta]['csvfile'], datetimetemp.__len__()))
savedata.to_csv(csvfile,
index=False,
header=False,
sep=' ',
columns=['GHI','DHI'])
return trackerdict
class AnalysisObj:
"""
Analysis class for performing raytrace to obtain irradiance measurements
at the array, as well plotting and reporting results.
"""
def __repr__(self):
return str(self.__dict__)
def __init__(self, octfile=None, name=None, hpc=False):
"""
Initialize AnalysisObj by pointing to the octfile. Scan information
is defined separately by passing scene details into AnalysisObj.moduleAnalysis()
Parameters
------------
octfile : string
Filename and extension of .oct file
name :
hpc : boolean, default False. Waits for octfile for a
longer time if parallel processing.
"""
self.octfile = octfile
self.name = name
self.hpc = hpc
def makeImage(self, viewfile, octfile=None, name=None):
"""
Makes a visible image (rendering) of octfile, viewfile
"""
import time
if octfile is None:
octfile = self.octfile
if name is None:
name = self.name
#TODO: update this for cross-platform compatibility w/ os.path.join
if self.hpc :
time_to_wait = 10
time_counter = 0
filelist = [octfile, "views/"+viewfile]
for file in filelist:
while not os.path.exists(file):
time.sleep(1)
time_counter += 1
                    if time_counter > time_to_wait:
                        break
print('Generating visible render of scene')
#TODO: update this for cross-platform compatibility w os.path.join
os.system("rpict -dp 256 -ar 48 -ms 1 -ds .2 -dj .9 -dt .1 "+
"-dc .5 -dr 1 -ss 1 -st .1 -ab 3 -aa .1 "+
"-ad 1536 -as 392 -av 25 25 25 -lr 8 -lw 1e-4 -vf views/"
+viewfile+ " " + octfile +
" > images/"+name+viewfile[:-3] +".hdr")
def makeFalseColor(self, viewfile, octfile=None, name=None):
"""
Makes a false-color plot of octfile, viewfile
.. note::
For Windows requires installation of falsecolor.exe,
which is part of radwinexe-5.0.a.8-win64.zip found at
http://www.jaloxa.eu/resources/radiance/radwinexe.shtml
"""
#TODO: error checking for installation of falsecolor.exe
if octfile is None:
octfile = self.octfile
if name is None:
name = self.name
print('Generating scene in WM-2. This may take some time.')
#TODO: update and test this for cross-platform compatibility using os.path.join
cmd = "rpict -i -dp 256 -ar 48 -ms 1 -ds .2 -dj .9 -dt .1 "+\
"-dc .5 -dr 1 -ss 1 -st .1 -ab 3 -aa .1 -ad 1536 -as 392 " +\
"-av 25 25 25 -lr 8 -lw 1e-4 -vf views/"+viewfile + " " + octfile
WM2_out,err = _popen(cmd,None)
if err is not None:
print('Error: {}'.format(err))
return
# determine the extreme maximum value to help with falsecolor autoscale
extrm_out,err = _popen("pextrem",WM2_out.encode('latin1'))
# cast the pextrem string as a float and find the max value
WM2max = max(map(float,extrm_out.split()))
print('Saving scene in false color')
#auto scale false color map
if WM2max < 1100:
cmd = "falsecolor -l W/m2 -m 1 -s 1100 -n 11"
else:
cmd = "falsecolor -l W/m2 -m 1 -s %s"%(WM2max,)
with open(os.path.join("images","%s%s_FC.hdr"%(name,viewfile[:-3]) ),"w") as f:
data,err = _popen(cmd,WM2_out.encode('latin1'),f)
if err is not None:
print(err)
print('possible solution: install radwinexe binary package from '
'http://www.jaloxa.eu/resources/radiance/radwinexe.shtml')
def _linePtsArray(self, linePtsDict):
"""
Helper function to just print the x y and z values in an array format,
just like they will show in the .csv result files.
"""
xstart = linePtsDict['xstart']
ystart = linePtsDict['ystart']
zstart = linePtsDict['zstart']
xinc = linePtsDict['xinc']
yinc = linePtsDict['yinc']
zinc = linePtsDict['zinc']
sx_xinc = linePtsDict['sx_xinc']
sx_yinc = linePtsDict['sx_yinc']
sx_zinc = linePtsDict['sx_zinc']
Nx = int(linePtsDict['Nx'])
Ny = int(linePtsDict['Ny'])
Nz = int(linePtsDict['Nz'])
x = []
y = []
z = []
for iz in range(0,Nz):
for ix in range(0,Nx):
for iy in range(0,Ny):
                    x.append(xstart + iy*xinc + ix*sx_xinc)
                    y.append(ystart + iy*yinc + ix*sx_yinc)
                    z.append(zstart + iy*zinc + ix*sx_zinc)
return x, y, z
def _linePtsMakeDict(self, linePtsDict):
a = linePtsDict
linepts = self._linePtsMake3D(a['xstart'],a['ystart'],a['zstart'],
a['xinc'], a['yinc'], a['zinc'],
a['sx_xinc'], a['sx_yinc'], a['sx_zinc'],
a['Nx'],a['Ny'],a['Nz'],a['orient'])
return linepts
def _linePtsMake3D(self, xstart, ystart, zstart, xinc, yinc, zinc,
sx_xinc, sx_yinc, sx_zinc,
Nx, Ny, Nz, orient):
#create linepts text input with variable x,y,z.
#If you don't want to iterate over a variable, inc = 0, N = 1.
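        # Illustrative sketch (hypothetical values): Nx=1, Ny=3, Nz=1,
        # start point (0, 0, 1), yinc=0.1, orient '0 0 -1' produces
        # "0 0 1 0 0 -1 \r0 0.1 1 0 0 -1 \r0 0.2 1 0 0 -1 \r"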
linepts = ""
# make sure Nx, Ny, Nz are ints.
Nx = int(Nx)
Ny = int(Ny)
Nz = int(Nz)
for iz in range(0,Nz):
for ix in range(0,Nx):
for iy in range(0,Ny):
xpos = xstart+iy*xinc+ix*sx_xinc
ypos = ystart+iy*yinc+ix*sx_yinc
zpos = zstart+iy*zinc+ix*sx_zinc
linepts = linepts + str(xpos) + ' ' + str(ypos) + \
' '+str(zpos) + ' ' + orient + " \r"
return(linepts)
def _irrPlot(self, octfile, linepts, mytitle=None, plotflag=None,
accuracy='low'):
"""
        (plotdict) = _irrPlot(octfile, linepts, mytitle, plotflag, accuracy)
irradiance plotting using rtrace
pass in the linepts structure of the view along with a title string
for the plots.
Parameters
------------
octfile : string
Filename and extension of .oct file
linepts :
Output from :py:class:`bifacial_radiance.AnalysisObj._linePtsMake3D`
mytitle : string
Title to append to results files
plotflag : Boolean
Include plot of resulting irradiance
accuracy : string
Either 'low' (default - faster) or 'high'
(better for low light)
Returns
-------
out : dictionary
out.x,y,z - coordinates of point
.r,g,b - r,g,b values in Wm-2
.Wm2 - equal-weight irradiance
.mattype - material intersected
.title - title passed in
"""
if mytitle is None:
mytitle = octfile[:-4]
if plotflag is None:
plotflag = False
if self.hpc :
import time
time_to_wait = 10
time_counter = 0
while not os.path.exists(octfile):
time.sleep(1)
time_counter += 1
if time_counter > time_to_wait:
print('Warning: OCTFILE NOT FOUND')
break
if octfile is None:
print('Analysis aborted. octfile = None' )
return None
        keys = ['Wm2','x','y','z','r','g','b','mattype']
out = {key: [] for key in keys}
#out = dict.fromkeys(['Wm2','x','y','z','r','g','b','mattype','title'])
out['title'] = mytitle
print ('Linescan in process: %s' %(mytitle))
#rtrace ambient values set for 'very accurate':
#cmd = "rtrace -i -ab 5 -aa .08 -ar 512 -ad 2048 -as 512 -h -oovs "+ octfile
if accuracy == 'low':
#rtrace optimized for faster scans: (ab2, others 96 is too coarse)
cmd = "rtrace -i -ab 2 -aa .1 -ar 256 -ad 2048 -as 256 -h -oovs "+ octfile
elif accuracy == 'high':
#rtrace ambient values set for 'very accurate':
cmd = "rtrace -i -ab 5 -aa .08 -ar 512 -ad 2048 -as 512 -h -oovs "+ octfile
else:
print('_irrPlot accuracy options: "low" or "high"')
return({})
temp_out,err = _popen(cmd,linepts.encode())
if err is not None:
if err[0:5] == 'error':
raise Exception(err[7:])
else:
print(err)
# when file errors occur, temp_out is None, and err message is printed.
if temp_out is not None:
for line in temp_out.splitlines():
temp = line.split('\t')
out['x'].append(float(temp[0]))
out['y'].append(float(temp[1]))
out['z'].append(float(temp[2]))
out['r'].append(float(temp[3]))
out['g'].append(float(temp[4]))
out['b'].append(float(temp[5]))
out['mattype'].append(temp[6])
out['Wm2'].append(sum([float(i) for i in temp[3:6]])/3.0)
if plotflag is True:
import matplotlib.pyplot as plt
plt.figure()
plt.plot(out['Wm2'])
plt.ylabel('Wm2 irradiance')
plt.xlabel('variable')
plt.title(mytitle)
plt.show()
else:
out = None # return empty if error message.
return(out)
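# Illustrative shape of the dictionary returned by _irrPlot (added; values
# are hypothetical):
#   {'x': [0.0, 0.5], 'y': [0.0, 0.0], 'z': [1.5, 1.5],
#    'r': [420.1, 418.9], 'g': [430.2, 429.0], 'b': [410.5, 409.8],
#    'Wm2': [420.3, 419.2], 'mattype': ['a0.PVmodule.6457', ...],
#    'title': 'mytitle'}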
def _saveResults(self, data=None, reardata=None, savefile=None, RGB = False):
"""
Function to save output from _irrPlot.
If reardata is passed in, the back ratio is saved as well.
If data is None, only reardata is saved.
Returns
--------
savefile : str
    Path of the .csv written in the results folder. If savefile was
    None, a default filename based on the data title is used.
"""
if savefile is None:
savefile = data['title'] + '.csv'
if data is None and reardata is not None: # only rear data is passed.
data = reardata
reardata = None
# run process like normal but swap labels at the end
rearswapflag = True
else:
rearswapflag = False
# make savefile dataframe and set self.attributes
if RGB:
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'mattype', 'Wm2','r', 'g', 'b' ]}
else:
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'mattype','Wm2' ]}
df = pd.DataFrame(data_sub)
df = df.rename(columns={'Wm2':'Wm2Front'})
if reardata is not None:
df.insert(3, 'rearZ', reardata['z'])
df.insert(5, 'rearMat', reardata['mattype'])
df.insert(7, 'Wm2Back', reardata['Wm2'])
# add 1mW/m2 to avoid dividebyzero
df.insert(8, 'Back/FrontRatio', df['Wm2Back'] / (df['Wm2Front']+.001))
df['backRatio'] = df['Back/FrontRatio']
df['rearX'] = reardata['x']
df['rearY'] = reardata['y']
if RGB:
df['rearR'] = reardata['r']
df['rearG'] = reardata['g']
df['rearB'] = reardata['b']
#df = df[['x','y','z','rearZ','mattype','rearMat',
# 'Wm2Front','Wm2Back','Back/FrontRatio',
# 'r','g','b', 'rearR','rearG','rearB']]
#else:
#df = df[['x','y','z','rearZ','mattype','rearMat',
# 'Wm2Front','Wm2Back','Back/FrontRatio']]
#else:
# if RGB:
# df = df[['x','y','z', 'mattype','Wm2Front', 'r', 'g', 'b']]
#
# else:
# df = df[['x','y','z', 'mattype','Wm2Front']]
# rename columns if only rear data was originally passed
if rearswapflag:
df = df.rename(columns={'Wm2Front':'Wm2Back','mattype':'rearMat'})
# set attributes of analysis to equal columns of df
for col in df.columns:
setattr(self, col, list(df[col]))
# only save a subset
df = df.drop(columns=['rearX','rearY','backRatio'], errors='ignore')
df.to_csv(os.path.join("results", savefile), sep = ',',
index = False)
print('Saved: %s'%(os.path.join("results", savefile)))
return os.path.join("results", savefile)
def _saveResultsCumulative(self, data, reardata=None, savefile=None):
"""
TEMPORARY FUNCTION -- this is a fix to save ONE cumulative results csv
in the main working folder for when doing multiple entries in a
tracker dict.
Returns
--------
savefile : str
    Path of the .csv written (in the main working folder). If savefile
    was None, a default filename based on the data title is used.
"""
if savefile is None:
savefile = data['title'] + '.csv'
# make dataframe from results
data_sub = {key:data[key] for key in ['x', 'y', 'z', 'Wm2', 'mattype']}
self.x = data['x']
self.y = data['y']
self.z = data['z']
self.mattype = data['mattype']
#TODO: data_sub front values don't seem to be saved to self.
if reardata is not None:
self.rearX = reardata['x']
self.rearY = reardata['y']
self.rearMat = reardata['mattype']
data_sub['rearMat'] = self.rearMat
self.rearZ = reardata['z']
data_sub['rearZ'] = self.rearZ
self.Wm2Front = data_sub.pop('Wm2')
data_sub['Wm2Front'] = self.Wm2Front
self.Wm2Back = reardata['Wm2']
data_sub['Wm2Back'] = self.Wm2Back
self.backRatio = [x/(y+.001) for x,y in zip(reardata['Wm2'],data['Wm2'])] # add 1mW/m2 to avoid dividebyzero
data_sub['Back/FrontRatio'] = self.backRatio
df = pd.DataFrame.from_dict(data_sub)
df.to_csv(savefile, sep = ',',
columns = ['x','y','z','rearZ','mattype','rearMat',
'Wm2Front','Wm2Back','Back/FrontRatio'],
index = False) # new in 0.2.3
else:
df = pd.DataFrame.from_dict(data_sub)
df.to_csv(savefile, sep = ',',
columns = ['x','y','z', 'mattype','Wm2'], index = False)
print('Saved: %s'%(savefile))
return (savefile)
def moduleAnalysis(self, scene, modWanted=None, rowWanted=None,
sensorsy=9, sensorsx=1,
frontsurfaceoffset=0.001, backsurfaceoffset=0.001,
modscanfront=None, modscanback=None, relative=False,
debug=False):
"""
Handler function that decides how to handle different numbers of front
and back sensors. If the number of front sensors is not provided, or is
the same as for the back, _moduleAnalysis is called only once;
otherwise it is called twice to build separate front and back
dictionaries.
This function defines the scan points to be used in the
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function,
to perform the raytrace through Radiance function `rtrace`
Parameters
------------
scene : ``SceneObj``
Generated with :py:class:`~bifacial_radiance.RadianceObj.makeScene`.
modWanted : int
Module wanted to sample. If none, defaults to center module (rounding down)
rowWanted : int
Row wanted to sample. If none, defaults to center row (rounding down)
sensorsy : int or list
Number of 'sensors' or scanning points along the collector width
(CW) of the module(s). If multiple values are passed, first value
represents number of front sensors, second value is number of back sensors
sensorsx : int or list
Number of 'sensors' or scanning points along the module length, the
side perpendicular to the collector width (CW) of the module(s).
If multiple values are passed, the first value is the number of
front sensors and the second value is the number of back sensors.
debug : bool
Activates various print statements for debugging this function.
modscanfront : dict
Dictionary to modify the frontscan values established by this routine
and set a specific value. Keys possible are 'xstart', 'ystart', 'zstart',
'xinc', 'yinc', 'zinc', 'Nx', 'Ny', 'Nz', and 'orient'. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated frontscan dictionary for the module & row selected.
modscanback: dict
Dictionary to modify the backscan values established by this routine
and set a specific value. Keys possible are 'xstart', 'ystart', 'zstart',
'xinc', 'yinc', 'zinc', 'Nx', 'Ny', 'Nz', and 'orient'. If modifying
Nx, Ny or Nz, make sure to modify on modscanback to avoid issues on
results writing stage. All of these keys are ints or
floats except for 'orient' which takes x y z values as string 'x y z'
for example '0 0 -1'. These values will overwrite the internally
calculated backscan dictionary for the module & row selected.
relative : Bool
if passing modscanfront and modscanback to modify the dictionaries of
positions, this sets whether the values passed are relative or
absolute. Default is absolute (relative=False)
Returns
-------
frontscan : dictionary
Scan dictionary for module's front side. Used to pass into
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function
backscan : dictionary
Scan dictionary for module's back side. Used to pass into
:py:class:`~bifacial_radiance.AnalysisObj.analysis` function
"""
# Height: clearance height for fixed tilt systems, or torque tube
# height for single-axis tracked systems.
# Single axis tracked systems will consider the offset to calculate the final height.
def _checkSensors(sensors):
# Checking Sensors input data for list or tuple
if (type(sensors)==tuple or type(sensors)==list):
try:
sensors_back = sensors[1]
sensors_front = sensors[0]
except IndexError: # only 1 value passed??
sensors_back = sensors_front = sensors[0]
elif (type(sensors)==int or type(sensors)==float):
# Ensure sensors are positive int values.
if int(sensors) < 1:
raise Exception('input sensorsy must be numeric >0')
sensors_back = sensors_front = int(sensors)
else:
print('Warning: invalid value passed for sensors. Setting = 1')
sensors_back = sensors_front = 1
return sensors_front, sensors_back
sensorsy_front, sensorsy_back = _checkSensors(sensorsy)
sensorsx_front, sensorsx_back = _checkSensors(sensorsx)
if (sensorsx_back != sensorsx_front) or (sensorsy_back != sensorsy_front):
sensors_diff = True
else:
sensors_diff = False
dtor = np.pi/180.0
# Internal scene parameters are stored in scene.sceneDict. Load these into local variables
sceneDict = scene.sceneDict
azimuth = sceneDict['azimuth']
tilt = sceneDict['tilt']
nMods = sceneDict['nMods']
nRows = sceneDict['nRows']
originx = sceneDict['originx']
originy = sceneDict['originy']
# offset = moduleDict['offsetfromaxis']
offset = scene.module.offsetfromaxis
sceney = scene.module.sceney
scenex = scene.module.scenex
# x needed for sensorsx>1 case
x = scene.module.x
## Check for proper input variables in sceneDict
if 'pitch' in sceneDict:
pitch = sceneDict['pitch']
elif 'gcr' in sceneDict:
pitch = sceney / sceneDict['gcr']
else:
raise Exception("Error: no 'pitch' or 'gcr' passed in sceneDict" )
if 'axis_tilt' in sceneDict:
axis_tilt = sceneDict['axis_tilt']
else:
axis_tilt = 0
if hasattr(scene.module,'z'):
modulez = scene.module.z
else:
print ("Module's z not set on sceneDict internal dictionary. Setting to default")
modulez = 0.02
if frontsurfaceoffset is None:
frontsurfaceoffset = 0.001
if backsurfaceoffset is None:
backsurfaceoffset = 0.001
# The Sensor routine below needs a "hub-height", not a clearance height.
# The check below determines whether height (deprecated) was passed,
# and whether clearance_height or hub_height was passed as well.
sceneDict, use_clearanceheight = _heightCasesSwitcher(sceneDict,
preferred = 'hub_height',
nonpreferred = 'clearance_height')
if use_clearanceheight :
height = sceneDict['clearance_height'] + 0.5* \
np.sin(abs(tilt) * np.pi / 180) * \
sceney - offset*np.sin(abs(tilt)*np.pi/180)
else:
height = sceneDict['hub_height']
if debug:
print("For debug:\n hub_height, Azimuth, Tilt, nMods, nRows, "
"Pitch, Offset, SceneY, SceneX")
print(height, azimuth, tilt, nMods, nRows,
pitch, offset, sceney, scenex)
if modWanted == 0:
print( " FYI Modules and Rows start at index 1. "
"Reindexing to modWanted 1" )
modWanted = modWanted+1 # otherwise it would give results on empty space.
if rowWanted ==0:
print( " FYI Modules and Rows start at index 1. "
"Reindexing to rowWanted 1" )
rowWanted = rowWanted+1
if modWanted is None:
modWanted = round(nMods / 1.99)
if rowWanted is None:
rowWanted = round(nRows / 1.99)
if debug is True:
print( f"Sampling: modWanted {modWanted}, rowWanted {rowWanted} "
"out of {nMods} modules, {nRows} rows" )
x0 = (modWanted-1)*scenex - (scenex*(round(nMods/1.99)*1.0-1))
y0 = (rowWanted-1)*pitch - (pitch*(round(nRows / 1.99)*1.0-1))
x1 = x0 * np.cos ((180-azimuth)*dtor) - y0 * np.sin((180-azimuth)*dtor)
y1 = x0 * np.sin ((180-azimuth)*dtor) + y0 * np.cos((180-azimuth)*dtor)
z1 = 0
if axis_tilt != 0 and azimuth == 90:
print ("fixing height for axis_tilt")
z1 = (modWanted-1)*scenex * np.sin(axis_tilt*dtor)
# Edge of Panel
x2 = (sceney/2.0) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
y2 = (sceney/2.0) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
z2 = -(sceney/2.0) * np.sin(tilt*dtor)
# Axis of rotation Offset (if offset is not 0) for the front of the module
x3 = (offset + modulez + frontsurfaceoffset) * np.sin(tilt*dtor) * np.sin((azimuth)*dtor)
y3 = (offset + modulez + frontsurfaceoffset) * np.sin(tilt*dtor) * np.cos((azimuth)*dtor)
z3 = (offset + modulez + frontsurfaceoffset) * np.cos(tilt*dtor)
# Axis of rotation Offset, for the back of the module
x4 = (offset - backsurfaceoffset) * np.sin(tilt*dtor) * np.sin((azimuth)*dtor)
y4 = (offset - backsurfaceoffset) * np.sin(tilt*dtor) * np.cos((azimuth)*dtor)
z4 = (offset - backsurfaceoffset) * np.cos(tilt*dtor)
xstartfront = x1 + x2 + x3 + originx
xstartback = x1 + x2 + x4 + originx
ystartfront = y1 + y2 + y3 + originy
ystartback = y1 + y2 + y4 + originy
zstartfront = height + z1 + z2 + z3
zstartback = height + z1 + z2 + z4
#Adjust orientation of scan depending on tilt & azimuth
zdir = np.cos((tilt)*dtor)
ydir = np.sin((tilt)*dtor) * np.cos((azimuth)*dtor)
xdir = np.sin((tilt)*dtor) * np.sin((azimuth)*dtor)
front_orient = '%0.3f %0.3f %0.3f' % (-xdir, -ydir, -zdir)
back_orient = '%0.3f %0.3f %0.3f' % (xdir, ydir, zdir)
#IF cellmodule:
#TODO: Add check for sensorsx_back
#temp = scene.moduleDict.get('cellModule') #error-free way to query it
#if ((temp is not None) and
if ((getattr(scene.module, 'cellModule', None)) and
(sensorsy_back == scene.module.cellModule.numcellsy)):
ycell = scene.module.cellModule.ycell
xinc_back = -((sceney - ycell ) / (scene.module.cellModule.numcellsy-1)) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
yinc_back = -((sceney - ycell) / (scene.module.cellModule.numcellsy-1)) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
zinc_back = ((sceney - ycell) / (scene.module.cellModule.numcellsy-1)) * np.sin(tilt*dtor)
firstsensorxstartfront = xstartfront - scene.module.cellModule.ycell/2 * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
firstsensorxstartback = xstartback - ycell/2 * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
firstsensorystartfront = ystartfront - ycell/2 * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
firstsensorystartback = ystartback - ycell/2 * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
firstsensorzstartfront = zstartfront + ycell/2 * np.sin(tilt*dtor)
firstsensorzstartback = zstartback + ycell/2 * np.sin(tilt*dtor)
xinc_front = xinc_back
yinc_front = yinc_back
zinc_front = zinc_back
sx_xinc_front = 0.0
sx_yinc_front = 0.0
sx_zinc_front = 0.0
sx_xinc_back = 0.0
sx_yinc_back = 0.0
sx_zinc_back = 0.0
if (sensorsx_back != 1.0):
print("Warning: Cell-level module analysis for sensorsx > 1 not "+
"fine-tuned yet. Use at own risk, some of the x positions "+
"might fall in spacing between cells.")
else:
xinc_back = -(sceney/(sensorsy_back + 1.0)) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
yinc_back = -(sceney/(sensorsy_back + 1.0)) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
zinc_back = (sceney/(sensorsy_back + 1.0)) * np.sin(tilt*dtor)
if sensors_diff:
xinc_front = -(sceney/(sensorsy_front + 1.0)) * np.cos((tilt)*dtor) * np.sin((azimuth)*dtor)
yinc_front = -(sceney/(sensorsy_front + 1.0)) * np.cos((tilt)*dtor) * np.cos((azimuth)*dtor)
zinc_front = (sceney/(sensorsy_front + 1.0)) * np.sin(tilt*dtor)
import numpy as np
def findCosineDistance(source_representation, test_representation):
a = np.matmul(np.transpose(source_representation), test_representation)
b = np.sum(np.multiply(source_representation, source_representation))
c = np.sum(np.multiply(test_representation, test_representation))
return 1 - (a / (np.sqrt(b) * np.sqrt(c)))
def findEuclideanDistance(source_representation, test_representation):
euclidean_distance = source_representation - test_representation
euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance))
euclidean_distance = np.sqrt(euclidean_distance)
return euclidean_distance
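# Minimal usage sketch (added): two toy 3-d "embedding" vectors. Identical
# vectors give 0 under both metrics.
_a = np.array([0.1, 0.2, 0.3])
_b = np.array([0.3, 0.2, 0.1])
print(findCosineDistance(_a, _b))     # ~0.2857
print(findEuclideanDistance(_a, _b))  # ~0.2828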
from jesse.helpers import get_candle_source, slice_candles, np_shift
import numpy as np
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config
from collections import namedtuple
#jesse backtest '2021-01-03' '2021-03-02'
WEIS = namedtuple('WEIS',['up','dn'])
'''
https://www.tradingview.com/script/XttzkWc0-Weis-Wave-Volume-Pinescript-4/#chart-view-comments
'''
def weis(candles: np.ndarray, trendDetectionLength:int=3, source_type: str = "close", sequential: bool = False ) -> Union[float, np.ndarray]:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
up,dn = fast_weis(source,candles,trendDetectionLength)
if sequential:
return WEIS(up,dn)
else:
return WEIS(up[-1],dn[-1])
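# Usage sketch (added; `candles` is the standard jesse candles ndarray):
#   ww = weis(candles, sequential=True)
#   ww.up[-5:], ww.dn[-5:]   # wave volume accumulated on up / down waves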
@njit
def fast_weis(source,candles,trendDetectionLength):
mov = np.full_like(source,0)
trend = np.full_like(source,0)
from itertools import accumulate
import numba
import numpy as np
from matplotlib import pyplot as plt
from scipy.optimize import curve_fit
def do_rw(n=1000000, w=0.1, sigma=1):
deltas = sigma * np.random.randn(n)
x = np.array(list(accumulate(deltas, lambda x0, xi: x0 + xi - w * x0)))
return x
@numba.njit(fastmath=True)
def do_numba_rw(n=1000, w=0.99, sigma=1.0, init_sigma=7):
val = np.random.randn() * init_sigma
for i in range(n):
val = val * w + np.random.randn() * sigma
return val
@numba.njit(fastmath=True)
def do_full_rw(n=1000, w=0.99, sigma=1.0, init_sigma=7):
result = np.zeros(n)
val = np.random.randn() * init_sigma
for i in range(n):
val = val * w + np.random.randn() * sigma
result[i] = val
return result
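# Closed-form check (added illustration): the update val' = w*val + N(0, sigma)
# is an AR(1) process whose stationary standard deviation is
# sigma / sqrt(1 - w**2), so find_final_std below should converge to this
# value empirically (for w=0.99, sigma=1 that is ~7.09, matching the
# init_sigma=7 default above).
def analytic_final_std(w, sigma=1.0):
    return sigma / np.sqrt(1.0 - w * w)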
@numba.njit(fastmath=True)
def find_final_std(w, sigma, tolerance=0.01, n=1000, nrounds=1000):
# get two initial data points
vals = np.array([do_numba_rw(n, w=w, sigma=sigma) for i in range(nrounds)])
old_std = np.std(vals)
# on the second one, use the std dev we found the first time to jumpstart
# things
vals = np.array([
do_numba_rw(n, w=w, sigma=sigma, init_sigma=old_std)
for i in range(nrounds)
])
new_std = np.std(vals)
# declare a couple variables. monotonic keeps track of whether the increase
# in std is still monotonic; stds is a list that accumulates the stds once
# the monotonic phase ends.
monotonic = True
stds = []
# make sure we don't trigger the first time
percent_err = tolerance + 1
# the loop has two phases, the initial monotonic phase, and the phase where
# we are actually recording stderrs.
while monotonic or (percent_err > tolerance):
# print("new std: ", new_std)
# if we are in the first phase, we will seed the next point with the
# last std
if not monotonic:
avg = new_std
# check for monotonicity and switch modes if appropriate
if old_std > new_std:
monotonic = False
# when we switch modes, populate the std array with two initial
# values
stds.append(old_std)
stds.append(new_std)
old_std = new_std
# while running, run another nrounds random walks and compute the
# standard deviation
vals = np.array([
do_numba_rw(n, w=w, sigma=sigma, init_sigma=old_std)
for i in range(nrounds)
])
new_std = np.std(vals)
# in the second mode, compute standard errors of the standard deviations
# as well as the mean.
if not monotonic:
stds.append(new_std)
stds_array = np.array(stds)
stderr = np.std(stds_array) / np.sqrt(len(stds_array) - 1)
# print("stderr: ", stderr)
avg = np.mean(stds_array)
percent_err = stderr / avg
return (avg, np.std(stds_array))
@numba.njit(fastmath=True)
def find_coh_length(w, sigma, measured_sigma=-1., tolerance=.01, nrounds=1000):
# if not passed a measured_sigma (final stationary sigma of the random walk), find it:
measured_sigma_std = 0
if measured_sigma == -1.:
measured_sigma, measured_sigma_std = find_final_std(w, sigma)
# now do a bunch of random walks and find the coherence length for each one
coherence_lengths = []
# initialize stderr to a large value so we enter the loop the first time
percent_err = tolerance + 1
while percent_err > tolerance:
# initialize a random walk
init_val = np.random.randn() * measured_sigma
val = init_val
# keeps track of how far the random walk goes
counter = 0
# continue the random walk until we have wandered sigma/e (the threshold
# for "coherence" of an unweighted random walk). Assuming this is a
# reasonable estimate for a weighted random walk may be bad, but life is
# short.
while np.abs(init_val - val) < measured_sigma / np.e:
val = val * w + np.random.randn() * sigma
counter += 1
coherence_lengths.append(counter)
if len(coherence_lengths) > 5:
coh_len_array = np.array(coherence_lengths)
#! /usr/bin/env python
"""
IMU Node. Gets raw IMU data from ABridge and publishes calibrated IMU messages.
Can perform a 2D IMU Calibration as a fallback at the start of a round.
Ellipsoid fit, from:
https://github.com/aleksandrbazhin/ellipsoid_fit_python
Adapted for ROS by <NAME>, Cabrillo College.
The MIT License (MIT)
Copyright (c) 2016 aleksandrbazhin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
todo: pick validation thresholds that make sense. Something that means the 2D calibration is likely going to be better.
todo: ok for node to crash if extended cal file is missing or corrupt?
"""
from __future__ import print_function
import math
import numpy
import sys
import tf
import rospy
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus, KeyValue
from geometry_msgs.msg import Quaternion
from sensor_msgs.msg import Imu
from std_msgs.msg import String
from std_srvs.srv import Empty, EmptyRequest, EmptyResponse
from control_msgs.srv import QueryCalibrationState, QueryCalibrationStateResponse
from swarmie_msgs.msg import SwarmieIMU
class IMU:
"""Global State Variables"""
STATE_IDLE = 0
STATE_NORMAL = 1
STATE_VALIDATE = 2
STATE_CAL_GYRO_BIAS = 3
STATE_CAL_GYRO_SCALE = 4
STATE_CAL_MAG = 5
STATE_CAL_MISALIGN = 6
# Mode variables
MODE_3D = 0
MODE_2D = 1
# In case someone forgets to exit either calibration state.
DATA_SIZE_LIMIT = 3000 # 5 min worth of data at 10 Hz
MIN_DATA_SIZE = 50
# For extended file validation
ROLL_PITCH_TOLERANCE = 3.0 # degrees
MAG_VAR_TOLERANCE = 1e-3
ACC_VAR_TOLERANCE = 4e-3
def __init__(self, rover):
self.rover = rover
rospy.init_node(self.rover + '_IMU')
if rospy.has_param('~imu_mode'): # if respawning
self._get_mode()
else:
self.current_mode = IMU.MODE_3D # default to 3D mode
self.current_state = IMU.STATE_IDLE # idle until data file is loaded
self.gyro_timer = None
self.gyro_start_time = None
self.gyro_status_msg = ''
self.cal = {}
self.roll = 0
self.pitch = 0
self.yaw = 0
# used during file validation
self.rolls = []
self.pitches = []
# Default param values. Set to final values after validating
self.finished_validating = False
self.needs_calibration = False
self.DEBUG = rospy.get_param(
'~publish_debug_topic',
default=False
)
self.LOAD_RAW_DATA = rospy.get_param(
'~load_raw_data',
default=False
)
self.RAW_DATA_PATH = rospy.get_param(
'~raw_data_path',
default='/home/swarmie/KSC_extended_calibration.csv'
)
# Raw data collected while in a calibration state is stored in a list
# of lists, which is converted to a numpy array when needed.
self.mag_data = [[], [], []]
self.gyro_data = [[], [], []]
# Default matrices
self.acc_offsets = [[0], [0], [0]]
self.acc_transform = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.mag_offsets = [[0], [0], [0]]
self.mag_transform = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.misalignment = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.gyro_bias = [[0], [0], [0]]
self.gyro_scale = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
# Subscribers
self.imu_raw_sub = rospy.Subscriber(
self.rover + '/imu/raw',
SwarmieIMU,
self.imu_callback,
queue_size=10
)
# Publishers
self.imu_pub = rospy.Publisher(
self.rover + '/imu',
Imu,
queue_size=10
)
self.imu_diag_pub = rospy.Publisher(
self.rover + '/imu/cal_diag',
DiagnosticArray,
queue_size=10,
latch=True
)
if self.DEBUG:
self.imu_cal_data_pub = rospy.Publisher(
self.rover + '/imu/raw/calibrated',
SwarmieIMU,
queue_size=10
)
self.info_log = rospy.Publisher(
'/infoLog',
String,
queue_size=10
)
self.diags_log = rospy.Publisher(
'/diagsLog',
String,
queue_size=10,
latch=True
)
# Services
self.start_imu_cal = rospy.Service(
self.rover + '/start_imu_calibration',
Empty,
self.start_imu_calibration
)
self.store_cal = rospy.Service(
self.rover + '/store_imu_calibration',
Empty,
self.store_calibration
)
self.start_misalign_cal = rospy.Service(
self.rover + '/start_misalignment_calibration',
Empty,
self.start_misalignment_calibration
)
self.start_gyro_bias_cal = rospy.Service(
self.rover + '/start_gyro_bias_calibration',
Empty,
self.start_gyro_bias_calibration
)
self.start_gyro_scale_cal = rospy.Service(
self.rover + '/start_gyro_scale_calibration',
Empty,
self.start_gyro_scale_calibration
)
self._is_finished_val = rospy.Service(
self.rover + '/imu/is_finished_validating',
QueryCalibrationState,
self._is_finished_validating
)
self._needs_cal = rospy.Service(
self.rover + '/imu/needs_calibration',
QueryCalibrationState,
self._needs_calibration
)
# Try waiting for subscriber on /diagsLog. Helps to make sure first
# message or two actually make it onto the rqt gui.
rate = rospy.Rate(10)
for i in range(20):
if self.diags_log.get_num_connections() > 0:
break
rate.sleep()
# If node is respawning for some reason
if rospy.has_param('~imu_calibration_matrices'):
self.cal = rospy.get_param('~imu_calibration_matrices')
self._get_mode()
self.acc_offsets = self.cal['acc_offsets']
self.acc_transform = self.cal['acc_transform']
self.mag_offsets = self.cal['mag_offsets']
self.mag_transform = self.cal['mag_transform']
self.misalignment = self.cal['misalignment']
self.gyro_bias = self.cal['gyro_bias']
self.gyro_scale = self.cal['gyro_scale']
self.current_state = IMU.STATE_NORMAL
self.finished_validating = True
self.needs_calibration = False
msg = self.rover + ': reloaded calibration matrices after respawn.'
if self.current_mode == IMU.MODE_2D:
msg += ' Using 2D mode.'
elif self.current_mode == IMU.MODE_3D:
msg += ' Using 3D mode.'
rospy.loginfo(msg)
self.diags_log.publish(msg)
elif self.LOAD_RAW_DATA:
self.load_and_validate_calibration()
# Publish current calibration once:
self.publish_diagnostic_msg()
def _set_mode(self, mode):
"""Sets the IMU mode to mode and puts it onto the parameter server.
Useful if node respawns, so it knows which mode (2D/3D) it was in."""
self.current_mode = mode
rospy.set_param('~imu_mode', mode)
def _get_mode(self):
"""Gets the IMU mode from the parameter server. Useful if node
respawns, so it knows which mode (2D/3D) it was in."""
self.current_mode = rospy.get_param('~imu_mode', default=IMU.MODE_3D)
def _is_finished_validating(self, req):
"""Service to allow Swarmie API to wait until extended calibration file
has been loaded and validated."""
response = QueryCalibrationStateResponse()
response.is_calibrated = self.finished_validating
return response
def _needs_calibration(self, req):
"""Service to allow Swarmie API to ask if the IMU needs to be
calibrated using the 2D fallback."""
response = QueryCalibrationStateResponse()
response.is_calibrated = self.needs_calibration
return response
def load_and_validate_calibration(self):
"""Load the extended calibration file.
Raises:
* IOError if calibration file can't be found.
* ValueError if calibration file is corrupt.
"""
try:
data = numpy.loadtxt(self.RAW_DATA_PATH, delimiter=',')
mag_x = data[:,0]
mag_y = data[:,1]
mag_z = data[:,2]
acc_x = data[:,3]
acc_y = data[:,4]
acc_z = data[:,5]
self.cal['mag_offsets'], self.cal['mag_transform'] = \
self.ellipsoid_fit(mag_x, mag_y, mag_z)
self.cal['acc_offsets'], self.cal['acc_transform'] = \
self.ellipsoid_fit(acc_x, acc_y, acc_z)
self.cal['misalignment'] = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
self.cal['gyro_bias'] = [[0], [0], [0]]
self.cal['gyro_scale'] = [[1., 0, 0],
[0, 1., 0],
[0, 0, 1.]]
rospy.loginfo(
self.rover + ': IMU raw data file loaded from ' +
self.RAW_DATA_PATH
)
except IOError as e:
msg = (self.rover +
': FATAL ERROR. Extended calibration file not found.')
rospy.logfatal(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
raise
except ValueError as e:
msg = (self.rover +
': FATAL ERROR. Error reading extended calibration file.')
rospy.logfatal(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
raise
# Calibration matrices are stored as lists and converted to numpy
# arrays when needed.
self.acc_offsets = self.cal['acc_offsets']
self.acc_transform = self.cal['acc_transform']
self.mag_offsets = self.cal['mag_offsets']
self.mag_transform = self.cal['mag_transform']
self.misalignment = self.cal['misalignment']
self.gyro_bias = self.cal['gyro_bias']
self.gyro_scale = self.cal['gyro_scale']
# Check variance in errors
mag_var_err = self.error(mag_x, mag_y, mag_z,
self.mag_offsets, self.mag_transform)
acc_var_err = self.error(acc_x, acc_y, acc_z,
self.acc_offsets, self.acc_transform)
mag_msg = '{}: Magnetometer v[Err]: {:7.6f}'.format(self.rover,
mag_var_err)
acc_msg = '{}: Accelerometer v[Err]: {:7.6f}'.format(self.rover,
acc_var_err)
self.diags_log.publish(mag_msg)
rospy.loginfo(mag_msg)
self.diags_log.publish(acc_msg)
rospy.loginfo(acc_msg)
if (math.isnan(mag_var_err) or
abs(mag_var_err) >= IMU.MAG_VAR_TOLERANCE):
msg = "{}: The magnetometer fit is too poor to use.".format(
self.rover
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
if (math.isnan(acc_var_err) or
abs(acc_var_err) >= IMU.ACC_VAR_TOLERANCE):
msg = "{}: The accelerometer fit is too poor to use.".format(
self.rover
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
# Check roll and pitch
self.current_state = IMU.STATE_VALIDATE
try:
rospy.wait_for_message(
self.rover + '/imu/raw',
SwarmieIMU,
timeout=5
)
except rospy.ROSException:
# hopefully this doesn't happen
pass
# wait for 2 seconds for messages to come in and populate
# self.rolls and self.pitches
rospy.sleep(2)
avg_roll = numpy.average(self.rolls) * 180 / math.pi
avg_pitch = numpy.average(self.pitches) * 180 / math.pi
self.diags_log.publish('{}: Average roll: {:6.3f} deg'.format(
self.rover,
avg_roll)
)
self.diags_log.publish('{}: Average pitch: {:6.3f} deg'.format(
self.rover,
avg_pitch)
)
if abs(avg_roll) > IMU.ROLL_PITCH_TOLERANCE:
msg = '{}: Roll exceeds tolerance threshold of {:.1f} deg.'.format(
self.rover,
IMU.ROLL_PITCH_TOLERANCE
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
if abs(avg_pitch) > IMU.ROLL_PITCH_TOLERANCE:
msg = '{}: Pitch exceeds tolerance threshold of {:.1f} deg.'.format(
self.rover,
IMU.ROLL_PITCH_TOLERANCE
)
rospy.logwarn(msg)
self.diags_log.publish('<font color=Red>' + msg + '</font>')
self.needs_calibration = True
self._set_mode(IMU.MODE_2D)
self.finished_validating = True
self.store_calibration(EmptyRequest())
self.current_state = IMU.STATE_NORMAL
def error(self, x, y, z, offsets, transform):
"""Compute the variance of errors of data in numpy arrays x,
y, z. Errors are the distances of the calibrated points from the
surface of the unit sphere.
"""
v = numpy.array([x, y, z])
offsets = numpy.array(offsets)
transform = numpy.array(transform)
v = transform.dot(v - offsets)
var_err = numpy.var(numpy.sqrt(numpy.sum(numpy.square(v), 0)) - 1)
return var_err
def ellipsoid_fit(self, x, y, z):
"""Fit the data points contained in numpy arrays x, y and z to a unit
sphere centered at the origin.
Returns a list containing the offset matrix to center the data, and
a list containing the transformation matrix, to map each data point to
its position on the sphere.
Modified from:
http://www.mathworks.com/matlabcentral/fileexchange/24693-ellipsoid-fit
"""
D = numpy.array([x*x,
y*y,
z*z,
2 * x*y,
2 * x*z,
2 * y*z,
2 * x,
2 * y,
2 * z])
DT = D.conj().T
v = numpy.linalg.solve(D.dot(DT), D.dot(numpy.ones(numpy.size(x))))
A = numpy.array([[v[0], v[3], v[4], v[6]],
[v[3], v[1], v[5], v[7]],
[v[4], v[5], v[2], v[8]],
[v[6], v[7], v[8], -1]])
center = numpy.linalg.solve(-A[:3,:3], [[v[6]], [v[7]], [v[8]]])
T = numpy.eye(4)
T[3,:3] = center.T
R = T.dot(A).dot(T.conj().T)
evals, evecs = numpy.linalg.eig(R[:3,:3] / -R[3,3])
radii = numpy.sqrt(1. / evals)
offset = center
a, b, c = radii
D = numpy.array([[1/a, 0., 0.], [0., 1/b, 0.], [0., 0., 1/c]])
transform = evecs.dot(D).dot(evecs.T)
return offset.tolist(), transform.tolist()
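# Illustrative note (added): the returned matrices are applied to raw
# readings exactly as in error() above,
#     calibrated = transform.dot(raw - offsets)
# which maps each point onto (approximately) the unit sphere centered at
# the origin.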
def ellipse_fit(self, x, y):
"""Fits the data points in x and y to a circle centered at the x-y
origin.
http://nicky.vanforeest.com/misc/fitEllipse/fitEllipse.html
Returns 3R x 1C offset matrix and a 3x3 transformation matrix. Only the
first 2 rows and columns are calculated in the transformation matrix,
since this is only a 2-D calibration.
"""
x = x[:,numpy.newaxis]
y = y[:,numpy.newaxis]
D = numpy.hstack((x*x, x*y, y*y, x, y, numpy.ones_like(x)))
S = numpy.dot(D.T,D)
C = numpy.zeros([6,6])
C[0,2] = C[2,0] = 2; C[1,1] = -1
E, V = numpy.linalg.eig(numpy.dot(numpy.linalg.inv(S), C))
import cv2
import pymynt
import numpy as np
import autoaim
import time
watching_point = None
seq = 0
hook_activate_count = 0
def printDepth(event, x, y, flags, param):
global watching_point
if event == cv2.EVENT_FLAG_LBUTTON:
print((x, y),' :',mat[y,x])
watching_point = (x, y)
def depth2img(mat):
out = mat/np.max(mat)
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
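# Illustrative check (added; not part of the generated tables): for P -1 the
# only symmetry mate of (h, k, l) is (-h, -k, -l), and both phase factors are
# 1 because every translation is zero.
_hkls, _phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
# _hkls   -> [[ 1,  2,  3], [-1, -2, -3]]
# _phases -> [1.+0.j, 1.+0.j]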
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
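# Hedged sketch: expanding a fractional position into its orbit under one
# group's operations, wrapping each image back into the unit cell.
# `_equivalent_positions` is illustrative only; it takes one of the raw
# `transformations` lists built in this file.
def _equivalent_positions(transformations, xyz):
    positions = []
    for rot, trans_num, trans_den in transformations:
        pos = N.dot(rot, N.asarray(xyz, dtype=float)) + trans_num / trans_den.astype(float)
        positions.append(pos % 1.0)  # reduce translations into [0, 1)
    return positions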
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
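# Note on centred settings (an observation about the data above, not a rule
# enforced anywhere): body- and face-centred groups repeat their primitive
# operations with each centring vector added, and the stored translations are
# not reduced modulo 1 -- e.g. I m a 2 above keeps (1, 1/2, 1/2) as
# trans_num=[1,1,1], trans_den=[1,2,2]. Sketch of canonicalising such a
# translation into the unit cell (`_reduced_translation` is illustrative):
def _reduced_translation(trans_num, trans_den):
    return (trans_num / trans_den.astype(float)) % 1.0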
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
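# Usage sketch: every group is registered twice, so lookups work either by
# International Tables number or by Hermann-Mauguin symbol, and both keys
# refer to the same SpaceGroup instance:
#
#     assert space_groups[47] is space_groups['P m m m']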
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
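# Hedged observation: in the primitive centrosymmetric groups above (e.g.
# P n m a), the second half of each operation list is the first half composed
# with the inversion -- rotations negated, translations sign-flipped and left
# unreduced (hence entries like trans_num=[-1,0,-1]). Illustrative check for
# a primitive group's list (`_is_centrosymmetric_primitive` is not module API):
def _is_centrosymmetric_primitive(transformations):
    half = len(transformations) // 2
    pairs = zip(transformations[:half], transformations[half:])
    return all(N.all(rot2 == -rot1) for (rot1, n1, d1), (rot2, n2, d2) in pairs)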
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
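# Space group 65: C m m m (orthorhombic, C-centred)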
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
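# Space group 66: C c c m (orthorhombic, C-centred)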
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
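# Space group 67: C m m a (orthorhombic, C-centred)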
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
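# Space group 68: C c c a :2 (orthorhombic, C-centred, origin choice 2)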
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
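# Space group 69: F m m m (orthorhombic, face-centred: operations repeated
# with the (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0) centring translations)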
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
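# Space group 70: F d d d :2 (orthorhombic, face-centred, origin choice 2)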
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
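# Space group 71: I m m m (orthorhombic, body-centred). Usage note: the
# table is keyed both ways, so space_groups[71] and
# space_groups['I m m m'] return the same SpaceGroup instance.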
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
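# Space group 72: I b a m (orthorhombic, body-centred)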
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
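# Space group 73: I b c a (orthorhombic, body-centred)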
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
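# Space group 74: I m m a (orthorhombic, body-centred)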
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
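# Space group 75: P 4 (the tetragonal groups start here)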
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
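# Space group 76: P 41 (tetragonal, 4_1 screw axis)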
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
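# Space group 77: P 42 (tetragonal, 4_2 screw axis)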
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
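# Space group 78: P 43 (tetragonal, 4_3 screw axis)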
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
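# Space group 79: I 4 (tetragonal, body-centred)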
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
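# Space group 80: I 41 (tetragonal, body-centred)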
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
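# Space group 81: P -4 (tetragonal, -4 roto-inversion)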
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
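# Space group 82: I -4 (tetragonal, body-centred)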
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
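# Space group 83: P 4/m (tetragonal)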
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
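# Space group 84: P 42/m (tetragonal)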
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
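# Space group 85: P 4/n :2 (tetragonal, origin choice 2)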
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
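# Space group 86: P 42/n :2 (tetragonal, origin choice 2)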
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
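# Space group 87: I 4/m (tetragonal, body-centred)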
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
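# Space group 88: I 41/a :2 (tetragonal, body-centred, origin choice 2)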
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
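# Space group 89: P 4 2 2 (tetragonal)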
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
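# Space group 90: P 4 21 2 (tetragonal)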
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
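# Space group 91: P 41 2 2 (tetragonal)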
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
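# Body-centred (I) groups such as 'I 41 2 2' above list each of the 8 point
# operations twice, once per centring vector (0,0,0) and (1/2,1/2,1/2).  The
# centred copies are stored unreduced, so translations such as (1, 1/2, 5/4)
# are equivalent to (0, 1/2, 1/4) modulo lattice translations.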
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
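# The '-4' groups below (111-122) open with rotoinversion operations: rot
# matrices of determinant -1 such as [0,1,0,-1,0,0,0,0,-1], i.e.
# (x,y,z) -> (y,-x,-z), the 4-bar axis along z.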
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
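# Usage sketch: every group in this file is registered twice, once under its
# ITA number and once under its Hermann-Mauguin symbol, so either key
# recovers the same SpaceGroup instance. The assertion below is illustrative
# and relies only on the two assignments immediately above.
assert space_groups[123] is space_groups['P 4/m m m']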
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
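# Note (illustrative, an observation about this table rather than part of
# it): the generated translations are sometimes left unreduced, e.g.
# (1/2, 1/2, 1) in I 4/m c m below, or negative entries such as
# (-1/2, -1/2, 0) elsewhere; these are equivalent modulo whole lattice
# translations. A minimal sketch for normalising a translation into [0, 1):
def _reduced_translation(trans_num, trans_den):
    import numpy
    t = numpy.asarray(trans_num, dtype=float) / trans_den
    return t % 1.0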
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
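# Each entry in a transformation list is a tuple (rot, trans_num, trans_den):
# a 3x3 integer rotation matrix plus a fractional translation given as an
# elementwise numerator/denominator pair. A symmetry operation maps a
# fractional coordinate x to rot.dot(x) + trans_num/trans_den, taken modulo 1.
# Minimal sketch (assumption: how downstream code applies these tuples is not
# shown in this module; N is the numeric module imported at the top):
#
#   rot = N.array([0,-1,0,1,-1,0,0,0,1]); rot.shape = (3, 3)
#   pos = N.array([0.1, 0.2, 0.3])
#   image = rot.dot(pos) + N.array([0,0,1]) / N.array([1.0, 1.0, 3.0])
#   # image == (-0.2, -0.1, 0.6333...): the point turned by the threefold
#   # axis and shifted by z + 1/3.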
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
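# The ':2' suffix on the symbols above marks origin choice 2 of the
# International Tables (origin placed at a centre of inversion); the same
# groups also exist in an origin-1 setting with shifted translation parts.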
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
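# Note: P 31 (No. 144) and P 32 (No. 145) are an enantiomorphic pair; their
# operations differ only in the handedness of the threefold screw, i.e. the
# translations z + 1/3 and z + 2/3 are swapped between the two groups.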
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(146, 'R 3 :H', transformations)
space_groups[146] = sg
space_groups['R 3 :H'] = sg
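# For rhombohedral groups in the hexagonal setting (the ':H' symbols), the
# generated list repeats each point operation three times, once per
# R-centering translation: (0,0,0), (1/3,2/3,2/3) and (2/3,1/3,1/3). That is
# why the block above is exactly three times the length of the primitive set.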
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(147, 'P -3', transformations)
space_groups[147] = sg
space_groups['P -3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(148, 'R -3 :H', transformations)
space_groups[148] = sg
space_groups['R -3 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(149, 'P 3 1 2', transformations)
space_groups[149] = sg
space_groups['P 3 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(150, 'P 3 2 1', transformations)
space_groups[150] = sg
space_groups['P 3 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(151, 'P 31 1 2', transformations)
space_groups[151] = sg
space_groups['P 31 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(152, 'P 31 2 1', transformations)
space_groups[152] = sg
space_groups['P 31 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(153, 'P 32 1 2', transformations)
space_groups[153] = sg
space_groups['P 32 1 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(154, 'P 32 2 1', transformations)
space_groups[154] = sg
space_groups['P 32 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(155, 'R 3 2 :H', transformations)
space_groups[155] = sg
space_groups['R 3 2 :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(156, 'P 3 m 1', transformations)
space_groups[156] = sg
space_groups['P 3 m 1'] = sg
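# P 3 m 1 (No. 156) and P 3 1 m (No. 157, below) contain the same point
# operations with the mirror planes oriented 30 degrees apart relative to the
# hexagonal axes; compare the mirror matrices in the two blocks.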
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(157, 'P 3 1 m', transformations)
space_groups[157] = sg
space_groups['P 3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(158, 'P 3 c 1', transformations)
space_groups[158] = sg
space_groups['P 3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(159, 'P 3 1 c', transformations)
space_groups[159] = sg
space_groups['P 3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(160, 'R 3 m :H', transformations)
space_groups[160] = sg
space_groups['R 3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(161, 'R 3 c :H', transformations)
space_groups[161] = sg
space_groups['R 3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(162, 'P -3 1 m', transformations)
space_groups[162] = sg
space_groups['P -3 1 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(163, 'P -3 1 c', transformations)
space_groups[163] = sg
space_groups['P -3 1 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(164, 'P -3 m 1', transformations)
space_groups[164] = sg
space_groups['P -3 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(165, 'P -3 c 1', transformations)
space_groups[165] = sg
space_groups['P -3 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(166, 'R -3 m :H', transformations)
space_groups[166] = sg
space_groups['R -3 m :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,7])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,2,2])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,2,1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,5])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([2,1,1])
trans_den = N.array([3,3,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([2,1,-1])
trans_den = N.array([3,3,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(167, 'R -3 c :H', transformations)
space_groups[167] = sg
space_groups['R -3 c :H'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(168, 'P 6', transformations)
space_groups[168] = sg
space_groups['P 6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(169, 'P 61', transformations)
space_groups[169] = sg
space_groups['P 61'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(170, 'P 65', transformations)
space_groups[170] = sg
space_groups['P 65'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(171, 'P 62', transformations)
space_groups[171] = sg
space_groups['P 62'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(172, 'P 64', transformations)
space_groups[172] = sg
space_groups['P 64'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(173, 'P 63', transformations)
space_groups[173] = sg
space_groups['P 63'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(174, 'P -6', transformations)
space_groups[174] = sg
space_groups['P -6'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(175, 'P 6/m', transformations)
space_groups[175] = sg
space_groups['P 6/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(176, 'P 63/m', transformations)
space_groups[176] = sg
space_groups['P 63/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(177, 'P 6 2 2', transformations)
space_groups[177] = sg
space_groups['P 6 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(178, 'P 61 2 2', transformations)
space_groups[178] = sg
space_groups['P 61 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,5])
trans_den = N.array([1,1,6])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(179, 'P 65 2 2', transformations)
space_groups[179] = sg
space_groups['P 65 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(180, 'P 62 2 2', transformations)
space_groups[180] = sg
space_groups['P 62 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(181, 'P 64 2 2', transformations)
space_groups[181] = sg
space_groups['P 64 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(182, 'P 63 2 2', transformations)
space_groups[182] = sg
space_groups['P 63 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(183, 'P 6 m m', transformations)
space_groups[183] = sg
space_groups['P 6 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(184, 'P 6 c c', transformations)
space_groups[184] = sg
space_groups['P 6 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(185, 'P 63 c m', transformations)
space_groups[185] = sg
space_groups['P 63 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(186, 'P 63 m c', transformations)
space_groups[186] = sg
space_groups['P 63 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(187, 'P -6 m 2', transformations)
space_groups[187] = sg
space_groups['P -6 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(188, 'P -6 c 2', transformations)
space_groups[188] = sg
space_groups['P -6 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(189, 'P -6 2 m', transformations)
space_groups[189] = sg
space_groups['P -6 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(190, 'P -6 2 c', transformations)
space_groups[190] = sg
space_groups['P -6 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,-1,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,-1,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(191, 'P 6/m m m', transformations)
space_groups[191] = sg
space_groups['P 6/m m m'] = sg
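# Illustrative sketch (not part of the generated table): each tuple in a
# `transformations` list encodes one affine symmetry operation on fractional
# coordinates, x' = rot . x + trans_num / trans_den. Assuming `N` exposes a
# NumPy-compatible `array`/`dot` API, an operation could be applied like so:
def _apply_symmetry_op(op, point):
    # `point` is a length-3 sequence of fractional coordinates.
    rot, trans_num, trans_den = op
    return N.dot(rot, point) + N.array(trans_num, 'd') / N.array(trans_den, 'd')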
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
import math
from io import BytesIO
from shutil import copyfile, rmtree
from tempfile import mkdtemp
from typing import Dict
from pathlib import Path
import numpy as np
from qcore import geo
def plot_site_vs30(
out_ffp: str,
site_lon: float,
site_lat: float,
station_lon: float,
station_lat: float,
station_vs30: float,
station_ll_ffp: str,
vs30_grid_ffp: str = None,
site_vs30: float = None,
distance: float = 8,
):
from qcore import gmt
vs30_grid_ffp = (
str(
Path(__file__).resolve().parent.parent
/ "resources"
/ "nz_vs30_nz-specific-v19p1_100m.grd"
)
if vs30_grid_ffp is None
else vs30_grid_ffp
)
max_lat = geo.ll_shift(site_lat, site_lon, distance, 0)[0]
min_lon = geo.ll_shift(site_lat, site_lon, distance, -90)[1]
min_lat = site_lat - (max_lat - site_lat)
max_lon = site_lon + (site_lon - min_lon)
region = (min_lon, max_lon, min_lat, max_lat)
    # automatic label positioning; this simple dx/dy scheme doesn't handle site/station pairs that straddle geographic quadrant boundaries
if abs(site_lat - station_lat) > abs(site_lon - station_lon):
# labels above/below
dx = 0
if site_lat > station_lat:
# site label above, closest site label below
site_align = "CB"
closest_align = "CT"
dy = 0.12
else:
# opposite
site_align = "CT"
closest_align = "CB"
dy = -0.12
else:
# labels to the side
dy = 0
if site_lon > station_lon:
# site label to right, closest site label to left
site_align = "LM"
closest_align = "RM"
dx = -0.12
else:
# opposite
site_align = "RM"
closest_align = "LM"
dx = 0.12
wd = mkdtemp()
img = Path(wd) / "snapped_station"
cpt = Path(wd) / "vs30.cpt"
p = gmt.GMTPlot(str(img) + ".ps")
p.spacial("M", region, sizing=9, x_shift=1, y_shift=2)
gmt.makecpt("rainbow", str(cpt), 100, 800, continuing=True)
p.overlay(vs30_grid_ffp, cpt=str(cpt))
p.points(station_ll_ffp, shape="s", size=0.2, line_thickness="2p", line="black")
p.points(
f"{site_lon} {site_lat}\n",
is_file=False,
shape="c",
fill="black",
size=0.1,
line="white",
line_thickness="1p",
)
p.points(
f"{station_lon} {station_lat}\n",
is_file=False,
shape="c",
size=0.2,
line_thickness="2p",
line="white",
)
p.text(
site_lon,
site_lat,
"site",
dx=-dx,
dy=dy,
align=site_align,
size="14p",
box_fill="white@40",
)
p.text(
station_lon,
station_lat,
"closest station",
dx=dx * 1.5,
dy=-dy * 1.5,
align=closest_align,
size="14p",
box_fill="white@40",
)
p.text(
min_lon,
min_lat,
f"Site Vs30: {site_vs30} {'m/s' * (site_vs30 is not None)}",
size="20p",
align="LB",
dx=0.2,
dy=0.8,
box_fill="white@40",
)
p.text(
min_lon,
min_lat,
f"Closest Site Vs30: {station_vs30} m/s",
size="20p",
align="LB",
dx=0.2,
dy=0.5,
box_fill="white@40",
)
p.text(
min_lon,
min_lat,
f"Distance: {geo.ll_dist(station_lon, station_lat, site_lon, site_lat):.2f} km",
size="20p",
align="LB",
dx=0.2,
dy=0.2,
box_fill="white@40",
)
p.ticks(major="0.05d", minor="0.01d")
p.cpt_scale(
"R",
"M",
cpt,
pos="rel_out",
dx=0.2,
label="Vs30 (m/s)",
major=100,
minor=10,
horiz=False,
)
p.finalise()
p.png(background="white")
copyfile(str(img) + ".png", out_ffp)
rmtree(wd)
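# Example usage (hypothetical coordinates and paths; the Vs30 grid defaults to
# the bundled resource unless overridden):
#   plot_site_vs30("site_vs30.png", 172.70, -43.60, 172.75, -43.65, 350.0,
#                  "stations.ll", site_vs30=400.0)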
def plot_context(
lon: float,
lat: float,
out_ffp_no_ext: str,
fault_corners_ffp: str = None,
bballs_ffp: str = None,
):
"""Creates a gmt context map
Parameters
----------
lon: float
lat: float
out_ffp_no_ext: str
        Output file path, without the file extension
fault_corners_ffp: str, optional
Path to the fault corners csv file
bballs_ffp: str, optional
Path to the beach ball csv file
"""
from qcore import gmt
resource_dir = Path(__file__).resolve().parent.parent / "resources"
    if fault_corners_ffp is None:
        fault_corners_ffp = resource_dir / "SimAtlasFaults.csv"
    if bballs_ffp is None:
        bballs_ffp = resource_dir / "gmt.bb"
    # the docstring allows plain strings; normalise so .open() below works
    fault_corners_ffp, bballs_ffp = Path(fault_corners_ffp), Path(bballs_ffp)
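    # Hanks & Kanamori moment magnitude: Mw = (2/3) * log10(M0) - 10.7,
    # with the seismic moment M0 in dyne-cm.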
mom2mag = lambda mom: (2 / 3.0 * math.log(mom) / math.log(10.0)) - 10.7
wd = mkdtemp()
p = gmt.GMTPlot(str(Path(wd) / f"{Path(out_ffp_no_ext).name}.ps"))
# in a future release of GMT, this might be possible
# p.spacial("M" + str(args.lon) + "/" + str(args.lat) + "/", ("-200", "200", "-200", "200+uk"), sizing=8, x_shift=2, y_shift=2)
p.spacial(
"M", (lon - 1.3, lon + 1.3, lat - 1, lat + 1), sizing=8, x_shift=1, y_shift=1
)
p.basemap()
p.water()
paths = []
with fault_corners_ffp.open(mode="r") as c:
c.readline()
for l in c:
paths.append(
l.split(",")[9]
.replace("]|", "\n")
.replace("|", " ")
.replace("[[", ">\n")
.replace("[", "")
.replace("]", "")
.replace("\n ", "\n")
)
paths = "".join(paths)
p.path(
paths,
is_file=False,
close=True,
colour="black",
width="1.0p",
cols="1,0",
split="-",
)
paths = "\n".join([">\n" + "\n".join(x.split("\n")[1:3]) for x in paths.split(">")])
p.path(paths, is_file=False, colour="black", width="1.5p", cols="1,0")
p.ticks()
# beachballs by magnitude
b5 = []
b56 = []
b6 = []
with bballs_ffp.open(mode="r") as b:
for l in b:
man, exp = map(float, l.split()[9:11])
mag = mom2mag(man * 10 ** exp)
if mag < 5:
b5.append(l)
elif mag < 6:
b56.append(l)
else:
b6.append(l)
if len(b5) > 0:
p.beachballs("\n".join(b5), scale=0.2, colour="blue")
if len(b56) > 0:
p.beachballs("\n".join(b56), scale=0.2, colour="orange")
if len(b6) > 0:
p.beachballs("\n".join(b6), scale=0.2, colour="red")
p.points(
f"{lon} {lat}\n",
is_file=False,
shape="c",
fill="black",
size=0.1,
line="white",
line_thickness="1p",
)
p.text(lon, lat, "site", dy=-0.12, align="CT", size="14p", box_fill="white@40")
p.dist_scale("R", "B", "25", pos="rel", dx=0.5, dy=0.5)
p.finalise()
p.png(out_dir=str(Path(out_ffp_no_ext).parent), clip=True)
rmtree(wd)
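# Example usage (hypothetical coordinates and output path; the fault corner and
# beach ball files default to the bundled resources):
#   plot_context(172.70, -43.60, "/tmp/site_context")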
def plot_disagg(
out_ffp_no_ext: str, disagg_data: Dict, dpi: int = 300, bin_type: str = "src"
) -> None:
"""
    Creates a GMT-based disaggregation plot
    Parameters
    ----------
    out_ffp_no_ext: string
        Output file path, without the file extension
    disagg_data: Dictionary
        The disagg grid data as a dictionary
dpi: int
bin_type: str
The type of binning to use for the disagg plot,
either "src" (i.e. Fault and DS) or "eps"
"""
from qcore import gmt
# Constants
X_LEN = 4.5
Y_LEN = 4.0
Z_LEN = 2.5
ROT = 30
TILT = 60
LEGEND_SPACE = 0.7
EPSILON_LEGEND_EXPAND = 1.0
EPSILON_COLOURS = [
"215/38/3",
"252/94/62",
"252/180/158",
"254/220/210",
"217/217/255",
"151/151/255",
"0/0/255",
"0/0/170",
]
EPSILON_LABELS = [
"@~e@~<-2",
"-2<@~e@~<-1",
"-1<@~e@~<-0.5",
"-0.5<@~e@~<0",
"0<@~e@~<0.5",
"0.5<@~e@~<1",
"1<@~e@~<2",
"2<@~e@~",
]
TYPE_LEGEND_EXPAND = 0.35
TYPE_COLOURS = ["blue", "green"]
TYPE_LABELS = ["Fault", "DS"]
out_dir = Path(out_ffp_no_ext).parent
out_name = Path(out_ffp_no_ext).name
rrup_edges = np.asarray(disagg_data["rrup_edges"])
mag_edges = np.asarray(disagg_data["mag_edges"])
# modifications based on plot type selection
if bin_type == "src":
colours = TYPE_COLOURS
labels = TYPE_LABELS
legend_expand = TYPE_LEGEND_EXPAND
else:
colours = EPSILON_COLOURS
labels = EPSILON_LABELS
legend_expand = EPSILON_LEGEND_EXPAND
###
### PROCESS DATA
###
# x axis
x_axis_max = max(rrup_edges)
if x_axis_max < 115:
x_tick_inc = 10
elif x_axis_max < 225:
x_tick_inc = 20
elif x_axis_max < 335:
x_tick_inc = 30
elif x_axis_max < 445:
x_tick_inc = 40
else:
x_tick_inc = 50
x_axis_max = math.ceil(x_axis_max / float(x_tick_inc)) * x_tick_inc
# y axis
y_min, y_max = mag_edges[0], mag_edges[-1]
if y_max - y_min < 5:
y_tick_inc = 0.5
else:
y_tick_inc = 1.0
# bins to put data in
# TODO: set bottom limit on x and y (not e)
bin_centres_rrup = rrup_edges[:-1] + (np.diff(rrup_edges) / 2.0)
bin_centre_mag = mag_edges[:-1] + (np.diff(mag_edges) / 2.0)
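    # upper edges of the epsilon bins, one per entry in EPSILON_LABELS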
bins_e = np.array([-2, -1, -0.5, 0, 0.5, 1, 2, np.inf])
# build gmt input lines from block data
gmt_in = BytesIO()
if bin_type == "src":
blocks_flt = np.array(disagg_data["flt_bin_contr"])
        blocks_ds = np.array(disagg_data["ds_bin_contr"])
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helpers for generating synthetic stimuli for probing network behavior."""
import numpy as np
def sample_binary_image(size, alias_factor=10, color_a=(1,1,1), color_b=(0,0,0),
boundary_line=False, boundary_width=1,
blur_beyond_radius=None, fade_beyond_radius=None,
fade_over_distance=10, fade_color=(.5, .5, .5), **kwds):
"""Highly flexible tool for sampling binary images.
Many stimuli of interest are "binary" in that they have two regions. For
example, a curve stimulus has an interior and exterior region. Ideally, such a
stimulus should be rendered with antialiasing. Additionally, there are many
styling options that affect how one might wish to render the image: selecting
the color for interior and exterior, showing the boundary between the regions
instead of interior vs exterior, and much more.
This function provides a flexible rendering tool that supports many options.
We assume the image is represented in the "f-rep" or implicit function
convention: the image is represented by a function which maps (x,y) values
to a scalar, with negative representing the object interior and positive
representing the exterior.
The general usage would look something like:
@sample_binary_image(size, more_options)
def img(x,y):
return (negative if interior, positive if exterior)
Or alternatively:
sampler = sample_binary_image(size, more_options)
def img_f(x,y):
return (negative if interior, positive if exterior)
img = sampler(img_f)
Arguments:
size: Size of image to be rendered in pixels.
alias_factor: Number of samples to use in antialiasing.
color_a: Color of exterior. A 3-tuple of floats between 0 and 1. Defaults
to white (1,1,1).
color_b: Color of interior or boundary. A 3-tuple of floats between 0 and 1.
Defaults to black (0,0,0).
boundary_line: Draw boundary instead of interior vs exterior.
boundary_width: If drawing boundary, number of pixels wide boundary line
should be. Defaults to 1 pixel.
blur_beyond_radius: If not None, blur the image outside a given radius.
Defaults to None.
fade_beyond_radius: If not None, fade the image to fade_color outside a
given radius. Defaults to None.
fade_over_distance: Controls rate of fading.
fade_color: Color to fade to, if fade_beyond_radius is set. Defaults to
(.5, .5, .5).
Returns:
A function which takes a function mapping (x,y) -> float and returns a
numpy array of shape [size, size, 3].
"""
# Initial setup
color_a, color_b = np.asarray(color_a).reshape([1,1,3]), np.asarray(color_b).reshape([1,1,3])
fade_color = np.asarray(fade_color).reshape([1,1,3])
X = (np.arange(size) - size//2)
X, Y = X[None, :], X[:, None]
alias_offsets = [ tuple(np.random.uniform(-.5, .5, size=2)) for n in range(alias_factor) ]
boundary_offsets = [ (boundary_width*np.cos(2*np.pi*n/16.), boundary_width*np.sin(2*np.pi*n/16.)) for n in range(16) ]
# Setup for blur / fade stuff
radius = np.sqrt(X**2+Y**2)
offset_scale = 1
fade_coef = 0
if blur_beyond_radius is not None:
offset_scale += np.maximum(0, radius-blur_beyond_radius)
if fade_beyond_radius is not None:
fade_coef = np.maximum(0, radius-fade_beyond_radius)
fade_coef /= float(fade_over_distance)
fade_coef = np.clip(fade_coef, 0, 1)[..., None]
# The function we'll return.
# E is an "energy function" mapping (x,y) -> float
# (and vectorized for numpy support)
# such that it is negative on interior regions and positive on exterior ones.
def sampler(E):
# Naively sample an image
def sample(x_off, y_off):
# note: offset_scale controls blurring
vals = E(X + offset_scale * x_off, Y + offset_scale * y_off)
return np.greater_equal(vals, 0).astype("float32")
def boundary_sample(x_off, y_off):
imgs = [sample(x_off + bd_off_x, y_off + bd_off_y)
for bd_off_x, bd_off_y in boundary_offsets]
# If we are on the boundary, some samples will be zero and others one.
# as a result, the mean will be in the middle.
vals = np.mean(imgs, axis=0)
vals = 2*np.abs(vals-0.5)
import numpy as np
import scipy.stats as stats
import scipy.linalg as la
import scipy.optimize as optimize
import scipy.integrate as integrate
import sklearn.linear_model
import kernels
import ep_fast
#import EP_cython
np.set_printoptions(precision=4, linewidth=200)
class GradientFields():
def __init__(self, K_nodiag, s0, t_i, prev):
normPDF = stats.norm(0,1)
try: t_i[0]
except: t_i = np.zeros(K_nodiag.shape[0]) + t_i
#general computations (always the same if the fixed effects are 0!!!!!)
self.Ki = normPDF.sf(t_i)
self.Ps = s0 + (1-s0)*self.Ki
self.Pi = self.Ki / self.Ps
self.stdY = np.sqrt(self.Pi * (1-self.Pi))
#compute Atag0 and B0
self.phi_ti = normPDF.pdf(t_i)
self.phitphit = np.outer(self.phi_ti, self.phi_ti)
self.stdY_mat = np.outer(self.stdY, self.stdY)
mat1_temp = self.phi_ti / self.stdY
self.mat1 = np.outer(mat1_temp, mat1_temp)
sumProbs_temp = np.tile(self.Pi, (K_nodiag.shape[0], 1))
sumProbs = sumProbs_temp + sumProbs_temp.T
Atag0_B0_inner_vec = self.Pi*(1-s0)
self.mat2 = np.outer(Atag0_B0_inner_vec, Atag0_B0_inner_vec) + 1-sumProbs*(1-s0)
self.Atag0 = self.mat1*self.mat2
self.B0 = np.outer(self.Ps, self.Ps)
#Compute the elements of the function value (the squared distance between the observed and expected pairwise phenotypic covariance)
self.K_nodiag_AB0 = K_nodiag * self.Atag0/self.B0
self.K_nodiag_sqr_AB0 = K_nodiag * self.K_nodiag_AB0
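#hedged usage sketch (values assumed, not from this file): with a 1% prevalence
#and matching liability thresholds, the precomputed fields give the terms of the
#PCGC-style squared-distance objective, e.g.
#  gf = GradientFields(K_nodiag, s0=0.01, t_i=stats.norm(0, 1).isf(0.01), prev=0.01)
#  term = np.sum(gf.K_nodiag_sqr_AB0)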
class PrevTest():
def __init__(self, n, m, prev, useFixed, h2Scale=1.0, prng=None, num_generate=None):
self.prng = prng
if (prng is None): self.prng = np.random.RandomState(args.seed)
self.n = n
self.useFixed = useFixed
self.h2Scale = h2Scale
if num_generate is None:
if prev == 0.5:
numGeno = n
else:
numGeno = np.maximum(int(float(self.n)/float(2*prev)), 25000)
else:
numGeno = num_generate
#generate SNPs
mafs = self.prng.rand(m) * 0.45 + 0.05
self.X = prng.binomial(2, mafs, size=(numGeno, m)).astype(float)
mafs_estimated = mafs.copy()
self.X_estimated = self.X.copy()
self.X -= 2*mafs
self.X_estimated -= 2*mafs_estimated
self.X /= np.sqrt(2*mafs*(1-mafs))
self.X_estimated /= np.sqrt(2*mafs_estimated*(1-mafs_estimated))
self.m = m
self.n = n
X_mean_diag = np.mean(np.einsum('ij,ij->i', self.X, self.X)) / self.X.shape[1]
X_estimated_mean_diag = np.mean(np.einsum('ij,ij->i', self.X_estimated, self.X_estimated)) / self.X.shape[1]
self.diag_ratio = X_estimated_mean_diag / X_mean_diag
self.prev = prev
#approx coeffs lam_i and c_i for logistic likelihood
self.logistic_c = np.array([1.146480988574439e+02, -1.508871030070582e+03, 2.676085036831241e+03, -1.356294962039222e+03, 7.543285642111850e+01])
self.logistic_lam = np.sqrt(2)*np.array([0.44 ,0.41, 0.40, 0.39, 0.36])
self.logistic_lam2 = self.logistic_lam**2
self.logistic_clam = self.logistic_c * self.logistic_lam
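#approximation coefficients: the logistic sigmoid is written as a probit mixture,
#sigma(x) ~= sum_i c_i * Phi(lam_i * x), as in the GPML toolbox's likLogistic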
def genData(self, h2, eDist, numFixed, ascertain=True, scaleG=False, extraSNPs=0, fixedVar=0, frac_cases=0.5, kernel='linear', rbf_scale=1.0):
args.seed += 1
self.true_h2 = h2
self.ascertain = ascertain
self.eDist = eDist
if (numFixed==0): fixedVar=0
if (numFixed > 0): assert fixedVar>0
self.fixedVar = fixedVar
self.covars = self.prng.randn(self.X.shape[0], numFixed)
if (eDist == 'normal' and not scaleG): sig2g = h2/(1-h2)
elif (eDist == 'normal' and scaleG): sig2g = h2
elif (eDist == 'logistic' and not scaleG): sig2g = (np.pi**2)/3.0 * h2 / (1 - h2)
elif (eDist == 'logistic' and scaleG): sig2g = h2
else: raise ValueError('unknown e_dist. Valid values are normal, logistic')
if kernel == 'linear':
self.beta = self.prng.randn(self.m) * np.sqrt(sig2g/self.m) #generate effect sizes
self.g = self.X.dot(self.beta) #generate genetic effects
self.g_estimated = self.X_estimated.dot(self.beta)
elif args.kernel == 'rbf':
assert scaleG
kernel_obj = kernels.ScaledKernel(kernels.RBFKernel(self.X))
K = kernel_obj.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L = la.cholesky(K, lower=True, overwrite_a=True)
self.g = L.dot(np.random.randn(K.shape[0]))
if np.allclose(self.X, self.X_estimated):
self.g_estimated = self.g.copy()
else:
kernel_obj_estimated = kernels.ScaledKernel(kernels.RBFKernel(self.X_estimated))
K_estimated = kernel_obj_estimated.getTrainKernel(np.array([np.log(rbf_scale), np.log(sig2g) / 2.0]))
L_estimated = la.cholesky(K_estimated, lower=True, overwrite_a=True)
self.g_estimated = L_estimated.dot(np.random.randn(K_estimated.shape[0]))
else:
raise ValueError('unknown kernel')
#create identical twins if needed
if self.prev == 0.5:
numGeno = self.n
else:
numGeno = np.maximum(int(float(self.n)/float(2*self.prev)), 25000)
self.fixedEffects = np.ones(numFixed) * (0 if (numFixed==0) else np.sqrt(fixedVar / numFixed))
self.covars = self.prng.randn(self.g.shape[0], numFixed)
m = self.covars.dot(self.fixedEffects)
self.g += m
self.g_estimated += m
if (eDist == 'logistic' and numFixed>0): raise ValueError('logistic distribution with fixed effects not supported')
#generate environmental effect
if (eDist == 'normal' and not scaleG): e = self.prng.randn(self.g.shape[0])
elif (eDist == 'normal' and scaleG): e = self.prng.randn(self.g.shape[0]) * np.sqrt(1 - sig2g)# - (fixedVar if (numFixed>0) else 0))
elif (eDist == 'logistic' and not scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0])
elif (eDist == 'logistic' and scaleG): e = stats.logistic(0,1).rvs(self.g.shape[0]) * np.sqrt(1-sig2g) / np.sqrt((np.pi**2)/3.0)
else: raise ValueError('unknown e distribution: ' + self.eDist)
self.yAll = self.g + e
self.yAll_estimated = self.g_estimated + e
self.affCutoff = np.percentile(self.yAll, 100*(1-self.prev))
cases = (self.yAll >= self.affCutoff) #determine cases
cases_estimated = (self.yAll_estimated >= self.affCutoff) #determine cases
controls = ~cases
controls_estimated = ~cases_estimated
self.y = np.ones(self.yAll.shape[0])
self.y[controls] = -1
self.y_estimated = np.ones(self.yAll.shape[0])
self.y_estimated[controls_estimated] = -1
#select cases and controls
caseInds = np.where(cases)[0]
controlInds = np.where(controls)[0]
if ascertain:
numCases = np.sum(cases)
if (numCases > self.n/2+2):
selectedCases = self.prng.permutation(numCases)[:self.n//2]
caseInds = caseInds[selectedCases]
numCases = len(caseInds)
numControls = int(numCases * (1-frac_cases)/frac_cases)
selectedControls = self.prng.permutation(controls.sum())[:numControls]
selectedInds = np.concatenate((caseInds, controlInds[selectedControls]))
else:
while True:
selectedInds = self.prng.permutation(cases.shape[0])[:self.n]
if (np.sum(cases[selectedInds]) > 0): break
#scramble inds to avoid numerical issues
self.prng.shuffle(selectedInds)
self.y = self.y[selectedInds]
###print('%%cases: %0.2f'%(np.mean(self.y>0)))
self.g = self.g[selectedInds]
self.g_estimated = self.g_estimated[selectedInds]
self.y_cont = self.yAll[selectedInds]
self.covars = self.covars[selectedInds, :]
self.X_selected = self.X_estimated[selectedInds, :]
if (extraSNPs > 0):
###print('Adding', extraSNPs, 'non-causal SNPs...')
mafs = self.prng.rand(extraSNPs) * 0.45 + 0.05
X2 = self.prng.binomial(2, mafs, size=(self.X_selected.shape[0], extraSNPs)).astype(float)
X2 -= 2*mafs
X2 /= np.sqrt(2*mafs*(1-mafs))
self.X_selected = np.concatenate((self.X_selected, X2), axis=1)
#create the kernel matrix
if kernel=='linear':
kernel_obj = kernels.linearKernel(self.X_selected)
K = kernel_obj.getTrainKernel(np.array([]))
elif kernel=='rbf':
kernel_obj = kernels.RBFKernel(self.X_selected)
K = kernel_obj.getTrainKernel(np.array([np.log(rbf_scale)]))
else:
raise ValueError('unknown kernel')
self.kernel = kernels.ScaledKernel(kernel_obj)
def computeT(self, K, sig2e=np.pi**2/3.0):
if (self.prev==0.5): return 0.0
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (self.eDist == 'normal'): t = stats.norm(0, np.sqrt(sig2g+1)).isf(self.prev)
elif (self.eDist == 'logistic'):
s = np.sqrt(3*sig2e/np.pi**2)
normCache = np.log(np.sqrt(2*np.pi*sig2g))
llF = lambda f,t: -(f-t)**2/(2*sig2g) - normCache
pFAndY = lambda f,t: np.exp(llF(f,t)) * (1.0/(1+np.exp(-f/s)) if f>-35 else 0.0)
pY = lambda t: integrate.quad(lambda f:pFAndY(f,t), -np.inf, np.inf)
t = -optimize.minimize_scalar(lambda t:(pY(t)[0]-self.prev)**2, method='bounded', bounds=(-8, 8)).x
else: raise Exception('unknown e distribution: ' + self.eDist)
return t
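#hedged example (numbers assumed): with prev=0.01 and sig2g=0.5 under the normal
#model this gives t = stats.norm(0, np.sqrt(1.5)).isf(0.01) ~= 2.85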
def likErf_EP(self, y, mu, s2, hyp=None, compDerivs=False):
sqrtVarDenom = 1.0 / np.sqrt(1+s2)
z = mu * sqrtVarDenom * y
normPDF = stats.norm(0,1)
lZ = normPDF.logcdf(z)
if (not compDerivs): return lZ
n_p = np.exp(normPDF.logpdf(z) - lZ)
dlZ = n_p * sqrtVarDenom * y #1st derivative wrt mean
d2lZ = -n_p * (z+n_p) / (1+s2) #2nd derivative wrt mean
return lZ, dlZ, d2lZ
#compute EP for a single individual, and compute derivatives with respect to the mean (mu)
def likLogistic_EP_single_new(self, y, mu, s2, hyp):
t = hyp[4]; mu = mu-t
hyp[4] = 0
lZc, dlZc, d2lZc = self.likProbit_EP_single(y, mu*self.logistic_lam, s2*self.logistic_lam2, hyp)
lZ = self.log_expA_x_single(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
dlZ = self.expABz_expAx_single(lZc, self.logistic_c, dlZc, self.logistic_clam) #((exp(A).*B)*d)./(exp(A)*c)
#d2lZ = ((exp(A).*Z)*e)./(exp(A)*c) - dlZ.^2 where e = c.*(lam.^2)'
d2lZ = self.expABz_expAx_single(lZc, self.logistic_c, dlZc**2+d2lZc, self.logistic_c * self.logistic_lam2) - dlZ**2
#A note (from the GPML package documentation):
#The scale mixture approximation does not capture the correct asymptotic
#behavior; we have linear decay instead of quadratic decay as suggested
#by the scale mixture approximation. By observing that for large values
#of -f*y ln(p(y|f)) for likLogistic is linear in f with slope y, we are
#able to analytically integrate the tail region.
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
if (mu*y > 0):
lZtail = np.log(1-np.exp(lZtail)) #label and mean agree
dlZtail = 0
else:
dlZtail = -np.sign(mu)
lZ = (1-lam)* lZ + lam* lZtail #interpolate between scale ..
dlZ = (1-lam)* dlZ + lam* dlZtail #.. mixture and ..
d2lZ = (1-lam)*d2lZ #.. tail approximation
hyp[4] = t
return lZ, dlZ, d2lZ
def likLogistic_EP_multi_new(self, y, mu, s2, hyp=None):
t = hyp[4]; mu = mu-t
hyp[4] = 0
lZc = self.likProbit_EP_multi(np.outer(y, np.ones(5)), np.outer(mu, self.logistic_lam), np.outer(s2, self.logistic_lam2), hyp)
lZ = self.log_expA_x_multi(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
muy = mu*y
id = muy>0; lZtail[id] = np.log(1-np.exp(lZtail[id])) #label and mean agree
lZ = (1-lam)*lZ + lam*lZtail #interpolate between scale mixture and tail approximation
hyp[4] = t
return lZ
def likProbit_EP_multi(self, y, mu, s2, hyp):
sig2e, t = hyp[0], hyp[4]
lZ = stats.norm(0,1).logcdf(y * (mu-t) / np.sqrt(s2+sig2e))
return lZ
def likProbit_EP_single(self, y, mu, s2, hyp):
sig2e, t = hyp[0], hyp[4]
a = y / np.sqrt(s2+sig2e)
z = a * (mu-t)
normPDF = stats.norm(0,1)
lZ = normPDF.logcdf(z)
n_p = np.exp(normPDF.logpdf(z) - lZ)
dlZ = a * n_p
d2lZ = -a**2 * n_p * (z+n_p)
return lZ, dlZ, d2lZ
def likFunc_EP_asc_multi(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ = likFunc_numer(1, mu, s2, hyp)
logZstar = np.logaddexp(logS0, logSDiff+lZ)
return logZstar
def likFunc_EP_asc_single(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ, dlZ, d2lZ = likFunc_numer(1, mu, s2, hyp)
logZstar = np.logaddexp(logS0, logSDiff+lZ)
expDiff = np.exp(lZ-logZstar)
temp = sDiff * expDiff
dZstar = temp * dlZ
d2Zstar = temp * (d2lZ + dlZ**2 * (1-temp))
return logZstar, dZstar, d2Zstar
def likFunc_EP_both_single(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ_numer, dlZ_numer, d2lZ_numer = likFunc_numer(y, mu, s2, hyp)
lZ_numer += (logS0 if y<0 else 0)
lZ_denom, dlZ_denom, d2lZ_denom = likFunc_denom(y, mu, s2, hyp)
return lZ_numer-lZ_denom, dlZ_numer-dlZ_denom, d2lZ_numer-d2lZ_denom
def likFunc_EP_both_multi(self, y, mu, s2, hyp):
logS0, logSDiff, sDiff = hyp[1], hyp[2], hyp[3]
likFunc_numer, likFunc_denom = hyp[5], hyp[6]
lZ_numer = likFunc_numer(y, mu, s2, hyp)
lZ_numer[y<0] += logS0 #note: we assume that logS1=0
lZ_denom = likFunc_denom(y, mu, s2, hyp)
return lZ_numer-lZ_denom
def evalLL_EP(self, hyp):
try: hyp[0]
except: hyp=np.array([hyp])
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
p = np.mean(self.y>0)
s1 = 1.0
s0 = s1 * self.prev / (1-self.prev) * (1-p) / p
logS0 = np.log(s0); sDiff = s1-s0; logSDiff = np.log(sDiff)
K = self.kernel.getTrainKernel(hyp)
m = np.zeros(self.y.shape[0])
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (sig2g > self.h2Scale): raise ValueError('sig2g larger than h2Scale found')
if (self.covars.shape[1] > 0):
C = self.covars
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(self.y>0)) / np.mean(self.y>0)
logreg.fit(C, self.y)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
if (self.eDist == 'logistic'):
old_prev = self.prev
t = np.empty(self.y.shape[0])
for i in range(self.y.shape[0]):
self.prev = Ki[i]
t[i] = self.computeT(K, self.h2Scale-sig2g)
self.prev = old_prev
else: t = stats.norm(0,1).isf(Ki)
if (self.eDist == 'normal'):
likFunc_numer_multi = self.likProbit_EP_multi
likFunc_numer_single = self.likProbit_EP_single
sig2e = self.h2Scale - sig2g
if (self.covars.shape[1] == 0): t = np.zeros(self.y.shape[0]) + stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
#t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
elif (self.eDist == 'logistic'):
likFunc_numer_multi = self.likLogistic_EP_multi_new
likFunc_numer_single = self.likLogistic_EP_single_new
sig2e = (self.h2Scale - sig2g) / (np.pi**2 / 3.0)
#if (self.covars.shape[1] == 0): t = np.zeros(self.y.shape[0]) + self.computeT(K, self.h2Scale-sig2g)
t = self.computeT(K, self.h2Scale-sig2g)
else: raise ValueError('unknown eDist')
likHyp_multi = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_multi, self.likFunc_EP_asc_multi]
likHyp_single = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_single, self.likFunc_EP_asc_single]
likFuncMulti = likFunc_numer_multi
likFuncSingle = likFunc_numer_single
Sigma = K.copy()
mu = m.copy() #- t
nlZ0 = -np.sum(likFuncMulti(self.y, mu, np.diag(K), likHyp_multi))
ttau, tnu = np.zeros(self.y.shape[0]), np.zeros(self.y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic'): ttau, tnu = self.EP_innerloop2(Sigma, self.y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else: ttau, tnu = ep_fast.EP_innerloop_probit(Sigma, self.y, mu, ttau, tnu, sig2e, t)
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, self.y, ttau, tnu, m, likFuncMulti, likHyp_multi)
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
nlZ = np.inf
if (nlZ < 0): nlZ = np.inf
self.mu = mu
return nlZ
def evalLL_AEP(self, hyp, grad=False, update_freq=1):
try: hyp[0]
except: hyp=np.array([hyp])
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
p = np.mean(self.y>0)
s1 = 1.0
s0 = s1 * self.prev / (1-self.prev) * (1-p) / p
y = self.y.copy()
useCython = True
logS0 = np.log(s0)
sDiff = s1-s0
logSDiff = np.log(sDiff)
#Generate problem settings
hyp_scaled = hyp.copy()
if self.h2Scale != 1.0:
hyp_scaled[-1] = np.log(np.exp(2*hyp[-1]) * self.h2Scale) / 2.0
K = self.kernel.getTrainKernel(hyp_scaled)
C = self.covars.copy()
m = np.zeros(y.shape[0])
controls = (y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = np.exp(2*hyp[-1])
if (self.eDist == 'normal'): sig2e = self.h2Scale - sig2g
elif (self.eDist == 'logistic'): sig2e = (self.h2Scale - sig2g) / (np.pi**2 / 3.0)
else: raise ValueError('unknown eDist')
if (sig2g > self.h2Scale):
raise ValueError('sig2g larger than h2Scale found')
if C.shape[1] > 0 and self.useFixed:
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(y>0)) / np.mean(y>0)
logreg.fit(C, y)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
if (self.eDist == 'logistic'):
old_prev = self.prev
t = np.empty(y.shape[0])
for i in range(y.shape[0]):
self.prev = Ki[i]
t[i] = self.computeT(K, self.h2Scale-sig2g)
self.prev = old_prev
else: t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(Ki)
if (self.eDist == 'normal'):
likFunc_numer_single = self.likProbit_EP_single
likFunc_numer_multi = self.likProbit_EP_multi
if (C.shape[1] == 0 or not self.useFixed): t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
elif (self.eDist == 'logistic'):
likFunc_numer_single = self.likLogistic_EP_single_new
likFunc_numer_multi = self.likLogistic_EP_multi_new
if (C.shape[1] == 0 or not self.useFixed): t = self.computeT(K, self.h2Scale-sig2g)
else: raise ValueError('unknown eDist')
likHyp_multi = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_multi, self.likFunc_EP_asc_multi]
likHyp_single = [sig2e, logS0, logSDiff, sDiff, t, likFunc_numer_single, self.likFunc_EP_asc_single]
likFuncMulti = self.likFunc_EP_both_multi
likFuncSingle = self.likFunc_EP_both_single
#initialize Sigma and mu, the parameters of the Gaussian posterior approximation
Sigma = K.copy()
mu = m.copy()
#marginal likelihood for ttau = tnu = zeros(n,1); equals n*log(2) for likCum*
nlZ0 = -np.sum(likFuncMulti(y, mu, np.diag(K), likHyp_multi))
ttau, tnu = np.zeros(y.shape[0]), np.zeros(y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic' or not useCython): ttau, tnu = self.EP_innerloop2(Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else:
ttau, tnu = ep_fast.EP_innerloop_probit_both_parallel(Sigma, y, mu, s0, sDiff, ttau, tnu, sig2e, np.zeros(y.shape[0])+t, update_freq=update_freq)
try:
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, y, ttau, tnu, m, likFuncMulti, likHyp_multi)
except:
nlZ=np.inf
print('\t', 'Cholesky failed!')
raise
break
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
nlZ = np.inf
nlZ_asc = nlZ
if (len(self.prev_nlZ) >= 2):
prev_diff = np.maximum(np.abs(self.prev_nlZ[-1]-self.prev_nlZ[-2]), 2)
bad_inds = ((np.abs(ttau)>100) | (np.abs(tnu)>100))
if (np.abs(nlZ - self.prev_nlZ[-1]) > 2*np.abs(prev_diff) and np.any(bad_inds)):
nlZ = np.inf
nlZ_asc = nlZ
if (nlZ == np.inf):
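#bare attribute access: raises AttributeError here if no earlier EP state was saved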
self.old_ttau
tol=1e-2
ttau, tnu = self.old_ttau, self.old_tnu
Sigma = self.old_Sigma
mu = self.old_mu
nlZ_old, sweep = np.inf, 0
nlZ = np.inf
nlZ_arr = []
max_sweep=40
while (sweep<min_sweep or (np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep)):
nlZ_old = nlZ
sweep+=1
if (self.eDist == 'logistic' or not useCython): ttau, tnu = self.EP_innerloop2(Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp_single)
else:
ttau, tnu = ep_fast.EP_innerloop_probit_both_parallel(Sigma, y, mu, s0, sDiff, ttau, tnu, sig2e, np.zeros(y.shape[0])+t, update_freq=update_freq)
try:
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams2(K, y, ttau, tnu, m, likFuncMulti, likHyp_multi)
except:
nlZ = np.inf
break
nlZ_arr.append(nlZ)
nlZ_arr = np.array(nlZ_arr)
if (sweep == max_sweep and np.abs(nlZ-nlZ_old) > tol):
if (np.abs(nlZ-nlZ_old) < 3):
if (np.all(nlZ_arr[5:] < self.old_nlZ)): nlZ = np.max(nlZ_arr[5:])
elif (np.all(nlZ_arr[5:] > self.old_nlZ)): nlZ = np.min(nlZ_arr[5:])
else:
nlZ = np.inf
prev_diff = np.maximum(np.abs(self.prev_nlZ[-1]-self.prev_nlZ[-2]), 2)
bad_inds = ((np.abs(ttau)>100) | (np.abs(tnu)>100))
try:
if (nlZ < np.inf and np.max(np.abs(nlZ_arr[5:] - self.prev_nlZ[-1])) > 2*np.abs(prev_diff) and np.any(bad_inds)):
nlZ = np.inf
except:
pass
nlZ_asc = nlZ
if (nlZ < np.inf):
self.old_ttau, self.old_tnu, self.old_Sigma, self.old_mu, self.old_nlZ = ttau, tnu, Sigma, mu, nlZ
self.prev_nlZ.append(nlZ)
self.mu = mu
nlZ = nlZ_asc
if (nlZ < 0): nlZ = np.inf
return nlZ
def likLogistic_EP_multi(self, y, mu, s2, hyp=None):
lZc = self.likErf_EP(np.outer(y, np.ones(5)), np.outer(mu, self.logistic_lam), np.outer(s2, self.logistic_lam2), compDerivs=False)
lZ = self.log_expA_x_multi(lZc, self.logistic_c) #A=lZc, B=dlZc, d=c.*lam', lZ=log(exp(A)*c)
val = np.abs(mu) - 196/200*s2-4 #empirically determined bound at val==0
lam = 1.0 / (1.0+np.exp(-10*val)) #interpolation weights
lZtail = np.minimum(s2/2.0-np.abs(mu), -0.1) #apply the same to p(y|f) = 1 - p(-y|f)
muy = mu*y
id = muy>0; lZtail[id] = np.log(1-np.exp(lZtail[id])) #label and mean agree
lZ = (1-lam)*lZ + lam*lZtail #interpolate between scale mixture and tail approximation
return lZ
#computes y = log( exp(A)*x ) in a numerically safe way by subtracting the
# maximal value in each row to avoid cancelation after taking the exp
def log_expA_x_multi(self, A, x):
maxA = np.max(A, axis=1) #number of columns, max over columns
y = np.log(np.exp(A - maxA[:, np.newaxis]).dot(x)) + maxA #exp(A) = exp(A-max(A))*exp(max(A))
return y
#computes y = log( exp(A)*x ) in a numerically safe way by subtracting the
# maximal value in each row to avoid cancelation after taking the exp
def log_expA_x_single(self, A, x):
maxA = np.max(A) #number of columns, max over columns
y = np.log(np.exp(A-maxA).dot(x)) + maxA #exp(A) = exp(A-max(A))*exp(max(A))
return y
# computes y = ( (exp(A).*B)*z ) ./ ( exp(A)*x ) in a numerically safe way.
#The function is not general in the sense that it yields correct values for
#all types of inputs. We assume that the values are close together.
def expABz_expAx_single(self, A,x,B,z):
maxA = np.max(A) #number of columns, max over columns
expA = np.exp(A-maxA)
y = np.dot(expA*B, z) / np.dot(expA, x)
return y
def evalLL(self, hyp, method):
if (method == 'aep'): return self.evalLL_AEP(hyp)
elif (method == 'aep_parallel'): return self.evalLL_AEP(hyp, update_freq=10000000000)
elif (method == 'ep'): return self.evalLL_EP(hyp)
else: raise ValueError('unrecognized method: %s. Valid methods are reml, pcgc, apl, aep, aep_parallel or ep'%(method))
def reml(self, is_binary):
K = self.kernel.getTrainKernel(np.array([0]))
logdetXX = 0
#eigendecompose
s,U = la.eigh(K)
s[s<0]=0
ind = np.argsort(s)[::-1]
U = U[:, ind]
s = s[ind]
#Prepare required matrices
if is_binary: y = (self.y>0).astype(int)
else: y = self.y_cont
Uy = U.T.dot(y).flatten()
covars = np.ones((y.shape[0], 1))
UX = U.T.dot(covars)
if (U.shape[1] < U.shape[0]):
UUX = covars - U.dot(UX)
UUy = y - U.dot(Uy)
UUXUUX = UUX.T.dot(UUX)
UUXUUy = UUX.T.dot(UUy)
UUyUUy = UUy.T.dot(UUy)
else: UUXUUX, UUXUUy, UUyUUy = None, None, None
n = U.shape[0]
ldeltaopt_glob = optimize.minimize_scalar(self.negLLevalLong, bounds=(-5, 5), method='Bounded', args=(s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, n)).x
ll, sig2g, beta, r2 = self.negLLevalLong(ldeltaopt_glob, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, n, returnAllParams=True)
sig2e = np.exp(ldeltaopt_glob) * sig2g
return sig2g/(sig2g+sig2e)
def negLLevalLong(self, logdelta, s, Uy, UX, logdetXX, UUXUUX, UUXUUy, UUyUUy, numIndividuals, returnAllParams=False):
Sd = s + np.exp(logdelta)
UyS = Uy / Sd
yKy = UyS.T.dot(Uy)
logdetK = np.log(Sd).sum()
null_ll, sigma2, beta, r2 = self.lleval(Uy, UX, Sd, yKy, logdetK, logdetXX, logdelta, UUXUUX, UUXUUy, UUyUUy, numIndividuals)
if returnAllParams: return null_ll, sigma2, beta, r2
else: return -null_ll
def lleval(self, Uy, UX, Sd, yKy, logdetK, logdetXX, logdelta, UUXUUX, UUXUUy, UUyUUy, numIndividuals):
N = numIndividuals
D = UX.shape[1]
UXS = UX / np.lib.stride_tricks.as_strided(Sd, (Sd.size, D), (Sd.itemsize,0))
XKy = UXS.T.dot(Uy)
XKX = UXS.T.dot(UX)
if (Sd.shape[0] < numIndividuals):
delta = np.exp(logdelta)
denom = delta
XKX += UUXUUX / denom
XKy += UUXUUy / denom
yKy += UUyUUy / denom
logdetK += (numIndividuals-Sd.shape[0]) * logdelta
[SxKx,UxKx]= la.eigh(XKX)
i_pos = SxKx>1E-10
beta = np.dot(UxKx[:,i_pos], (np.dot(UxKx[:,i_pos].T, XKy) / SxKx[i_pos]))
r2 = yKy-XKy.dot(beta)
reml = True
if reml:
logdetXKX = np.log(SxKx).sum()
sigma2 = (r2 / (N - D))
ll = -0.5 * (logdetK + (N-D)*np.log(2.0*np.pi*sigma2) + (N-D) + logdetXKX - logdetXX)
else:
sigma2 = r2 / N
ll = -0.5 * (logdetK + N*np.log(2.0*np.pi*sigma2) + N)
return ll, sigma2, beta, r2
def solveChol(self, L, B, overwrite_b=True):
cholSolve1 = la.solve_triangular(L, B, trans=1, check_finite=False, overwrite_b=overwrite_b)
cholSolve2 = la.solve_triangular(L, cholSolve1, check_finite=False, overwrite_b=True)
return cholSolve2
def evalLL_EP(self, hyp):
tol = 1e-4; max_sweep = 20; min_sweep = 2 #tolerance to stop EP iterations
s0 = self.prev / (1-self.prev)
s1 = 1.0
useCython = False
try: hyp[0]
except: hyp=np.array([hyp])
if (self.prev < 0.5):
logS0 = np.log(s0)
logSdiff = np.log(s1-s0)
else:
logS0 = -np.inf
logSdiff = 0.0
#Generate problem settings
K = self.kernel.getTrainKernel(hyp)
m = np.zeros(self.y.shape[0])
if self.useFixed: m += self.covars.dot(self.fixedEffects)
controls = (self.y < 0)
cases = ~controls
diagK = np.diag(K)
sig2g = (1-self.prev)*np.mean(diagK[controls]) + self.prev*np.mean(diagK[cases])
if (sig2g > 1.0): raise ValueError('sig2g larger than 1.0 found')
sig2e = 1.0 - sig2g
t = stats.norm(0, np.sqrt(sig2g+sig2e)).isf(self.prev)
m -= t
if useCython:
EP_func = EP_cython.EPInnerLoop_cython
else:
EP_func = self.EPInnerLoop
llFunc = self.llFuncStandard
#A note on naming (taken directly from the GPML documentation):
#variables are given short but descriptive names in
#accordance with <NAME> "GPs for Machine Learning" (2006): mu
#and s2 are mean and variance, nu and tau are natural parameters. A leading t
#means tilde, a subscript _ni means "not i" (for cavity parameters), or _n
#for a vector of cavity parameters. N(f|mu,Sigma) is the posterior.
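#concretely, for a univariate Gaussian N(mu, s2) the natural parameters are
#tau = 1/s2 and nu = mu/s2, so tau_ni below is the cavity precision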
#initialize Sigma and mu, the parameters of the Gaussian posterior approximation
Sigma = K.copy()
mu = m.copy()
#marginal likelihood for ttau = tnu = zeros(n,1); equals n*log(2) for likCum*
nlZ0 = -np.sum(llFunc(self.y, mu, np.diag(K), sig2e))
ttau, tnu = np.zeros(self.y.shape[0]), np.zeros(self.y.shape[0])
nlZ_old, sweep = np.inf, 0
nlZ = nlZ0
while ((np.abs(nlZ-nlZ_old) > tol and sweep < max_sweep) or sweep<min_sweep):
nlZ_old = nlZ
sweep+=1
Sigma, mu, ttau, tnu = EP_func(Sigma, self.y, mu, ttau, tnu, sig2e)
#recompute since repeated rank-one updates can destroy numerical precision
(Sigma, mu, L, alpha, nlZ) = self.epComputeParams(K, self.y, ttau, tnu, sig2e, m, llFunc)
self.mu = mu
return nlZ
def llFuncStandard(self, y, mu, s2, sig2e):
z = mu / np.sqrt(sig2e+s2) * y
nlZ = stats.norm(0,1).logcdf(z)
return nlZ
def EP_innerloop2(self, Sigma, y, mu, ttau, tnu, likFuncSingle, likHyp):
randpermN = np.random.permutation(range(y.shape[0]))
normPDF = stats.norm(0,1)
for i in randpermN: #iterate EP updates (in random order) over examples
#first find the cavity distribution params tau_ni and nu_ni
if (ttau[i] > 1.0/Sigma[i,i]):
raise ValueError('infeasible ttau[i] found!!!')
tau_ni = 1.0/Sigma[i,i] - ttau[i] #Equation 3.56 rhs (and 3.66) from GP book
nu_ni = (mu[i]/Sigma[i,i] - tnu[i]) #Equation 3.56 lhs (and 3.66) from GP book
mu_ni = nu_ni / tau_ni
#compute the desired derivatives of the individual log partition function
try:
t = likHyp[4]
likHyp[4] = t[i]
lZ, dlZ, d2lZ = likFuncSingle(y[i], mu_ni, 1.0/tau_ni, likHyp)
likHyp[4] = t
except:
lZ, dlZ, d2lZ = likFuncSingle(y[i], mu_ni, 1.0/tau_ni, likHyp)
ttau_old, tnu_old = ttau[i], tnu[i] #find the new tilde params, keep old
ttau[i] = -d2lZ / (1+d2lZ/tau_ni)
ttau[i] = np.maximum(ttau[i], 0) #enforce positivity i.e. lower bound ttau by zero
tnu[i] = (dlZ - mu_ni*d2lZ ) / (1+d2lZ/tau_ni)
if (ttau[i] == 0): tnu[i]=0
dtt = ttau[i] - ttau_old
dtn = tnu[i] - tnu_old #rank-1 update Sigma
si = Sigma[:,i]
ci = dtt / (1+dtt*si[i])
mu -= (ci* (mu[i]+si[i]*dtn) - dtn) * si #Equation 3.53 from GP book
Sigma -= np.outer(ci*si, si) #Equation 3.70 from GP book (#takes 70% of total time)
return ttau, tnu
def EPInnerLoop(self, Sigma, y, mu, ttau, tnu, sig2e):
randpermN = np.random.permutation(range(y.shape[0]))
normPDF = stats.norm(0,1)
for i in randpermN: #iterate EP updates (in random order) over examples
#first find the cavity distribution params tau_ni and mu_ni
tau_ni = 1.0/Sigma[i,i] - ttau[i] #Equation 3.56 rhs (and 3.66) from GP book
mu_ni = (mu[i]/Sigma[i,i] - tnu[i]) / tau_ni #Equation 3.56 lhs (and 3.66) from GP book
#compute the desired derivatives of the individual log partition function
s2 = 1.0/tau_ni
sqrtS2 = np.sqrt(s2 + sig2e)
z = mu_ni * y[i] / sqrtS2 #Equation 3.82 from GP book
ttau_old, tnu_old = ttau[i], tnu[i] #find the new tilde params, keep old
Z = normPDF.logcdf(z)
n_p = np.exp(normPDF.logpdf(z) - Z) #Equation 3.82 from GP book
#matlab computation...
dlZ = y[i] * n_p / sqrtS2 #1st derivative of log(Z) wrt mean
d2lZ = -n_p*(z+n_p)/(sig2e+s2) #2nd derivative of log(Z) wrt mean
ttau_matlab = -d2lZ / (1+d2lZ/tau_ni)
tnu_matlab = (dlZ - mu_ni*d2lZ ) / (1+d2lZ/tau_ni)
#my new computation...
meanQx = mu_ni + s2*n_p * y[i] / sqrtS2 #This is mu_hat from Equations 3.57-3.59 (specifically this is Equation 3.85)
meanQx2 = dlZ/tau_ni + mu_ni
assert np.isclose(meanQx, meanQx2)
varQx = s2 - s2**2 * n_p / (sig2e+s2) * (z + n_p) #This is sigma^2_hat from Equations 3.57-3.59 (specifically this is equation 3.87)
#varQx2 = d2lZ/tau_ni**2 + 2*mu_ni*meanQx - mu_ni**2 + 1.0/tau_ni + dlZ**2/tau_ni**2 - meanQx2**2
varQx2 = (d2lZ+dlZ**2)/tau_ni**2 + 2*mu_ni*meanQx - mu_ni**2 + 1.0/tau_ni - meanQx2**2
assert np.isclose(varQx, varQx2)
ttau[i] = 1.0/varQx - tau_ni #Equation 3.59 (and 3.66)
tnu[i] = meanQx/varQx - mu_ni*tau_ni #Equation 3.59 (and 3.66)
ttau[i] = np.maximum(ttau[i], 0) #enforce positivity i.e. lower bound ttau by zero
dtt = ttau[i] - ttau_old
dtn = tnu[i] - tnu_old #rank-1 update Sigma
si = Sigma[:,i]
ci = dtt / (1+dtt*si[i])
mu -= (ci* (mu[i]+si[i]*dtn) - dtn) * si #Equation 3.53 from GP book
Sigma -= np.outer(ci*si, si) #Equation 3.70 from GP book (#takes 70% of total time)
return Sigma, mu, ttau, tnu
def epComputeParams2(self, K, y, ttau, tnu, m, likFuncMulti, likHyp):
n = y.shape[0]
sW = np.sqrt(ttau) #compute Sigma and mu
L = la.cholesky(np.eye(n) + np.outer(sW, sW) * K, overwrite_a=True, check_finite=False)
#L.T*L=B=eye(n)+sW*K*sW
V = la.solve_triangular(L, K*np.tile(sW, (n, 1)).T, trans=1, check_finite=False, overwrite_b=True)
Sigma = K - V.T.dot(V)
alpha = tnu-sW * self.solveChol(L, sW*(K.dot(tnu)+m))
mu = K.dot(alpha) + m
v = np.diag(Sigma)
tau_n = 1.0/np.diag(Sigma) - ttau #compute the log marginal likelihood
nu_n = mu/np.diag(Sigma) - tnu #vectors of cavity parameters
lZ = likFuncMulti(y, nu_n/tau_n, 1.0/tau_n, likHyp)
p = tnu - m*ttau #auxiliary vectors
q = nu_n - m*tau_n #auxiliary vectors
nlZ = (np.sum(np.log(np.diag(L))) - lZ.sum() - (p.T.dot(Sigma)).dot(p/2.0) + (v.T.dot(p**2))/2.0
- q.T.dot((ttau/tau_n*q - 2*p) * v)/2.0 - np.sum(np.log(1+ttau/tau_n))/2.0)
return (Sigma, mu, L, alpha, nlZ)
def epComputeParams(self, K, y, ttau, tnu, sig2e, m, llFunc):
n = y.shape[0]
sW = np.sqrt(ttau) #compute Sigma and mu
L = la.cholesky(np.eye(n) + np.outer(sW, sW) * K, overwrite_a=True, check_finite=False)
#L.T*L=B=eye(n)+sW*K*sW
V = la.solve_triangular(L, K*np.tile(sW, (n, 1)).T, trans=1, check_finite=False, overwrite_b=True)
Sigma = K - V.T.dot(V)
alpha = tnu-sW * self.solveChol(L, sW*(K.dot(tnu)+m))
mu = K.dot(alpha) + m
v = np.diag(Sigma)
tau_n = 1.0/np.diag(Sigma) - ttau #compute the log marginal likelihood
nu_n = mu/np.diag(Sigma) - tnu #vectors of cavity parameters
mu_temp = nu_n/tau_n
s2 = 1.0/tau_n
lZ = llFunc(y, mu_temp, s2, sig2e)
p = tnu - m*ttau #auxiliary vectors
q = nu_n - m*tau_n #auxiliary vectors
nlZ = (np.sum(np.log(np.diag(L))) - np.sum(lZ) - (p.T.dot(Sigma)).dot(p/2.0) + (v.T.dot(p**2))/2.0
- q.T.dot((ttau/tau_n*q - 2*p) * v)/2.0 - np.sum(np.log(1+ttau/tau_n))/2.0)
return (Sigma, mu, L, alpha, nlZ)
def solveChol(self, L, B, overwrite_b=True):
cholSolve1 = la.solve_triangular(L, B, trans=1, check_finite=False, overwrite_b=overwrite_b)
cholSolve2 = la.solve_triangular(L, cholSolve1, check_finite=False, overwrite_b=True)
return cholSolve2
def pairwise_ml(self):
K = self.kernel.getTrainKernel(np.array([0]))
yBinary = (self.y>0).astype(int)
t = stats.norm(0,1).isf(self.prev)
#estimate initial fixed effects
C = self.covars
if C.shape[1] > 0 and self.useFixed:
logreg = sklearn.linear_model.LogisticRegression(penalty='l2', C=1000, fit_intercept=True)
s0 = self.prev / (1-self.prev) * (1-np.mean(yBinary>0)) / np.mean(yBinary>0)
logreg.fit(C, yBinary)
Pi = logreg.predict_proba(C)[:,1]
Ki = Pi * s0 / (1 - Pi*(1-s0))
t = stats.norm(0,1).isf(Ki)
phit = stats.norm(0,1).pdf(t)
ysum_temp = np.tile(yBinary, (yBinary.shape[0], 1))
sumY = ysum_temp + ysum_temp.T
#sumY_flat = sumY[np.triu_indices(K.shape[0], 1)]
Y0 = (sumY==0)
Y1 = (sumY==1)
Y2 = (sumY==2)
P = np.mean(yBinary)
denom = (self.prev**2 * (1-self.prev)**2)
coef0 = phit**2 * P * (1-P)**2 * (2*self.prev-P) / denom
coef1 = -(phit**2 * 2 * P * (1-P) * (P**2 + self.prev - 2*self.prev*P)) / denom
coef2 = phit**2 * (1-P) * P**2 * (1-2*self.prev+P) / denom
intercept = Y0*(1-P)**2 + Y1*2*P*(1-P) + Y2*P**2
coef = Y0*coef0 + Y1*coef1 + Y2*coef2
coefG = coef*K
np.fill_diagonal(coefG, 0) #to ensure log(intercept + coefG*h2)=0 in diagonal
np.fill_diagonal(intercept, 1) #to ensure log(intercept + coefG*h2)=0 in diagonal
def pw_nll(h2):
ll = np.sum(np.log(intercept + coefG*h2))
if np.isnan(ll): ll=-np.inf
return -ll
optObj = optimize.minimize_scalar(pw_nll, bounds=(0, 1), method='bounded')
best_h2 = optObj.x
return best_h2, optObj.fun
def pcgc(self, rbf_hyp=None):
t = stats.norm(0,1).isf(self.prev)
if rbf_hyp is None:
K = self.kernel.getTrainKernel(np.array([0]))
else:
K = self.kernel.getTrainKernel(np.array([rbf_hyp, 0]))
y = self.y.copy()
y[y>0] = 1
y[y<=0] = 0
C = self.covars
if rbf_hyp is None and (C.shape[1] == 0 or not self.useFixed) and False:
P = np.sum(y>0) / float(y.shape[0])
phit = stats.norm(0,1).pdf(t)
xCoeff = P*(1-P) / (self.prev**2 * (1-self.prev)**2) * phit**2
yBinary = (y>0).astype(int)
yy = np.outer((yBinary-P) / np.sqrt(P*(1-P)), (yBinary-P) / np.sqrt(P*(1-P)))
xx = xCoeff * K
yy = yy[np.triu_indices(yy.shape[0], 1)]
xx = xx[np.triu_indices(xx.shape[0], 1)]
"""Test the validation module"""
import os
import re
import sys
import tempfile
import warnings
from functools import partial
from time import sleep
import pytest
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix
from sklearn.exceptions import FitFailedWarning
from sklearn.model_selection.tests.test_search import FailingClassifier
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_allclose
from sklearn.utils._mocking import CheckingClassifier, MockDataFrame
from sklearn.utils.validation import _num_samples
from sklearn.model_selection import cross_val_score, ShuffleSplit
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import cross_validate
from sklearn.model_selection import permutation_test_score
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import LeaveOneOut
from sklearn.model_selection import LeaveOneGroupOut
from sklearn.model_selection import LeavePGroupsOut
from sklearn.model_selection import GroupKFold
from sklearn.model_selection import GroupShuffleSplit
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
from sklearn.model_selection._validation import _check_is_permutation
from sklearn.model_selection._validation import _fit_and_score
from sklearn.model_selection._validation import _score
from sklearn.datasets import make_regression
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_iris
from sklearn.datasets import load_digits
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from sklearn.metrics import check_scoring
from sklearn.linear_model import Ridge, LogisticRegression, SGDClassifier
from sklearn.linear_model import PassiveAggressiveClassifier, RidgeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.cluster import KMeans
from sklearn.neural_network import MLPRegressor
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import Pipeline
from io import StringIO
from sklearn.base import BaseEstimator
from sklearn.base import clone
from sklearn.multiclass import OneVsRestClassifier
from sklearn.utils import shuffle
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection.tests.common import OneTimeSplitter
from sklearn.model_selection import GridSearchCV
try:
WindowsError
except NameError:
WindowsError = None
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2.0 - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes, expected_fit_params=None):
super().__init__(n_max_train_sizes)
self.x = None
self.expected_fit_params = expected_fit_params
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
if self.expected_fit_params:
missing = set(self.expected_fit_params) - set(params)
if missing:
raise AssertionError(
f"Expected fit parameter(s) {list(missing)} not seen."
)
for key, value in params.items():
if key in self.expected_fit_params and _num_samples(
value
) != _num_samples(X):
raise AssertionError(
f"Fit parameter {key} has length {_num_samples(value)}"
f"; expected {_num_samples(X)}."
)
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
class MockEstimatorWithSingleFitCallAllowed(MockEstimatorWithParameter):
"""Dummy classifier that disallows repeated calls of fit method"""
def fit(self, X_subset, y_subset):
assert not hasattr(self, "fit_called_"), "fit is called the second time"
self.fit_called_ = True
return super().fit(X_subset, y_subset)
def predict(self, X):
raise NotImplementedError
class MockClassifier:
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(
self,
X,
Y=None,
sample_weight=None,
class_prior=None,
sparse_sample_weight=None,
sparse_param=None,
dummy_int=None,
dummy_str=None,
dummy_obj=None,
callback=None,
):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
raise ValueError("X cannot be d")
if sample_weight is not None:
assert sample_weight.shape[0] == X.shape[0], (
"MockClassifier extra fit_param "
"sample_weight.shape[0] is {0}, should be {1}".format(
sample_weight.shape[0], X.shape[0]
)
)
if class_prior is not None:
assert class_prior.shape[0] == len(np.unique(y)), (
"MockClassifier extra fit_param class_prior.shape[0]"
" is {0}, should be {1}".format(class_prior.shape[0], len(np.unique(y)))
)
if sparse_sample_weight is not None:
fmt = (
"MockClassifier extra fit_param sparse_sample_weight"
".shape[0] is {0}, should be {1}"
)
assert sparse_sample_weight.shape[0] == X.shape[0], fmt.format(
sparse_sample_weight.shape[0], X.shape[0]
)
if sparse_param is not None:
fmt = (
"MockClassifier extra fit_param sparse_param.shape "
"is ({0}, {1}), should be ({2}, {3})"
)
assert sparse_param.shape == P_sparse.shape, fmt.format(
sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0],
P_sparse.shape[1],
)
return self
def predict(self, T):
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def predict_proba(self, T):
return T
def score(self, X=None, Y=None):
return 1.0 / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {"a": self.a, "allow_nd": self.allow_nd}
# XXX: use 2D array, since 1D X is being detected as a single sample in
# check_consistent_length
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
y = np.array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4])
# The number of samples per class needs to be > n_splits,
# for StratifiedKFold(n_splits=3)
y2 = np.array([1, 1, 1, 2, 2, 2, 3, 3, 3, 3])
P_sparse = coo_matrix(np.eye(5))
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cross_val_score(clf, X, y2)
assert_array_equal(scores, clf.score(X, y2))
# test with multioutput y
multioutput_y = np.column_stack([y2, y2[::-1]])
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
scores = cross_val_score(clf, X_sparse, y2)
assert_array_equal(scores, clf.score(X_sparse, y2))
# test with multioutput y
scores = cross_val_score(clf, X_sparse, multioutput_y)
assert_array_equal(scores, clf.score(X_sparse, multioutput_y))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cross_val_score(clf, X.tolist(), y2.tolist(), cv=3)
clf = CheckingClassifier(check_y=list_check)
scores = cross_val_score(clf, X, y2.tolist(), cv=3)
with pytest.raises(ValueError):
cross_val_score(clf, X, y2, scoring="sklearn")
# test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cross_val_score(clf, X_3d, y2)
clf = MockClassifier(allow_nd=False)
with pytest.raises(ValueError):
cross_val_score(clf, X_3d, y2, error_score="raise")
def test_cross_validate_many_jobs():
# regression test for #12154: cv='warn' with n_jobs>1 trigger a copy of
# the parameters leading to a failure in check_cv due to cv is 'warn'
# instead of cv == 'warn'.
X, y = load_iris(return_X_y=True)
clf = SVC(gamma="auto")
grid = GridSearchCV(clf, param_grid={"C": [1, 10]})
cross_validate(grid, X, y, n_jobs=2)
def test_cross_validate_invalid_scoring_param():
X, y = make_classification(random_state=0)
estimator = MockClassifier()
# Test the errors
error_message_regexp = ".*must be unique strings.*"
# List/tuple of callables should raise a message advising users to use
# dict of names to callables mapping
with pytest.raises(ValueError, match=error_message_regexp):
cross_validate(
estimator,
X,
y,
scoring=(make_scorer(precision_score), make_scorer(accuracy_score)),
)
with pytest.raises(ValueError, match=error_message_regexp):
cross_validate(estimator, X, y, scoring=(make_scorer(precision_score),))
# So should empty lists/tuples
with pytest.raises(ValueError, match=error_message_regexp + "Empty list.*"):
cross_validate(estimator, X, y, scoring=())
# So should duplicated entries
with pytest.raises(ValueError, match=error_message_regexp + "Duplicate.*"):
cross_validate(estimator, X, y, scoring=("f1_micro", "f1_micro"))
# Nested Lists should raise a generic error message
with pytest.raises(ValueError, match=error_message_regexp):
cross_validate(estimator, X, y, scoring=[[make_scorer(precision_score)]])
error_message_regexp = (
".*scoring is invalid.*Refer to the scoring glossary for details:.*"
)
# Empty dict should raise invalid scoring error
with pytest.raises(ValueError, match="An empty dict"):
cross_validate(estimator, X, y, scoring=(dict()))
# And so should any other invalid entry
with pytest.raises(ValueError, match=error_message_regexp):
cross_validate(estimator, X, y, scoring=5)
multiclass_scorer = make_scorer(precision_recall_fscore_support)
# Multiclass Scorers that return multiple values are not supported yet
# the warning message we're expecting to see
warning_message = (
"Scoring failed. The score on this train-test "
f"partition for these parameters will be set to {np.nan}. "
"Details: \n"
)
with pytest.warns(UserWarning, match=warning_message):
cross_validate(estimator, X, y, scoring=multiclass_scorer)
with pytest.warns(UserWarning, match=warning_message):
cross_validate(estimator, X, y, scoring={"foo": multiclass_scorer})
with pytest.raises(ValueError, match="'mse' is not a valid scoring value."):
cross_validate(SVC(), X, y, scoring="mse")
def test_cross_validate_nested_estimator():
# Non-regression test to ensure that nested
# estimators are properly returned in a list
# https://github.com/scikit-learn/scikit-learn/pull/17745
(X, y) = load_iris(return_X_y=True)
pipeline = Pipeline(
[
("imputer", SimpleImputer()),
("classifier", MockClassifier()),
]
)
results = cross_validate(pipeline, X, y, return_estimator=True)
estimators = results["estimator"]
assert isinstance(estimators, list)
assert all(isinstance(estimator, Pipeline) for estimator in estimators)
def test_cross_validate():
# Compute train and test mse/r2 scores
cv = KFold()
# Regression
X_reg, y_reg = make_regression(n_samples=30, random_state=0)
reg = Ridge(random_state=0)
# Classification
X_clf, y_clf = make_classification(n_samples=30, random_state=0)
clf = SVC(kernel="linear", random_state=0)
for X, y, est in ((X_reg, y_reg, reg), (X_clf, y_clf, clf)):
# It's okay to evaluate regression metrics on classification too
mse_scorer = check_scoring(est, scoring="neg_mean_squared_error")
r2_scorer = check_scoring(est, scoring="r2")
train_mse_scores = []
test_mse_scores = []
train_r2_scores = []
test_r2_scores = []
fitted_estimators = []
for train, test in cv.split(X, y):
est = clone(reg).fit(X[train], y[train])
train_mse_scores.append(mse_scorer(est, X[train], y[train]))
train_r2_scores.append(r2_scorer(est, X[train], y[train]))
test_mse_scores.append(mse_scorer(est, X[test], y[test]))
test_r2_scores.append(r2_scorer(est, X[test], y[test]))
fitted_estimators.append(est)
train_mse_scores = np.array(train_mse_scores)
test_mse_scores = np.array(test_mse_scores)
train_r2_scores = np.array(train_r2_scores)
"""
setinit:
this routine creates local directories and makes topo, qinit, and aux DEMs
to be used by setrun.py
If you have other files, modify this and/or your setrun.py accordingly.
"""
import numpy as np
import dclaw.topotools as gt
import os
#import pylab
#import pdb
cdir = os.path.abspath(os.environ['PWD'])
#---create local directories for data if they do not exist----------
indatadir=os.path.join(cdir,'init_data')
topodir = os.path.join(cdir,indatadir,'topo')
auxdir = os.path.join(cdir,indatadir,'aux')
qinitdir = os.path.join(cdir,indatadir,'qinit')
if not os.path.isdir(indatadir):
execstr = 'mkdir '+indatadir
os.system(execstr)
if not os.path.isdir(topodir):
execstr = 'mkdir '+topodir
os.system(execstr)
if not os.path.isdir(auxdir):
execstr = 'mkdir '+auxdir
os.system(execstr)
if not os.path.isdir(qinitdir):
execstr = 'mkdir '+qinitdir
os.system(execstr)
#------------------------------------------------------------------------
#---------------- functions for flume geometry to build DEMs ------------
def zero(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=90.0))[0]
Z = np.zeros(np.shape(X))
return Z
def wallzero(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=82.5))[0]
xhopperind = np.where((X[0,:]>=-15.0)&(X[0,:]<=0.0))[0]
Z = np.zeros(np.shape(X))
Z[np.ix_(yind1,xind)] = 1.6
Z[np.ix_(yind2,xind)] = 1.6
Z[np.ix_(yind1,xhopperind)] = 2.5
Z[np.ix_(yind2,xhopperind)] = 2.5
return Z
def zero_backstop(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=90.0))[0]
xbackstopind = np.where(X[0,:]<=-4.0)[0]
ybackstopind = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=2.5))[0]
Z = np.zeros(np.shape(X))
Z[np.ix_(ybackstopind,xbackstopind)] = 2.5
return Z
def wallzero_backstop(X,Y):
yind1 = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=0.0))[0]
yind2 = np.where((Y[:,0]>=2.0)&(Y[:,0]<=2.5))[0]
xind = np.where((X[0,:]>=-15.0)&(X[0,:]<=82.5))[0]
xhopperind = np.where((X[0,:]>=-15.0)&(X[0,:]<=0.0))[0]
Z = np.zeros(np.shape(X))
xbackstopind = np.where(X[0,:]<=-4.0)[0]
ybackstopind = np.where((Y[:,0]>=-0.5)&(Y[:,0]<=2.5))[0]
Z[np.ix_(yind1,xind)] = 1.6
Z[np.ix_(yind2,xind)] = 1.6
Z[np.ix_(yind1,xhopperind)] = 2.5
Z[np.ix_(yind2,xhopperind)] = 2.5
Z[np.ix_(ybackstopind,xbackstopind)] = 2.5
return Z
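#hypothetical usage sketch (grid spacing is an assumption, not from this script):
# X, Y = np.meshgrid(np.linspace(-15.0, 90.0, 1051), np.linspace(-1.0, 3.0, 41))
# Z = wallzero_backstop(X, Y)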
def flume_eta(X,Y):
hopperlen = 4.7
hmax = 1.9
hoppertop = 3.3
topangle = 17.0*np.pi/180.0
flumeangle = 31.0*np.pi/180.0
x0 = -hopperlen
x2 = -hmax*np.cos(0.5*np.pi - flumeangle)
x1 = x2 - hoppertop*np.cos(flumeangle-topangle)
x3 = 0.0
y2 = hmax*np.sin(0.5*np.pi - flumeangle)
y1 = y2 - hoppertop*np.sin(flumeangle-topangle)
slope0 = y1/(x1-x0)
slope1 = (y2-y1)/(x2-x1)
slope2 = -y2/(x3-x2)
yind = np.where((Y[:,0]<=2.0)&(Y[:,0]>=0.0))[0]
x0ind = np.where((X[0,:]>=x0)&(X[0,:]<x1))[0]
x1ind = np.where((X[0,:]>=x1)&(X[0,:]<x2))[0]
x2ind = np.where((X[0,:]>=x2)&(X[0,:]<x3))[0]
#pdb.set_trace()
Z=np.zeros(np.shape(X))
Z[np.ix_(yind,x0ind)] = (X[np.ix_(yind,x0ind)]-x0)*slope0
Z[np.ix_(yind,x1ind)] = y1+(X[np.ix_(yind,x1ind)]-x1)*slope1
Z[np.ix_(yind,x2ind)] = -(x3-X[np.ix_(yind,x2ind)])*slope2
return Z
def flume_eta_res(X,Y):
hopperlen = 4.7
hmax = 1.9
hoppertop = 3.3
topangle = 17.0*np.pi/180.0
flumeangle = 31.0*np.pi/180.0
x0 = -hopperlen
x2 = -hmax*np.cos(0.5*np.pi - flumeangle)
x1 = x2 - hoppertop*np.cos(flumeangle-topangle)
x3 = 0.0
y2 = hmax*np.sin(0.5*np.pi - flumeangle)
y1 = y2 - hoppertop*np.sin(flumeangle-topangle)
xm1 = x1 - y1*np.tan(0.5*np.pi - flumeangle)
slope0 = y1/(x1-xm1)
slope1 = (y2-y1)/(x2-x1)
slope2 = -y2/(x3-x2)
yind = np.where((Y[:,0]<=2.0)&(Y[:,0]>=0.0))[0]
xm1ind = np.where((X[0,:]>=xm1)&(X[0,:]<x1))[0]
x1ind = np.where((X[0,:]>=x1)&(X[0,:]<x2))[0]
x2ind = np.where((X[0,:]>=x2)&(X[0,:]<x3))[0]
#pdb.set_trace()
Z=np.zeros(np.shape(X))
Z[np.ix_(yind,xm1ind)] = (X[np.ix_(yind,xm1ind)]-xm1)*slope0
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from __future__ import division, print_function
import ast
from copy import copy
from collections import OrderedDict
from math import cos, sin
import numpy as np
from six import string_types
import warnings
from ase.geometry import cellpar_to_cell, complete_cell, get_distances
from matplotlib.colors import rgb2hex
from scipy.interpolate import interp1d
from pyiron.atomistics.structure.atom import Atom
from pyiron.atomistics.structure.sparse_list import SparseArray, SparseList
from pyiron.atomistics.structure.periodic_table import PeriodicTable, ChemicalElement, ElementColorDictionary
from pyiron.base.settings.generic import Settings
from scipy.spatial import cKDTree, Voronoi
try:
import spglib
except ImportError:
try:
import pyspglib as spglib
except ImportError:
raise ImportError("The spglib package needs to be installed")
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH - " \
"Computational Materials Design (CM) Department"
__version__ = "1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "production"
__date__ = "Sep 1, 2017"
s = Settings()
class Atoms(object):
"""
The Atoms class represents all the information required to describe a structure at the atomic scale. This class is
written in such a way that is compatible with the `ASE atoms class`_. Some of the functions in this module is based
on the corresponding implementation in the ASE package
Args:
elements (list/numpy.ndarray): List of strings containing the elements or a list of
atomistics.structure.periodic_table.ChemicalElement instances
numbers (list/numpy.ndarray): List of atomic numbers of elements
symbols (list/numpy.ndarray): List of chemical symbols
positions (list/numpy.ndarray): List of positions
scaled_positions (list/numpy.ndarray): List of scaled positions (relative coordinates)
pbc (list/numpy.ndarray/boolean): Tells if periodic boundary conditions should be applied on the three axes
cell (list/numpy.ndarray instance): A 3x3 array representing the lattice vectors of the structure
Note: Only one of elements/symbols or numbers should be assigned during initialization
Attributes:
indices (numpy.ndarray): A list of size N which gives the species index of the structure which has N atoms
.. _ASE atoms class: https://wiki.fysik.dtu.dk/ase/ase/atoms.html
"""
def __init__(self, symbols=None, positions=None, numbers=None, tags=None, momenta=None, masses=None,
magmoms=None, charges=None, scaled_positions=None, cell=None, pbc=None, celldisp=None, constraint=None,
calculator=None, info=None, indices=None, elements=None, dimension=None, species=None,
**qwargs):
if symbols is not None:
if elements is None:
elements = symbols
else:
raise ValueError("Only elements OR symbols should be given.")
if tags is not None or momenta is not None or masses is not None or charges is not None \
or celldisp is not None or constraint is not None or calculator is not None or info is not None:
s.logger.debug('Not supported parameter used!')
self._store_elements = dict()
self._species_to_index_dict = None
self.colorLut = ElementColorDictionary().to_lut()
self._is_scaled = False
if cell is not None:
# make it ASE compatible
if np.linalg.matrix_rank(cell) == 1:
cell = np.eye(len(cell)) * cell
else:
cell = np.array(cell)
self._cell = cell
self._species = list()
self.positions= None
self._pse = PeriodicTable()
self._tag_list = SparseArray()
self.indices = np.array([])
self._info = dict()
self.arrays = dict()
self.adsorbate_info = {}
self.bonds = None
self._pbc = False
self.dimension = 3 # Default
self.units = {"length": "A", "mass": "u"}
el_index_lst = list()
element_list = None
if (elements is None) and (numbers is None) and (indices is None):
return
if numbers is not None: # for ASE compatibility
if not (elements is None):
raise AssertionError()
elements = self.numbers_to_elements(numbers)
if elements is not None:
el_object_list = None
if isinstance(elements, str):
element_list = self.convert_formula(elements)
elif isinstance(elements, (list, tuple, np.ndarray)):
if not all([isinstance(el, elements[0].__class__) for el in elements]):
object_list = list()
for el in elements:
if isinstance(el, (str, np.str_)):
object_list.append(self.convert_element(el))
if isinstance(el, ChemicalElement):
object_list.append(el)
if isinstance(el, Atom):
object_list.append(el.element)
if isinstance(el, (int, np.integer)):
# pse = PeriodicTable()
object_list.append(self._pse.element(el))
el_object_list = object_list
if len(elements) == 0:
element_list = elements
else:
if isinstance(elements[0], (list, tuple, np.ndarray)):
elements = np.array(elements).flatten()
if isinstance(elements[0], string_types):
element_list = elements
elif isinstance(elements[0], ChemicalElement):
el_object_list = elements
elif isinstance(elements[0], Atom):
el_object_list = [el.element for el in elements]
positions = [el.position for el in elements]
elif elements.dtype in [int, np.integer]:
el_object_list = self.numbers_to_elements(elements)
else:
raise ValueError('Unknown static type for element in list: ' + str(type(elements[0])))
if el_object_list is None:
el_object_list = [self.convert_element(el) for el in element_list]
self.set_species(list(set(el_object_list)))
# species_to_index_dict = {el: i for i, el in enumerate(self.species)}
el_index_lst = [self._species_to_index_dict[el] for el in el_object_list]
elif indices is not None:
el_index_lst = indices
self.set_species(species)
if scaled_positions is not None:
if positions is not None:
raise ValueError("either position or scaled_positions can be given")
if cell is None:
raise ValueError('scaled_positions can only be used with a given cell')
positions = np.dot(np.array(cell).T, np.array(scaled_positions).T).T
if positions is None:
self.dimension = 3
if cell is not None:
positions = np.zeros((len(el_index_lst), self.dimension))
self.indices = np.array(el_index_lst)
self.positions = np.array(positions).astype(np.float)
self._tag_list._length = len(positions)
for key, val in qwargs.items():
print('set qwargs (ASE): ', key, val)
setattr(self, key, val)
if len(positions) > 0:
self.dimension = len(positions[0])
else:
self.dimension = 3
if dimension is not None:
self.dimension = dimension
if cell is not None:
if pbc is None:
self.pbc = True # default setting
else:
self.pbc = pbc
self.set_initial_magnetic_moments(magmoms)
@property
def cell(self):
"""
numpy.ndarray: A size 3x3 array which gives the lattice vectors of the cell as [a1, a2, a3]
"""
return self._cell
@cell.setter
def cell(self, value):
if value is None:
self._cell = None
else:
if self._is_scaled:
self.set_cell(value, scale_atoms=True)
else:
self.set_cell(value)
@property
def species(self):
"""
list: A list of atomistics.structure.periodic_table.ChemicalElement instances
"""
return self._species
# @species.setter
def set_species(self, value):
"""
Setting the species list
Args:
value (list): A list atomistics.structure.periodic_table.ChemicalElement instances
"""
if value is None:
return
value = list(value)
self._species_to_index_dict = {el: i for i, el in enumerate(value)}
self._species = value[:]
self._store_elements = {el.Abbreviation: el for el in value}
@property
def info(self):
"""
dict: This dictionary is merely used to be compatible with the ASE Atoms class.
"""
return self._info
@info.setter
def info(self, val):
self._info = val
@property
def pbc(self):
"""
list: A list of boolean values which gives the periodic boundary conditions along the three axes.
The default value is [True, True, True]
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return self._pbc
@pbc.setter
def pbc(self, val):
self._pbc = val
@property
def elements(self):
"""
numpy.ndarray: A size N list of atomistics.structure.periodic_table.ChemicalElement instances according
to the ordering of the atoms in the instance
"""
return np.array([self.species[el] for el in self.indices])
def new_array(self, name, a, dtype=None, shape=None):
"""
Adding a new array to the instance. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
shape (list/tuple): Shape of the array
"""
if dtype is not None:
a = np.array(a, dtype, order='C')
if len(a) == 0 and shape is not None:
a.shape = (-1,) + shape
else:
if not a.flags['C_CONTIGUOUS']:
a = np.ascontiguousarray(a)
else:
a = a.copy()
if name in self.arrays:
raise RuntimeError
for b in self.arrays.values():
if len(a) != len(b):
raise ValueError('Array has wrong length: %d != %d.' %
(len(a), len(b)))
break
if shape is not None and a.shape[1:] != shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, (a.shape[0:1] + shape)))
self.arrays[name] = a
def get_array(self, name, copy=True):
"""
Get an array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the required array
copy (bool): True if a copy of the array is to be returned
Returns:
An array of a copy of the array
"""
if copy:
return self.arrays[name].copy()
else:
return self.arrays[name]
def set_array(self, name, a, dtype=None, shape=None):
"""
Update array. This function is for the purpose of compatibility with the ASE package
Args:
name (str): Name of the array
a (list/numpy.ndarray): The array to be added
dtype (type): Data type of the array
shape (list/tuple): Shape of the array
"""
b = self.arrays.get(name)
if b is None:
if a is not None:
self.new_array(name, a, dtype, shape)
else:
if a is None:
del self.arrays[name]
else:
a = np.asarray(a)
if a.shape != b.shape:
raise ValueError('Array has wrong shape %s != %s.' %
(a.shape, b.shape))
b[:] = a
def add_tag(self, *args, **qwargs):
"""
Add tags to the atoms object.
Examples:
For selective dynamics::
>>> self.add_tag(selective_dynamics=[False, False, False])
"""
self._tag_list.add_tag(*args, **qwargs)
# @staticmethod
def numbers_to_elements(self, numbers):
"""
Convert atomic numbers in element objects (needed for compatibility with ASE)
Args:
numbers (list): List of Element Numbers (as Integers; default in ASE)
Returns:
list: A list of elements as needed for pyiron
"""
# pse = PeriodicTable() # TODO; extend to internal PSE which can contain additional elements and tags
atom_number_to_element = {}
for i_el in set(numbers):
i_el = int(i_el)
atom_number_to_element[i_el] = self._pse.element(i_el)
return [atom_number_to_element[i_el] for i_el in numbers]
def copy(self):
"""
Returns a copy of the instance
Returns:
pyiron.atomistics.structure.atoms.Atoms: A copy of the instance
"""
return self.__copy__()
def to_hdf(self, hdf, group_name="structure"):
"""
Save the object in a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str):
Group name with which the object should be stored. This same name should be used to retrieve the object
"""
# import time
with hdf.open(group_name) as hdf_structure:
# time_start = time.time()
hdf_structure["TYPE"] = str(type(self))
for el in self.species:
if isinstance(el.tags, dict):
with hdf_structure.open("new_species") as hdf_species:
el.to_hdf(hdf_species)
hdf_structure['species'] = [el.Abbreviation for el in self.species]
hdf_structure["indices"] = self.indices
with hdf_structure.open("tags") as hdf_tags:
for tag in self._tag_list.keys():
tag_value = self._tag_list[tag]
if isinstance(tag_value, SparseList):
tag_value.to_hdf(hdf_tags, tag)
hdf_structure["units"] = self.units
hdf_structure["dimension"] = self.dimension
if self.cell is not None:
with hdf_structure.open("cell") as hdf_cell:
hdf_cell["cell"] = self.cell
hdf_cell["pbc"] = self.pbc
# hdf_structure["coordinates"] = self.positions # "Atomic coordinates"
hdf_structure["positions"] = self.positions # "Atomic coordinates"
# potentials with explicit bonds (TIP3P, harmonic, etc.)
if self.bonds is not None:
hdf_structure["explicit_bonds"] = self.bonds
# print ('time in atoms.to_hdf: ', time.time() - time_start)
def from_hdf(self, hdf, group_name="structure"):
"""
Retrieve the object from a HDF5 file
Args:
hdf (pyiron.base.generic.hdfio.FileHDFio): HDF path to which the object is to be saved
group_name (str): Group name from which the Atoms object is retrieved.
Returns:
pyiron_atomistic.structure.atoms.Atoms: The retrieved atoms class
"""
if "indices" in hdf[group_name].list_nodes():
with hdf.open(group_name) as hdf_atoms:
if "new_species" in hdf_atoms.list_groups():
with hdf_atoms.open("new_species") as hdf_species:
self._pse.from_hdf(hdf_species)
el_object_list = [self.convert_element(el, self._pse) for el in hdf_atoms["species"]]
self.indices = hdf_atoms["indices"]
self._tag_list._length = len(self)
self.set_species(el_object_list)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
self.units = hdf_atoms["units"]
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
# Backward compatibility
position_tag = "positions"
if position_tag not in hdf_atoms.list_nodes():
position_tag = "coordinates"
if "is_absolute" in hdf_atoms.list_nodes():
if not tr_dict[hdf_atoms["is_absolute"]]:
self.set_scaled_positions(hdf_atoms[position_tag])
else:
self.positions = hdf_atoms[position_tag]
else:
self.positions = hdf_atoms[position_tag]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
else:
return self._from_hdf_old(hdf, group_name)
def _from_hdf_old(self, hdf, group_name="structure"):
"""
This function exists merely for the purpose of backward compatibility
"""
with hdf.open(group_name) as hdf_atoms:
self._pse = PeriodicTable()
if "species" in hdf_atoms.list_groups():
with hdf_atoms.open("species") as hdf_species:
self._pse.from_hdf(hdf_species)
chemical_symbols = np.array(hdf_atoms["elements"], dtype=str)
el_object_list = [self.convert_element(el, self._pse) for el in chemical_symbols]
self.set_species(list(set(el_object_list)))
self.indices = [self._species_to_index_dict[el] for el in el_object_list]
self._tag_list._length = len(self)
self.bonds = None
if "explicit_bonds" in hdf_atoms.list_nodes():
# print "bonds: "
self.bonds = hdf_atoms["explicit_bonds"]
if "tags" in hdf_atoms.list_groups():
with hdf_atoms.open("tags") as hdf_tags:
tags = hdf_tags.list_nodes()
for tag in tags:
# tr_dict = {'0': False, '1': True}
if isinstance(hdf_tags[tag], (list, np.ndarray)):
my_list = hdf_tags[tag]
self._tag_list[tag] = SparseList(my_list, length=len(self))
else:
my_dict = hdf_tags.get_pandas(tag).to_dict()
my_dict = {i: val for i, val in zip(my_dict["index"], my_dict["values"])}
self._tag_list[tag] = SparseList(my_dict, length=len(self))
self.cell = None
if "cell" in hdf_atoms.list_groups():
with hdf_atoms.open("cell") as hdf_cell:
self.cell = hdf_cell["cell"]
self.pbc = hdf_cell["pbc"]
tr_dict = {1: True, 0: False}
self.dimension = hdf_atoms["dimension"]
if "is_absolute" in hdf_atoms and not tr_dict[hdf_atoms["is_absolute"]]:
self.positions = hdf_atoms["coordinates"]
else:
self.set_scaled_positions(hdf_atoms["coordinates"])
self.units = hdf_atoms["units"]
if "bonds" in hdf_atoms.list_nodes():
self.bonds = hdf_atoms["explicit_bonds"]
return self
def center(self, vacuum=None, axis=(0, 1, 2)):
"""
Center atoms in unit cell.
Adopted from ASE code (https://wiki.fysik.dtu.dk/ase/_modules/ase/atoms.html#Atoms.center)
Args:
vacuum (float): If specified adjust the amount of vacuum when centering. If vacuum=10.0 there will thus be
10 Angstrom of vacuum on each side.
axis (tuple/list): List or tuple of integers specifying the axes along which the atoms should be centered
"""
# Find the orientations of the faces of the unit cell
c = self.cell
if c is None:
c = np.identity(self.dimension)
self.cell = c
dirs = np.zeros_like(c)
for i in range(3):
dirs[i] = np.cross(c[i - 1], c[i - 2])
dirs[i] /= np.linalg.norm(dirs[i]) # normalize
if np.dot(dirs[i], c[i]) < 0.0:
dirs[i] *= -1
# Now, decide how much each basis vector should be made longer
if isinstance(axis, int):
axes = (axis,)
else:
axes = axis
p = self.positions
longer = np.zeros(3)
shift = np.zeros(3)
for i in axes:
p0 = np.dot(p, dirs[i]).min()
p1 = np.dot(p, dirs[i]).max()
height = np.dot(c[i], dirs[i])
if vacuum is not None:
lng = (p1 - p0 + 2 * vacuum) - height
else:
lng = 0.0 # Do not change unit cell size!
top = lng + height - p1
shf = 0.5 * (top - p0)
cosphi = np.dot(c[i], dirs[i]) / np.linalg.norm(c[i])
longer[i] = lng / cosphi
shift[i] = shf / cosphi
# Now, do it!
translation = np.zeros(3)
for i in axes:
nowlen = np.sqrt(np.dot(c[i], c[i]))
self.cell[i] *= 1 + longer[i] / nowlen
translation += shift[i] * c[i] / nowlen
self.positions += translation
if self.pbc is None:
self.pbc = self.dimension * [True]
def set_positions(self, positions):
"""
Set positions. This function is for compatibility with ASE
Args:
positions (numpy.ndarray/list): Positions in absolute coordinates
"""
self.positions = np.array(positions)
self._tag_list._length = len(self)
def get_positions(self):
"""
Get positions. This function is for compatibility with ASE
Returns:
numpy.ndarray: Positions in absolute coordinates
"""
return self.positions
def select_index(self, el):
"""
Returns the indices of a given element in the structure
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement/list): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
if isinstance(el, str):
return np.where(self.get_chemical_symbols()==el)[0]
elif isinstance(el, ChemicalElement):
return np.where([e==el for e in self.get_chemical_elements()])[0]
if isinstance(el, (list, np.ndarray)):
if isinstance(el[0], str):
return np.where(np.isin(self.get_chemical_symbols(), el))[0]
elif isinstance(el[0], ChemicalElement):
return np.where([e in el for e in self.get_chemical_elements()])[0]
def select_parent_index(self, el):
"""
Returns the indices of a given element in the structure ignoring user defined elements
Args:
el (str/atomistics.structures.periodic_table.ChemicalElement): Element for which the indices should
be returned
Returns:
numpy.ndarray: An array of indices of the atoms of the given element
"""
parent_basis = self.get_parent_basis()
return parent_basis.select_index(el)
def get_tags(self):
"""
Returns the keys of the stored tags of the structure
Returns:
dict_keys: Keys of the stored tags
"""
return self._tag_list.keys()
def get_pbc(self):
"""
Returns a boolean array of the periodic boundary conditions along the x, y and z axes, respectively
Returns:
numpy.ndarray: Boolean array of length 3
"""
if not isinstance(self._pbc, np.ndarray):
self.set_pbc(self._pbc)
return np.array(self._pbc, bool)
def set_pbc(self, value):
"""
Sets the periodic boundary conditions on all three axes
Args:
value (numpy.ndarray/list): An array of bool type with length 3
"""
if value is None:
self._pbc = None
else:
if isinstance(value, np.ndarray):
self._pbc = value
elif value in (True, False):
value = self.dimension * [value]
if not (np.shape(np.array(value)) == (self.dimension,)):
raise AssertionError()
self._pbc = np.array(value, bool)
def convert_element(self, el, pse=None):
"""
Convert a string or an atom instance into a ChemicalElement instance
Args:
el (str/atomistics.structure.atom.Atom): String or atom instance from which the element should
be generated
pse (atomistics.structure.periodictable.PeriodicTable): PeriodicTable instance from which the element
is generated (optional)
Returns:
atomistics.structure.periodictable.ChemicalElement: The required chemical element
"""
if el in list(self._store_elements.keys()):
return self._store_elements[el]
if isinstance(el, string_types): # as symbol
element = Atom(el, pse=pse).element
elif isinstance(el, Atom):
element = el.element
el = el.element.Abbreviation
elif isinstance(el, ChemicalElement):
element = el
el = el.Abbreviation
else:
raise ValueError('Unknown static type to specify an element')
self._store_elements[el] = element
if hasattr(self, 'species'):
if element not in self.species:
self._species.append(element)
self.set_species(self._species)
return element
def get_chemical_formula(self):
"""
Returns the chemical formula of structure
Returns:
str: The chemical formula as a string
"""
species = self.get_number_species_atoms()
formula = ""
for string_sym, num in species.items():
if num == 1:
formula += str(string_sym)
else:
formula += str(string_sym) + str(num)
return formula
def get_chemical_indices(self):
"""
Returns the list of chemical indices as ordered in self.species
Returns:
numpy.ndarray: A list of chemical indices
"""
return self.indices
def get_atomic_numbers(self):
"""
Returns the atomic numbers of all the atoms in the structure
Returns:
numpy.ndarray: A list of atomic numbers
"""
el_lst = [el.AtomicNumber for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_chemical_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure
Returns:
numpy.ndarray: A list of chemical symbols
"""
el_lst = [el.Abbreviation for el in self.species]
return np.array([el_lst[el] for el in self.indices])
def get_parent_symbols(self):
"""
Returns the chemical symbols for all the atoms in the structure even for user defined elements
Returns:
numpy.ndarray: A list of chemical symbols
"""
sp_parent_list = list()
for sp in self.species:
if isinstance(sp.Parent, (float, np.floating, type(None))):
sp_parent_list.append(sp.Abbreviation)
else:
sp_parent_list.append(sp.Parent)
return np.array([sp_parent_list[i] for i in self.indices])
def get_parent_basis(self):
"""
Returns the basis with all user defined/special elements replaced by their parent elements
Returns:
pyiron.atomistics.structure.atoms.Atoms: Structure without any user defined elements
"""
parent_basis = copy(self)
new_species = np.array(parent_basis.species)
for i, sp in enumerate(new_species):
if not isinstance(sp.Parent, (float, np.floating, type(None))):
pse = PeriodicTable()
new_species[i] = pse.element(sp.Parent)
sym_list = [el.Abbreviation for el in new_species]
if len(sym_list) != len(np.unique(sym_list)):
uni, ind, inv_ind = np.unique(sym_list, return_index=True, return_inverse=True)
new_species = new_species[ind].copy()
parent_basis.set_species(list(new_species))
indices_copy = parent_basis.indices.copy()
for i, ind_ind in enumerate(inv_ind):
indices_copy[parent_basis.indices == i] = ind_ind
parent_basis.indices = indices_copy
return parent_basis
parent_basis.set_species(list(new_species))
return parent_basis
def get_chemical_elements(self):
"""
Returns the list of chemical element instances
Returns:
numpy.ndarray: A list of chemical element instances
"""
return self.elements
def get_number_species_atoms(self):
"""
Returns a dictionary with the species in the structure and the corresponding count in the structure
Returns:
collections.OrderedDict: An ordered dictionary with the species and the corresponding count
"""
count = OrderedDict()
# print "sorted: ", sorted(set(self.elements))
for el in sorted(set(self.get_chemical_symbols())):
count[el] = 0
for el in self.get_chemical_symbols():
count[el] += 1
return count
def get_species_symbols(self):
"""
Returns the symbols of the present species
Returns:
numpy.ndarray: List of the symbols of the species
"""
return np.array(sorted([el.Abbreviation for el in self.species]))
def get_species_objects(self):
"""
Returns:
"""
el_set = self.species
el_sym_lst = {el.Abbreviation: i for i, el in enumerate(el_set)}
el_sorted = self.get_species_symbols()
return [el_set[el_sym_lst[el]] for el in el_sorted]
def get_number_of_species(self):
"""
Returns:
"""
return len(self.species)
def get_number_of_degrees_of_freedom(self):
"""
Returns:
"""
return len(self) * self.dimension
def get_center_of_mass(self):
"""
Returns:
com (float): center of mass in A
"""
masses = self.get_masses()
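# mass-weighted mean position: com_j = sum_i(m_i * r_ij) / sum_i(m_i)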
return np.einsum('i,ij->j', masses, self.positions)/np.sum(masses)
def get_masses(self):
"""
Returns:
"""
el_lst = [el.AtomicMass for el in self.species]
return [el_lst[el] for el in self.indices]
def get_masses_dof(self):
"""
Returns:
"""
dim = self.dimension
return np.repeat(self.get_masses(), dim)
def get_volume(self, per_atom=False):
"""
Args:
per_atom (bool): True if volume per atom is to be returned
Returns:
volume (float): Volume in A**3
"""
if per_atom:
return np.abs(np.linalg.det(self.cell))/len(self)
else:
return np.abs(np.linalg.det(self.cell))
def get_density(self):
"""
Returns the density in g/cm^3
Returns:
float: Density of the structure
"""
# conv_factor = Ang3_to_cm3/scipi.constants.Avogadro
# with Ang3_to_cm3 = 1e24
conv_factor = 1.660539040427164
return conv_factor * np.sum(self.get_masses()) / self.get_volume()
def get_scaled_positions(self, wrap=True):
"""
Returns:
"""
pbc = np.array(self.pbc)
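# fractional coordinates s satisfy r = s @ cell (rows of cell are the lattice
# vectors), so s = r @ inv(cell); wrap into [0, 1) along periodic directions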
positions = np.einsum('jk,ij->ik', np.linalg.inv(self.cell), self.positions)
if wrap:
positions[:, pbc] = np.mod(positions[:, pbc], 1.)
return positions
def get_number_of_atoms(self):
"""
Returns:
"""
# assert(len(self) == np.sum(self.get_number_species_atoms().values()))
return len(self)
def set_absolute(self):
if self._is_scaled:
self._is_scaled = False
def set_relative(self):
if not self._is_scaled:
self._is_scaled = True
def center_coordinates_in_unit_cell(self, origin=0, eps=1e-4):
"""
Compact the atomic coordinates in the supercell as given by a1, a2, a3
Args:
origin: 0 to confine between 0 and 1, -0.5 to confine between -0.5 and 0.5
eps:
Returns:
"""
self.set_scaled_positions(np.mod(self.get_scaled_positions(wrap=False) + eps, 1) - eps + origin)
return self
def repeat(self, rep):
"""Create new repeated atoms object.
The *rep* argument should be a sequence of three positive
integers like *(2,3,1)* or a single integer (*r*) equivalent
to *(r,r,r)*."""
atoms = self.copy()
atoms *= rep
return atoms
def set_repeat(self, vec):
self *= vec
def reset_absolute(self, is_absolute):
raise NotImplementedError('This function was removed!')
def analyse_ovito_cna_adaptive(self, mode='total'):
from pyiron.atomistics.structure.ovito import analyse_ovito_cna_adaptive
warnings.filterwarnings("ignore")
return analyse_ovito_cna_adaptive(atoms=self, mode=mode)
def analyse_ovito_centro_symmetry(atoms, num_neighbors=12):
from pyiron.atomistics.structure.ovito import analyse_ovito_centro_symmetry
warnings.filterwarnings("ignore")
return analyse_ovito_centro_symmetry(atoms, num_neighbors=num_neighbors)
def analyse_ovito_voronoi_volume(atoms):
from pyiron.atomistics.structure.ovito import analyse_ovito_voronoi_volume
warnings.filterwarnings("module")
return analyse_ovito_voronoi_volume(atoms)
def analyse_phonopy_equivalent_atoms(atoms):
from pyiron.atomistics.structure.phonopy import analyse_phonopy_equivalent_atoms
#warnings.filterwarnings("ignore")
warnings.warn("analyse_phonopy_equivalent_atoms() is obsolete use get_symmetry()['equivalent_atoms'] instead")
return analyse_phonopy_equivalent_atoms(atoms)
@staticmethod
def _ngl_write_cell(a1, a2, a3, f1=90, f2=90, f3=90):
"""
Writes a PDB-formatted line to represent the simulation cell.
Args:
a1, a2, a3 (float): Lengths of the cell vectors.
f1, f2, f3 (float): Angles between the cell vectors (the standard cell-parameter angles alpha, beta, gamma) (in degrees).
Returns:
(str): The line defining the cell in PDB format.
"""
return 'CRYST1 {:8.3f} {:8.3f} {:8.3f} {:6.2f} {:6.2f} {:6.2f} P 1\n'.format(a1, a2, a3, f1, f2, f3)
@staticmethod
def _ngl_write_atom(num, species, x, y, z, group=None, num2=None, occupancy=1., temperature_factor=0.):
"""
Writes a PDB-formatted line to represent an atom.
Args:
num (int): Atomic index.
species (str): Elemental species.
x, y, z (float): Cartesian coordinates of the atom.
group (str): A...group name? (Default is None, repeat elemental species.)
num2 (int): An "alternate" index. (Don't ask me...) (Default is None, repeat first number.)
occupancy (float): PDB occupancy parameter. (Default is 1.)
temperature_factor (float): PDB temperature factor parameter. (Default is 0.)
Returns:
(str): The line defining an atom in PDB format
Warnings:
* The [PDB docs](https://www.cgl.ucsf.edu/chimera/docs/UsersGuide/tutorials/pdbintro.html) indicate that
the xyz coordinates might need to be in some sort of orthogonal basis. If you have weird behaviour,
this might be a good place to investigate.
"""
if group is None:
group = species
if num2 is None:
num2 = num
return 'ATOM {:>6} {:>4} {:>4} {:>5} {:10.3f} {:7.3f} {:7.3f} {:5.2f} {:5.2f} {:>11} \n'.format(
num, species, group, num2, x, y, z, occupancy, temperature_factor, species)
def _ngl_write_structure(self, elements, positions, cell):
"""
Turns structure information into a NGLView-readable protein-database-formatted string.
Args:
elements (numpy.ndarray/list): Element symbol for each atom.
positions (numpy.ndarray/list): Vector of Cartesian atom positions.
cell (numpy.ndarray/list): Simulation cell Bravais matrix.
Returns:
(str): The PDB-formatted representation of the structure.
"""
from ase.geometry import cell_to_cellpar, cellpar_to_cell
cellpar = cell_to_cellpar(cell)
exportedcell = cellpar_to_cell(cellpar)
rotation = np.linalg.solve(cell, exportedcell)
pdb_str = self._ngl_write_cell(*cellpar)
pdb_str += 'MODEL 1\n'
if rotation is not None:
positions = np.array(positions).dot(rotation)
for i, p in enumerate(positions):
pdb_str += self._ngl_write_atom(i, elements[i], *p)
pdb_str += 'ENDMDL \n'
return pdb_str
def _atomic_number_to_radius(self, atomic_number, shift=0.2, slope=0.1, scale=1.0):
"""
Give the atomic radius for plotting, which scales like the root of the atomic number.
Args:
atomic_number (int/float): The atomic number.
shift (float): A constant addition to the radius. (Default is 0.2.)
slope (float): A multiplier for the root of the atomic number. (Default is 0.1)
scale (float): How much to rescale the whole thing by.
Returns:
(float): The radius. (Not physical, just for visualization!)
"""
return (shift + slope * np.sqrt(atomic_number)) * scale
def _add_colorscheme_spacefill(self, view, elements, atomic_numbers, particle_size, scheme='element'):
"""
Set NGLView spacefill parameters according to a color-scheme.
Args:
view (NGLWidget): The widget to work on.
elements (numpy.ndarray/list): Elemental symbols.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
scheme (str): The scheme to use. (Default is "element".)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The modified widget.
"""
for elem, num in set(list(zip(elements, atomic_numbers))):
view.add_spacefill(selection='#' + elem,
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color_scheme=scheme)
return view
def _add_custom_color_spacefill(self, view, atomic_numbers, particle_size, colors):
"""
Set NGLView spacefill parameters according to per-atom colors.
Args:
view (NGLWidget): The widget to work on.
atomic_numbers (numpy.ndarray/list): Integer atomic numbers for determining atomic size.
particle_size (float): A scale factor for the atomic size.
colors (numpy.ndarray/list): A per-atom list of HTML or hex color codes.
Returns:
(nglview.NGLWidget): The modified widget.
"""
for n, num in enumerate(atomic_numbers):
view.add_spacefill(selection=[n],
radius_type='vdw',
radius=self._atomic_number_to_radius(num, scale=particle_size),
color=colors[n])
return view
@staticmethod
def _scalars_to_hex_colors(scalar_field, start=None, end=None, cmap=None):
"""
Convert scalar values to hex codes using a colormap.
Args:
scalar_field (numpy.ndarray/list): Scalars to convert.
start (float): Scalar value to map to the bottom of the colormap (values below are clipped). (Default is
None, use the minimal scalar value.)
end (float): Scalar value to map to the top of the colormap (values above are clipped). (Default is
None, use the maximal scalar value.)
cmap (matplotlib.cm): The colormap to use. (Default is None, which gives a blue-red divergent map.)
Returns:
(list): The corresponding hex codes for each scalar value passed in.
"""
if start is None:
start = np.amin(scalar_field)
if end is None:
end = np.amax(scalar_field)
interp = interp1d([start, end], [0, 1])
remapped_field = interp(np.clip(scalar_field, start, end)) # Map field onto [0,1]
if cmap is None:
try:
from seaborn import diverging_palette
except ImportError:
print("The package seaborn needs to be installed for the plot3d() function!")
cmap = diverging_palette(245, 15, as_cmap=True) # A nice blue-red palette
return [rgb2hex(cmap(scalar)[:3]) for scalar in remapped_field] # The slice gets RGB but leaves alpha
def plot3d(self, show_cell=True, show_axes=True, camera='orthographic', spacefill=True, particle_size=1.0,
select_atoms=None, background='white', color_scheme=None, colors=None,
scalar_field=None, scalar_start=None, scalar_end=None, scalar_cmap=None,
vector_field=None, vector_color=None, custom_array=None, custom_3darray=None):
"""
Plot3d relies on NGLView to visualize atomic structures. Here, we construct a string in the "protein database"
("pdb") format, then turn it into an NGLView "structure". PDB is a white-space sensitive format, so the
string snippets are carefully formatted.
The final widget is returned. If it is assigned to a variable, the visualization is suppressed until that
variable is evaluated, and in the meantime more NGL operations can be applied to it to modify the visualization.
Args:
show_cell (bool): Whether or not to show the frame. (Default is True.)
show_axes (bool): Whether or not to show xyz axes. (Default is True.)
camera (str): 'perspective' or 'orthographic'. (Default is 'orthographic'.)
spacefill (bool): Whether to use a space-filling or ball-and-stick representation. (Default is True, use
space-filling atoms.)
particle_size (float): Size of the particles. (Default is 1.)
select_atoms (numpy.ndarray): Indices of atoms to show, either as integers or a boolean array mask.
(Default is None, show all atoms.)
background (str): Background color. (Default is 'white'.)
color_scheme (str): NGLView color scheme to use. (Default is None, color by element.)
colors (numpy.ndarray): A per-atom array of HTML color names or hex color codes to use for atomic colors.
(Default is None, use coloring scheme.)
scalar_field (numpy.ndarray): Color each atom according to the array value (Default is None, use coloring
scheme.)
scalar_start (float): The scalar value to be mapped onto the low end of the color map (lower values are
clipped). (Default is None, use the minimum value in `scalar_field`.)
scalar_end (float): The scalar value to be mapped onto the high end of the color map (higher values are
clipped). (Default is None, use the maximum value in `scalar_field`.)
scalar_cmap (matplotlib.cm): The colormap to use. (Default is None, giving a blue-red divergent map.)
vector_field (numpy.ndarray): Add vectors (3 values) originating at each atom. (Default is None, no
vectors.)
vector_color (numpy.ndarray): Colors for the vectors (only available with vector_field). (Default is None,
vectors are colored by their direction.)
Possible NGLView color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
(nglview.NGLWidget): The NGLView widget itself, which can be operated on further or viewed as-is.
Warnings:
* Many features only work with space-filling atoms (e.g. coloring by a scalar field).
* The colour interpretation of some hex codes is weird, e.g. 'green'.
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
if custom_array is not None:
warnings.warn('custom_array is deprecated. Use scalar_field instead', DeprecationWarning)
scalar_field = custom_array
if custom_3darray is not None:
warnings.warn('custom_3darray is deprecated. Use vector_field instead', DeprecationWarning)
vector_field = custom_3darray
parent_basis = self.get_parent_basis()
elements = parent_basis.get_chemical_symbols()
atomic_numbers = parent_basis.get_atomic_numbers()
positions = self.positions
# If `select_atoms` was given, visualize only a subset of the `parent_basis`
if select_atoms is not None:
select_atoms = np.array(select_atoms, dtype=int)
elements = elements[select_atoms]
atomic_numbers = atomic_numbers[select_atoms]
positions = positions[select_atoms]
if colors is not None:
colors = np.array(colors)
colors = colors[select_atoms]
if scalar_field is not None:
scalar_field = np.array(scalar_field)
scalar_field = scalar_field[select_atoms]
if vector_field is not None:
vector_field = np.array(vector_field)
vector_field = vector_field[select_atoms]
if vector_color is not None:
vector_color = np.array(vector_color)
vector_color = vector_color[select_atoms]
# Write the nglview protein-database-formatted string
struct = nglview.TextStructure(self._ngl_write_structure(elements, positions, self.cell))
# Parse the string into the displayable widget
view = nglview.NGLWidget(struct)
if spacefill:
# Color by scheme
if color_scheme is not None:
if colors is not None:
warnings.warn('`color_scheme` is overriding `colors`')
if scalar_field is not None:
warnings.warn('`color_scheme` is overriding `scalar_field`')
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size, color_scheme)
# Color by per-atom colors
elif colors is not None:
if scalar_field is not None:
warnings.warn('`colors` is overriding `scalar_field`')
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by per-atom scalars
elif scalar_field is not None: # Color by per-atom scalars
colors = self._scalars_to_hex_colors(scalar_field, scalar_start, scalar_end, scalar_cmap)
view = self._add_custom_color_spacefill(view, atomic_numbers, particle_size, colors)
# Color by element
else:
view = self._add_colorscheme_spacefill(view, elements, atomic_numbers, particle_size)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if vector_color is None and vector_field is not None:
vector_color = 0.5 * vector_field / np.linalg.norm(vector_field, axis=-1)[:, np.newaxis] + 0.5
elif vector_color is not None and vector_field is not None:  # normalize/broadcast a user-supplied vector color
try:
if vector_color.shape != np.ones((len(self), 3)).shape:
vector_color = np.outer(np.ones(len(self)), vector_color / np.linalg.norm(vector_color))
except AttributeError:
vector_color = np.ones((len(self), 3)) * vector_color
if vector_field is not None:
for arr, pos, col in zip(vector_field, positions, vector_color):
view.shape.add_arrow(list(pos), list(pos + arr), list(col), 0.2)
if show_axes: # Add axes
axes_origin = -np.ones(3)
arrow_radius = 0.1
text_size = 1
text_color = [0, 0, 0]
arrow_names = ['x', 'y', 'z']
for n in [0, 1, 2]:
start = list(axes_origin)
shift = np.zeros(3)
shift[n] = 1
end = list(start + shift)
color = list(shift)
# We cast as list to avoid JSON warnings
view.shape.add_arrow(start, end, color, arrow_radius)
view.shape.add_text(end, text_color, text_size, arrow_names[n])
if camera != 'perspective' and camera != 'orthographic':
warnings.warn('Only perspective or orthographic is (likely to be) permitted for camera')
view.camera = camera
view.background = background
return view
def plot3d_ase(self, spacefill=True, show_cell=True, camera='perspective', particle_size=0.5, background='white', color_scheme='element', show_axes=True):
"""
Possible color schemes:
" ", "picking", "random", "uniform", "atomindex", "residueindex",
"chainindex", "modelindex", "sstruc", "element", "resname", "bfactor",
"hydrophobicity", "value", "volume", "occupancy"
Returns:
"""
try: # If the graphical packages are not available, the GUI will not work.
import nglview
except ImportError:
raise ImportError("The package nglview needs to be installed for the plot3d() function!")
# Always visualize the parent basis
parent_basis = self.get_parent_basis()
view = nglview.show_ase(parent_basis)
if spacefill:
view.add_spacefill(radius_type='vdw', color_scheme=color_scheme, radius=particle_size)
# view.add_spacefill(radius=1.0)
view.remove_ball_and_stick()
else:
view.add_ball_and_stick()
if show_cell:
if parent_basis.cell is not None:
view.add_unitcell()
if show_axes:
view.shape.add_arrow([-2, -2, -2], [2, -2, -2], [1, 0, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, 2, -2], [0, 1, 0], 0.5)
view.shape.add_arrow([-2, -2, -2], [-2, -2, 2], [0, 0, 1], 0.5)
if camera!='perspective' and camera!='orthographic':
print('Only perspective or orthographic is permitted')
return None
view.camera = camera
view.background = background
return view
def pos_xyz(self):
"""
Returns:
"""
x = self.positions[:, 0]
y = self.positions[:, 1]
z = self.positions[:, 2]
return x, y, z
def scaled_pos_xyz(self):
"""
Returns:
"""
xyz = self.get_scaled_positions(wrap=False)
return xyz[:,0], xyz[:,1], xyz[:,2]
def __select_slice(self, i_dim, i_flag, dist):
"""
Args:
i_dim:
i_flag:
dist:
Returns:
"""
if i_dim + 1 > self.dimension:
return True
if i_flag == 1:
return self.get_scaled_positions(wrap=False)[:, i_dim] < dist
elif i_flag == 0:
return True
elif i_flag == -1:
return self.get_scaled_positions(wrap=False)[:, i_dim] > 1. - dist
def get_boundary_region(self, dist):
"""
get all atoms in the boundary around the supercell which have a distance
to the supercell boundary of less than dist
Args:
dist:
Returns:
"""
rel_coordinates = self.get_scaled_positions(wrap=False)
dim = self.dimension
cell = self.cell.T # to use same definition as ASE
a1 = cell[0]
a2, a3 = 0, 0
min_i, max_i = -1, 2
iyl, iy, izl, iz = 0, 1, 0, 1
if dim > 1:
a2 = cell[1]
iyl, iy = min_i, max_i
if dim > 2:
a3 = cell[2]
izl, iz = min_i, max_i
index = np.arange(len(self))
new_coordinates = np.zeros((1, dim))
"""
This code is based on https://github.com/ekwebb/fNRI which in turn is based on https://github.com/ethanfetaya/NRI
(MIT licence)
"""
import numpy as np
import torch
from torch.utils.data.dataset import TensorDataset
from torch.utils.data import DataLoader
import torch.nn.functional as F
from torch.autograd import Variable
from itertools import permutations, chain
from math import factorial
from os import path
def my_softmax(input, axis=1):
trans_input = input.transpose(axis, 0).contiguous()
soft_max_1d = F.softmax(trans_input, dim=0) # added dim=0 as implicit choice is deprecated, dim 0 is edgetype due to transpose
return soft_max_1d.transpose(axis, 0)
def binary_concrete(logits, tau=1, hard=False, eps=1e-10):
y_soft = binary_concrete_sample(logits, tau=tau, eps=eps)
if hard:
y_hard = (y_soft > 0.5).float()
y = Variable(y_hard.data - y_soft.data) + y_soft
else:
y = y_soft
return y
def binary_concrete_sample(logits, tau=1, eps=1e-10):
logistic_noise = sample_logistic(logits.size(), eps=eps)
if logits.is_cuda:
logistic_noise = logistic_noise.cuda()
y = logits + Variable(logistic_noise)
return F.sigmoid(y / tau)
def sample_logistic(shape, eps=1e-10):
uniform = torch.rand(shape).float()
return torch.log(uniform + eps) - torch.log(1 - uniform + eps)
def sample_gumbel(shape, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from Gumbel(0, 1)
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
U = torch.rand(shape).float()
return - torch.log(eps - torch.log(U + eps))
def gumbel_softmax_sample(logits, tau=1, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Draw a sample from the Gumbel-Softmax distribution
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb
(MIT license)
"""
gumbel_noise = sample_gumbel(logits.size(), eps=eps)
if logits.is_cuda:
gumbel_noise = gumbel_noise.cuda()
y = logits + Variable(gumbel_noise)
return my_softmax(y / tau, axis=-1)
def gumbel_softmax(logits, tau=1, hard=False, eps=1e-10):
"""
NOTE: Stolen from https://github.com/pytorch/pytorch/pull/3341/commits/327fcfed4c44c62b208f750058d14d4dc1b9a9d3
Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
tau: non-negative scalar temperature
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size, n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
Constraints:
- this implementation only works on batch_size x num_features tensor for now
based on
https://github.com/ericjang/gumbel-softmax/blob/3c8584924603869e90ca74ac20a6a03d99a91ef9/Categorical%20VAE.ipynb ,
(MIT license)
"""
y_soft = gumbel_softmax_sample(logits, tau=tau, eps=eps)
if hard:
shape = logits.size()
_, k = y_soft.data.max(-1)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
y_hard = torch.zeros(*shape)
if y_soft.is_cuda:
y_hard = y_hard.cuda()
y_hard = y_hard.zero_().scatter_(-1, k.view(shape[:-1] + (1,)), 1.0)
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
y = Variable(y_hard - y_soft.data) + y_soft
else:
y = y_soft
return y
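# Hedged usage sketch (not part of the original source; shapes and values are
# illustrative): draw a hard one-hot sample from unnormalized log-probabilities
# while keeping gradients of the soft sample (straight-through estimator).
def _gumbel_softmax_demo():
    logits = Variable(torch.log(torch.FloatTensor([[0.1, 0.3, 0.6]])))
    sample = gumbel_softmax(logits, tau=0.5, hard=True)
    # each row is exactly one-hot, but gradients flow through the soft sample
    return sample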
def my_sigmoid(logits, hard=True, sharpness=1.0):
edges_soft = 1/(1+torch.exp(-sharpness*logits))
if hard:
edges_hard = torch.round(edges_soft)
# this bit is based on
# https://discuss.pytorch.org/t/stop-gradients-for-st-gumbel-softmax/530/5
if edges_soft.is_cuda:
edges_hard = edges_hard.cuda()
# this cool bit of code achieves two things:
# - makes the output value exactly one-hot (since we add then
# subtract y_soft value)
# - makes the gradient equal to y_soft gradient (since we strip
# all other gradients)
edges = Variable(edges_hard - edges_soft.data) + edges_soft
else:
edges = edges_soft
return edges
def binary_accuracy(output, labels):
preds = output > 0.5
correct = preds.type_as(labels).eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def edge_type_encode(edges): # this is used to give each 'interaction strength' a unique integer = 0, 1, 2 ..
unique = np.unique(edges)
encode = np.zeros(edges.shape)
for i in range(unique.shape[0]):
encode += np.where( edges == unique[i], i, 0)
return encode
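# e.g. edges holding strengths {0., 0.5, 1.} are mapped to codes {0, 1, 2};
# the ordering follows np.unique, i.e. ascending strength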
def loader_edges_encode(edges, num_atoms):
edges = np.reshape(edges, [edges.shape[0], edges.shape[1], num_atoms ** 2])
edges = np.array(edge_type_encode(edges), dtype=np.int64)
off_diag_idx = np.ravel_multi_index(
np.where(np.ones((num_atoms, num_atoms)) - np.eye(num_atoms)),
[num_atoms, num_atoms])
edges = edges[:,:, off_diag_idx]
return edges
def loader_combine_edges(edges):
edge_types_list = [ int(np.max(edges[:,i,:]))+1 for i in range(edges.shape[1]) ]
assert( edge_types_list == sorted(edge_types_list)[::-1] )
encoded_target = np.zeros( edges[:,0,:].shape )
base = 1
for i in reversed(range(edges.shape[1])):
encoded_target += base*edges[:,i,:]
base *= edge_types_list[i]
return encoded_target.astype('int')
def load_data_NRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode(edges_train, num_atoms)
edges_valid = loader_edges_encode(edges_valid, num_atoms)
edges_test = loader_edges_encode(edges_test, num_atoms)
edges_train = loader_combine_edges(edges_train)
edges_valid = loader_combine_edges(edges_valid)
edges_test = loader_combine_edges(edges_test)
feat_train = torch.FloatTensor(feat_train)
edges_train = torch.LongTensor(edges_train)
feat_valid = torch.FloatTensor(feat_valid)
edges_valid = torch.LongTensor(edges_valid)
feat_test = torch.FloatTensor(feat_test)
edges_test = torch.LongTensor(edges_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
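# Hedged usage sketch (folder name is an assumption, not from the source; it
# assumes data/<sim_folder>/{loc,vel,edges}_{train,valid,test}.npy exist):
# train_loader, valid_loader, test_loader, loc_max, loc_min, vel_max, vel_min = \
#     load_data_NRI(batch_size=128, sim_folder='springs5', data_folder='data')
# for features, edge_labels in train_loader:
#     pass  # features: [batch, num_atoms, num_timesteps, 4] (loc + vel)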
def load_data_fNRI(batch_size=1, sim_folder='', shuffle=True, data_folder='data'):
# the edges numpy arrays below are [ num_sims, N, N ]
loc_train = np.load(path.join(data_folder,sim_folder,'loc_train.npy'))
vel_train = np.load(path.join(data_folder,sim_folder,'vel_train.npy'))
edges_train = np.load(path.join(data_folder,sim_folder,'edges_train.npy'))
loc_valid = np.load(path.join(data_folder,sim_folder,'loc_valid.npy'))
vel_valid = np.load(path.join(data_folder,sim_folder,'vel_valid.npy'))
edges_valid = np.load(path.join(data_folder,sim_folder,'edges_valid.npy'))
loc_test = np.load(path.join(data_folder,sim_folder,'loc_test.npy'))
vel_test = np.load(path.join(data_folder,sim_folder,'vel_test.npy'))
edges_test = np.load(path.join(data_folder,sim_folder,'edges_test.npy'))
# [num_samples, num_timesteps, num_dims, num_atoms]
num_atoms = loc_train.shape[3]
loc_max = loc_train.max()
loc_min = loc_train.min()
vel_max = vel_train.max()
vel_min = vel_train.min()
# Normalize to [-1, 1]
loc_train = (loc_train - loc_min) * 2 / (loc_max - loc_min) - 1
vel_train = (vel_train - vel_min) * 2 / (vel_max - vel_min) - 1
loc_valid = (loc_valid - loc_min) * 2 / (loc_max - loc_min) - 1
vel_valid = (vel_valid - vel_min) * 2 / (vel_max - vel_min) - 1
loc_test = (loc_test - loc_min) * 2 / (loc_max - loc_min) - 1
vel_test = (vel_test - vel_min) * 2 / (vel_max - vel_min) - 1
# Reshape to: [num_sims, num_atoms, num_timesteps, num_dims]
loc_train = np.transpose(loc_train, [0, 3, 1, 2])
vel_train = np.transpose(vel_train, [0, 3, 1, 2])
feat_train = np.concatenate([loc_train, vel_train], axis=3)
loc_valid = np.transpose(loc_valid, [0, 3, 1, 2])
vel_valid = np.transpose(vel_valid, [0, 3, 1, 2])
feat_valid = np.concatenate([loc_valid, vel_valid], axis=3)
loc_test = np.transpose(loc_test, [0, 3, 1, 2])
vel_test = np.transpose(vel_test, [0, 3, 1, 2])
feat_test = np.concatenate([loc_test, vel_test], axis=3)
edges_train = loader_edges_encode( edges_train, num_atoms )
edges_valid = loader_edges_encode( edges_valid, num_atoms )
edges_test = loader_edges_encode( edges_test, num_atoms )
edges_train = torch.LongTensor(edges_train)
edges_valid = torch.LongTensor(edges_valid)
edges_test = torch.LongTensor(edges_test)
feat_train = torch.FloatTensor(feat_train)
feat_valid = torch.FloatTensor(feat_valid)
feat_test = torch.FloatTensor(feat_test)
train_data = TensorDataset(feat_train, edges_train)
valid_data = TensorDataset(feat_valid, edges_valid)
test_data = TensorDataset(feat_test, edges_test)
train_data_loader = DataLoader(train_data, batch_size=batch_size, shuffle=shuffle)
valid_data_loader = DataLoader(valid_data, batch_size=batch_size)
test_data_loader = DataLoader(test_data, batch_size=batch_size)
return train_data_loader, valid_data_loader, test_data_loader, loc_max, loc_min, vel_max, vel_min
def to_2d_idx(idx, num_cols):
idx = np.array(idx, dtype=np.int64)
y_idx = np.array(np.floor(idx / float(num_cols)), dtype=np.int64)
x_idx = idx % num_cols
return x_idx, y_idx
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
def get_triu_indices(num_nodes):
"""Linear triu (upper triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
triu_indices = (ones.triu() - eye).nonzero().t()
triu_indices = triu_indices[0] * num_nodes + triu_indices[1]
return triu_indices
def get_tril_indices(num_nodes):
"""Linear tril (lower triangular) indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
tril_indices = (ones.tril() - eye).nonzero().t()
tril_indices = tril_indices[0] * num_nodes + tril_indices[1]
return tril_indices
def get_offdiag_indices(num_nodes):
"""Linear off-diagonal indices."""
ones = torch.ones(num_nodes, num_nodes)
eye = torch.eye(num_nodes, num_nodes)
offdiag_indices = (ones - eye).nonzero().t()
offdiag_indices = offdiag_indices[0] * num_nodes + offdiag_indices[1]
return offdiag_indices
def get_triu_offdiag_indices(num_nodes):
"""Linear triu (upper) indices w.r.t. vector of off-diagonal elements."""
triu_idx = torch.zeros(num_nodes * num_nodes)
triu_idx[get_triu_indices(num_nodes)] = 1.
triu_idx = triu_idx[get_offdiag_indices(num_nodes)]
return triu_idx.nonzero()
def get_tril_offdiag_indices(num_nodes):
"""Linear tril (lower) indices w.r.t. vector of off-diagonal elements."""
tril_idx = torch.zeros(num_nodes * num_nodes)
tril_idx[get_tril_indices(num_nodes)] = 1.
tril_idx = tril_idx[get_offdiag_indices(num_nodes)]
return tril_idx.nonzero()
def get_minimum_distance(data):
data = data[:, :, :, :2].transpose(1, 2)
data_norm = (data ** 2).sum(-1, keepdim=True)
dist = data_norm + \
data_norm.transpose(2, 3) - \
2 * torch.matmul(data, data.transpose(2, 3))
min_dist, _ = dist.min(1)
return min_dist.view(min_dist.size(0), -1)
def get_buckets(dist, num_buckets):
dist = dist.cpu().data.numpy()
min_dist = np.min(dist)
from scipy.optimize import fmin_bfgs
from sklearn.base import BaseEstimator, ClassifierMixin, TransformerMixin
from svm_kernels import linear_kernel, polynomial_kernel, rbf_kernel, gaussian_kernel
import numpy as np
import cvxpy as cp
class PlattScaler(TransformerMixin):
'''
Perform Platt Scaling.
Based on Platt 1999
<NAME>., 1999. Probabilistic Outputs for Support Vector Machines and Comparisons
to Regularized Likelihood Methods, Advances in Large Margin Classifiers, 61-74,
Original code: https://github.com/neal-o-r/platt/blob/master/platt.py
'''
def __init__(self):
pass
def fit(self, f, y):
'''
Fit Platt model.
This method takes in the classifier outputs and the true labels,
and fits a scaling model to convert classifier outputs to true
probabilities. Sticks with Platt's weird notation throughout.
f: classifier outputs
y: true labels
'''
eps = np.finfo(np.float64).tiny # to avoid division by 0 warning
# Bayes priors
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(theta):
A, B = theta
E = np.exp(A * f + B)
P = 1. / (1. + E)
l = -(T * np.log(P + eps) + T1 * np.log(1. - P + eps))
return l.sum()
def grad(theta):
A, B = theta
E = np.exp(A * f + B)
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
dA = np.dot(TEP_minus_T1P, f)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., np.log((prior0 + 1.) / (prior1 + 1.))])
self.A_, self.B_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
def transform(self, f):
'''
Given a set of classifer outputs return probs.
'''
return 1. / (1. + np.exp(self.A_ * f + self.B_))
def fit_transform(self, f, y):
self.fit(f, y)
return self.transform(f)
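# Illustrative usage (not part of the original source): calibrate raw SVM
# decision values against binary labels in {-1, +1}.
#
#   scaler = PlattScaler()
#   f = np.array([-2.0, -0.5, 0.1, 1.5, 2.2])   # classifier outputs
#   y = np.array([-1, -1, 1, 1, 1])             # true labels
#   probs = scaler.fit_transform(f, y)          # calibrated P(y = +1 | f)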
class SVC(BaseEstimator, ClassifierMixin):
'''
######################################################################
# -A soft margin svm classifier capable of using non-linear kernels- #
######################################################################
'''
proba_fit = False
classes_ = [-1, 1] # Required
def __init__(
self,
C=None,
gamma=1,
delta=1,
kernel='linear',
threshold=1e-7,
dtype=np.float64,
probability=False,
solver_params={'solver':'SCS', 'eps': 1e-14, 'acceleration_lookback': 0},
):
'''
Args:
C - float or int, the soft margin penalty
kernel - function, takes two array-like objects as input and projects them
into a different space, returns a single array-like object
gamma - float or int, kernel hyperparameter
delta - float or int, kernel hyperparameter (polynomial only)
threshold - float or int, support vectors have alphas that exceed this value
dtype - callable or string, the data type to use, effects precision,
i.e. np.float32 or 'float64'
solver_params - dictionary, kwargs for the cvxpy solver
'''
self.kernel = kernel
self.C = C
self.gamma = gamma
self.delta = delta
self.threshold = threshold
self.dtype = dtype
self.probability = probability
self.solver_params = solver_params
def _init(self):
if self.C:
self.C = self.dtype(self.C)
if type(self.kernel) == str:
self.kernel = globals()[self.kernel + '_kernel']
def fit(self, X, y=None):
'''
Finds the optimal separating hyperplane by solving the dual constraint quadratic
optimization problem of maximizing the margin by using cvxpy package
Args:
X - array-like, shape=[n_samples, n_features], the training data
y - array-like, shape=[n_samples], training targets/labels
'''
self._init()
X = X.astype(self.dtype)
y = y.astype(self.dtype)
n_samples, n_features = X.shape
# Compute proto kernel/gram matrix
K = self.kernel(X, params={'gamma': self.gamma, 'delta': self.delta})
# Build the variables
P = np.outer(y, y) * K # kernel/gram matrix
q = -np.ones(n_samples, dtype=self.dtype) # negative makes it a minimization problem
A = y.reshape(1, -1)
b = self.dtype(0.0)
x = cp.Variable(n_samples) # alphas
# Constraints
if self.C: # If soft margin classifier...
G = np.vstack((-np.eye(n_samples, dtype=self.dtype), np.eye(n_samples, dtype=self.dtype)))
h = np.hstack((np.zeros(n_samples, dtype=self.dtype), np.ones(n_samples, dtype=self.dtype) * self.C))
else: # Hard margin...
G = np.diag(-np.ones(n_samples, dtype=self.dtype))
h = np.zeros(n_samples, dtype=self.dtype) # self.dtype(0.0) #
objective = cp.Minimize(0.5 * cp.quad_form(x, P) + q.T @ x)
constraints = [G @ x <= h,
A @ x == b]
problem = cp.Problem(objective, constraints)
problem.solve(**self.solver_params)
self.alphas = x.value
# Support vectors have non zero lagrange multipliers
mask = self.alphas > self.threshold # threshold otherwise we end up with everything being a support vector
self.alphas = self.alphas[mask]
self.support_vectors = X[mask]
self.support_vector_labels = y[mask]
# Calculate bias:
# .. math::
        #   b = \frac{1}{N_S} \sum_{v \in S} \left[ y_v - \sum_{u \in S} \alpha_u y_u k(x_u, x_v) \right]
self.b = self.dtype(0.0)
self.sv_idx = np.arange(len(X))[mask]
n_alpha = len(self.alphas)
for i in range(n_alpha):
self.b += self.support_vector_labels[i]
self.b -= np.sum(self.alphas * self.support_vector_labels * K[self.sv_idx[i], mask])
self.b /= n_alpha
if self.probability:
self.plattscaler = PlattScaler()
distances = self.decision_function(X)
self.plattscaler.fit(distances, y)
self.proba_fit = True
return self
def predict_proba(self, X):
if not self.proba_fit:
raise Exception("SVC must be initialized with 'probability' keyword argument set to True before calling fit method in order to use Platt Scaling and produce probabilistic outputs")
distances = self.decision_function(X)
n = distances.__len__()
pos_p = self.plattscaler.transform(distances)
neg_p = np.ones(n) - pos_p
        probabilities = np.array((neg_p, pos_p))
        # Stack to shape (n_samples, 2): column 0 = P(y=-1), column 1 = P(y=+1)
        return probabilities.T
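    # NOTE (not part of the original snippet): fit() and predict_proba() call
    # self.decision_function, which the source presumably defines further
    # down. A minimal sketch, assuming the kernel functions in svm_kernels
    # can also compute cross-kernels k(X, SV) when given two data arguments:
    #
    #   def decision_function(self, X):
    #       K = self.kernel(X, self.support_vectors,
    #                       params={'gamma': self.gamma, 'delta': self.delta})
    #       return K @ (self.alphas * self.support_vector_labels) + self.b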
#!/usr/bin/env python
""" Utility module. All auxiliary functions go here.
This module provides internal functionalities for everything else, aside from
handling the internal details of running the simulation itself. There is a
plethora of varied functions, so it's best left alone unless strictly necessary.
Do not modify this module unless you know exactly what you are doing.
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "<NAME>"
__authors__ = ["<NAME>"]
__contact__ = "<EMAIL>"
__copyright__ = "Copyright 2019, UAH"
__credits__ = ["<NAME>"]
__date__ = "2019/03/29"
__deprecated__ = False
__email__ = "<EMAIL>"
__license__ = "GPLv3"
__maintainer__ = "<NAME>"
__status__ = "Development"
__version__ = "0.0.2"
from PIL import Image, ImageDraw, ImageColor
import numpy as np
import pygame
from scipy import ndimage as filters
from matplotlib.pyplot import imshow
from scipy.stats import norm
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Button
import matplotlib as mpl
import time
import math
import json
import copy
from robot import Robot
import controller
from controllers.controllers import get_controllers
start_time = time.time()
last_call = time.time()
frames = 0
delta = 0
pressed = []
labels = []
co2_center = (0, 0)
scale = 1
grid_size = []
ax = None
frozen_dist = None
fig = None
npdata = None
gui = True
show_robot = True
button = None
showFPS = False
run = True
screen = None
clock = None
def switch_show_robot(dummy):
"""
Helper function that controls whether the robot should be displayed and updated.
Mainly created as a callback for buttons to use, can be called before the
simulation starts in order to get the corresponding button to show up.
It requires a single positional argument due to Matplotlib constraints.
"""
global show_robot
show_robot = not show_robot
def press(event):
"""Helper function that handles key presses. Must be registered to a plot."""
global pressed
pressed.append(event.key)
def calculate_delta():
"""
Helper function that calculates the delta value for any given update call.
Necessary in order for the update to be performed somewhat smoothly.
"""
'''global last_call
new_call = time.time()
delta = new_call - last_call
last_call = new_call
return delta'''
global clock, delta
delta = clock.tick(30)
return delta
def generate_dist(size = 1):
global frozen_dist, scale
scale = 1/size
norm.stats(scale=scale)
frozen_dist = norm()
def get_reading(x, y):
global frozen_dist
if frozen_dist is None:
generate_dist(1000)
distance = np.linalg.norm((co2_center[0] - x, co2_center[1] - y))
return frozen_dist.pdf(distance*scale)
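# Illustrative usage (not part of the original source): readings fall off
# with distance from co2_center, following the scaled normal pdf above.
#
#   generate_dist(1000)           # CO2 plume with radius ~1000 px
#   get_reading(0, 0)             # strongest reading at the source
#   get_reading(500, 0)           # weaker reading 500 px away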
def create_controllers():
"""
Driver function to abstract the process of instancing a Controller object
using factories.
Inputs:
- No input, everything comes from the config global variable
Outputs:
- a fully configured Controller object or a list of Controllers, depending on the config
"""
global npdata
if 'class' in config or 'controllers' in config:
return get_controllers(config)
else:
raise KeyError("The configuration file received doesn't contain a \"class\" attribute")
def create_robot(json_file = '../conf/robot.json', controller = None):
"""
Uses a json file to generate a fully configured Robot object.
Inputs:
- json_file: path to the JSON configuration file for the robot in question.
- controller: pre-configured Controller object.
Outputs:
- fully configured Robot object.
"""
with open(json_file, 'r') as fp:
f = json.load(fp)
if 'name' in f:
r = Robot(identifier = f['id'], x = f['x'], y = f['y'],\
orientation = f['orientation'],\
vision_range = (f['sonar_range'][0], f['sonar_range'][1]),\
sensors = f['sonars'],\
radius = f['radius'], max_speed = f['max_speed'], controller=controller,
name=f['name'])
else:
r = Robot(identifier = f['id'], x = f['x'], y = f['y'],\
orientation = f['orientation'],\
vision_range = (f['sonar_range'][0], f['sonar_range'][1]),\
sensors = f['sonars'],\
radius = f['radius'], max_speed = f['max_speed'], controller=controller)
if 'battery' in f:
r.insert_battery_details(f['step'], f['battery'], f['charging_rate'],
f['movement_cost'], f['reading_cost'],
f['picture_cost'], f['generic_cost'])
if 'color' in f:
r.set_color(f['color'])
return r
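# Illustrative robot configuration (hypothetical values, not from the repo);
# the keys mirror what create_robot() reads above:
#
#   {
#       "id": 1, "name": "rover", "x": 100, "y": 120, "orientation": 90,
#       "sonar_range": [0, 200], "sonars": [0, 90, 180, 270],
#       "radius": 10, "max_speed": 5, "color": "red"
#   }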
def init_globals_from_config():
"""
Initialize all global variables based on config
TODO: Remove global variables and keep only the config dict, to at some point remove it too and pass it as reference if needed
"""
    global gui, showFPS, co2_center
    showFPS = 'fps' in config
    gui = config['gui']
    co2_center = config['co2_center'] if 'co2_center' in config else (0, 0)
def create_robots(controllers):
"""
Returns a list of robots or one robot depending on the config
    (TBC, Pedro) Why did we do a deepcopy of the controller config only when we received 1 controller, but not when multiple?
    If this is still a requirement, we need to add it to the r.append line
"""
if type(config['robot']) is list:
r = []
for i, path in enumerate(config['robot']):
if i >= len(controllers):
r.append(create_robot(path, controllers[0]))
else:
r.append(create_robot(path, controllers[i]))
else:
r = create_robot(config['robot'], controllers[0])
return r
def load_simulation(config_mgr):
"""
Loads a simulation using a configuration file. For the time being, it limits itself to loading the corresponding map and robot.
Inputs:
- json_file: path to the configuration file describing the simulation to be loaded. This configuration file must be a JSON
containing the following:
* stage: string defining the path to the image file that represents the stage to be loaded.
* robot: string defining the path to the configuration file of the robot that will be used.
"""
global gui, npdata, co2_center, showFPS, config
# Load the config in the global variable
config = config_mgr.get_config()
# Init global variables based on config dict
# TODO: I think we should refactor everything to only use config as global (Pedro)
init_globals_from_config()
# Load the image used in the stage
npdata = load_image(config['stage'])
# Get the controller if only one or a list of controllers
controllers = create_controllers()
if 'co2_radius' in config:
generate_dist(config['co2_radius'])
robots = create_robots(controllers)
display_image(robots)
def update_loop(robots, npdata):
global delta, pressed, run
while run:
init_time = time.time()
if gui:
delta = calculate_delta()
else:
delta = 0.1
for r in robots:
r.update(npdata, delta, True)
pressed.clear()
time.sleep(1/80)
def update(robots, npdata):
delta = calculate_delta()/1000
for r in robots:
r.update(npdata, delta, True)
def animate(robots):
"""
Update function. Updates internal world data, then prints it to a plot.
Must be registered to said plot.
"""
global start_time, frames, delta, show_robot, screen, clock, grid_size
if show_robot:
for r in robots:
r.get_lock().acquire()
if r.controller.has_cur_detected_edge_list():
'''for e in r.controller.detected_edges:
pygame.draw.circle(screen, r.color, (int(e[0]), int(e[1])), 1)'''
for a in r.controller.actual_sensor_angles:
dstX = r.x + np.cos(np.radians(a)) * r.controller.cur_detected_edges_distances[r.controller.actual_sensor_angles.index(a)]
dstY = r.y + np.sin(np.radians(a)) * r.controller.cur_detected_edges_distances[r.controller.actual_sensor_angles.index(a)]
pygame.draw.line(screen, (255, 0, 255), (int(r.x), int(r.y)), (int(dstX), int(dstY)), 1)
for e in r.controller.cur_detected_edges:
pygame.draw.circle(screen, (0, 255, 0), (int(e[0]), int(e[1])), int((100/np.linalg.norm((e[0]-r.x, e[1]-r.y)))/90))
if type(r.color) is str:
r.color = list(mpl.colors.to_rgb(mpl.colors.get_named_colors_mapping()[r.color]))
r.color[0] *= 255
r.color[1] *= 255
r.color[2] *= 255
r.color = tuple(r.color)
pygame.draw.circle(screen, r.color, (int(r.x), int(r.y)), int(r.radius))
dstX = r.x + np.cos(np.radians(r.orientation)) * 2 * r.radius
            dstY = r.y + np.sin(np.radians(r.orientation)) * 2 * r.radius
# -*- coding: utf-8 -*-
# samplers.py
# MIT License
# Copyright (c) 2017 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from numpy import eye, zeros, dot, array, diag, sqrt, mean
from scipy.stats import multivariate_normal, invgamma, invgauss, gamma
from numpy.linalg import inv, norm
from trendpy.globals import derivative_matrix
__all__ = ['Parameter','Parameters','Sampler','L1']
class Parameter(object):
""" Implements an unknown parameter to be estimated
Examples
--------
We first need to import the wanted posterior distribution in `Scipy`:
>>> from scipy.stats import norm
	and then we can instantiate the parameter:
>>> param1 = Parameter('lambda',norm,(1,1),0.1)
"""
def __init__(self, name, distribution, size, current_value=None):
""" Creates a parameter to estimate in the MCMC algorithm.
:param name: Name of the parameter (unique identification)
:type name: string
:param distribution: Posterior Probability distribution of the parameter.
:type distribution: `Scipy.stats.rv_continuous`
:param size: Dimension of the parameter.
:type name: tuple
:param current_value: Current value of the parameter
:type current_value: array
"""
self.name = str(name)
self.distribution = distribution
self.size = size
self.current_value = current_value
@property
def current_value(self):
"""Parameter current value (last generated)"""
return self.__current_value
@current_value.setter
def current_value(self, current_value):
self.__current_value = current_value
def __str__(self):
return """
parameter name : %s
parameter distribution : %s
""" % (self.name, self.distribution.__str__())
def __len__(self):
return 1
def is_multivariate(self):
""" Checks if the parameter is univariate."""
return not self.size == (1,1)
class Parameters(object):
""" Implements the set of parameters to be estimated
Examples
--------
We first need to import the wanted posterior distribution in `Scipy.stats`:
>>> from scipy.stats import invgamma
then we can create an empty parameter set and add a new parameter:
>>> param1 = Parameter('sigma2',invgamma,(1,1),0.09)
	>>> params = Parameters()
>>> params.append(param1)
>>> print(params)
"""
def __init__(self, list=None, hierarchy=None):
""" Creates a parameter set to estimate in the MCMC algorithm.
:param list: A dictionary with the parameters to estimate
:type list: dict
:param hierarchy: List containing the order in which
the Gibbs sampler updates the parameter values.
:type hierarchy: array
"""
self.list = list
self.hierarchy = hierarchy
@property
def list(self):
""" Dictionary containing the parameters to be
estimated.
"""
return self.__list
@list.setter
def list(self, new_value):
self.__list = new_value if new_value is not None else {}
@property
def hierarchy(self):
""" List containing the order in which
the Gibbs sampler updates the
parameter values.
"""
return self.__hierarchy
@hierarchy.setter
def hierarchy(self, new_value):
self.__hierarchy = new_value if new_value is not None else []
def __len__(self):
return len(self.list)
def __str__(self):
descr = '(parameters: ----------------------- \n'
descr += ', \n'.join(['name: %s, distribution: %s, size: %s' % (str(l.name), l.distribution.__str__(), l.size) for l in self.list.values()])
descr += '\n ----------------------- )'
return descr
def __getitem__(self, key):
if isinstance(key,str):
try:
return self.list[key]
except KeyError:
print("Key %s not found in parameter set" % key)
except:
print("Wrong key")
elif isinstance(key,int):
try:
return self.list[self.hierarchy[key]]
except KeyError:
print("Key %s not found in parameter set" % key)
except IndexError:
print("Index out of bounds: %s > %s" % (key,len(self.hierarchy)))
else:
raise TypeError("Wrong Type")
def __delitem__(self,key):
pass
def __contains__(self, item):
if isinstance(item,Parameter):
try:
return item.name in self.hierarchy
except KeyError:
print("Key %s not found in parameter set" % key)
except:
print("Wrong key: %s" % item.name)
else:
raise TypeError("Wrong Type")
def append(self, parameter):
""" Adds a parameter to the parameter set.
First parameter added is the first in the
hierarchy.
:param parameter: parameter to estimate
:type parameter: trendpy.Parameter
"""
if not parameter.name in self.list:
self.list[parameter.name] = parameter
self.hierarchy.append(parameter.name)
def clear(self):
""" Removes all parameters."""
self.list = None
self.hierarchy = None
class Sampler(object):
""" Abstract class for implementing Gibbs sampling algorithms and providing outputs."""
def __init__(self):
self.parameters = None
self.data = None
self.options = None
self.derivative_matrix = None
self.parameters = None
def define_parameters(self):
""" Method to set the parameter set to be updated
in the MCMC algorithm.
"""
		raise NotImplementedError("Must be overridden")
def initial_value(self,parameter_name):
""" Method that sets the initial value of the
parameters to be estimated.
:param parameter_name: name of the parameter.
:type parameter_name: str
:return: initial value of the parameter
:rtype: `Numpy.dnarray`
"""
		raise NotImplementedError("Must be overridden")
def distribution_parameters(self, parameter_name):
""" Method that sets the parameters of the posterior
distribution of the parameters to be estimated.
:param parameter_name: name of the parameter.
:type parameter_name: str
:return: dictionary the parameters needed to compute the
next value of the Markov chain for the parameter with name:
parameter_name.
:rtype: dict
"""
		raise NotImplementedError("Must be overridden")
def generate(self,parameter_name):
""" This method handles the generation of the random draws of
the Markov chain for each parameters.
:param parameter_name: name of the parameter of interest
:type parameter_name: string
:return: random draw from the posterior probability distribution
:rtype: `Numpy.dnarray`
"""
		raise NotImplementedError("Must be overridden")
def output(self, simulations, burn, parameter_name):
""" Computes the poserior mean of the parameters.
:param simulations: history of the Markov chain simulation
:type simulations: dict
:param burn: number of draws dismissed as burning samples
:type burn: int
:param parameter_name: name of the parameter of interest
:type parameter_name: string
:return: output of the MCMC algorithm
:rtype: `Numpy.dnarray`
"""
		raise NotImplementedError("Must be overridden")
class Factory(object):
def create(self,*args,**kwargs):
return Sampler()
class L1(Sampler):
def __init__(self,data,alpha=0.1,rho=0.1,total_variation_order=2):
self.rho = rho
self.alpha = alpha
self.__data = data
self.size = len(data)
self.total_variation_order = total_variation_order
self.derivative_matrix = derivative_matrix(self.size, self.total_variation_order)
self.define_parameters()
@property
def data(self):
return self.__data
@property
def parameters(self):
""" List containing the parameters to estimate."""
return self.__parameters
@parameters.setter
def parameters(self, new_value):
self.__parameters = new_value if new_value is not None else []
def define_parameters(self):
params=Parameters()
params.append(Parameter("trend", multivariate_normal, (self.size,1)))
params.append(Parameter("sigma2", invgamma, (1,1)))
params.append(Parameter("lambda2", gamma, (1,1)))
params.append(Parameter("omega", invgauss, (self.size-self.total_variation_order,1)))
self.parameters = params
def initial_value(self,parameter_name):
if parameter_name=='trend':
return array([(4*i+10)/20 for i in range(self.size)])
elif parameter_name=='sigma2':
return 0.8
elif parameter_name=='lambda2':
return 1
elif parameter_name==str('omega'):
return 0.8*array([(30*(i/2)+3)/(2*(i/2)+35) for i in range(self.size-self.total_variation_order)])
def distribution_parameters(self, parameter_name):
if parameter_name=='trend':
E = dot(dot(self.derivative_matrix.T,inv(diag(self.parameters.list['omega'].current_value))),self.derivative_matrix)
mean = dot(inv(eye(self.size)+E),self.data)
			cov = (self.parameters.list['sigma2'].current_value)*inv(eye(self.size)+E)
			return {'mean': mean, 'cov': cov}
from __future__ import print_function
import os
import pandas as pd
import numpy as np
from . import load_stdata
from ..config import Config
from ..utils import string2timestamp
'''
STMatrix: spatio-temporal matrix object; one instance is created for each
dataset used for prediction.
'''
class STMatrix(object):
"""docstring for STMatrix"""
def __init__(self, data, timestamps, T=48, CheckComplete=True):
super(STMatrix, self).__init__()
assert len(data) == len(timestamps)
self.data = data
self.timestamps = timestamps
self.T = T
self.pd_timestamps = string2timestamp(timestamps, T=self.T)
if CheckComplete:
self.check_complete()
# index
self.make_index()
def make_index(self):
self.get_index = dict()
for i, ts in enumerate(self.pd_timestamps):
self.get_index[ts] = i
def check_complete(self):
missing_timestamps = []
offset = pd.DateOffset(minutes=24 * 60 // self.T)
pd_timestamps = self.pd_timestamps
i = 1
while i < len(pd_timestamps):
if pd_timestamps[i-1] + offset != pd_timestamps[i]:
missing_timestamps.append("(%s -- %s)" % (pd_timestamps[i-1], pd_timestamps[i]))
i += 1
for v in missing_timestamps:
print(v)
assert len(missing_timestamps) == 0
def get_matrix(self, timestamp):
return self.data[self.get_index[timestamp]]
def save(self, fname):
pass
def check_it(self, depends):
for d in depends:
if d not in self.get_index.keys():
return False
return True
def create_dataset(self, len_closeness=3, len_trend=3, TrendInterval=7, len_period=3, PeriodInterval=1):
"""current version
"""
# offset_week = pd.DateOffset(days=7)
offset_frame = pd.DateOffset(minutes=24 * 60 // self.T)
XC = []
XP = []
XT = []
Y = []
timestamps_Y = []
depends = [range(1, len_closeness+1),
[PeriodInterval * self.T * j for j in range(1, len_period+1)],
[TrendInterval * self.T * j for j in range(1, len_trend+1)]]
i = max(self.T * TrendInterval * len_trend, self.T * PeriodInterval * len_period, len_closeness)
while i < len(self.pd_timestamps):
Flag = True
for depend in depends:
if Flag is False:
break
Flag = self.check_it([self.pd_timestamps[i] - j * offset_frame for j in depend])
if Flag is False:
i += 1
continue
x_c = [self.get_matrix(self.pd_timestamps[i] - j * offset_frame) for j in depends[0]]
x_p = [self.get_matrix(self.pd_timestamps[i] - j * offset_frame) for j in depends[1]]
x_t = [self.get_matrix(self.pd_timestamps[i] - j * offset_frame) for j in depends[2]]
y = self.get_matrix(self.pd_timestamps[i])
if len_closeness > 0:
XC.append(np.vstack(x_c))
if len_period > 0:
XP.append(np.vstack(x_p))
if len_trend > 0:
XT.append(np.vstack(x_t))
Y.append(y)
timestamps_Y.append(self.timestamps[i])
i += 1
XC = np.asarray(XC)
        XP = np.asarray(XP)
        XT = np.asarray(XT)
        Y = np.asarray(Y)
        return XC, XP, XT, Y, timestamps_Y
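# Illustrative usage (not part of the original source):
#   sm = STMatrix(data, timestamps, T=48)
#   XC, XP, XT, Y, ts_Y = sm.create_dataset(len_closeness=3,
#                                           len_period=1, len_trend=1)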
# -*- coding:utf-8 -*-
import unittest
from simple_ml.evaluation import *
from simple_ml.evaluation import _check_input, _get_binary_confusion_matrix, _gen_binary_pairs
import numpy as np
from numpy.testing import assert_array_equal
from simple_ml.base.base_error import *
class TestEvaluation(unittest.TestCase):
def test_regression_plot(self):
x_train = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
[10, 11, 12]])
y_train = np.array([0.1, 0.2, 0.4, 0.6])
x_test = np.array([[1, 3, 4, 5]])
y_test = np.array([0.12])
self.assertRaises(FeatureNumberMismatchError, regression_plot, x_train, y_train, x_test, y_test, y_test)
x_test = np.array([[1,3,4]])
y_test = np.array([0.12, 0.13])
self.assertRaises(SampleNumberMismatchError, regression_plot, x_train, y_train, x_test, y_test, y_test)
def test_check_input(self):
y_true = np.array([[1, 2, 3], [4, 5, 6]])
y_predict = y_true.copy()
self.assertRaises(InputTypeError, _check_input, y_predict, y_true)
y_true = np.array([1, 2])
y_predict = np.array([1, 2, 3])
self.assertRaises(LabelLengthMismatchError, _check_input, y_predict, y_true)
def test_confusion_matrix(self):
y1 = np.array([1, 0, 0, 1])
y2 = np.array([1, 0, 0, 2])
self.assertRaises(ParamInputError, _get_binary_confusion_matrix, y1, y2)
y2 = np.array([1, 0, 0, 0])
confusion_matrix = _get_binary_confusion_matrix(y1, y2)
assert_array_equal(confusion_matrix, np.array([[1, 1],
[0, 2]]))
def test_classify_accuracy(self):
y1 = np.array([1, 0, 0, 1])
y2 = np.array([1, 0, 1, 1])
score = classify_accuracy(y1, y2)
self.assertEqual(score, 0.75)
def test_classify_precision(self):
y1 = np.array([1, 1, 0, 0, 1])
y2 = np.array([1, 0, 1, 0, 0])
score = classify_precision(y1, y2)
self.assertEqual(score, 1/3)
def test_classify_recall(self):
y1 = np.array([1, 1, 0, 0, 1])
y2 = np.array([1, 0, 1, 0, 0])
score = classify_recall(y1, y2)
self.assertEqual(score, 0.5)
def test_classify_f1(self):
y1 = np.array([1, 1, 0, 0, 1])
y2 = np.array([1, 0, 1, 0, 0])
score = classify_f1(y1, y2)
self.assertEqual(score, (2 * 0.5 / 3) / (0.5 + 1/3))
def test_gen_binary_paris(self):
y1 = np.array([1, 1])
y2 = np.array([1, 0])
res = list(_gen_binary_pairs(y1, y2))
        assert_array_equal(res, [(np.array([0, 0]), np.array([0, 1])),
                                 (np.array([1, 1]), np.array([1, 0]))])
import copy as cp
import pickle
import numpy as np
from mmcv.fileio import FileClient
from scipy.stats import mode
from ..builder import PIPELINES
from .augmentations import Flip
@PIPELINES.register_module()
class MySampleFrames:
"""Uniformly sample frames from the video.
To sample an n-frame clip from the video. UniformSampleFrames basically
divide the video into n segments of equal length and randomly sample one
frame from each segment. To make the testing results reproducible, a
random seed is set during testing, to make the sampling results
deterministic.
Required keys are "total_frames", "start_index" , added or modified keys
are "frame_inds", "clip_len", "frame_interval" and "num_clips".
Args:
clip_len (int): Frames of each sampled output clip.
num_clips (int): Number of clips to be sampled. Default: 1.
test_mode (bool): Store True when building test or validation dataset.
Default: False.
seed (int): The random seed used during test time. Default: 255.
"""
def __init__(self, clip_len, num_clips=1, test_mode=False, seed=255):
self.clip_len = clip_len
self.num_clips = num_clips
self.test_mode = test_mode
self.seed = seed
def _get_train_clips(self, num_frames, clip_len):
"""Uniformly sample indices for training clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
"""
assert self.num_clips == 1
if num_frames < clip_len:
start = np.random.randint(0, num_frames)
inds = np.arange(start, start + clip_len)
elif clip_len <= num_frames < 2 * clip_len:
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
offset = np.zeros(clip_len + 1, dtype=np.int64)
offset[inds] = 1
offset = np.cumsum(offset)
inds = basic + offset[:-1]
else:
bids = np.array(
[i * num_frames // clip_len for i in range(clip_len + 1)])
bsize = np.diff(bids)
bst = bids[:clip_len]
offset = np.random.randint(bsize)
inds = bst + offset
        return inds
def _get_test_clips(self, num_frames, clip_len):
"""Uniformly sample indices for testing clips.
Args:
num_frames (int): The number of frames.
clip_len (int): The length of the clip.
"""
np.random.seed(self.seed)
if num_frames < clip_len:
# Then we use a simple strategy
if num_frames < self.num_clips:
start_inds = list(range(self.num_clips))
else:
start_inds = [
i * num_frames // self.num_clips
for i in range(self.num_clips)
]
inds = np.concatenate(
[np.arange(i, i + clip_len) for i in start_inds])
elif clip_len <= num_frames < clip_len * 2:
all_inds = []
for i in range(self.num_clips):
basic = np.arange(clip_len)
inds = np.random.choice(
clip_len + 1, num_frames - clip_len, replace=False)
                offset = np.zeros(clip_len + 1, dtype=np.int64)
                offset[inds] = 1
                offset = np.cumsum(offset)
                inds = basic + offset[:-1]
                all_inds.append(inds)
import numpy as np
import pytest
import unittest
from desc.equilibrium import Equilibrium, EquilibriaFamily
from desc.grid import ConcentricGrid
from desc.profiles import PowerSeriesProfile, SplineProfile
from desc.geometry import (
FourierRZCurve,
FourierRZToroidalSurface,
ZernikeRZToroidalSection,
)
class TestConstructor(unittest.TestCase):
def test_defaults(self):
eq = Equilibrium()
self.assertEqual(eq.spectral_indexing, "ansi")
self.assertEqual(eq.NFP, 1)
self.assertEqual(eq.L, 1)
self.assertEqual(eq.M, 1)
self.assertEqual(eq.N, 0)
self.assertEqual(eq.sym, False)
self.assertTrue(eq.surface.eq(FourierRZToroidalSurface()))
self.assertIsInstance(eq.pressure, PowerSeriesProfile)
np.testing.assert_allclose(eq.p_l, [0])
self.assertIsInstance(eq.iota, PowerSeriesProfile)
np.testing.assert_allclose(eq.i_l, [0])
def test_supplied_objects(self):
pressure = SplineProfile([1, 2, 3])
iota = SplineProfile([2, 3, 4])
surface = ZernikeRZToroidalSection(spectral_indexing="ansi")
axis = FourierRZCurve([-1, 10, 1], [1, 0, -1], NFP=2)
eq = Equilibrium(pressure=pressure, iota=iota, surface=surface, axis=axis)
self.assertTrue(eq.pressure.eq(pressure))
self.assertTrue(eq.iota.eq(iota))
self.assertTrue(eq.surface.eq(surface))
self.assertTrue(eq.axis.eq(axis))
self.assertEqual(eq.spectral_indexing, "ansi")
self.assertEqual(eq.NFP, 2)
surface2 = FourierRZToroidalSurface(NFP=3)
eq2 = Equilibrium(surface=surface2)
self.assertEqual(eq2.NFP, 3)
self.assertEqual(eq2.axis.NFP, 3)
eq3 = Equilibrium(surface=surface, axis=None)
np.testing.assert_allclose(eq3.axis.R_n, [10])
def test_dict(self):
inputs = {
"L": 4,
"M": 2,
"N": 2,
"NFP": 3,
"sym": False,
"spectral_indexing": "ansi",
"surface": np.array(
[[0, 0, 0, 10, 0], [0, 1, 0, 1, 1], [0, -1, 1, 0.1, 0.1]]
),
"axis": np.array([[0, 10, 0]]),
"pressure": np.array([[0, 10], [2, 5]]),
"iota": np.array([[0, 1], [2, 3]]),
}
eq = Equilibrium(**inputs)
self.assertEqual(eq.L, 4)
self.assertEqual(eq.M, 2)
self.assertEqual(eq.N, 2)
self.assertEqual(eq.NFP, 3)
self.assertEqual(eq.spectral_indexing, "ansi")
np.testing.assert_allclose(eq.p_l, [10, 0, 5])
np.testing.assert_allclose(eq.i_l, [1, 0, 3])
self.assertIsInstance(eq.surface, FourierRZToroidalSurface)
np.testing.assert_allclose(
eq.Rb_lmn,
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
10.0,
1.0,
0.0,
0.0,
0.1,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
)
np.testing.assert_allclose(
eq.Zb_lmn,
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
1.0,
0.0,
0.0,
0.1,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
],
)
inputs["surface"] = np.array([[0, 0, 0, 10, 0], [1, 1, 0, 1, 1]])
eq = Equilibrium(**inputs)
self.assertEqual(eq.bdry_mode, "poincare")
np.testing.assert_allclose(
eq.Rb_lmn, [10.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
)
def test_asserts(self):
with pytest.raises(AssertionError):
eq = Equilibrium(L=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(M=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(N=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(NFP=3.4j)
with pytest.raises(ValueError):
eq = Equilibrium(surface=np.array([[1, 1, 1, 10, 2]]))
with pytest.raises(TypeError):
eq = Equilibrium(surface=FourierRZCurve())
with pytest.raises(TypeError):
eq = Equilibrium(axis=2)
with pytest.raises(ValueError):
eq = Equilibrium(surface=FourierRZToroidalSurface(NFP=1), NFP=2)
with pytest.raises(TypeError):
eq = Equilibrium(pressure="abc")
with pytest.raises(TypeError):
eq = Equilibrium(iota="def")
def test_supplied_coeffs(self):
R_lmn = np.random.random(3)
Z_lmn = np.random.random(3)
        L_lmn = np.random.random(3)
import numpy as np
import os
import cv2
import copy
import json
import dill
import psutil
import sys
class Layer_Dense:
def __init__(self, n_inputs, n_neurons, weight_regulator=0., bias_regulator=0.):
# Initialise weights and biases
self.weights = 0.0001 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1, n_neurons))
# Set regularisation strength
self.weight_regulator = weight_regulator
self.bias_regulator = bias_regulator
def forward(self, inputs, training):
# Save input values
self.inputs = inputs
# Calculate output values
self.outputs = np.dot(inputs, self.weights) + self.biases
def backward(self, dvalues):
# Gradients on parameters
self.dweights = np.dot(self.inputs.T, dvalues)
self.dbiases = np.sum(dvalues, axis=0, keepdims=True)
# Gradients on regularisation
self.dweights += 2 * self.weight_regulator * self.weights
self.dbiases += 2 * self.bias_regulator * self.biases
# Gradient on values
self.dinputs = np.dot(dvalues, self.weights.T)
class Layer_Dropout:
def __init__(self, rate):
# Invert the rate
self.rate = 1 - rate
def forward(self, inputs, training):
# Save input values
self.inputs = inputs
# If not in training mode deactivate dropout
if not training:
self.outputs = inputs.copy()
return
# Generate and save scaled mask
self.binary_mask = np.random.binomial(1, self.rate, size=inputs.shape) / self.rate
# Apply mask to output values
self.outputs = inputs * self.binary_mask
def backward(self, dvalues):
# Gradient on values
self.dinputs = dvalues * self.binary_mask
class Layer_Input:
def forward(self, inputs, training):
# No calculation needed. Mark inputs as outputs
self.outputs = inputs
class Activation_ReLU:
def forward(self, inputs, training):
# Save input values
self.inputs = inputs
# Calculate output values
self.outputs = np.maximum(0, inputs)
def backward(self, dvalues):
# Copy dvalues
self.dinputs = dvalues.copy()
# Gradient on values
self.dinputs[self.inputs <= 0] = 0
class Activation_Sigmoid:
def forward(self, inputs, training):
# Save input values
self.inputs = inputs
# Calculate output values
self.outputs = 1 / (1 + np.exp(-inputs))
def backward(self, dvalues):
# Gradient on values
self.dinputs = dvalues * (1 - self.outputs) * self.outputs
def predictions(self, outputs):
# Round output values to 1 or 0
return (outputs > 0.5) * 1
class Optimiser_Adam:
def __init__(self, learning_rate=0.001, decay=0., epsilon=1e-7, beta_1=0.9, beta_2=0.999):
# Initialise optimiser settings
self.learning_rate = learning_rate
self.current_learning_rate = learning_rate
self.decay = decay
self.iterations = 0
self.epsilon = epsilon
self.beta_1 = beta_1
self.beta_2 = beta_2
def pre_update_params(self):
# Update learning rate before any parameter updates
self.current_learning_rate = self.learning_rate * (1. / (1. + self.decay * self.iterations))
def update_params(self, layer):
# Create cache array filled with 0 if they do not exist
if not hasattr(layer, "weight_cache"):
layer.weight_momentum = np.zeros_like(layer.weights)
layer.weight_cache = np.zeros_like(layer.weights)
layer.bias_momentum = np.zeros_like(layer.biases)
layer.bias_cache = np.zeros_like(layer.biases)
# Update momentum with current gradients
layer.weight_momentum = self.beta_1 * layer.weight_momentum + (1 - self.beta_1) * layer.dweights
layer.bias_momentum = self.beta_1 * layer.bias_momentum + (1 - self.beta_1) * layer.dbiases
# Get corrected momentum
weight_momentum_corrected = layer.weight_momentum / (1 - self.beta_1 ** (self.iterations + 1))
bias_momentum_corrected = layer.bias_momentum / (1 - self.beta_1 ** (self.iterations + 1))
# Update cache with squared current gradients
layer.weight_cache = self.beta_2 * layer.weight_cache + (1 - self.beta_2) * layer.dweights ** 2
layer.bias_cache = self.beta_2 * layer.bias_cache + (1 - self.beta_2) * layer.dbiases ** 2
# Get corrected cache
weight_cache_corrected = layer.weight_cache / (1 - self.beta_2 ** (self.iterations + 1))
bias_cache_corrected = layer.bias_cache / (1 - self.beta_2 ** (self.iterations + 1))
# Update parameters
layer.weights -= self.current_learning_rate * weight_momentum_corrected / (np.sqrt(weight_cache_corrected) + self.epsilon)
layer.biases -= self.current_learning_rate * bias_momentum_corrected / (np.sqrt(bias_cache_corrected) + self.epsilon)
def post_update_params(self):
# Update iterations after any parameter updates
self.iterations += 1
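# Illustrative wiring (not part of the original script): one forward/backward
# pass through a dense layer and ReLU, followed by an Adam update.
#
#   dense = Layer_Dense(2, 4)
#   relu = Activation_ReLU()
#   optimiser = Optimiser_Adam(learning_rate=0.01)
#   dense.forward(np.random.randn(8, 2), training=True)
#   relu.forward(dense.outputs, training=True)
#   relu.backward(np.ones_like(relu.outputs))   # stand-in upstream gradient
#   dense.backward(relu.dinputs)
#   optimiser.pre_update_params()
#   optimiser.update_params(dense)
#   optimiser.post_update_params()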
class Loss:
def regularisation_loss(self):
regularisation_loss = 0
# Iterate over all trainable layers to calculate regularisation loss
for layer in self.trainable_layers:
regularisation_loss += layer.weight_regulator * np.sum(layer.weights * layer.weights)
regularisation_loss += layer.bias_regulator * np.sum(layer.biases * layer.biases)
return regularisation_loss
def remember_trainable_layers(self, trainable_layers):
# Set/remember trainable layers
self.trainable_layers = trainable_layers
def calculate(self, outputs, target_outputs):
# Calculate loss for each sample
sample_losses = self.forward(outputs, target_outputs)
# Calculate mean loss over all samples
data_loss = np.mean(sample_losses)
# Update accumulated
self.accumulated_sum += np.sum(sample_losses)
self.accumulated_count += sample_losses.size
return data_loss, self.regularisation_loss()
def calculate_accumulated(self):
# Calculate mean loss over whole dataset
data_loss = self.accumulated_sum / self.accumulated_count
return data_loss, self.regularisation_loss()
def new_pass(self):
# Reset variables for accumulated loss
self.accumulated_sum = 0
self.accumulated_count = 0
class Loss_BinaryCrossentropy(Loss):
def forward(self, outputs, target_outputs):
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
outputs_clipped = np.clip(outputs, 1e-7, 1 - 1e-7)
# Calculate sample-wise loss
sample_losses = -(target_outputs * np.log(outputs_clipped) + (1 - target_outputs) * np.log(1 - outputs_clipped))
sample_losses = np.mean(sample_losses, axis=-1)
# Return losses
return sample_losses
def backward(self, dvalues, target_outputs):
# Clip data to prevent division by 0
# Clip both sides to not drag mean towards any value
clipped_dvalues = np.clip(dvalues, 1e-7, 1 - 1e-7)
# Calculate gradient
self.dinputs = -(target_outputs / clipped_dvalues - (1 - target_outputs) / (1 - clipped_dvalues)) / len(dvalues[0])
# Normalise gradient
self.dinputs = self.dinputs / len(dvalues)
class Loss_MeanSquaredError(Loss):
def forward(self, outputs, target_outputs):
# Calculate loss
sample_losses = np.mean((target_outputs - outputs) ** 2, axis=-1)
# Return losses
return sample_losses
def backward(self, dvalues, target_outputs):
# Gradient on values
self.dinputs = -2 * (target_outputs - dvalues) / len(dvalues[0])
# Normalise gradient
self.dinputs = self.dinputs / len(dvalues)
class Loss_UnbalancedSegmentation(Loss):
def forward(self, outputs, target_outputs):
# Normalise outputs
outputs = target_outputs - outputs
# Calculate sample-wise loss
sample_losses = 1 / 32 * (outputs + 2) ** 4 - (outputs + 2) + 1.5
        sample_losses = np.mean(sample_losses, axis=-1)
        # Return losses
        return sample_losses
# -*- coding: utf-8 -*-
"""
Created on Mon May 15 20:37:33 2017
@author: DX
"""
'''
Created on 15-May-2017
@author: aii32199
'''
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets import make_blobs
#Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
# Compute clustering with MiniBatchKMeans
# Plot result
fig = plt.figure(figsize=(12, 8))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = np.sort(k_means.cluster_centers_, axis=0)
#k_means_cluster_centers = np.load('E:/PyDevWorkSpaceTest/Ensembles/Chapter_01/data/kmenasCenter.npy')
# np.save('E:/PyDevWorkSpaceTest/Ensembles/Chapter_01/data/kmenasCenter.npy',k_means_cluster_centers)
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
ax = fig.add_subplot(1, 2,1)
# ax.plot(X[:, 0], X[:, 1], 'w',markerfacecolor='k', marker='.',markersize=8)
# KMeans
ax = fig.add_subplot(1,2,1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',markerfacecolor=col, marker='.',markersize=8)
# plt.text(X[my_members, 0], X[my_members, 1], '%i' % (k))
ax.plot(cluster_center[0], cluster_center[1], marker='o', markerfacecolor=col,
markeredgecolor='k', markersize=10)
plt.text(cluster_center[0], cluster_center[1], 'Cluster: %i' % (k))
# ax.set_title('KMeans')
test_point = [-1.3,1.3]
ax.plot(test_point[0],test_point[1],marker='x',markerfacecolor='r',markersize=12)
#plt.text(test_point[0],test_point[1], 'point:%.1f,%.1f' % (test_point[0],test_point[1]))
#Check out its distance from each of the cluster
dist = []
for center in k_means_cluster_centers:
dist.append((sum(np.square((center) - (test_point)))))
min_idx = np.argmin(dist)
test_point = [-1.3,1.3]
ax = fig.add_subplot(1,2,2)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',markerfacecolor=col, marker='.',markersize=8)
# plt.text(X[my_members, 0], X[my_members, 1], '%i' % (k))
ax.plot(cluster_center[0], cluster_center[1], marker='o', markerfacecolor=col,
markeredgecolor='k', markersize=10)
plt.text(cluster_center[0], cluster_center[1], 'Cluster: %i' % (k))
ax.plot(test_point[0],test_point[1],marker='x',markerfacecolor='r',markersize=8)
    plt.text(test_point[0],test_point[1], '%i' % (min_idx))
print('distances are: '+ str(dist))
print('Minimum distance index: '+str(min_idx))
#Supervised algorithm
from sklearn.ensemble import RandomForestClassifier as rf
from sklearn.metrics import log_loss
y = k_means_labels
X_train, y_train = X[:2000], y[:2000]
X_valid, y_valid = X[2000:2500], y[2000:2500]
X_train_valid, y_train_valid = X[:2500], y[:2500]
X_test, y_test = X[2500:], y[2500:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = rf(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
pred_label = np.argmax(clf_probs,axis=1)
# score = log_loss(y_test, clf_probs)
nnz = np.shape(y_test)[0] - np.count_nonzero(pred_label - y_test)
acc = 100*nnz/np.shape(y_test)[0]
print('accuracy is: '+str(acc))
clf_probs = clf.predict_proba(np.array(test_point).reshape(1, -1))
pred_label = np.argmax(clf_probs, axis=1)
import cv2
import numpy as np
import torch
def letterbox_image(image, size):
ih, iw, _ = np.shape(image)
w, h = size
scale = min(w/iw, h/ih)
nw = int(iw*scale)
nh = int(ih*scale)
image = cv2.resize(image, (nw,nh), interpolation=cv2.INTER_LINEAR)
new_image = np.ones([size[1],size[0],3])*128
new_image[(h-nh)//2:nh+(h-nh)//2, (w-nw)//2:nw+(w-nw)//2] = image
return new_image
def retinaface_correct_boxes(result, input_shape, image_shape):
new_shape = image_shape*np.min(input_shape/image_shape)
offset = (input_shape-new_shape)/2./input_shape
scale = input_shape/new_shape
scale_for_boxs = [scale[1], scale[0], scale[1], scale[0]]
scale_for_landmarks = [scale[1], scale[0], scale[1], scale[0], scale[1], scale[0], scale[1], scale[0], scale[1], scale[0]]
offset_for_boxs = [offset[1], offset[0], offset[1],offset[0]]
offset_for_landmarks = [offset[1], offset[0], offset[1], offset[0], offset[1], offset[0], offset[1], offset[0], offset[1], offset[0]]
result[:,:4] = (result[:,:4] - np.array(offset_for_boxs)) * np.array(scale_for_boxs)
result[:,5:] = (result[:,5:] - np.array(offset_for_landmarks)) * np.array(scale_for_landmarks)
return result
def point_form(boxes):
#------------------------------#
# Get the upper left and lower right corners of the box
#------------------------------#
return torch.cat((boxes[:, :2] - boxes[:, 2:]/2,
boxes[:, :2] + boxes[:, 2:]/2), 1)
def center_size(boxes):
#------------------------------#
# Get the center and width and height of the box
#------------------------------#
    return torch.cat(((boxes[:, 2:] + boxes[:, :2])/2,
                      boxes[:, 2:] - boxes[:, :2]), 1)
def intersect(box_a, box_b):
# Calculates the intersection area of all real boxes and priori boxes
A = box_a.size(0)
B = box_b.size(0)
#------------------------------#
# Gets the upper left corner of the intersecting rectangle
#------------------------------#
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
#------------------------------#
# Gets the bottom right corner of the intersecting rectangle
#------------------------------#
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy), min=0)
#-------------------------------------#
# Calculate the coincidence area between the priori box and all the real boxes
#-------------------------------------#
return inter[:, :, 0] * inter[:, :, 1]
def jaccard(box_a, box_b):
inter = intersect(box_a, box_b)
#-------------------------------------#
# Calculate the respective area of the anchor and the bounding box
#-------------------------------------#
area_a = ((box_a[:, 2]-box_a[:, 0]) *
(box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2]-box_b[:, 0]) *
(box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
#-------------------------------------#
# The intersection and union ratio of each bounding box to A anchor is [A,B]
#-------------------------------------#
return inter / union # [A,B]
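# Illustrative check (not part of the original file): two unit squares
# overlapping by half give IoU = 0.5 / 1.5 = 1/3.
#
#   a = torch.tensor([[0.0, 0.0, 1.0, 1.0]])
#   b = torch.tensor([[0.5, 0.0, 1.5, 1.0]])
#   jaccard(a, b)   # tensor([[0.3333]])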
def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx):
#----------------------------------------------#
# Calculate the degree of coincidence between the anchor and the bounding box
#----------------------------------------------#
overlaps = jaccard(
truths,
point_form(priors)
)
#----------------------------------------------#
# The best degree of overlap between all bounding boxes and anchor
# best_prior_overlap [truth_box,1]
# best_prior_idx [truth_box,1]
#----------------------------------------------#
best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True)
best_prior_idx.squeeze_(1)
best_prior_overlap.squeeze_(1)
best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True)
best_truth_idx.squeeze_(0)
best_truth_overlap.squeeze_(0)
best_truth_overlap.index_fill_(0, best_prior_idx, 2)
for j in range(best_prior_idx.size(0)):
best_truth_idx[best_prior_idx[j]] = j
matches = truths[best_truth_idx]
    # Shape: [num_priors] -- pull out the label matched to each anchor
conf = labels[best_truth_idx]
matches_landm = landms[best_truth_idx]
conf[best_truth_overlap < threshold] = 0
loc = encode(matches, priors, variances)
landm = encode_landm(matches_landm, priors, variances)
# [num_priors, 4]
loc_t[idx] = loc
# [num_priors]
conf_t[idx] = conf
# [num_priors, 10]
landm_t[idx] = landm
def encode(matched, priors, variances):
g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]
g_cxcy /= (variances[0] * priors[:, 2:])
g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
g_wh = torch.log(g_wh) / variances[1]
return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]
def encode_landm(matched, priors, variances):
matched = torch.reshape(matched, (matched.size(0), 5, 2))
priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2)
priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2)
g_cxcy = matched[:, :, :2] - priors[:, :, :2]
g_cxcy /= (variances[0] * priors[:, :, 2:])
g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1)
return g_cxcy
def log_sum_exp(x):
x_max = x.data.max()
return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max
def decode(loc, priors, variances):
boxes = torch.cat((priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def decode_landm(pre, priors, variances):
landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
), dim=1)
return landms
def non_max_suppression(boxes, conf_thres=0.5, nms_thres=0.3):
detection = boxes
mask = detection[:, 4] >= conf_thres
detection = detection[mask]
if not np.shape(detection)[0]:
return []
best_box = []
scores = detection[:, 4]
arg_sort = np.argsort(scores)[::-1]
detection = detection[arg_sort]
while np.shape(detection)[0]>0:
best_box.append(detection[0])
if len(detection) == 1:
break
ious = iou(best_box[-1], detection[1:])
detection = detection[1:][ious<nms_thres]
return np.array(best_box)
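# Illustrative usage (not part of the original file): rows are
# [x1, y1, x2, y2, score, ...landmarks]; the lower-scoring of two heavily
# overlapping boxes is suppressed.
#
#   dets = np.array([[0, 0, 10, 10, 0.9],
#                    [1, 1, 10, 10, 0.8],
#                    [20, 20, 30, 30, 0.7]])
#   non_max_suppression(dets, conf_thres=0.5, nms_thres=0.3)  # keeps rows 0 and 2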
def iou(b1,b2):
b1_x1, b1_y1, b1_x2, b1_y2 = b1[0], b1[1], b1[2], b1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = b2[:, 0], b2[:, 1], b2[:, 2], b2[:, 3]
inter_rect_x1 = np.maximum(b1_x1, b2_x1)
inter_rect_y1 = np.maximum(b1_y1, b2_y1)
inter_rect_x2 = np.minimum(b1_x2, b2_x2)
inter_rect_y2 = np.minimum(b1_y2, b2_y2)
    inter_area = np.maximum(inter_rect_x2 - inter_rect_x1, 0) * \
                 np.maximum(inter_rect_y2 - inter_rect_y1, 0)
    b1_area = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
    b2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
    return inter_area / np.maximum(b1_area + b2_area - inter_area, 1e-6)
import numpy as np
import timeit
from pemfc import simulation
import sys
np.set_printoptions(threshold=sys.maxsize, linewidth=10000,
precision=9, suppress=True)
np.seterr(all='raise')
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, run_module_suite
from skimage.feature import ORB
from skimage import data
from skimage.color import rgb2gray
img = rgb2gray(data.lena())
def test_keypoints_orb_desired_no_of_keypoints():
detector_extractor = ORB(n_keypoints=10, fast_n=12, fast_threshold=0.20)
detector_extractor.detect(img)
exp_rows = np.array([ 435. , 435.6 , 376. , 455. , 434.88, 269. ,
375.6 , 310.8 , 413. , 311.04])
exp_cols = np.array([ 180. , 180. , 156. , 176. , 180. , 111. ,
156. , 172.8, 70. , 172.8])
exp_scales = np.array([ 1. , 1.2 , 1. , 1. , 1.44 , 1. ,
1.2 , 1.2 , 1. , 1.728])
exp_orientations = np.array([-175.64733392, -167.94842949, -148.98350192,
-142.03599837, -176.08535837, -53.08162354,
-150.89208271, 97.7693776 , -173.4479964 ,
38.66312042])
exp_response = np.array([ 0.96770745, 0.81027306, 0.72376257,
0.5626413 , 0.5097993 , 0.44351774,
0.39154173, 0.39084861, 0.39063076,
0.37602487])
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
assert_almost_equal(exp_scales, detector_extractor.scales)
assert_almost_equal(exp_response, detector_extractor.responses)
assert_almost_equal(exp_orientations,
np.rad2deg(detector_extractor.orientations), 5)
detector_extractor.detect_and_extract(img)
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
def test_keypoints_orb_less_than_desired_no_of_keypoints():
detector_extractor = ORB(n_keypoints=15, fast_n=12,
fast_threshold=0.33, downscale=2, n_scales=2)
detector_extractor.detect(img)
exp_rows = np.array([ 67., 247., 269., 413., 435., 230., 264.,
330., 372.])
exp_cols = np.array([ 157., 146., 111., 70., 180., 136., 336.,
148., 156.])
exp_scales = np.array([ 1., 1., 1., 1., 1., 2., 2., 2., 2.])
exp_orientations = np.array([-105.76503839, -96.28973044, -53.08162354,
-173.4479964 , -175.64733392, -106.07927215,
-163.40016243, 75.80865813, -154.73195911])
exp_response = np.array([ 0.13197835, 0.24931321, 0.44351774,
0.39063076, 0.96770745, 0.04935129,
0.21431068, 0.15826555, 0.42403573])
assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
assert_almost_equal(exp_scales, detector_extractor.scales)
assert_almost_equal(exp_response, detector_extractor.responses)
assert_almost_equal(exp_orientations,
np.rad2deg(detector_extractor.orientations), 5)
detector_extractor.detect_and_extract(img)
    assert_almost_equal(exp_rows, detector_extractor.keypoints[:, 0])
    assert_almost_equal(exp_cols, detector_extractor.keypoints[:, 1])
import numpy as np
from scipy.stats import linregress
from matplotlib import pyplot as pl
def circles_boxcount(r = 1, n = 10):
    w = np.logspace(np.log10(r*0.001), np.log10(r*0.1), n)
import unittest
import sys, os, shutil, tempfile
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import coremltools
from os.path import dirname
from tensorflow.python.tools.freeze_graph import freeze_graph
import tfcoreml as tf_converter
from coremltools._deps import HAS_KERAS2_TF
if HAS_KERAS2_TF:
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Conv2D, Conv1D, Flatten, BatchNormalization, Conv2DTranspose, SeparableConv2D
from keras.layers import MaxPooling2D, AveragePooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D
from keras.layers import MaxPooling1D, AveragePooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.layers import Embedding, Input, Permute, Reshape, RepeatVector, Dropout
from keras.layers import Add, Multiply, Concatenate, Dot, Maximum, Average
from keras.layers import add, multiply, concatenate, dot, maximum, average
from keras.layers import ZeroPadding2D, UpSampling2D, Cropping2D
from keras.layers import ZeroPadding1D, UpSampling1D, Cropping1D
from keras.layers.core import SpatialDropout1D, SpatialDropout2D
from keras.applications.mobilenet import DepthwiseConv2D
def _tf_transpose(x, is_sequence=False):
if not hasattr(x, "shape"):
return x
if len(x.shape) == 4:
# [Batch, Height, Width, Channels] --> [Batch, Channels, Height, Width]
x = np.transpose(x, [0,3,1,2])
return np.expand_dims(x, axis=0)
elif len(x.shape) == 3:
# We only deal with non-recurrent networks for now
# [Batch, (Sequence) Length, Channels] --> [1,B, Channels, 1, Seq]
# [0,1,2] [0,2,1]
return np.transpose(x, [0,2,1])[None,:,:,None,:]
elif len(x.shape) == 2:
if is_sequence: # (N,S) --> (S,N,1,)
return x.reshape(x.shape[::-1] + (1,))
else: # (N,C) --> (N,C,1,1)
return x.reshape((1, ) + x.shape) # Dense
elif len(x.shape) == 1:
if is_sequence: # (S) --> (S,N,1,1,1)
return x.reshape((x.shape[0], 1, 1))
else:
return x
else:
return x
def _convert_to_coreml(tf_model_path, mlmodel_path, input_name_shape_dict,
output_names):
""" Convert and return the coreml model from the Tensorflow
"""
model = tf_converter.convert(tf_model_path=tf_model_path,
mlmodel_path=mlmodel_path,
output_feature_names=output_names,
input_name_shape_dict=input_name_shape_dict)
return model
def _generate_data(input_shape, mode = 'random'):
"""
Generate some random data according to a shape.
"""
if input_shape is None or len(input_shape) == 0:
return 0.5
if mode == 'zeros':
X = np.zeros(input_shape)
elif mode == 'ones':
X = np.ones(input_shape)
elif mode == 'linear':
X = np.array(range(np.product(input_shape))).reshape(input_shape)*1.0
elif mode == 'random':
X = np.random.rand(*input_shape)
elif mode == 'random_zero_mean':
X = np.random.rand(*input_shape)-0.5
return X
class TFNetworkTest(unittest.TestCase):
@classmethod
def setUpClass(self):
""" Set up the unit test by loading common utilities.
"""
K.set_learning_phase(0)
def _simple_freeze(self, input_graph, input_checkpoint, output_graph,
output_node_names):
# output_node_names is a string of names separated by comma
freeze_graph(input_graph=input_graph,
input_saver="",
input_binary=True,
input_checkpoint=input_checkpoint,
output_node_names=output_node_names,
restore_op_name="save/restore_all",
filename_tensor_name="save/Const:0",
output_graph=output_graph,
clear_devices=True,
initializer_nodes="")
def _test_keras_model(self, model,
data_mode = 'random', delta = 1e-2, use_cpu_only = False,
one_dim_seq_flags = None, has_variables = True):
"""
Freezes the Keras model's backend TF graph, converts it to Core ML,
and compares the predictions of the two runtimes.
"""
# Some file processing
model_dir = tempfile.mkdtemp()
graph_def_file = os.path.join(model_dir, 'tf_graph.pb')
checkpoint_file = os.path.join(model_dir, 'tf_model.ckpt')
frozen_model_file = os.path.join(model_dir, 'tf_frozen.pb')
coreml_model_file = os.path.join(model_dir, 'coreml_model.mlmodel')
input_shape = [i for i in model.input_shape]
for i, d in enumerate(input_shape):
if d is None:
input_shape[i] = 1
input_tensor_shapes = {model.input.name : input_shape}
output_node_names = [model.output.name[:-2]]
tf_graph = K.get_session().graph
tf.reset_default_graph()
if has_variables:
with tf_graph.as_default() as g:
saver = tf.train.Saver()
with tf.Session(graph = tf_graph) as sess:
sess.run(tf.global_variables_initializer())
feed_dict = {}
for in_tensor_name in input_tensor_shapes:
in_tensor_shape = input_tensor_shapes[in_tensor_name]
feed_dict[in_tensor_name] = _generate_data(in_tensor_shape, data_mode)
# run the result
fetches = [tf_graph.get_operation_by_name(name).outputs[0] for name in \
output_node_names]
result = sess.run(fetches, feed_dict=feed_dict)
# save graph definition somewhere
tf.train.write_graph(sess.graph, model_dir, graph_def_file, as_text = False)
# save the weights
if has_variables:
saver.save(sess, checkpoint_file)
K.clear_session()
# freeze the graph
if has_variables:
self._simple_freeze(
input_graph=graph_def_file,
input_checkpoint=checkpoint_file,
output_graph=frozen_model_file,
output_node_names=",".join(output_node_names))
else:
frozen_model_file = graph_def_file
# convert the tensorflow model
output_tensor_names = [name + ':0' for name in output_node_names]
coreml_model = _convert_to_coreml(
tf_model_path=frozen_model_file,
mlmodel_path=coreml_model_file,
input_name_shape_dict=input_tensor_shapes,
output_names=output_tensor_names)
# evaluate coreml
coreml_inputs = {}
for idx, in_tensor_name in enumerate(input_tensor_shapes):
in_shape = input_tensor_shapes[in_tensor_name]
coreml_in_name = in_tensor_name.replace(':', '__').replace('/', '__')
if one_dim_seq_flags is None:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name]).copy()
else:
coreml_inputs[coreml_in_name] = _tf_transpose(
feed_dict[in_tensor_name], one_dim_seq_flags[idx]).copy()
coreml_output = coreml_model.predict(coreml_inputs, useCPUOnly=use_cpu_only)
for idx, out_name in enumerate(output_node_names):
tp = _tf_transpose(result[idx]).flatten()
out_tensor_name = out_name.replace('/','__') +'__0'
cp = coreml_output[out_tensor_name].flatten()
self.assertEqual(len(tp), len(cp))
for i in range(len(tp)):
# compare as a relative error so large activations do not dominate
max_den = max(1.0, tp[i], cp[i])
self.assertAlmostEqual(tp[i]/max_den, cp[i]/max_den, delta=delta)
# Cleanup files - models on disk no longer useful
if os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not HAS_KERAS2_TF, 'Missing keras. Skipping tests.')
class KerasBasicNumericCorrectnessTest(TFNetworkTest):
def test_dense_softmax(self):
np.random.seed(1987)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation='softmax'))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
#Test it
self._test_keras_model(model)
def test_dense_elu(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation='elu'))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_keras_model(model)
def test_dense_tanh(self):
np.random.seed(1988)
# Define a model
model = Sequential()
model.add(Dense(32, input_shape=(32,), activation='tanh'))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_keras_model(model)
def test_housenet_random(self):
np.random.seed(1988)
num_hidden = 2
num_features = 3
# Define a model
model = Sequential()
model.add(Dense(num_hidden, input_dim = num_features))
model.add(Activation('relu'))
model.add(Dense(1, input_dim = num_features))
# Set some random weights
model.set_weights([np.random.rand(*w.shape) for w in model.get_weights()])
# Test the keras model
self._test_keras_model(model)
def test_tiny_conv_random(self):
| np.random.seed(1988) | numpy.random.seed |
import numpy as np
from datetime import datetime
cnn_layer_types = ["CONV", "MAXPOOL"]
# ( layer type , x_length , y_length , zero_padding, no of mask ) zero_padding and no of mask not applicable for MAXPOOL
cnn_layer_info = []
ndelst = [] # node counts for the fully connected layers
inpt_dim = [] # input matrix dimensions
mask_depth = [] # contains the mask depths of each layer
epoch_itr = optLyr = hydLyr = 0
lrn_rate = nrm_fac = 0.0
read_wt = 0
instructions_file = "instructions.txt"
data_input_file = "data_input_train.txt"
data_output_file = "data_output_train.txt"
weight_file = ""
f_ins = open(instructions_file, "r")
lns = f_ins.readlines()
# reading the instructions from the instruction files
try:
lrn_rate = float(lns[0].strip(' \n')) # first line should be learning rate
epoch_itr = int(lns[1].strip(' \n')) # second line should contain no of iterations
inpt_dim = lns[2].strip(' \n').split(' ') # third line should contain the input matrix dimensions
inpt_dim = [int(inpt_dim[i]) for i in range(len(inpt_dim))]
if (len(inpt_dim) == 3):
mask_depth.append(inpt_dim[2])
else:
mask_depth.append(1)
optLyr = int(lns[3].strip(' \n')) # fourth line should contain no of nodes in output layer
nrm_fac = float(lns[4].strip(' \n')) # fifth line should contain normalization factor
hydLyr = int(lns[5].strip(' \n')) # sixth line should contain no of hidden layer
ndelst.extend(
[int(x) for x in lns[6].strip(' \n').split(' ')]) # seventh line should contain no of nodes in hidden layer
ndelst.append(optLyr)
read_wt_ln = lns[7].strip(' \n')
if (int(read_wt_ln[0]) == 1):
weight_file = (read_wt_ln.split(' '))[1]
read_wt = 1
for i in range(8, len(lns)): # From eighth line the convolutions and pooling instructions are given
intgs = lns[i].strip(' \n').split(' ')
operate = cnn_layer_types.index(intgs[0])
if (operate == 0): # check for convolution or pooling
cnn_layer_info.append((operate, int(intgs[1]), int(intgs[2]), int(intgs[3]), int(intgs[4])))
mask_depth.append(int(intgs[4]))
else:
cnn_layer_info.append((operate, int(intgs[1]), int(intgs[2])))
mask_depth.append(mask_depth[-1])
except:
print("Wrong Instruction list .. Exitting code")
exit(1)
f_ins.close()
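# Illustrative instructions.txt layout (an assumption reconstructed from the
# parser above; not shipped with the original code):
#   0.01            <- learning rate
#   100             <- epoch iterations
#   28 28 1         <- input dimensions: rows cols [depth]
#   10              <- output layer nodes
#   255.0           <- normalization factor
#   2               <- number of hidden layers
#   64 32           <- nodes per hidden layer
#   0               <- "1 <weight_file>" to reload stored weights, else 0
#   CONV 3 3 1 8    <- type x_len y_len zero_pad n_masks
#   MAXPOOL 2 2     <- type x_len y_len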
# checking whether convolution operations are correct or not
def check_input():
row, col = inpt_dim[0], inpt_dim[1]
for i in range(len(cnn_layer_info)):
pad = 0 # the pad applied
if (cnn_layer_info[i][0] == 0):
pad = cnn_layer_info[i][3]
row = row - cnn_layer_info[i][1] + 2 * pad + 1
col = col - cnn_layer_info[i][2] + 2 * pad + 1
return row, col
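# Added note: check_input applies the stride-1 spatial-size rule per layer,
# out = in - kernel + 2*pad + 1. E.g. a 28x28 input through CONV 3x3 pad 1
# stays 28x28, and a following MAXPOOL 2x2 (pad 0) yields 27x27.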
row, col = check_input()
if (row <= 0 or col <= 0): # row and column should be positive to be valid
print("Invalid Convolution and pooling layers .. Exitting code")
exit(1)
inpLyr = row * col * mask_depth[-1] # no of input nodes for the fully connected layer
ndelst.insert(0, inpLyr)
# printing the layer informations
print(" Learn Rate = " + str(lrn_rate))
print(" No of epoch iterations = " + str(epoch_itr))
print(" No of input layer node = " + str(inpLyr))
print(" No of output layer node = " + str(optLyr))
print(" No of normalization = " + str(nrm_fac))
for i in range(len(cnn_layer_info)):
pad = 0
no_mask = None
if (cnn_layer_info[i][0] == 0):
pad = cnn_layer_info[i][3]
no_mask = cnn_layer_info[i][4]
print(" " + cnn_layer_types[cnn_layer_info[i][0]] + " " + str(cnn_layer_info[i][1]) + "X" + str(
cnn_layer_info[i][2]) + " pad " + str(pad) + " no of masks " + str(no_mask))
print(" No of Hidden layers = " + str(hydLyr))
print(" No of nodes in the hidden layers = ", end="")
for i in range(1, len(ndelst) - 1):
print(str(ndelst[i]), end=" ")
print("")
train_input = []
train_input_data = []
train_output = []
no_of_input_data = 0
# accepting input in the specified format and also the output
f_in = open(data_input_file, "r")
f_out = open(data_output_file, "r")
for lns in f_in:
intgs = [(float(x)) for x in lns.strip(' \n').split()]
if (len(intgs) == 0):
train_input.append( | np.array(train_input_data) | numpy.array |
'''
Load image data from the local filesystem.
'''
import os,random
import numpy as np
from PIL import Image
import pickle
import time
import uuid
def resize_image(in_image, new_width, new_height, out_image=None,
resize_mode=Image.ANTIALIAS):
""" Resize an image.
Arguments:
in_image: `PIL.Image`. The image to resize.
new_width: `int`. The image new width.
new_height: `int`. The image new height.
out_image: `str`. If specified, save the image to the given path.
resize_mode: `PIL.Image.mode`. The resizing mode.
Returns:
`PIL.Image`. The resize image.
"""
img = in_image.resize((new_width, new_height), resize_mode)
if out_image:
img.save(out_image)
return img
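# Hedged usage sketch (added; uses an in-memory PIL image so nothing is read
# from disk):
def _resize_image_example():
    img = Image.new('RGB', (640, 480))
    assert resize_image(img, 227, 227).size == (227, 227)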
def to_categorical(y, nb_classes):
""" to_categorical.
Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy.
Arguments:
y: `array`. Class vector to convert.
nb_classes: `int`. Total number of classes.
"""
y = np.asarray(y, dtype='int32')
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
Y[np.arange(len(y)),y] = 1.
return Y
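# Hedged sketch (added): one-hot encoding three labels over four classes.
def _to_categorical_example():
    Y = to_categorical([0, 2, 3], 4)
    assert Y.shape == (3, 4)
    assert Y[1, 2] == 1.0 and Y[1, 0] == 0.0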
def shuffle(*arrs):
""" shuffle.
Shuffle given arrays at unison, along first axis.
Arguments:
*arrs: Each array to shuffle at unison.
Returns:
Tuple of shuffled arrays.
"""
arrs = list(arrs)
for i, arr in enumerate(arrs):
assert len(arrs[0]) == len(arrs[i])
arrs[i] = np.array(arr)
p = np.random.permutation(len(arrs[0]))
return tuple(arr[p] for arr in arrs)
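# Hedged sketch (added): parallel arrays stay row-aligned after shuffling.
def _shuffle_example():
    xs, ys = shuffle(np.arange(5), np.arange(5) * 10)
    assert np.all(ys == xs * 10)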
def get_img_channel(image_path):
"""
Load a image and return the channel of the image
:param image_path:
:return: the channel of the image
"""
img = load_image(image_path)
img = pil_to_nparray(img)
try:
channel = img.shape[2]
except:
channel = 1
return channel
def ListPic(dir,fileNameList,samples,targets,label,flags=None,filter_channel=False):
for sample in fileNameList:
if not flags or any(flag in sample for flag in flags):
if filter_channel:
if get_img_channel(os.path.join(dir, sample)) != 3:
continue
samples.append(os.path.join(dir, sample))
targets.append(label)
def ListDir(dir,dirList,samples,targets,label,flags=None,filter_channel=False):
for dirName in dirList:
dirPath = os.path.join(dir,dirName)
walk = os.walk(dirPath).__next__()
ListDir(dir=dirPath, dirList=walk[1], samples=samples, targets=targets, label=label, flags=flags,
filter_channel=filter_channel)
ListPic(dir=dirPath, fileNameList=walk[2], samples=samples, targets=targets, label=label, flags=flags,
filter_channel=filter_channel)
def directory_to_samples(directory, flags=None, filter_channel=False):
""" Read a directory, and list all subdirectories files as class sample """
samples = []
targets = []
label = 0
classes = sorted(os.walk(directory).__next__()[1])
for c in classes:
c_dir = os.path.join(directory, c)
walk = os.walk(c_dir).__next__()
ListDir(dir=c_dir,dirList=walk[1],samples=samples,targets=targets,label=label,flags=flags,filter_channel=filter_channel)
ListPic(dir=c_dir,fileNameList=walk[2],samples=samples,targets=targets,label=label,flags=flags,filter_channel=filter_channel)
label += 1
return samples, targets
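# Illustrative on-disk layout for directory_to_samples (class names are made
# up): each immediate subdirectory of `directory`, taken in sorted order,
# becomes one integer label, and nested folders are walked recursively.
#   gray/
#     cats/  a.jpg b.jpg ...   -> label 0
#     dogs/  c.jpg d.jpg ...   -> label 1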
def load_image(in_image):
""" Load an image, returns PIL.Image. """
img = Image.open(in_image)
return img
def convert_color(in_image, mode):
""" Convert image color with provided `mode`. """
return in_image.convert(mode)
def pil_to_nparray(pil_image):
""" Convert a PIL.Image to numpy array. """
pil_image.load()
return np.asarray(pil_image, dtype="float32")
def image_dirs_to_samples(directory, resize=None, convert_gray=None,
filetypes=None):
print("Starting to parse images...")
if filetypes:
if not isinstance(filetypes, (list, tuple)): filetypes = [filetypes]
samples, targets = directory_to_samples(directory, flags=filetypes)
print('Sample list built; converting images...')
print("number of samples is %d" % (len(samples)))
for i, s in enumerate(samples):
print(s)
samples[i] = load_image(s)
if resize:
samples[i] = resize_image(samples[i], resize[0], resize[1])
if convert_gray:
samples[i] = convert_color(samples[i], 'L')
samples[i] = pil_to_nparray(samples[i])
samples[i] /= 255.
print("Parsing Done!")
return samples, targets
def build_image_dataset_from_dir(directory,
dataset_file="my_tflearn_dataset.pkl",
resize=None, convert_gray=None,
filetypes=None, shuffle_data=False,
categorical_Y=False):
try:
X, Y = pickle.load(open(dataset_file, 'rb'))
except Exception:
X, Y = image_dirs_to_samples(directory, resize, convert_gray, filetypes)
if categorical_Y:
Y = to_categorical(Y, np.max(Y) + 1) # First class is '0'
if shuffle_data:
X, Y = shuffle(X, Y)
pickle.dump((X, Y), open(dataset_file, 'wb'))
return X, Y
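# Design note (added): build_image_dataset_from_dir caches (X, Y) with pickle,
# so repeated runs skip the slow image parsing; delete the .pkl file to force
# a rebuild.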
# dirname: root directory
# imagefolder: image sub-directory
# pklname: pickled training-data cache
def load_data(dirname="TrainData",
imagefolder="gray",
pklname='ctimage.pkl',
convert_gray=True,
resize_pics=(227, 227), shuffle=True,one_hot=False):
dataset_file = os.path.join(dirname, pklname)
if not os.path.exists(dataset_file):
imagePath = os.path.join(dirname,imagefolder)
if not os.path.exists(imagePath):
raise Exception("%s doesn't exist " %(imagePath))
X, Y = build_image_dataset_from_dir(os.path.join(dirname,imagefolder),
dataset_file=dataset_file,
resize=resize_pics,
filetypes=['.jpg', '.jpeg'],
convert_gray=convert_gray,
shuffle_data=shuffle,
categorical_Y=one_hot)
X = np.asarray(X, dtype=np.float32)
if convert_gray:
X = X.reshape([-1,resize_pics[0],resize_pics[1],1])
else:
X = X.reshape([-1, resize_pics[0], resize_pics[1], 3])
X_train, X_val = X[:-5000], X[-5000:]
y_train, y_val = Y[:-5000], Y[-5000:]
X_train = | np.asarray(X_train, dtype=np.float32) | numpy.asarray |