'''
Created on 29.09.2017
@author: lemmerfn
'''
import numbers
from collections import namedtuple
from functools import total_ordering
import numpy as np
import pysubgroup as ps
@total_ordering
class NumericTarget:
statistic_types = (
'size_sg', 'size_dataset', 'mean_sg', 'mean_dataset', 'std_sg', 'std_dataset', 'median_sg', 'median_dataset',
'max_sg', 'max_dataset', 'min_sg', 'min_dataset', 'mean_lift', 'median_lift')
def __init__(self, target_variable):
self.target_variable = target_variable
def __repr__(self):
return "T: " + str(self.target_variable)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __lt__(self, other):
return str(self) < str(other)
def get_attributes(self):
return [self.target_variable]
def get_base_statistics(self, subgroup, data):
cover_arr, size_sg = ps.get_cover_array_and_size(subgroup, len(data), data)
all_target_values = data[self.target_variable]
sg_target_values = all_target_values[cover_arr]
instances_dataset = len(data)
instances_subgroup = size_sg
mean_sg = np.mean(sg_target_values)
mean_dataset = np.mean(all_target_values)
return (instances_dataset, mean_dataset, instances_subgroup, mean_sg)
def calculate_statistics(self, subgroup, data, cached_statistics=None):
if cached_statistics is None or not isinstance(cached_statistics, dict):
statistics = dict()
elif all(k in cached_statistics for k in NumericTarget.statistic_types):
return cached_statistics
else:
statistics = cached_statistics
cover_arr, _ = ps.get_cover_array_and_size(subgroup, len(data), data)
all_target_values = data[self.target_variable].to_numpy()
sg_target_values = all_target_values[cover_arr]
statistics['size_sg'] = len(sg_target_values)
statistics['size_dataset'] = len(data)
statistics['mean_sg'] = np.mean(sg_target_values)
statistics['mean_dataset'] = np.mean(all_target_values)
statistics['std_sg'] = np.std(sg_target_values)
statistics['std_dataset'] = np.std(all_target_values)
statistics['median_sg'] = np.median(sg_target_values)
statistics['median_dataset'] = np.median(all_target_values)
statistics['max_sg'] = np.max(sg_target_values)
statistics['max_dataset'] = np.max(all_target_values)
statistics['min_sg'] = np.min(sg_target_values)
statistics['min_dataset'] = np.min(all_target_values)
statistics['mean_lift'] = statistics['mean_sg'] / statistics['mean_dataset']
statistics['median_lift'] = statistics['median_sg'] / statistics['median_dataset']
return statistics
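# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): computing
# target statistics for one subgroup description. Assumes the pysubgroup
# selector API (EqualitySelector, Conjunction) and pandas; the DataFrame and
# its columns 'group' and 'quality' are hypothetical.
def _example_numeric_target():
    import pandas as pd
    data = pd.DataFrame({'group': ['a', 'a', 'b', 'b'],
                         'quality': [1.0, 2.0, 5.0, 7.0]})
    target = NumericTarget('quality')
    subgroup = ps.Conjunction([ps.EqualitySelector('group', 'b')])
    statistics = target.calculate_statistics(subgroup, data)
    # mean_sg = 6.0 and mean_dataset = 3.75, so statistics['mean_lift'] = 1.6
    return statistics
# ---------------------------------------------------------------------------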
class StandardQFNumeric(ps.BoundedInterestingnessMeasure):
tpl = namedtuple('StandardQFNumeric_parameters', ('size_sg', 'mean', 'estimate'))
@staticmethod
def standard_qf_numeric(a, _, mean_dataset, instances_subgroup, mean_sg):
return instances_subgroup ** a * (mean_sg - mean_dataset)
def __init__(self, a, invert=False, estimator='sum'):
if not isinstance(a, numbers.Number):
raise ValueError(f'a is not a number. Received a={a}')
self.a = a
self.invert = invert
self.required_stat_attrs = ('size_sg', 'mean')
self.dataset_statistics = None
self.all_target_values = None
self.has_constant_statistics = False
if estimator == 'sum':
self.estimator = StandardQFNumeric.Summation_Estimator(self)
elif estimator == 'average':
self.estimator = StandardQFNumeric.Average_Estimator(self)
elif estimator == 'order':
self.estimator = StandardQFNumeric.Ordering_Estimator(self)
else:
raise ValueError('estimator is not one of the following: ' + str(['sum', 'average', 'order']))
def calculate_constant_statistics(self, data, target):
data = self.estimator.get_data(data, target)
self.all_target_values = data[target.target_variable].to_numpy()
target_mean = np.mean(self.all_target_values)
data_size = len(data)
self.dataset_statistics = StandardQFNumeric.tpl(data_size, target_mean, None)
self.estimator.calculate_constant_statistics(data, target)
self.has_constant_statistics = True
def evaluate(self, subgroup, target, data, statistics=None):
statistics = self.ensure_statistics(subgroup, target, data, statistics)
dataset = self.dataset_statistics
return StandardQFNumeric.standard_qf_numeric(self.a, dataset.size_sg, dataset.mean, statistics.size_sg, statistics.mean)
def calculate_statistics(self, subgroup, target, data, statistics=None):
cover_arr, sg_size = ps.get_cover_array_and_size(subgroup, len(self.all_target_values), data)
        # the remainder of this method was garbled in the source; reconstructed below
        sg_mean = np.array([0])
        sg_target_values = 0
        if sg_size > 0:
            sg_target_values = self.all_target_values[cover_arr]
            sg_mean = np.mean(sg_target_values)
            # delegate the optimistic estimate to the configured estimator
            estimate = self.estimator.get_estimate(subgroup, sg_size, sg_mean, cover_arr, sg_target_values)
        else:
            # empty subgroup: no meaningful mean, so use -inf as the estimate
            estimate = float('-inf')
        return StandardQFNumeric.tpl(sg_size, sg_mean, estimate)
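# ---------------------------------------------------------------------------
# Illustrative worked example (not part of the original module): the standard
# numeric quality function above scores a subgroup as
#     size_sg ** a * (mean_sg - mean_dataset),
# trading subgroup size against the deviation of the target mean. With the
# hypothetical numbers from the sketch above:
#     StandardQFNumeric.standard_qf_numeric(0.5, None, 3.75, 2, 6.0)
# returns 2 ** 0.5 * (6.0 - 3.75) ~= 3.18.
# ---------------------------------------------------------------------------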
'''
Utility functions to analyze particle data.
@author: <NAME> <<EMAIL>>
Units: unless otherwise noted, all quantities are in (combinations of):
mass [M_sun]
position [kpc comoving]
distance, radius [kpc physical]
velocity [km / s]
time [Gyr]
'''
# system ----
from __future__ import absolute_import, division, print_function  # python 2 compatibility
import numpy as np
from numpy import Inf
# local ----
from . import basic as ut
from . import halo_property
from . import orbit
from . import catalog
#===================================================================================================
# utilities - parsing input arguments
#===================================================================================================
def parse_species(part, species):
'''
Parse input list of species to ensure all are in catalog.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to analyze
Returns
-------
species : list : name[s] of particle species
'''
Say = ut.io.SayClass(parse_species)
if np.isscalar(species):
species = [species]
if species == ['all'] or species == ['total']:
species = list(part.keys())
elif species == ['baryon']:
species = ['gas', 'star']
for spec in list(species):
if spec not in part:
species.remove(spec)
Say.say('! {} not in particle catalog'.format(spec))
return species
def parse_indices(part_spec, part_indices):
'''
Parse input list of particle indices.
If none, generate via arange.
Parameters
----------
part_spec : dict : catalog of particles of given species
part_indices : array-like : indices of particles
Returns
-------
part_indices : array : indices of particles
'''
if part_indices is None or not len(part_indices):
if 'position' in part_spec:
part_indices = ut.array.get_arange(part_spec['position'].shape[0])
elif 'id' in part_spec:
part_indices = ut.array.get_arange(part_spec['id'].size)
elif 'mass' in part_spec:
part_indices = ut.array.get_arange(part_spec['mass'].size)
return part_indices
def parse_property(parts_or_species, property_name, property_values=None, single_host=True):
'''
Get property values, either input or stored in particle catalog.
List-ify as necessary to match input particle catalog.
Parameters
----------
parts_or_species : dict or string or list thereof :
catalog[s] of particles or string[s] of species
property_name : str : options: 'center_position', 'center_velocity', 'indices'
property_values : float/array or list thereof : property values to assign
single_host : bool : use only the primary host (if not input any property_values)
Returns
-------
property_values : float or list
'''
def parse_property_single(part_or_spec, property_name, property_values, single_host):
if property_name in ['center_position', 'center_velocity']:
if property_values is None or not len(property_values):
if property_name == 'center_position':
property_values = part_or_spec.host_positions
elif property_name == 'center_velocity':
# default to the primary host
property_values = part_or_spec.host_velocities
if property_values is None or not len(property_values):
raise ValueError('no input {} and no {} in input catalog'.format(
property_name, property_name))
if single_host:
                property_values = property_values[0]  # use only the primary host
if isinstance(property_values, list):
raise ValueError('input list of {}s but input single catalog'.format(property_name))
return property_values
assert property_name in ['center_position', 'center_velocity', 'indices']
if isinstance(parts_or_species, list):
# input list of particle catalogs
if (property_values is None or not len(property_values) or
not isinstance(property_values, list)):
property_values = [property_values for _ in parts_or_species]
if len(property_values) != len(parts_or_species):
            raise ValueError('number of input {}s does not match number of input catalogs'.format(
                property_name))
for i, part_or_spec in enumerate(parts_or_species):
property_values[i] = parse_property_single(
part_or_spec, property_name, property_values[i], single_host)
else:
# input single particle catalog
property_values = parse_property_single(
parts_or_species, property_name, property_values, single_host)
return property_values
#===================================================================================================
# id <-> index conversion
#===================================================================================================
def assign_id_to_index(
part, species=['all'], id_name='id', id_min=0, store_as_dict=False, print_diagnostic=True):
'''
    Assign, to particle dictionary, arrays that point from object id to species kind and index in
    species array.
    This is useful for analyzing multi-species catalogs with intermixed ids.
Do not assign pointers for ids below id_min.
Parameters
----------
part : dict : catalog of particles of various species
species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
id_name : str : key name for particle id
id_min : int : minimum id in catalog
store_as_dict : bool : whether to store id-to-index pointer as dict instead of array
print_diagnostic : bool : whether to print diagnostic information
'''
Say = ut.io.SayClass(assign_id_to_index)
# get list of species that have valid id key
species = parse_species(part, species)
for spec in species:
assert id_name in part[spec]
# get list of all ids
ids_all = []
for spec in species:
ids_all.extend(part[spec][id_name])
ids_all = np.array(ids_all, dtype=part[spec][id_name].dtype)
if print_diagnostic:
# check if duplicate ids within species
for spec in species:
masks = (part[spec][id_name] >= id_min)
total_number = np.sum(masks)
unique_number = np.unique(part[spec][id_name][masks]).size
if total_number != unique_number:
Say.say('species {} has {} ids that are repeated'.format(
spec, total_number - unique_number))
# check if duplicate ids across species
if len(species) > 1:
masks = (ids_all >= id_min)
total_number = np.sum(masks)
unique_number = np.unique(ids_all[masks]).size
if total_number != unique_number:
Say.say('across all species, {} ids are repeated'.format(
total_number - unique_number))
Say.say('maximum id = {}'.format(ids_all.max()))
part.id_to_index = {}
if store_as_dict:
# store pointers as a dictionary
# store overall dictionary (across all species) and dictionary within each species
for spec in species:
part[spec].id_to_index = {}
for part_i, part_id in enumerate(part[spec][id_name]):
if part_id in part.id_to_index:
# redundant ids - add to existing entry as list
if isinstance(part.id_to_index[part_id], tuple):
part.id_to_index[part_id] = [part.id_to_index[part_id]]
part.id_to_index[part_id].append((spec, part_i))
if part_id in part[spec].id_to_index:
if np.isscalar(part[spec].id_to_index[part_id]):
part[spec].id_to_index[part_id] = [part[spec].id_to_index[part_id]]
part[spec].id_to_index[part_id].append(part_i)
else:
# new id - add as new entry
part.id_to_index[part_id] = (spec, part_i)
part[spec].id_to_index[part_id] = part_i
# convert lists to arrays
dtype = part[spec][id_name].dtype
for part_id in part[spec].id_to_index:
if isinstance(part[spec].id_to_index[part_id], list):
part[spec].id_to_index[part_id] = np.array(
part[spec].id_to_index[part_id], dtype=dtype)
else:
# store pointers as arrays
part.id_to_index['species'] = np.zeros(ids_all.max() + 1, dtype='|S6')
dtype = ut.array.parse_data_type(ids_all.max() + 1)
part.id_to_index['index'] = ut.array.get_array_null(ids_all.max() + 1, dtype=dtype)
for spec in species:
masks = (part[spec][id_name] >= id_min)
part.id_to_index['species'][part[spec][id_name][masks]] = spec
part.id_to_index['index'][part[spec][id_name][masks]] = ut.array.get_arange(
part[spec][id_name], dtype=dtype)[masks]
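# Usage sketch (illustrative, not part of the original module): with the
# array-based pointers built above, a particle id resolves to its species and
# index; `some_id` below is a hypothetical id present in the catalog.
#     assign_id_to_index(part, species=['star', 'gas'], store_as_dict=False)
#     spec_name = part.id_to_index['species'][some_id].decode()  # for example, 'star'
#     part_index = part.id_to_index['index'][some_id]
#     mass = part[spec_name]['mass'][part_index]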
#===================================================================================================
# position, velocity
#===================================================================================================
def get_center_positions(
part, species=['star', 'dark', 'gas'], part_indicess=None, method='center-of-mass',
center_number=1, exclusion_distance=200, center_positions=None, distance_max=Inf,
compare_centers=False, return_array=True):
'''
Get position[s] of center of mass [kpc comoving] using iterative zoom-in on input species.
Parameters
----------
part : dict : dictionary of particles
species : str or list : name[s] of species to use: 'all' = use all in particle dictionary
    part_indicess : array or list of arrays : indices of particles to use to define center
use this to include only particles that you know are relevant
method : str : method of centering: 'center-of-mass', 'potential'
center_number : int : number of centers to compute
exclusion_distance : float :
radius around previous center to cut before finding next center [kpc comoving]
    center_positions : array-like : initial center position[s] to use
distance_max : float : maximum radius to consider initially
compare_centers : bool : whether to run sanity check to compare centers via zoom v potential
return_array : bool :
whether to return single array instead of array of arrays, if center_number = 1
Returns
-------
center_positions : array or array of arrays: position[s] of center[s] [kpc comoving]
'''
Say = ut.io.SayClass(get_center_positions)
assert method in ['center-of-mass', 'potential']
species = parse_species(part, species)
part_indicess = parse_property(species, 'indices', part_indicess)
if center_positions is None or np.ndim(center_positions) == 1:
# list-ify center_positions
center_positions = [center_positions for _ in range(center_number)]
if np.shape(center_positions)[0] != center_number:
raise ValueError('! input center_positions = {} but also input center_number = {}'.format(
center_positions, center_number))
if method == 'potential':
if len(species) > 1:
Say.say('! using only first species = {} for centering via potential'.format(
species[0]))
if 'potential' not in part[species[0]]:
Say.say('! {} does not have potential, using center-of-mass zoom instead'.format(
species[0]))
method = 'center-of-mass'
if method == 'potential':
# use single (first) species
spec_i = 0
spec_name = species[spec_i]
        part_indices = parse_indices(part[spec_name], part_indicess[spec_i])
for center_i, center_position in enumerate(center_positions):
if center_i > 0:
# cull out particles near previous center
distances = get_distances_wrt_center(
part, spec_name, part_indices, center_positions[center_i - 1],
total_distance=True, return_array=True)
# exclusion distance in [kpc comoving]
part_indices = part_indices[
distances > (exclusion_distance * part.info['scalefactor'])]
if center_position is not None and distance_max > 0 and distance_max < Inf:
# impose distance cut around input center
part_indices = get_indices_within_coordinates(
part, spec_name, [0, distance_max], center_position, part_indicess=part_indices,
return_array=True)
part_index = np.nanargmin(part[spec_name]['potential'][part_indices])
center_positions[center_i] = part[spec_name]['position'][part_index]
else:
for spec_i, spec_name in enumerate(species):
part_indices = parse_indices(part[spec_name], part_indicess[spec_i])
if spec_i == 0:
positions = part[spec_name]['position'][part_indices]
masses = part[spec_name]['mass'][part_indices]
else:
positions = np.concatenate(
[positions, part[spec_name]['position'][part_indices]])
masses = np.concatenate([masses, part[spec_name]['mass'][part_indices]])
for center_i, center_position in enumerate(center_positions):
if center_i > 0:
# remove particles near previous center
distances = ut.coordinate.get_distances(
positions, center_positions[center_i - 1], part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
masks = (distances > (exclusion_distance * part.info['scalefactor']))
positions = positions[masks]
masses = masses[masks]
center_positions[center_i] = ut.coordinate.get_center_position_zoom(
positions, masses, part.info['box.length'], center_position=center_position,
distance_max=distance_max)
center_positions = np.array(center_positions)
if compare_centers:
position_dif_max = 1 # [kpc comoving]
if 'potential' not in part[species[0]]:
            Say.say('! {} does not have potential, cannot compare against center-of-mass zoom'.format(
                species[0]))
return center_positions
if method == 'potential':
method_other = 'center-of-mass'
else:
method_other = 'potential'
center_positions_other = get_center_positions(
part, species, part_indicess, method_other, center_number, exclusion_distance,
center_positions, distance_max, compare_centers=False, return_array=False)
position_difs = np.abs(center_positions - center_positions_other)
for pi, position_dif in enumerate(position_difs):
if np.max(position_dif) > position_dif_max:
Say.say('! offset center positions')
Say.say('center position via {}: '.format(method), end='')
ut.io.print_array(center_positions[pi], '{:.3f}')
Say.say('center position via {}: '.format(method_other), end='')
ut.io.print_array(center_positions_other[pi], '{:.3f}')
Say.say('position difference: ', end='')
ut.io.print_array(position_dif, '{:.3f}')
if return_array and center_number == 1:
center_positions = center_positions[0]
return center_positions
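# Usage sketch (illustrative, not part of the original module): locate the
# primary host center, then the centers of a hypothetical pair of hosts by
# excluding a 300 kpc comoving region around each previous center.
#     host_position = get_center_positions(part, species=['star'])
#     pair_positions = get_center_positions(
#         part, species=['star'], center_number=2, exclusion_distance=300)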
def get_center_velocities(
part, species_name='star', part_indices=None, distance_max=15, center_positions=None,
return_array=True):
'''
Get velocity[s] [km / s] of center of mass of input species.
Parameters
----------
part : dict : dictionary of particles
species_name : str : name of particle species to use
    part_indices : array : indices of particles to use to define center
use this to exclude particles that you know are not relevant
distance_max : float : maximum radius to consider [kpc physical]
center_positions : array or list of arrays: center position[s] [kpc comoving]
if None, will use default center position[s] in catalog
return_array : bool :
whether to return single array instead of array of arrays, if input single center position
Returns
-------
center_velocities : array or array of arrays : velocity[s] of center of mass [km / s]
'''
center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
part_indices = parse_indices(part[species_name], part_indices)
distance_max /= part.snapshot['scalefactor'] # convert to [kpc comoving] to match positions
center_velocities = np.zeros(center_positions.shape, part[species_name]['velocity'].dtype)
for center_i, center_position in enumerate(center_positions):
center_velocities[center_i] = ut.coordinate.get_center_velocity(
part[species_name]['velocity'][part_indices],
part[species_name]['mass'][part_indices],
part[species_name]['position'][part_indices],
center_position, distance_max, part.info['box.length'])
if return_array and len(center_velocities) == 1:
center_velocities = center_velocities[0]
return center_velocities
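# Usage sketch (illustrative, not part of the original module): center-of-mass
# velocity of star particles within 15 kpc physical of the default host center.
#     host_velocity = get_center_velocities(part, 'star', distance_max=15)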
def get_distances_wrt_center(
part, species=['star'], part_indicess=None, center_position=None, rotation=None,
coordinate_system='cartesian', total_distance=False, return_array=True):
'''
Get distances (scalar or vector) between input particles and center_position (input or stored
in particle catalog).
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to compute
part_indicess : array or list : indices[s] of particles to compute, one array per input species
center_position : array : position of center [kpc comoving]
if None, will use default center position in particle catalog
rotation : bool or array : whether to rotate particles
two options:
(a) if input array of eigen-vectors, will define rotation axes for all species
(b) if True, will rotate to align with principal axes defined by input species
coordinate_system : str : which coordinates to get distances in:
'cartesian' (default), 'cylindrical', 'spherical'
total_distance : bool : whether to compute total/scalar distance
return_array : bool : whether to return single array instead of dict if input single species
Returns
-------
dist : array (object number x dimension number) or dict thereof : [kpc physical]
3-D distance vectors aligned with default x,y,z axes OR
3-D distance vectors aligned with major, medium, minor axis OR
2-D distance vectors along major axes and along minor axis OR
1-D scalar distances
OR
dictionary of above for each species
'''
assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
dist = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
dist[spec] = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance) # [kpc physical]
if not total_distance:
if rotation is not None:
if rotation is True:
# get principal axes stored in particle dictionary
if (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('! cannot find principal_axes_tensor in species dict')
elif len(rotation):
# use input rotation vectors
rotation_tensor = rotation
dist[spec] = ut.coordinate.get_coordinates_rotated(dist[spec], rotation_tensor)
if coordinate_system in ['cylindrical', 'spherical']:
dist[spec] = ut.coordinate.get_positions_in_coordinate_system(
dist[spec], 'cartesian', coordinate_system)
if return_array and len(species) == 1:
dist = dist[species[0]]
return dist
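# Usage sketch (illustrative, not part of the original module): cylindrical
# coordinates (R along major axes, Z along minor axis, angle phi) of star
# particles, rotated into the principal axes stored for the primary host.
#     star_distances = get_distances_wrt_center(
#         part, species=['star'], rotation=True, coordinate_system='cylindrical')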
def get_velocities_wrt_center(
part, species=['star'], part_indicess=None, center_velocity=None, center_position=None,
rotation=False, coordinate_system='cartesian', total_velocity=False, return_array=True):
'''
Get velocities (either scalar or vector) between input particles and center_velocity
(input or stored in particle catalog).
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to get
part_indicess : array or list : indices[s] of particles to select, one array per input species
center_velocity : array : center velocity [km / s]
if None, will use default center velocity in catalog
center_position : array : center position [kpc comoving], to use in computing Hubble flow
if None, will use default center position in catalog
rotation : bool or array : whether to rotate particles
two options:
(a) if input array of eigen-vectors, will define rotation axes for all species
(b) if True, will rotate to align with principal axes defined by input species
coordinate_system : str : which coordinates to get positions in:
'cartesian' (default), 'cylindrical', 'spherical'
total_velocity : bool : whether to compute total/scalar velocity
return_array : bool : whether to return array (instead of dict) if input single species
Returns
-------
vel : array or dict thereof :
velocities (object number x dimension number, or object number) [km / s]
'''
assert coordinate_system in ('cartesian', 'cylindrical', 'spherical')
species = parse_species(part, species)
center_velocity = parse_property(part, 'center_velocity', center_velocity)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
vel = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
vel[spec] = ut.coordinate.get_velocity_differences(
part[spec]['velocity'][part_indices], center_velocity,
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], part.snapshot['time.hubble'], total_velocity)
if not total_velocity:
if rotation is not None:
if rotation is True:
# get principal axes stored in particle dictionary
if (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('! cannot find principal_axes_tensor in species dict')
elif len(rotation):
# use input rotation vectors
rotation_tensor = rotation
vel[spec] = ut.coordinate.get_coordinates_rotated(vel[spec], rotation_tensor)
if coordinate_system in ('cylindrical', 'spherical'):
# need to compute distance vectors
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor']) # [kpc physical]
if rotation is not None:
# need to rotate distances too
distances = ut.coordinate.get_coordinates_rotated(distances, rotation_tensor)
vel[spec] = ut.coordinate.get_velocities_in_coordinate_system(
vel[spec], distances, 'cartesian', coordinate_system)
if return_array and len(species) == 1:
vel = vel[species[0]]
return vel
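# Usage sketch (illustrative, not part of the original module): cylindrical
# velocities (v_R, v_Z, v_phi) of star particles relative to the host center,
# aligned with the stored principal axes.
#     star_velocities = get_velocities_wrt_center(
#         part, species=['star'], rotation=True, coordinate_system='cylindrical')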
def get_orbit_dictionary(
part, species=['star'], part_indicess=None, center_position=None, center_velocity=None,
return_single=True):
'''
Get dictionary of orbital parameters.
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to compute
part_indicess : array or list : indices[s] of particles to select, one array per input species
    center_position : array : center (reference) position
    center_velocity : array : center (reference) velocity
return_single : bool :
whether to return single dict instead of dict of dicts, if single species
Returns
-------
    orb : dict : dictionary of orbital properties, one per species (or single dict if return_single)
'''
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
center_velocity = parse_property(part, 'center_velocity', center_velocity)
part_indicess = parse_property(species, 'indices', part_indicess)
orb = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
distance_vectors = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'])
velocity_vectors = ut.coordinate.get_velocity_differences(
part[spec]['velocity'][part_indices], center_velocity,
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor'], part.snapshot['time.hubble'])
orb[spec] = orbit.get_orbit_dictionary(distance_vectors, velocity_vectors)
if return_single and len(species) == 1:
orb = orb[species[0]]
return orb
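# Usage sketch (illustrative, not part of the original module): orbital
# properties of star particles about the default host center; the returned
# dictionary bundles the quantities that orbit.get_orbit_dictionary() derives
# from the input distance and velocity vectors.
#     star_orbit = get_orbit_dictionary(part, species=['star'])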
#===================================================================================================
# subsample
#===================================================================================================
def get_indices_within_coordinates(
part, species=['star'],
distance_limitss=[], center_position=None,
velocity_limitss=[], center_velocity=None,
rotation=None, coordinate_system='cartesian',
part_indicess=None, return_array=True):
'''
Get indices of particles that are within distance and/or velocity coordinate limits from center
(either input or stored in particle catalog).
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to use
distance_limitss : list or list of lists:
min and max distance[s], relative to center, to get particles [kpc physical]
default is 1-D list, but can be 2-D or 3-D list to select separately along dimensions
if 2-D or 3-D, need to input *signed* limits
center_position : array : center position [kpc comoving]
if None, will use default center position in particle catalog
velocity_limitss : list or list of lists:
min and max velocities, relative to center, to get particles [km / s]
default is 1-D list, but can be 2-D or 3-D list to select separately along dimensions
if 2-D or 3-D, need to input *signed* limits
center_velocity : array : center velocity [km / s]
if None, will use default center velocity in particle catalog
rotation : bool or array : whether to rotate particle coordinates
two options:
(a) if input array of eigen-vectors, will use to define rotation axes for all species
(b) if True, will rotate to align with principal axes defined by each input species
coordinate_system : str : which coordinates to get positions in:
'cartesian' (default), 'cylindrical', 'spherical'
part_indicess : array : prior indices[s] of particles to select, one array per input species
return_array : bool : whether to return single array instead of dict, if input single species
Returns
-------
part_index : dict or array : array or dict of arrays of indices of particles in region
'''
assert coordinate_system in ['cartesian', 'cylindrical', 'spherical']
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
if velocity_limitss is not None and len(velocity_limitss):
center_velocity = parse_property(part, 'center_velocity', center_velocity)
part_indicess = parse_property(species, 'indices', part_indicess)
part_index = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
if len(part_indices) and distance_limitss is not None and len(distance_limitss):
distance_limits_dimen = np.ndim(distance_limitss)
if distance_limits_dimen == 1:
total_distance = True
elif distance_limits_dimen == 2:
total_distance = False
assert len(distance_limitss) in [2, 3]
else:
raise ValueError('! cannot parse distance_limitss = {}'.format(distance_limitss))
if (distance_limits_dimen == 1 and distance_limitss[0] <= 0 and
distance_limitss[1] >= Inf):
pass # null case, no actual limits imposed, so skip rest
else:
"""
# an attempt to be clever, but gains seem modest
distances = np.abs(coordinate.get_position_difference(
part[spec]['position'] - center_position,
part.info['box.length'])) * part.snapshot['scalefactor'] # [kpc physical]
for dimension_i in range(part[spec]['position'].shape[1]):
masks *= ((distances[:, dimension_i] < np.max(distance_limits)) *
(distances[:, dimension_i] >= np.min(distance_limits)))
part_indices[spec] = part_indices[spec][masks]
distances = distances[masks]
distances = np.sum(distances ** 2, 1) # assume 3-d position
"""
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation, coordinate_system,
total_distance)
if distance_limits_dimen == 1:
# distances are absolute
masks = (
(distancess >= np.min(distance_limitss)) *
(distancess < np.max(distance_limitss))
)
                elif distance_limits_dimen == 2:
                    if len(distance_limitss) == 2:
                        # distances are signed
                        masks = (
                            (distancess[:, 0] >= np.min(distance_limitss[0])) *
                            (distancess[:, 0] < np.max(distance_limitss[0])) *
                            (distancess[:, 1] >= np.min(distance_limitss[1])) *
                            (distancess[:, 1] < np.max(distance_limitss[1]))
                        )
                    elif len(distance_limitss) == 3:
                        # distances are signed
                        masks = (
                            (distancess[:, 0] >= np.min(distance_limitss[0])) *
                            (distancess[:, 0] < np.max(distance_limitss[0])) *
                            (distancess[:, 1] >= np.min(distance_limitss[1])) *
                            (distancess[:, 1] < np.max(distance_limitss[1])) *
                            (distancess[:, 2] >= np.min(distance_limitss[2])) *
                            (distancess[:, 2] < np.max(distance_limitss[2]))
                        )
part_indices = part_indices[masks]
if len(part_indices) and velocity_limitss is not None and len(velocity_limitss):
velocity_limits_dimen = np.ndim(velocity_limitss)
if velocity_limits_dimen == 1:
return_total_velocity = True
elif velocity_limits_dimen == 2:
return_total_velocity = False
assert len(velocity_limitss) in [2, 3]
else:
raise ValueError('! cannot parse velocity_limitss = {}'.format(velocity_limitss))
if (velocity_limits_dimen == 1 and velocity_limitss[0] <= 0 and
velocity_limitss[1] >= Inf):
pass # null case, no actual limits imposed, so skip rest
else:
velocitiess = get_velocities_wrt_center(
part, spec, part_indices, center_velocity, center_position, rotation,
coordinate_system, return_total_velocity)
if velocity_limits_dimen == 1:
# velocities are absolute
masks = (
(velocitiess >= np.min(velocity_limitss)) *
(velocitiess < np.max(velocity_limitss))
)
                elif velocity_limits_dimen == 2:
                    if len(velocity_limitss) == 2:
                        # velocities are signed
                        masks = (
                            (velocitiess[:, 0] >= np.min(velocity_limitss[0])) *
                            (velocitiess[:, 0] < np.max(velocity_limitss[0])) *
                            (velocitiess[:, 1] >= np.min(velocity_limitss[1])) *
                            (velocitiess[:, 1] < np.max(velocity_limitss[1]))
                        )
                    elif len(velocity_limitss) == 3:
                        # velocities are signed
                        masks = (
                            (velocitiess[:, 0] >= np.min(velocity_limitss[0])) *
                            (velocitiess[:, 0] < np.max(velocity_limitss[0])) *
                            (velocitiess[:, 1] >= np.min(velocity_limitss[1])) *
                            (velocitiess[:, 1] < np.max(velocity_limitss[1])) *
                            (velocitiess[:, 2] >= np.min(velocity_limitss[2])) *
                            (velocitiess[:, 2] < np.max(velocity_limitss[2]))
                        )
part_indices = part_indices[masks]
part_index[spec] = part_indices
if return_array and len(species) == 1:
part_index = part_index[species[0]]
return part_index
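# Usage sketch (illustrative, not part of the original module): select star
# particle indices within a sphere, then within a hypothetical disk-like
# region via signed cylindrical limits (R in [0, 15) kpc, |Z| < 3 kpc).
#     sphere_indices = get_indices_within_coordinates(part, 'star', [0, 20])
#     disk_indices = get_indices_within_coordinates(
#         part, 'star', [[0, 15], [-3, 3]], rotation=True,
#         coordinate_system='cylindrical')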
def get_indices_id_kind(
part, species=['star'], id_kind='unique', part_indicess=None, return_array=True):
'''
Get indices of particles that either are unique (no other particles of same species have
same id) or multiple (other particle of same species has same id).
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species
    id_kind : str : id kind of particles to get: 'unique', 'multiple'
part_indicess : array : prior indices[s] of particles to select, one array per input species
return_array : bool : whether to return single array instead of dict, if input single species
Returns
-------
part_index : dict or array : array or dict of arrays of indices of particles of given split kind
'''
species = parse_species(part, species)
part_indicess = parse_property(species, 'indices', part_indicess)
assert id_kind in ['unique', 'multiple']
part_index = {}
for spec_i, spec in enumerate(species):
part_indices = parse_indices(part[spec], part_indicess[spec_i])
_pids, piis, counts = np.unique(
part[spec]['id'][part_indices], return_index=True, return_counts=True)
pis_unsplit = np.sort(part_indices[piis[counts == 1]])
if id_kind == 'unique':
part_index[spec] = pis_unsplit
elif id_kind == 'multiple':
part_index[spec] = np.setdiff1d(part_indices, pis_unsplit)
else:
            raise ValueError('! cannot parse id_kind = {}'.format(id_kind))
if return_array and len(species) == 1:
part_index = part_index[species[0]]
return part_index
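# Illustrative numpy detail (not part of the original module): the split above
# keys off numpy.unique with return_index and return_counts, for example:
#     ids = np.array([10, 11, 11, 12])
#     _pids, piis, counts = np.unique(ids, return_index=True, return_counts=True)
#     ids[piis[counts == 1]]  # -> array([10, 12]), the ids that appear once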
#===================================================================================================
# halo/galaxy major/minor axes
#===================================================================================================
def get_principal_axes(
part, species_name='star', distance_max=Inf, mass_percent=None, age_percent=None, age_limits=[],
center_positions=None, center_velocities=None, part_indices=None, return_array=True,
print_results=True):
'''
Get reverse-sorted eigen-vectors, eigen-values, and axis ratios of principal axes of
each host galaxy/halo.
Ensure that principal axes are oriented so median v_phi > 0.
Parameters
----------
part : dict : catalog of particles at snapshot
    species_name : str : name of particle species to use
distance_max : float : maximum distance to select particles [kpc physical]
mass_percent : float : keep particles within the distance that encloses mass percent [0, 100]
of all particles within distance_max
age_percent : float : use the youngest age_percent of particles within distance cut
    age_limits : list : min and max limits of age to use [Gyr]
center_positions : array or array of arrays : position[s] of center[s] [kpc comoving]
center_velocities : array or array of arrays : velocity[s] of center[s] [km / s]
part_indices : array : indices[s] of particles to select
return_array : bool :
whether to return single array for each property, instead of array of arrays, if single host
print_results : bool : whether to print axis ratios
Returns
-------
principal_axes = {
'rotation.tensor': array : rotation vectors that define max, med, min axes
'eigen.values': array : eigen-values of max, med, min axes
'axis.ratios': array : ratios of principal axes
}
'''
Say = ut.io.SayClass(get_principal_axes)
center_positions = parse_property(part, 'center_position', center_positions, single_host=False)
center_velocities = parse_property(
part, 'center_velocity', center_velocities, single_host=False)
part_indices = parse_indices(part[species_name], part_indices)
principal_axes = {
'rotation.tensor': [],
'eigen.values': [],
'axis.ratios': [],
}
for center_i, center_position in enumerate(center_positions):
distance_vectors = ut.coordinate.get_distances(
part[species_name]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor']) # [kpc physical]
distances = np.sqrt(np.sum(distance_vectors ** 2, 1))
masks = (distances < distance_max)
if mass_percent:
distance_percent = ut.math.percentile_weighted(
distances[masks], mass_percent,
part[species_name].prop('mass', part_indices[masks]))
masks *= (distances < distance_percent)
if age_percent or (age_limits is not None and len(age_limits)):
if 'form.scalefactor' not in part[species_name]:
raise ValueError('! input age constraints but age not in {} catalog'.format(
species_name))
if age_percent and (age_limits is not None and len(age_limits)):
Say.say('input both age_percent and age_limits, using only age_percent')
if age_percent:
age_max = ut.math.percentile_weighted(
part[species_name].prop('age', part_indices[masks]), age_percent,
part[species_name].prop('mass', part_indices[masks]))
age_limits_use = [0, age_max]
else:
age_limits_use = age_limits
Say.say('using {} particles with age = {} Gyr'.format(
species_name, ut.array.get_limits_string(age_limits_use)))
masks *= ((part[species_name].prop('age', part_indices) >= min(age_limits_use)) *
(part[species_name].prop('age', part_indices) < max(age_limits_use)))
rotation_tensor, eigen_values, axis_ratios = ut.coordinate.get_principal_axes(
distance_vectors[masks], part[species_name].prop('mass', part_indices[masks]),
print_results)
# test if need to flip a principal axis to ensure that net v_phi > 0
velocity_vectors = ut.coordinate.get_velocity_differences(
part[species_name].prop('velocity', part_indices[masks]), center_velocities[center_i])
velocity_vectors_rot = ut.coordinate.get_coordinates_rotated(
velocity_vectors, rotation_tensor)
distance_vectors_rot = ut.coordinate.get_coordinates_rotated(
distance_vectors[masks], rotation_tensor)
velocity_vectors_cyl = ut.coordinate.get_velocities_in_coordinate_system(
velocity_vectors_rot, distance_vectors_rot, 'cartesian', 'cylindrical')
if np.median(velocity_vectors_cyl[:, 2]) < 0:
rotation_tensor[1] *= -1 # flip so net v_phi is positive
principal_axes['rotation.tensor'].append(rotation_tensor)
principal_axes['eigen.values'].append(eigen_values)
principal_axes['axis.ratios'].append(axis_ratios)
for k in principal_axes:
principal_axes[k] = np.array(principal_axes[k])
if return_array and np.shape(center_positions)[0] == 1:
for k in principal_axes:
principal_axes[k] = principal_axes[k][0]
return principal_axes
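# Usage sketch (illustrative, not part of the original module): principal axes
# of the primary host from its youngest 25 percent of stars within 10 kpc
# physical, then reuse of the rotation tensor for aligned distances.
#     axes = get_principal_axes(part, 'star', distance_max=10, age_percent=25)
#     aligned_distances = get_distances_wrt_center(
#         part, species=['star'], rotation=axes['rotation.tensor'])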
#===================================================================================================
# halo/galaxy radius
#===================================================================================================
def get_halo_properties(
part, species=['dark', 'star', 'gas'], virial_kind='200m',
distance_limits=[10, 600], distance_bin_width=0.02, distance_scaling='log',
center_position=None, return_array=True, print_results=True):
'''
Compute halo radius according to virial_kind.
    Return this radius, the mass from each species within this radius, and the indices of
    particles within this radius.
Parameters
----------
part : dict : catalog of particles at snapshot
species : str or list : name[s] of particle species to use: 'all' = use all in dictionary
virial_kind : str : virial overdensity definition
'200m' -> average density is 200 x matter
'200c' -> average density is 200 x critical
'vir' -> average density is Bryan & Norman
'fof.100m' -> edge density is 100 x matter, for FoF(ll=0.168)
'fof.60m' -> edge density is 60 x matter, for FoF(ll=0.2)
distance_limits : list : min and max distance to consider [kpc physical]
distance_bin_width : float : width of distance bin
distance_scaling : str : scaling of distance: 'log', 'linear'
center_position : array : center position to use
if None, will use default center position in catalog
return_array : bool : whether to return array (instead of dict) if input single species
print_results : bool : whether to print radius and mass
Returns
-------
halo_prop : dict : dictionary of halo properties:
radius : float : halo radius [kpc physical]
mass : float : mass within radius [M_sun]
        indices : array : indices of particles within radius
'''
distance_limits = np.asarray(distance_limits)
Say = ut.io.SayClass(get_halo_properties)
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
HaloProperty = halo_property.HaloPropertyClass(part.Cosmology, part.snapshot['redshift'])
DistanceBin = ut.binning.DistanceBinClass(
distance_scaling, distance_limits, width=distance_bin_width, dimension_number=3)
overdensity, reference_density = HaloProperty.get_overdensity(virial_kind, units='kpc physical')
virial_density = overdensity * reference_density
mass_cum_in_bins = np.zeros(DistanceBin.number)
distancess = []
for spec_i, spec in enumerate(species):
distances = ut.coordinate.get_distances(
part[spec]['position'], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
distancess.append(distances)
mass_in_bins = DistanceBin.get_histogram(distancess[spec_i], False, part[spec]['mass'])
# get mass within distance minimum, for computing cumulative values
distance_indices = np.where(distancess[spec_i] < np.min(distance_limits))[0]
mass_cum_in_bins += (np.sum(part[spec]['mass'][distance_indices]) +
np.cumsum(mass_in_bins))
if part.info['baryonic'] and len(species) == 1 and species[0] == 'dark':
# correct for baryonic mass if analyzing only dark matter in baryonic simulation
Say.say('! using only dark particles, so correcting for baryonic mass')
mass_factor = 1 + part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter']
mass_cum_in_bins *= mass_factor
# cumulative densities in bins
density_cum_in_bins = mass_cum_in_bins / DistanceBin.volumes_cum
# get smallest radius that satisfies virial density
for d_bin_i in range(DistanceBin.number - 1):
if (density_cum_in_bins[d_bin_i] >= virial_density and
density_cum_in_bins[d_bin_i + 1] < virial_density):
# interpolate in log space
log_halo_radius = np.interp(
np.log10(virial_density), np.log10(density_cum_in_bins[[d_bin_i + 1, d_bin_i]]),
DistanceBin.log_maxs[[d_bin_i + 1, d_bin_i]])
halo_radius = 10 ** log_halo_radius
break
else:
Say.say('! could not determine halo R_{}'.format(virial_kind))
if density_cum_in_bins[0] < virial_density:
Say.say('distance min = {:.1f} kpc already is below virial density = {}'.format(
distance_limits.min(), virial_density))
Say.say('decrease distance_limits')
elif density_cum_in_bins[-1] > virial_density:
Say.say('distance max = {:.1f} kpc still is above virial density = {}'.format(
distance_limits.max(), virial_density))
Say.say('increase distance_limits')
else:
Say.say('not sure why!')
return
# get maximum of V_circ = sqrt(G M(< r) / r)
vel_circ_in_bins = ut.constant.km_per_kpc * np.sqrt(
ut.constant.grav_kpc_msun_sec * mass_cum_in_bins / DistanceBin.maxs)
vel_circ_max = np.max(vel_circ_in_bins)
vel_circ_max_radius = DistanceBin.maxs[np.argmax(vel_circ_in_bins)]
halo_mass = 0
part_indices = {}
for spec_i, spec in enumerate(species):
masks = (distancess[spec_i] < halo_radius)
halo_mass += np.sum(part[spec]['mass'][masks])
part_indices[spec] = ut.array.get_arange(part[spec]['mass'])[masks]
if print_results:
Say.say(
'R_{} = {:.1f} kpc\n M_{} = {} M_sun, log = {}\n V_max = {:.1f} km/s'.format(
virial_kind, halo_radius, virial_kind,
ut.io.get_string_from_numbers(halo_mass, 2),
ut.io.get_string_from_numbers(np.log10(halo_mass), 2),
vel_circ_max)
)
halo_prop = {}
halo_prop['radius'] = halo_radius
halo_prop['mass'] = halo_mass
halo_prop['vel.circ.max'] = vel_circ_max
halo_prop['vel.circ.max.radius'] = vel_circ_max_radius
if return_array and len(species) == 1:
part_indices = part_indices[species[0]]
halo_prop['indices'] = part_indices
return halo_prop
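# Usage sketch (illustrative, not part of the original module): R_200m and the
# mass and maximum circular velocity within it, using all listed species.
#     halo = get_halo_properties(part, ['dark', 'star', 'gas'], virial_kind='200m')
#     halo['radius'], halo['mass'], halo['vel.circ.max']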
def get_galaxy_properties(
part, species_name='star', edge_kind='mass.percent', edge_value=90,
distance_max=20, distance_bin_width=0.02, distance_scaling='log', center_position=None,
axis_kind='', rotation_tensor=None, rotation_distance_max=20,
other_axis_distance_limits=None, part_indices=None, print_results=True):
'''
Compute galaxy radius according to edge_kind.
Return this radius, the mass from species within this radius, particle indices within this
radius, and rotation vectors (if applicable).
Parameters
----------
part : dict : catalog of particles at snapshot
species_name : str : name of particle species to use
edge_kind : str : method to define galaxy radius
'mass.percent' = radius at which edge_value (percent) of stellar mass within distance_max
'density' = radius at which density is edge_value [log(M_sun / kpc^3)]
    edge_value : float : value used to define galaxy radius
        (percent of mass out to distance_max if edge_kind is 'mass.percent',
        log density threshold if edge_kind is 'density')
distance_max : float : maximum distance to consider [kpc physical]
distance_bin_width : float : width of distance bin
distance_scaling : str : distance bin scaling: 'log', 'linear'
axis_kind : str : 'major', 'minor', 'both'
rotation_tensor : array : rotation vectors that define principal axes
rotation_distance_max : float :
maximum distance to use in defining rotation vectors of principal axes [kpc physical]
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
center_position : array : center position [kpc comoving]
if None, will use default center position in catalog
part_indices : array : star particle indices (if already know which ones are close)
print_results : bool : whether to print radius and mass of galaxy
Returns
-------
gal_prop : dict : dictionary of galaxy properties:
radius or radius.major & radius.minor : float : galaxy radius[s] [kpc physical]
mass : float : mass within radius[s] [M_sun]
        indices : array : indices of particles within radius[s]
rotation.vectors : array : eigen-vectors that defined rotation
'''
def get_radius_mass_indices(
masses, distances, distance_scaling, distance_limits, distance_bin_width, dimension_number,
edge_kind, edge_value):
'''
Utility function.
'''
Say = ut.io.SayClass(get_radius_mass_indices)
DistanceBin = ut.binning.DistanceBinClass(
distance_scaling, distance_limits, width=distance_bin_width,
dimension_number=dimension_number)
# get masses in distance bins
mass_in_bins = DistanceBin.get_histogram(distances, False, masses)
if edge_kind == 'mass.percent':
# get mass within distance minimum, for computing cumulative values
d_indices = np.where(distances < np.min(distance_limits))[0]
log_masses_cum = ut.math.get_log(np.sum(masses[d_indices]) + np.cumsum(mass_in_bins))
log_mass = np.log10(edge_value / 100) + log_masses_cum.max()
try:
# interpolate in log space
log_radius = np.interp(log_mass, log_masses_cum, DistanceBin.log_maxs)
except ValueError:
Say.say('! could not find object radius - increase distance_max')
return
elif edge_kind == 'density':
log_density_in_bins = ut.math.get_log(mass_in_bins / DistanceBin.volumes)
# use only bins with defined density (has particles)
d_bin_indices = np.arange(DistanceBin.number)[np.isfinite(log_density_in_bins)]
# get smallest radius that satisfies density threshold
for d_bin_ii, d_bin_i in enumerate(d_bin_indices):
d_bin_i_plus_1 = d_bin_indices[d_bin_ii + 1]
if (log_density_in_bins[d_bin_i] >= edge_value and
log_density_in_bins[d_bin_i_plus_1] < edge_value):
# interpolate in log space
log_radius = np.interp(
edge_value, log_density_in_bins[[d_bin_i_plus_1, d_bin_i]],
DistanceBin.log_maxs[[d_bin_i_plus_1, d_bin_i]])
break
else:
Say.say('! could not find object radius - increase distance_max')
return
radius = 10 ** log_radius
masks = (distances < radius)
mass = np.sum(masses[masks])
indices = ut.array.get_arange(masses)[masks]
return radius, mass, indices
# start function
Say = ut.io.SayClass(get_galaxy_properties)
distance_min = 0.001 # [kpc physical]
distance_limits = [distance_min, distance_max]
if edge_kind == 'mass.percent':
        # dealing with cumulative value - stable enough to decrease bin width
distance_bin_width *= 0.1
center_position = parse_property(part, 'center_position', center_position)
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[species_name]['position'].shape[0])
distance_vectors = ut.coordinate.get_distances(
part[species_name]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor']) # [kpc physical]
distances = np.sqrt(np.sum(distance_vectors ** 2, 1)) # 3-D distance
masses = part[species_name].prop('mass', part_indices)
if axis_kind:
# radius along 2-D major axes (projected radius) or along 1-D minor axis (height)
assert axis_kind in ['major', 'minor', 'both']
if rotation_tensor is None or not len(rotation_tensor):
if (len(part[species_name].host_rotation_tensors) and
len(part[species_name].host_rotation_tensors[0])):
# use only the primary host
rotation_tensor = part[species_name].host_rotation_tensors[0]
else:
masks = (distances < rotation_distance_max)
rotation_tensor = ut.coordinate.get_principal_axes(
distance_vectors[masks], masses[masks])[0]
distance_vectors = ut.coordinate.get_coordinates_rotated(
distance_vectors, rotation_tensor=rotation_tensor)
distances_cyl = ut.coordinate.get_positions_in_coordinate_system(
distance_vectors, 'cartesian', 'cylindrical')
major_distances, minor_distances = distances_cyl[:, 0], distances_cyl[:, 1]
minor_distances = np.abs(minor_distances) # need only absolute distances
if axis_kind in ['major', 'minor']:
if axis_kind == 'minor':
dimension_number = 1
distances = minor_distances
other_distances = major_distances
elif axis_kind == 'major':
dimension_number = 2
distances = major_distances
other_distances = minor_distances
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) > 0 or max(other_axis_distance_limits) < Inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
masses = masses[masks]
else:
# spherical average
dimension_number = 3
gal_prop = {}
if axis_kind == 'both':
# first get 3-D radius
galaxy_radius_3d, _galaxy_mass_3d, indices = get_radius_mass_indices(
masses, distances, distance_scaling, distance_limits, distance_bin_width, 3,
edge_kind, edge_value)
galaxy_radius_major = galaxy_radius_3d
axes_mass_dif = 1
# then iterate to get both major and minor axes
while axes_mass_dif > 0.005:
# get 1-D radius along minor axis
masks = (major_distances < galaxy_radius_major)
galaxy_radius_minor, galaxy_mass_minor, indices = get_radius_mass_indices(
masses[masks], minor_distances[masks], distance_scaling, distance_limits,
distance_bin_width, 1, edge_kind, edge_value)
# get 2-D radius along major axes
masks = (minor_distances < galaxy_radius_minor)
galaxy_radius_major, galaxy_mass_major, indices = get_radius_mass_indices(
masses[masks], major_distances[masks], distance_scaling, distance_limits,
distance_bin_width, 2, edge_kind, edge_value)
axes_mass_dif = (abs(galaxy_mass_major - galaxy_mass_minor) /
(0.5 * (galaxy_mass_major + galaxy_mass_minor)))
indices = (major_distances < galaxy_radius_major) * (minor_distances < galaxy_radius_minor)
gal_prop['radius.major'] = galaxy_radius_major
gal_prop['radius.minor'] = galaxy_radius_minor
gal_prop['mass'] = galaxy_mass_major
gal_prop['log mass'] = np.log10(galaxy_mass_major)
gal_prop['rotation.tensor'] = rotation_tensor
gal_prop['indices'] = part_indices[indices]
if print_results:
Say.say('R_{:.0f} along major, minor axes = {:.2f}, {:.2f} kpc physical'.format(
edge_value, galaxy_radius_major, galaxy_radius_minor))
else:
galaxy_radius, galaxy_mass, indices = get_radius_mass_indices(
masses, distances, distance_scaling, distance_limits, distance_bin_width,
dimension_number, edge_kind, edge_value)
gal_prop['radius'] = galaxy_radius
gal_prop['mass'] = galaxy_mass
gal_prop['log mass'] = np.log10(galaxy_mass)
gal_prop['indices'] = part_indices[indices]
if print_results:
Say.say('R_{:.0f} = {:.2f} kpc physical'.format(edge_value, galaxy_radius))
if print_results:
Say.say('M_star = {:.2e} M_sun, log = {:.2f}'.format(
gal_prop['mass'], gal_prop['log mass']))
return gal_prop
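# Usage sketch (illustrative, not part of the original module): R_90 of the
# stellar galaxy, both the 2-D radius along the major axes and the 1-D radius
# along the minor axis, iterated jointly via axis_kind='both'.
#     gal = get_galaxy_properties(
#         part, 'star', edge_kind='mass.percent', edge_value=90, axis_kind='both')
#     gal['radius.major'], gal['radius.minor'], gal['mass']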
#===================================================================================================
# profiles of properties
#===================================================================================================
class SpeciesProfileClass(ut.binning.DistanceBinClass):
'''
    Get profiles of either histogram/sum or statistics (such as average, median) of given
    property for given particle species.
__init__ is defined via ut.binning.DistanceBinClass
'''
def get_profiles(
self, part, species=['all'],
property_name='', property_statistic='sum', weight_by_mass=False,
center_position=None, center_velocity=None, rotation=None,
other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
        Parse inputs into either get_sum_profiles() or get_statistics_profiles().
        If you know which you want, you can skip this and call those functions directly.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
property_statistic : str : statistic to get profile of:
'sum', 'sum.cum', 'density', 'density.cum', 'vel.circ'
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if ('sum' in property_statistic or 'vel.circ' in property_statistic or
'density' in property_statistic):
pros = self.get_sum_profiles(
part, species, property_name, center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
else:
pros = self.get_statistics_profiles(
part, species, property_name, weight_by_mass, center_position, center_velocity,
rotation, other_axis_distance_limits, property_select, part_indicess)
for k in pros:
if '.cum' in property_statistic or 'vel.circ' in property_statistic:
pros[k]['distance'] = pros[k]['distance.cum']
pros[k]['log distance'] = pros[k]['log distance.cum']
else:
pros[k]['distance'] = pros[k]['distance.mid']
pros[k]['log distance'] = pros[k]['log distance.mid']
return pros
def get_sum_profiles(
self, part, species=['all'], property_name='mass', center_position=None,
rotation=None, other_axis_distance_limits=None, property_select={}, part_indicess=None):
'''
Get profiles of summed quantity (such as mass or density) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : property to get sum of
center_position : list : center position
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array (species number x particle number) :
indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
if 'gas' in species and 'consume.time' in property_name:
pros_mass = self.get_sum_profiles(
part, species, 'mass', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros_sfr = self.get_sum_profiles(
part, species, 'sfr', center_position, rotation, other_axis_distance_limits,
property_select, part_indicess)
pros = pros_sfr
for k in pros_sfr['gas']:
if 'distance' not in k:
pros['gas'][k] = pros_mass['gas'][k] / pros_sfr['gas'][k] / 1e9
return pros
pros = {}
Fraction = ut.math.FractionClass()
if np.isscalar(species):
species = [species]
if species == ['baryon']:
# treat this case specially for baryon fraction
species = ['gas', 'star', 'dark', 'dark2']
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using scalar distance
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], total_distance=True) # [kpc physical]
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor,
coordinate_system='cylindrical')
                # ensure all distances are non-negative
distancess = np.abs(distancess)
if self.dimension_number == 1:
# compute profile along minor axis (Z)
distances = distancess[:, 1]
other_distances = distancess[:, 0]
elif self.dimension_number == 2:
# compute profile along major axes (R)
distances = distancess[:, 0]
other_distances = distancess[:, 1]
if (other_axis_distance_limits is not None and
(min(other_axis_distance_limits) > 0 or
                         max(other_axis_distance_limits) < np.inf)):
masks = ((other_distances >= min(other_axis_distance_limits)) *
(other_distances < max(other_axis_distance_limits)))
distances = distances[masks]
prop_values = prop_values[masks]
pros[spec] = self.get_sum_profile(distances, prop_values) # defined in DistanceBinClass
props = [pro_prop for pro_prop in pros[species[0]] if 'distance' not in pro_prop]
props_dist = [pro_prop for pro_prop in pros[species[0]] if 'distance' in pro_prop]
if property_name == 'mass':
# create dictionary for baryonic mass
if 'star' in species or 'gas' in species:
spec_new = 'baryon'
pros[spec_new] = {}
for spec in np.intersect1d(species, ['star', 'gas']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
if len(species) > 1:
# create dictionary for total mass
spec_new = 'total'
pros[spec_new] = {}
for spec in np.setdiff1d(species, ['baryon', 'total']):
for pro_prop in props:
if pro_prop not in pros[spec_new]:
pros[spec_new][pro_prop] = np.array(pros[spec][pro_prop])
elif 'log' in pro_prop:
pros[spec_new][pro_prop] = ut.math.get_log(
10 ** pros[spec_new][pro_prop] +
10 ** pros[spec][pro_prop])
else:
pros[spec_new][pro_prop] += pros[spec][pro_prop]
for pro_prop in props_dist:
pros[spec_new][pro_prop] = pros[species[0]][pro_prop]
species.append(spec_new)
# create mass fraction wrt total mass
for spec in np.setdiff1d(species, ['total']):
for pro_prop in ['sum', 'sum.cum']:
pros[spec][pro_prop + '.fraction'] = Fraction.get_fraction(
pros[spec][pro_prop], pros['total'][pro_prop])
if spec == 'baryon':
# units of cosmic baryon fraction
pros[spec][pro_prop + '.fraction'] /= (
part.Cosmology['omega_baryon'] / part.Cosmology['omega_matter'])
# create circular velocity = sqrt (G m(< r) / r)
for spec in species:
pros[spec]['vel.circ'] = halo_property.get_circular_velocity(
pros[spec]['sum.cum'], pros[spec]['distance.cum'])
return pros
def get_statistics_profiles(
self, part, species=['all'], property_name='', weight_by_mass=True,
center_position=None, center_velocity=None, rotation=None, other_axis_distance_limits=None,
property_select={}, part_indicess=None):
'''
Get profiles of statistics (such as median, average) for given property for each
particle species.
Parameters
----------
part : dict : catalog of particles
species : str or list : name[s] of particle species to compute mass from
property_name : str : name of property to get statistics of
weight_by_mass : bool : whether to weight property by species mass
center_position : array : position of center
center_velocity : array : velocity of center
rotation : bool or array : whether to rotate particles - two options:
(a) if input array of eigen-vectors, will define rotation axes
(b) if True, will rotate to align with principal axes stored in species dictionary
other_axis_distance_limits : float :
min and max distances along other axis[s] to keep particles [kpc physical]
property_select : dict : (other) properties to select on: names as keys and limits as values
part_indicess : array or list : indices of particles from which to select
Returns
-------
pros : dict : dictionary of profiles for each particle species
'''
pros = {}
species = parse_species(part, species)
center_position = parse_property(part, 'center_position', center_position)
if 'velocity' in property_name:
center_velocity = parse_property(part, 'center_velocity', center_velocity)
part_indicess = parse_property(species, 'indices', part_indicess)
assert 0 < self.dimension_number <= 3
for spec_i, spec in enumerate(species):
prop_test = property_name
if 'velocity' in prop_test:
                prop_test = 'velocity'  # treat velocity specially because it is computed below
assert part[spec].prop(prop_test) is not None
part_indices = part_indicess[spec_i]
if part_indices is None or not len(part_indices):
part_indices = ut.array.get_arange(part[spec].prop(property_name))
if property_select:
part_indices = catalog.get_indices_catalog(
part[spec], property_select, part_indices)
masses = None
if weight_by_mass:
masses = part[spec].prop('mass', part_indices)
if 'velocity' in property_name:
distance_vectors = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor']) # [kpc physical]
velocity_vectors = ut.coordinate.get_velocity_differences(
part[spec]['velocity'][part_indices], center_velocity,
part[spec]['position'][part_indices], center_position, part.info['box.length'],
part.snapshot['scalefactor'], part.snapshot['time.hubble'])
# defined in DistanceBinClass
pro = self.get_velocity_profile(distance_vectors, velocity_vectors, masses)
pros[spec] = pro[property_name.replace('host.', '')]
for prop in pro:
if 'velocity' not in prop:
pros[spec][prop] = pro[prop]
else:
prop_values = part[spec].prop(property_name, part_indices)
if self.dimension_number == 3:
# simple case: profile using total distance [kpc physical]
distances = ut.coordinate.get_distances(
part[spec]['position'][part_indices], center_position,
part.info['box.length'], part.snapshot['scalefactor'], total_distance=True)
elif self.dimension_number in [1, 2]:
# other cases: profile along R (2 major axes) or Z (minor axis)
if rotation is not None and not isinstance(rotation, bool) and len(rotation):
rotation_tensor = rotation
elif (len(part[spec].host_rotation_tensors) and
len(part[spec].host_rotation_tensors[0])):
rotation_tensor = part[spec].host_rotation_tensors[0]
else:
raise ValueError('want 2-D or 1-D profile but no means to define rotation')
distancess = get_distances_wrt_center(
part, spec, part_indices, center_position, rotation_tensor, 'cylindrical')
                distancess = np.abs(distancess)
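# Example (illustrative sketch, not from this module): typical call pattern for
# the profile methods above. `SpeciesProfileClass` is an assumed name for the
# enclosing class, and `part` is a particle catalog read in elsewhere.
#   >>> profile = SpeciesProfileClass('log', [0.1, 300], width=0.1)
#   >>> pros = profile.get_profiles(part, species=['star'], property_name='mass',
#   ...                             property_statistic='density')
#   >>> pros['star']['density']  # mass density profile in each distance bin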
import pickle
import numpy as np
import random
class Cifar():
def __init__(self,filename):
self.filename = filename
splitfile = filename.split("/")
metaname = "/batches.meta"
metalabel = b'label_names'
if splitfile[-1] == "cifar-100-python":
metaname = "/meta"
metalabel = b'fine_label_names'
self.metaname = metaname
self.metalabel = metalabel
self.image_size = 32
self.img_channels = 3
    # Parse a pickled CIFAR batch file
def unpickle(self,filename):
with open(filename,"rb") as fo:
dict = pickle.load(fo,encoding='bytes')
return dict
    # Load a single data batch
def load_data_one(self,file):
batch = self.unpickle(file)
data = batch[b'data']
labelname = b'labels'
        if self.metaname == "/meta":
labelname = b'fine_labels'
label = batch[labelname]
print("Loading %s : %d." % (file, len(data)))
return data, label
    # Load all batches and one-hot encode the labels
def load_data(self,files,data_dir, label_count):
data, labels = self.load_data_one(data_dir + "/" + files[0])
for f in files[1:]:
data_n, labels_n = self.load_data_one(data_dir + '/' + f)
data = np.append(data, data_n, axis=0)
labels = np.append(labels, labels_n, axis=0)
labels = np.array([[float(i == label) for i in range(label_count)] for label in labels])
data = data.reshape([-1, self.img_channels, self.image_size, self.image_size])
        data = data.transpose([0, 2, 3, 1])  # transpose images to NHWC, matching the convolution layers
return data, labels
    # Load and prepare the full dataset
def prepare_data(self):
print("======Loading data======")
# data_dir = '../Cifar_10/cifar-100-python'
# image_dim = self.image_size * self.image_size * self.img_channels
meta = self.unpickle(self.filename + self.metaname)
label_names = meta[self.metalabel]
label_count = len(label_names)
        if self.metaname == "/batches.meta":
train_files = ['data_batch_%d' % d for d in range(1, 6)]
train_data, train_labels = self.load_data(train_files, self.filename, label_count)
test_data, test_labels = self.load_data(['test_batch'], self.filename, label_count)
else:
train_data, train_labels = self.load_data(['train'],self.filename, label_count)
test_data, test_labels = self.load_data(['test'] , self.filename, label_count)
print("Train data:", np.shape(train_data), | np.shape(train_labels) | numpy.shape |
"""
Unit tests for mir_eval.chord
"""
import mir_eval
import numpy as np
import nose.tools
import warnings
import glob
import json
A_TOL = 1e-12
# Path to the fixture files
REF_GLOB = 'data/chord/ref*.lab'
EST_GLOB = 'data/chord/est*.lab'
SCORES_GLOB = 'data/chord/output*.json'
def __check_valid(function, parameters, result):
''' Helper function for checking the output of a function '''
assert function(*parameters) == result
def __check_exception(function, parameters, exception):
''' Makes sure the provided function throws the provided
exception given the provided input '''
nose.tools.assert_raises(exception, function, *parameters)
def test_pitch_class_to_semitone():
valid_classes = ['Gbb', 'G', 'G#', 'Cb', 'B#']
valid_semitones = [5, 7, 8, 11, 0]
for pitch_class, semitone in zip(valid_classes, valid_semitones):
yield (__check_valid, mir_eval.chord.pitch_class_to_semitone,
(pitch_class,), semitone)
invalid_classes = ['Cab', '#C', 'bG']
for pitch_class in invalid_classes:
yield (__check_exception, mir_eval.chord.pitch_class_to_semitone,
(pitch_class,), mir_eval.chord.InvalidChordException)
def test_scale_degree_to_semitone():
valid_degrees = ['b7', '#3', '1', 'b1', '#7', 'bb5', '11', '#13']
valid_semitones = [10, 5, 0, -1, 12, 5, 17, 22]
for scale_degree, semitone in zip(valid_degrees, valid_semitones):
yield (__check_valid, mir_eval.chord.scale_degree_to_semitone,
(scale_degree,), semitone)
invalid_degrees = ['7b', '4#', '77', '15']
for scale_degree in invalid_degrees:
yield (__check_exception, mir_eval.chord.scale_degree_to_semitone,
(scale_degree,), mir_eval.chord.InvalidChordException)
def test_scale_degree_to_bitmap():
def __check_bitmaps(function, parameters, result):
actual = function(*parameters)
assert np.all(actual == result), (actual, result)
valid_degrees = ['3', '*3', 'b1', '9']
valid_bitmaps = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
for scale_degree, bitmap in zip(valid_degrees, valid_bitmaps):
yield (__check_bitmaps, mir_eval.chord.scale_degree_to_bitmap,
(scale_degree, True, 12), np.array(bitmap))
yield (__check_bitmaps, mir_eval.chord.scale_degree_to_bitmap,
('9', False, 12), np.array([0] * 12))
yield (__check_bitmaps, mir_eval.chord.scale_degree_to_bitmap,
('9', False, 15),
np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
def test_validate_chord_label():
valid_labels = ['C', 'Eb:min/5', 'A#:dim7', 'B:maj(*1,*5)/3',
'A#:sus4', 'A:(9,11)']
# For valid labels, calling the function without an error = pass
for chord_label in valid_labels:
yield (mir_eval.chord.validate_chord_label, chord_label)
invalid_labels = ["C::maj", "C//5", "C((4)", "C5))",
"C:maj(*3/3", "Cmaj*3/3)", 'asdf']
for chord_label in invalid_labels:
yield (__check_exception, mir_eval.chord.validate_chord_label,
(chord_label,), mir_eval.chord.InvalidChordException)
def test_split():
labels = ['C', 'B:maj(*1,*3)/5', 'Ab:min/b3', 'N', 'G:(3)']
splits = [['C', 'maj', set(), '1'],
['B', 'maj', set(['*1', '*3']), '5'],
['Ab', 'min', set(), 'b3'],
['N', '', set(), ''],
['G', '', set(['3']), '1']]
for chord_label, split_chord in zip(labels, splits):
yield (__check_valid, mir_eval.chord.split,
(chord_label,), split_chord)
# Test with reducing extended chords
labels = ['C', 'C:minmaj7']
splits = [['C', 'maj', set(), '1'],
['C', 'min', set(['7']), '1']]
for chord_label, split_chord in zip(labels, splits):
yield (__check_valid, mir_eval.chord.split,
(chord_label, True), split_chord)
# Test that an exception is raised when a chord with an omission but no
# quality is supplied
yield (__check_exception, mir_eval.chord.split,
('C(*5)',), mir_eval.chord.InvalidChordException)
def test_join():
# Arguments are root, quality, extensions, bass
splits = [('F#', '', None, ''),
('F#', 'hdim7', None, ''),
('F#', '', ['*b3', '4'], ''),
('F#', '', None, 'b7'),
('F#', '', ['*b3', '4'], 'b7'),
('F#', 'hdim7', None, 'b7'),
('F#', 'hdim7', ['*b3', '4'], 'b7')]
labels = ['F#', 'F#:hdim7', 'F#:(*b3,4)', 'F#/b7',
'F#:(*b3,4)/b7', 'F#:hdim7/b7', 'F#:hdim7(*b3,4)/b7']
for split_chord, chord_label in zip(splits, labels):
yield (__check_valid, mir_eval.chord.join,
split_chord, chord_label)
def test_rotate_bitmaps_to_roots():
def __check_bitmaps(bitmaps, roots, expected_bitmaps):
''' Helper function for checking bitmaps_to_roots '''
ans = mir_eval.chord.rotate_bitmaps_to_roots(bitmaps, roots)
assert np.all(ans == expected_bitmaps)
bitmaps = [
[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0]]
roots = [0, 5, 11]
expected_bitmaps = [
[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1]]
# The function can operate on many bitmaps/roots at a time
# but we should only test them one at a time.
for bitmap, root, expected_bitmap in zip(bitmaps, roots, expected_bitmaps):
yield (__check_bitmaps, [bitmap], [root], [expected_bitmap])
def test_encode():
def __check_encode(label, expected_root, expected_intervals,
expected_bass, reduce_extended_chords,
strict_bass_intervals):
''' Helper function for checking encode '''
root, intervals, bass = mir_eval.chord.encode(
label, reduce_extended_chords=reduce_extended_chords,
strict_bass_intervals=strict_bass_intervals)
assert root == expected_root, (root, expected_root)
assert np.all(intervals == expected_intervals), (intervals,
expected_intervals)
assert bass == expected_bass, (bass, expected_bass)
labels = ['B:maj(*1,*3)/5', 'G:dim', 'C:(3)/3', 'A:9/b3']
expected_roots = [11, 7, 0, 9]
expected_intervals = [[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
# Note that extended scale degrees are dropped.
[1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]]
expected_bass = [7, 0, 4, 3]
args = zip(labels, expected_roots, expected_intervals, expected_bass)
for label, e_root, e_interval, e_bass in args:
yield (__check_encode, label, e_root, e_interval, e_bass, False, False)
# Non-chord bass notes *must* be explicitly named as extensions when
# strict_bass_intervals == True
yield (__check_exception, mir_eval.chord.encode,
('G:dim(4)/6', False, True), mir_eval.chord.InvalidChordException)
# Otherwise, we can cut a little slack.
yield (__check_encode, 'G:dim(4)/6', 7,
[1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0], 9,
False, False)
# Check that extended scale degrees are mapped back into pitch classes.
yield (__check_encode, 'A:9', 9,
[1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0], 0,
True, False)
def test_encode_many():
def __check_encode_many(labels, expected_roots, expected_intervals,
expected_basses):
''' Does all of the logic for checking encode_many '''
roots, intervals, basses = mir_eval.chord.encode_many(labels)
assert np.all(roots == expected_roots)
assert np.all(intervals == expected_intervals)
assert np.all(basses == expected_basses)
labels = ['B:maj(*1,*3)/5',
'B:maj(*1,*3)/5',
'N',
'C:min',
'C:min']
expected_roots = [11, 11, -1, 0, 0]
expected_intervals = [
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0]]
expected_basses = [7, 7, -1, 0, 0]
yield (__check_encode_many, labels, expected_roots, expected_intervals,
expected_basses)
def __check_one_metric(metric, ref_label, est_label, score):
''' Checks that a metric function produces score given ref_label and
est_label '''
# We provide a dummy interval. We're just checking one pair
# of labels at a time.
assert metric([ref_label], [est_label]) == score
def __check_not_comparable(metric, ref_label, est_label):
''' Checks that ref_label is not comparable to est_label by metric '''
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# Try to produce the warning
score = mir_eval.chord.weighted_accuracy(metric([ref_label],
[est_label]),
np.array([1]))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == ("No reference chords were comparable "
"to estimated chords, returning 0.")
# And confirm that the metric is 0
assert np.allclose(score, 0)
# TODO(ejhumphrey): Comparison functions lacking unit tests.
# test_root()
def test_mirex():
ref_labels = ['N', 'C:maj', 'C:maj', 'C:maj', 'C:min', 'C:maj',
'C:maj', 'G:min', 'C:maj', 'C:min', 'C:min',
'C:maj', 'F:maj', 'C:maj7', 'A:maj', 'A:maj']
est_labels = ['N', 'N', 'C:aug', 'C:dim', 'C:dim', 'C:5',
'C:sus4', 'G:sus2', 'G:maj', 'C:hdim7', 'C:min7',
'C:maj6', 'F:min6', 'C:minmaj7', 'A:7', 'A:9']
scores = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.0,
1.0, 0.0, 1.0, 1.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.mirex,
ref_label, est_label, score)
ref_not_comparable = ['C:5', 'X']
est_not_comparable = ['C:maj', 'N']
for ref_label, est_label in zip(ref_not_comparable, est_not_comparable):
yield (__check_not_comparable, mir_eval.chord.mirex,
ref_label, est_label)
def test_thirds():
ref_labels = ['N', 'C:maj', 'C:maj', 'C:maj', 'C:min',
'C:maj', 'G:min', 'C:maj', 'C:min', 'C:min',
'C:maj', 'F:maj', 'C:maj', 'A:maj', 'A:maj']
est_labels = ['N', 'N', 'C:aug', 'C:dim', 'C:dim',
'C:sus4', 'G:sus2', 'G:maj', 'C:hdim7', 'C:min7',
'C:maj6', 'F:min6', 'C:minmaj7', 'A:7', 'A:9']
scores = [1.0, 0.0, 1.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0, 1.0,
1.0, 0.0, 0.0, 1.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.thirds,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.thirds, 'X', 'N')
def test_thirds_inv():
ref_labels = ['C:maj/5', 'G:min', 'C:maj', 'C:min/b3', 'C:min']
est_labels = ['C:sus4/5', 'G:min/b3', 'C:maj/5', 'C:hdim7/b3', 'C:dim']
scores = [1.0, 0.0, 0.0, 1.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.thirds_inv,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.thirds_inv, 'X', 'N')
def test_triads():
ref_labels = ['C:min', 'C:maj', 'C:maj', 'C:min', 'C:maj',
'C:maj', 'G:min', 'C:maj', 'C:min', 'C:min']
est_labels = ['C:min7', 'C:7', 'C:aug', 'C:dim', 'C:sus2',
'C:sus4', 'G:minmaj7', 'G:maj', 'C:hdim7', 'C:min6']
scores = [1.0, 1.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.triads,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.triads, 'X', 'N')
def test_triads_inv():
ref_labels = ['C:maj/5', 'G:min', 'C:maj', 'C:min/b3', 'C:min/b3']
est_labels = ['C:maj7/5', 'G:min7/5', 'C:7/5', 'C:min6/b3', 'C:dim/b3']
scores = [1.0, 0.0, 0.0, 1.0, 0.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.triads_inv,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.triads_inv, 'X', 'N')
def test_tetrads():
ref_labels = ['C:min', 'C:maj', 'C:7', 'C:maj7', 'C:sus2',
'C:7/3', 'G:min', 'C:maj', 'C:min', 'C:min']
est_labels = ['C:min7', 'C:maj6', 'C:9', 'C:maj7/5', 'C:sus2/2',
'C:11/b7', 'G:sus2', 'G:maj', 'C:hdim7', 'C:minmaj7']
scores = [0.0, 0.0, 1.0, 1.0, 1.0,
1.0, 0.0, 0.0, 0.0, 0.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.tetrads,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.tetrads, 'X', 'N')
def test_tetrads_inv():
ref_labels = ['C:maj7/5', 'G:min', 'C:7/5', 'C:min/b3', 'C:min9']
est_labels = ['C:maj7/3', 'G:min/b3', 'C:13/5', 'C:hdim7/b3', 'C:min7']
scores = [0.0, 0.0, 1.0, 0.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.tetrads_inv,
ref_label, est_label, score)
yield (__check_not_comparable, mir_eval.chord.tetrads_inv, 'X', 'N')
def test_majmin():
ref_labels = ['N', 'C:maj', 'C:maj', 'C:min', 'G:maj7']
est_labels = ['N', 'N', 'C:aug', 'C:dim', 'G']
scores = [1.0, 0.0, 0.0, 0.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.majmin,
ref_label, est_label, score)
ref_not_comparable = ['C:aug', 'X']
est_not_comparable = ['C:maj', 'N']
for ref_label, est_label in zip(ref_not_comparable, est_not_comparable):
yield (__check_not_comparable, mir_eval.chord.majmin,
ref_label, est_label)
def test_majmin_inv():
ref_labels = ['C:maj/5', 'G:min', 'C:maj/5', 'C:min7',
'G:min/b3', 'C:maj7/5', 'C:7']
est_labels = ['C:sus4/5', 'G:min/b3', 'C:maj/5', 'C:min',
'G:min/b3', 'C:maj/5', 'C:maj']
scores = [0.0, 0.0, 1.0, 1.0,
1.0, 1.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.majmin_inv,
ref_label, est_label, score)
ref_not_comparable = ['C:hdim7/b3', 'C:maj/4', 'C:maj/2', 'X']
est_not_comparable = ['C:min/b3', 'C:maj/4', 'C:sus2/2', 'N']
for ref_label, est_label in zip(ref_not_comparable, est_not_comparable):
yield (__check_not_comparable, mir_eval.chord.majmin_inv,
ref_label, est_label)
def test_sevenths():
ref_labels = ['C:min', 'C:maj', 'C:7', 'C:maj7',
'C:7/3', 'G:min', 'C:maj', 'C:7']
est_labels = ['C:min7', 'C:maj6', 'C:9', 'C:maj7/5',
'C:11/b7', 'G:sus2', 'G:maj', 'C:maj7']
scores = [0.0, 0.0, 1.0, 1.0,
1.0, 0.0, 0.0, 0.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.sevenths,
ref_label, est_label, score)
ref_not_comparable = ['C:sus2', 'C:hdim7', 'X']
est_not_comparable = ['C:sus2/2', 'C:hdim7', 'N']
for ref_label, est_label in zip(ref_not_comparable, est_not_comparable):
yield (__check_not_comparable, mir_eval.chord.sevenths,
ref_label, est_label)
def test_sevenths_inv():
ref_labels = ['C:maj7/5', 'G:min', 'C:7/5', 'C:min7/b7']
est_labels = ['C:maj7/3', 'G:min/b3', 'C:13/5', 'C:min7/b7']
scores = [0.0, 0.0, 1.0, 1.0]
for ref_label, est_label, score in zip(ref_labels, est_labels, scores):
yield (__check_one_metric, mir_eval.chord.sevenths_inv,
ref_label, est_label, score)
ref_not_comparable = ['C:dim7/b3', 'X']
est_not_comparable = ['C:dim7/b3', 'N']
for ref_label, est_label in zip(ref_not_comparable, est_not_comparable):
yield (__check_not_comparable, mir_eval.chord.sevenths_inv,
ref_label, est_label)
def test_directional_hamming_distance():
ref_ivs = np.array([[0., 1.], [1., 2.], [2., 3.]])
est_ivs = np.array([[0., 0.9], [0.9, 1.8], [1.8, 2.5]])
dhd_ref_to_est = (0.1 + 0.2 + 0.5) / 3.
dhd_est_to_ref = (0.0 + 0.1 + 0.2) / 2.5
dhd = mir_eval.chord.directional_hamming_distance
assert np.allclose(dhd_ref_to_est, dhd(ref_ivs, est_ivs))
assert np.allclose(dhd_est_to_ref, dhd(est_ivs, ref_ivs))
assert np.allclose(0, dhd(ref_ivs, ref_ivs))
assert np.allclose(0, dhd(est_ivs, est_ivs))
ivs_overlap_all = np.array([[0., 1.], [0.9, 2.]])
ivs_overlap_one = np.array([[0., 1.], [0.9, 2.], [2., 3.]])
nose.tools.assert_raises(ValueError, dhd, ivs_overlap_all, est_ivs)
nose.tools.assert_raises(ValueError, dhd, ivs_overlap_one, est_ivs)
def test_segmentation_functions():
ref_ivs = np.array([[0., 2.], [2., 2.5], [2.5, 3.2]])
est_ivs = np.array([[0., 3.], [3., 3.5]])
true_oseg = 1. - 0.2 / 3.2
true_useg = 1. - (1. + 0.2) / 3.5
true_seg = min(true_oseg, true_useg)
assert np.allclose(true_oseg, mir_eval.chord.overseg(ref_ivs, est_ivs))
assert np.allclose(true_useg, mir_eval.chord.underseg(ref_ivs, est_ivs))
assert np.allclose(true_seg, mir_eval.chord.seg(ref_ivs, est_ivs))
ref_ivs = np.array([[0., 2.], [2., 2.5], [2.5, 3.2]])
est_ivs = np.array([[0., 2.], [2., 2.5], [2.5, 3.2]])
true_oseg = 1.0
true_useg = 1.0
true_seg = 1.0
assert np.allclose(true_oseg, mir_eval.chord.overseg(ref_ivs, est_ivs))
assert np.allclose(true_useg, mir_eval.chord.underseg(ref_ivs, est_ivs))
assert np.allclose(true_seg, mir_eval.chord.seg(ref_ivs, est_ivs))
ref_ivs = np.array([[0., 2.], [2., 2.5], [2.5, 3.2]])
est_ivs = np.array([[0., 3.2]])
true_oseg = 1.0
true_useg = 1 - 1.2 / 3.2
true_seg = min(true_oseg, true_useg)
assert np.allclose(true_oseg, mir_eval.chord.overseg(ref_ivs, est_ivs))
assert np.allclose(true_useg, mir_eval.chord.underseg(ref_ivs, est_ivs))
assert np.allclose(true_seg, mir_eval.chord.seg(ref_ivs, est_ivs))
ref_ivs = np.array([[0., 2.], [2., 2.5], [2.5, 3.2]])
est_ivs = np.array([[3.2, 3.5]])
true_oseg = 1.0
true_useg = 1.0
true_seg = 1.0
assert np.allclose(true_oseg, mir_eval.chord.overseg(ref_ivs, est_ivs))
assert np.allclose(true_useg, mir_eval.chord.underseg(ref_ivs, est_ivs))
assert np.allclose(true_seg, mir_eval.chord.seg(ref_ivs, est_ivs))
def test_merge_chord_intervals():
intervals = np.array([[0., 1.], [1., 2.], [2., 3], [3., 4.], [4., 5.]])
labels = ['C:maj', 'C:(1,3,5)', 'A:maj', 'A:maj7', 'A:maj7/3']
assert np.allclose(np.array([[0., 2.], [2., 3], [3., 4.], [4., 5.]]),
mir_eval.chord.merge_chord_intervals(intervals, labels))
def test_weighted_accuracy():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
# First, test for a warning on empty beats
score = mir_eval.chord.weighted_accuracy(np.array([1, 0, 1]),
np.array([0, 0, 0]))
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert str(w[-1].message) == 'No nonzero weights, returning 0'
# And that the metric is 0
assert np.allclose(score, 0)
# len(comparisons) must equal len(weights)
comparisons = np.array([1, 0, 1])
weights = np.array([1, 1])
nose.tools.assert_raises(ValueError, mir_eval.chord.weighted_accuracy,
comparisons, weights)
# Weights must all be positive
    comparisons = np.array([1, 1])
#! /usr/bin/python3
# -*- coding: utf-8 -*-
"""
********
XST file
********
"""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2021, nenupy'
__credits__ = ['<NAME>']
__maintainer__ = 'Alan'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = [
"XST"
]
from abc import ABC
import os
from itertools import islice
from astropy.time import Time, TimeDelta
from astropy.coordinates import SkyCoord, AltAz, Angle
import astropy.units as u
from astropy.io import fits
from healpy.fitsfunc import write_map, read_map
from healpy.pixelfunc import mask_bad, nside2resol
import numpy as np
import json
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.colorbar import ColorbarBase
from matplotlib.ticker import LinearLocator
from matplotlib.colors import Normalize
from matplotlib.cm import get_cmap
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import dask.array as da
from dask.diagnostics import ProgressBar
import nenupy
from os.path import join, dirname
from nenupy.astro.target import FixedTarget, SolarSystemTarget
from nenupy.io.io_tools import StatisticsData
from nenupy.io.bst import BST_Slice
from nenupy.astro import wavelength, altaz_to_radec, l93_to_etrs, etrs_to_enu
from nenupy.astro.uvw import compute_uvw
from nenupy.astro.sky import HpxSky
from nenupy.astro.pointing import Pointing
from nenupy.instru import NenuFAR, MiniArray, read_cal_table, freq2sb, nenufar_miniarrays
from nenupy import nenufar_position, DummyCtMgr
import logging
log = logging.getLogger(__name__)
# ============================================================= #
# ------------------------- XST_Slice ------------------------- #
# ============================================================= #
class XST_Slice:
""" """
def __init__(self, mini_arrays, time, frequency, value):
self.mini_arrays = mini_arrays
self.time = time
self.frequency = frequency
self.value = value
# --------------------------------------------------------- #
# ------------------------ Methods ------------------------ #
    def plot_correlation_matrix(self, mask_autocorrelations: bool = False, **kwargs):
"""
"""
max_ma_index = self.mini_arrays.max() + 1
all_mas = np.arange(max_ma_index)
matrix = np.full([max_ma_index, max_ma_index], np.nan, "complex")
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
for ma in all_mas:
if ma not in self.mini_arrays:
ma1[ma1 >= ma] += 1
ma2[ma2 >= ma] += 1
        mask = np.ones(ma1.size, dtype=bool)
        if mask_autocorrelations:
            mask = ma1 != ma2  # keep only cross-correlations
matrix[ma2[mask], ma1[mask]] = np.mean(self.value, axis=(0, 1))[mask]
fig = plt.figure(figsize=kwargs.get("figsize", (10, 10)))
ax = fig.add_subplot(111)
ax.set_aspect("equal")
data = np.absolute(matrix)
if kwargs.get("decibel", True):
data = 10*np.log10(data)
im = ax.pcolormesh(
all_mas,
all_mas,
data,
shading="nearest",
cmap=kwargs.get("cmap", "YlGnBu"),
vmin=kwargs.get("vmin", np.nanmin(data)),
vmax=kwargs.get("vmax", np.nanmax(data))
)
ax.set_xticks(all_mas[::2])
ax.set_yticks(all_mas[::2])
ax.grid(alpha=0.5)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.3)
cbar = fig.colorbar(im, cax=cax)
cbar.set_label(kwargs.get("colorbar_label", "dB" if kwargs.get("decibel", True) else "Amp"))
        # Axis labels
        ax.set_xlabel("Mini-Array index")
        ax.set_ylabel("Mini-Array index")
# Title
ax.set_title(kwargs.get("title", ""))
# Save or show the figure
figname = kwargs.get("figname", "")
if figname != "":
plt.savefig(
figname,
dpi=300,
bbox_inches="tight",
transparent=True
)
log.info(f"Figure '{figname}' saved.")
else:
plt.show()
plt.close("all")
def rephase_visibilities(self, phase_center, uvw):
""" """
        # Compute the original (zenith) phase center
zenith = SkyCoord(
np.zeros(self.time.size),
np.ones(self.time.size)*90,
unit="deg",
frame=AltAz(
obstime=self.time,
location=nenufar_position
)
)
zenith_phase_center = altaz_to_radec(zenith)
# Define the rotation matrix
def rotation_matrix(skycoord):
"""
"""
ra_rad = skycoord.ra.rad
dec_rad = skycoord.dec.rad
if np.isscalar(ra_rad):
ra_rad = np.array([ra_rad])
dec_rad = np.array([dec_rad])
cos_ra = np.cos(ra_rad)
sin_ra = np.sin(ra_rad)
cos_dec = np.cos(dec_rad)
sin_dec = np.sin(dec_rad)
return np.array([
[cos_ra, -sin_ra, np.zeros(ra_rad.size)],
[-sin_ra*sin_dec, -cos_ra*sin_dec, cos_dec],
[sin_ra*cos_dec, cos_ra*cos_dec, sin_dec],
])
# Transformation matrices
to_origin = rotation_matrix(zenith_phase_center) # (3, 3, ntimes)
to_new_center = rotation_matrix(phase_center) # (3, 3, 1)
total_transformation = np.matmul(
np.transpose(
to_new_center,
(2, 0, 1)
),
to_origin
) # (3, 3, ntimes)
rotUVW = np.matmul(
np.expand_dims(
(to_origin[2, :] - to_new_center[2, :]).T,
axis=1
),
np.transpose(
to_origin,
(2, 1, 0)
)
) # (ntimes, 1, 3)
phase = np.matmul(
rotUVW,
np.transpose(uvw, (0, 2, 1))
) # (ntimes, 1, nvis)
rotate_visibilities = np.exp(
2.j*np.pi*phase/wavelength(self.frequency).to(u.m).value[None, :, None]
) # (ntimes, nfreqs, nvis)
new_uvw = np.matmul(
uvw, # (ntimes, nvis, 3)
np.transpose(total_transformation, (2, 0, 1))
)
return rotate_visibilities, new_uvw
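    # Example (sketch; `xst`, the pointing coordinates, and the Stokes choice
    # are assumptions):
    #   >>> data = xst.get_stokes("I")
    #   >>> uvw = compute_uvw(interferometer=NenuFAR()[data.mini_arrays],
    #   ...                   phase_center=None, time=data.time)
    #   >>> rot, new_uvw = data.rephase_visibilities(
    #   ...     phase_center=SkyCoord(83.63, 22.01, unit="deg"), uvw=uvw)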
def make_image(self,
resolution: u.Quantity = 1*u.deg,
fov_radius: u.Quantity = 25*u.deg,
phase_center: SkyCoord = None,
stokes: str = "I"
):
"""
:Example:
xst = XST("XST.fits")
data = xst.get_stokes("I")
sky = data.make_image(
resolution=0.5*u.deg,
fov_radius=27*u.deg,
phase_center=SkyCoord(277.382, 48.746, unit="deg")
)
sky[0, 0, 0].plot(
center=SkyCoord(277.382, 48.746, unit="deg"),
radius=24.5*u.deg
)
"""
exposure = self.time[-1] - self.time[0]
# Compute XST UVW coordinates (zenith phased)
uvw = compute_uvw(
interferometer=NenuFAR()[self.mini_arrays],
phase_center=None, # will be zenith
time=self.time,
)
# Prepare visibilities rephasing
rephase_matrix, uvw = self.rephase_visibilities(
phase_center=phase_center,
uvw=uvw
)
# Mask auto-correlations
ma1, ma2 = np.tril_indices(self.mini_arrays.size, 0)
cross_mask = ma1 != ma2
uvw = uvw[:, cross_mask, :]
# Transform to lambda units
wvl = wavelength(self.frequency).to(u.m).value
uvw = uvw[:, None, :, :]/wvl[None, :, None, None] # (t, f, bsl, 3)
# Mean in time
uvw = np.mean(uvw, axis=0)
# Prepare the sky
sky = HpxSky(
resolution=resolution,
time=self.time[0] + exposure/2,
frequency=np.mean(self.frequency),
            polarization=np.array([stokes])
from __future__ import division
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize, bisect
import sklearn.gaussian_process as gp
from sklearn.gaussian_process import GaussianProcessClassifier as GPC
from pyDOE import lhs
from gp import GPR
def normalize(y, return_mean_std=False):
y_mean = np.mean(y)
y_std = np.std(y)
y = (y-y_mean)/y_std
if return_mean_std:
return y, y_mean, y_std
return y
def inv_normalize(y, y_mean, y_std):
return y*y_std + y_mean
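# Example: normalize() standardizes to zero mean / unit std, and
# inv_normalize() undoes it exactly:
#   >>> y, m, s = normalize(np.array([1., 2., 3., 4.]), return_mean_std=True)
#   >>> np.allclose(inv_normalize(y, m, s), [1., 2., 3., 4.])
#   True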
def proba_of_improvement(samples, gp_model, f_best):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
PI = 1 - norm.cdf(f_best, loc=mu, scale=sigma)
return np.squeeze(PI)
def expected_improvement(samples, gp_model, f_best):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
with np.errstate(divide='ignore'):
Z = (mu - f_best)/sigma
EI = (mu - f_best) * norm.cdf(Z) + sigma * norm.pdf(Z)
EI[sigma==0.0] = 0.0
return np.squeeze(EI)
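# Example (sketch; the toy data and kernel are assumptions): fit a GP on three
# points and score candidate locations by expected improvement.
#   >>> X = np.array([[0.0], [0.5], [1.0]])
#   >>> model = gp.GaussianProcessRegressor(kernel=gp.kernels.RBF(1.0))
#   >>> model = model.fit(X, np.sin(X).ravel())
#   >>> ei = expected_improvement(np.array([0.25, 0.75]), model, np.sin(1.0))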
def upper_confidence_bound(samples, gp_model, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
UCB = mu + beta * sigma
return np.squeeze(UCB)
def lower_confidence_bound(samples, gp_model, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
LCB = mu - beta * sigma
return np.squeeze(LCB)
def regularized_ei_quadratic(samples, gp_model, f_best, center, w):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
epsilon = np.diag(np.matmul(np.matmul(samples-center, np.diag(w**(-2))), (samples-center).T)).reshape(-1,1)
f_tilde = f_best * (1. + np.sign(f_best)*epsilon)
with np.errstate(divide='ignore'):
Z = (mu - f_tilde)/sigma
EIQ = (mu - f_tilde) * norm.cdf(Z) + sigma * norm.pdf(Z)
EIQ[sigma==0.0] = 0.0
return np.squeeze(EIQ)
def regularized_ei_hinge_quadratic(samples, gp_model, f_best, center, R, beta):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
dists = np.linalg.norm(samples-center, axis=1, keepdims=True)
epsilon = (dists-R)/beta/R
epsilon[dists < R] = 0.0
f_tilde = f_best * (1 + np.sign(f_best)*epsilon)
with np.errstate(divide='ignore'):
Z = (mu - f_tilde)/sigma
EIQ = (mu - f_tilde) * norm.cdf(Z) + sigma * norm.pdf(Z)
EIQ[sigma==0.0] = 0.0
return np.squeeze(EIQ)
def var_constrained_pi(samples, gp_model, f_best, tau):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
PI = 1 - norm.cdf(f_best, loc=mu, scale=sigma)
PI[sigma > (tau*gp_model.kernel_.diag(samples).reshape(-1,1))**.5] = 0.0
return np.squeeze(PI)
def var_constrained_ei(samples, gp_model, f_best, tau):
samples = np.array(samples).reshape(-1, gp_model.X_train_.shape[1])
mu, sigma = gp_model.predict(samples, return_std=True)
mu = mu.reshape(-1,1)
sigma = sigma.reshape(-1,1)
with np.errstate(divide='ignore'):
Z = (mu - f_best)/sigma
EI = (mu - f_best) * norm.cdf(Z) + sigma * norm.pdf(Z)
EI[sigma==0.0] = 0.0
EI[sigma > (tau*gp_model.kernel_.diag(samples).reshape(-1,1))**.5] = 0.0
return np.squeeze(EI)
def compute_tau(f_best, gp_model, xi=0.01, kappa=0.1):
delta = 0.01
sigma_plus = (xi+delta)/norm.ppf(1-kappa)
_ = np.zeros((1, gp_model.X_train_.shape[1]))
    k0 = gp_model.kernel_(_, _).item()  # np.asscalar is deprecated/removed in recent NumPy
def func(x):
        mu_tau = 0.  # xi - np.sqrt(x*k0)*norm.ppf(1-kappa)
u_tau = (mu_tau-f_best)/np.sqrt(x*k0)
EI_tau = np.sqrt(x*k0) * (u_tau*norm.cdf(u_tau) + norm.pdf(u_tau))
u_plus = -delta/sigma_plus
EI_plus = sigma_plus * (u_plus*norm.cdf(u_plus) + norm.pdf(u_plus))
return EI_tau - EI_plus
# import matplotlib.pyplot as plt
# xx = np.linspace(0.1, 1., 100)
# plt.plot(xx, func(xx))
try:
tau = bisect(func, 0.01, 1.)
        tau = np.clip(tau, 0.0001, 0.99)
    except ValueError:
        # Assumed fallback (the original continuation is cut off here): if
        # bisection cannot bracket a root, keep the variance bound loose.
        tau = 0.99
    return tau
"""
Intermittency Graph by <NAME>.
Goal here is to emphasize the complexities of daily and seasonal intermittency
compared to baseload low carbon sources like nuclear.
Want two scenarios: nuclear and solar. For each, we'll want winter and summer versions.
So, 2x2 subplots? With text in each one saying how much power going around.
Both nuclear and solar will just use some form of energy storage to deal with
intermittency and/or load follow
"""
import os
import csv
from datetime import datetime
import copy
import numpy as np
import matplotlib
matplotlib.use('TkAgg')  # select the backend before importing pyplot
import matplotlib.pyplot as plt
from matplotlib import animation
from matplotlib.animation import ArtistAnimation
from matplotlib.animation import ImageMagickFileWriter
from matplotlib import collections
DFMT = "%m/%d/%Y %H:%M"
NUM_POINTS = 288 # b/c the data is messy
def read_data():
"""
Read CAISO data
Exported from web interface at https://www.caiso.com/TodaysOutlook/Pages/supply.html
We need:
* Summer demand
* Winter demand
* Summer solar (mostly shape important)
* Winter solar (shape and peak/avg important for setting mag)
* Nuclear generation (assume 91% cf)
"""
data = {}
data.update(_read_demand())
data.update(_read_solar_supply())
data.update(_read_nuclear_supply())
return data
def _read_demand():
"""Read summer and winter demand curves."""
data = {}
for label, fname in [
# ("Summer demand", "CAISO-demand-20200621.csv"),
("Summer demand", "CAISO-demand-20190621.csv"),
("Winter demand", "CAISO-demand-20191221.csv"),
]:
datetimes = []
print(f"opening {fname}")
with open(os.path.join("data", fname)) as f:
reader = csv.reader(f)
row = next(reader) # grab header
# process row full of times
date = row[0].split()[1]
times = row[1:NUM_POINTS]
# convert to datetime objects
# import pdb;pdb.set_trace()
datetimes = [datetime.strptime(f"{date} {time}", DFMT) for time in times]
for row in reader:
if row[0].startswith("Demand (5"):
# process demand data
print(f"Reading {label}")
mw = np.array([float(di) for di in row[1:NUM_POINTS]])
data[label] = datetimes, mw
else:
# lookahead estimate. throw it away
print(f"Skipping {row[0]}")
continue
return data
def _read_solar_supply():
"""Read how much solar comes in a day."""
data = {}
for label, fname in [
("Summer solar", "CAISO-renewables-20190621.csv"),
("Winter solar", "CAISO-renewables-20191221.csv"),
]:
datetimes = []
print(f"opening {fname}")
with open(os.path.join("data", fname)) as f:
reader = csv.reader(f)
row = next(reader) # grab header
# process row full of times
date = row[0].split()[1]
times = row[1:NUM_POINTS]
datetimes = [datetime.strptime(f"{date} {time}", DFMT) for time in times]
for row in reader:
if row[0].startswith("Solar"):
# process demand data
print(f"Reading {label}")
mw = np.array([float(di) for di in row[1:NUM_POINTS]])
data[label] = datetimes, mw
else:
# lookahead estimate. throw it away
print(f"Skipping {row[0]}")
continue
return data
def _read_nuclear_supply():
return {}
def _integrate_megawatts(mw):
"""Sum megawatts over a day and return GW*day. Assume 5 minute increments."""
mw = np.array(mw)
return sum(mw * 5 / 60 / 24) / 1000
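# Example: a constant 1000 MW held for one day (288 five-minute samples)
# integrates to 1 GW*day.
#   >>> _integrate_megawatts([1000.0] * 288)  # -> 1.0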
def plot_demand(data):
"""
Plot demand curves
I was surprised that winter 2019 was roughly the same demand integral as summer 2020.
Maybe covid? So I grabbed summer 2019 data file too. Still really close!!
Damn check out that huge baseload.
"""
fig, ax = plt.subplots()
for label in ["Summer demand", "Winter demand"]:
x, y = data[label]
integral = _integrate_megawatts(y)
x = [dt.time().hour + dt.time().minute / 60 for dt in x]
ax.plot(x, y / 1000, label=label + f" ({integral:.1f} GWd)")
ax.legend(loc="lower right")
ax.set_ylabel("Demand (GW)")
ax.set_xlabel("Time (hour of day)")
ax.set_title("Seasonal demand variation in California 2019")
ax.grid(alpha=0.3, ls="--")
ax.set_ylim(bottom=0)
ax.set_xlim([0, 24])
ax.set_xticks(np.arange(0, 25, 3.0))
ax.text(1, 0.1, "W: 2019-12-21\nS: 2019-06-21\nData: CAISO")
# plt.show()
plt.savefig("Seasonal-demand-variation.png")
def plot_solar_supply(data):
"""Yikes solar was really low on 2019-12-21. Must've been cloudy."""
fig, ax = plt.subplots()
for label in ["Summer solar", "Winter solar"]:
x, y = data[label]
integral = _integrate_megawatts(y)
x = [dt.time().hour + dt.time().minute / 60 for dt in x]
ax.plot(x, y / 1000, label=label + f" ({integral:.1f} GWd)")
ax.legend(loc="upper left")
ax.set_ylabel("Solar supply (GW)")
ax.set_xlabel("Time (hour of day)")
ax.set_title("Seasonal solar variation in California 2019")
ax.grid(alpha=0.3, ls="--")
ax.set_ylim(bottom=0)
ax.set_xlim([0, 24])
ax.set_xticks(np.arange(0, 25, 3.0))
ax.text(1, 0.1, "W: 2019-12-21\nS: 2019-06-21\nData: CAISO")
# plt.show()
plt.savefig("seasonal-solar-variation.png")
def plot_solar_scenario(data):
fig, ax = plt.subplots(figsize=(10, 8))
for season, color in [("Winter", "tab:cyan"), ("Summer", "tab:pink")]:
demand_dt, demand_mw = data[f"{season} demand"]
supply_dt, supply_mw = data[f"{season} solar"]
demand_t = [dt.time().hour + dt.time().minute / 60 for dt in demand_dt]
supply_t = [dt.time().hour + dt.time().minute / 60 for dt in supply_dt]
demand_integral = _integrate_megawatts(demand_mw)
supply_integral = _integrate_megawatts(supply_mw)
scaleup = demand_integral / supply_integral
ax.plot(
demand_t,
demand_mw / 1000,
"-.",
lw=2,
color=color,
label=f"{season} Demand ({demand_integral:.1f} GWd)",
)
ax.plot(
supply_t,
supply_mw / 1000,
"-",
lw=2,
color=color,
label=f"{season} Supply Current ({supply_integral:.1f} GWd)",
)
ax.plot(
supply_t,
scaleup * supply_mw / 1000,
":",
lw=2,
color=color,
label=f"{season} Supply Required ({supply_integral*scaleup:.1f} GWd)",
)
ax.legend(loc="upper left")
ax.set_ylabel("Power (GW)")
ax.set_xlabel("Time (hour of day)")
ax.set_title("Seasonal implications of 100% solar in California")
ax.grid(alpha=0.3, ls="--")
ax.set_ylim(bottom=0)
ax.set_xlim([0, 24])
ax.set_xticks(np.arange(0, 25, 3.0))
ax.text(1, 5, "W: 2019-12-21\nS: 2019-06-21\nData: CAISO")
# plt.show()
plt.savefig("solar-scenario.png")
import typing
class Data(typing.NamedTuple):
time: np.ndarray
vals: np.ndarray
integral: np.ndarray
label: str
color: str
hatch: str = None
opacity: float = 1.0
def process(data, season, nonelectric=False):
demand_dt, demand_mw = data[f"{season} demand"]
demand_integral = _integrate_megawatts(demand_mw)
demand_gw = demand_mw/1000
demand_t = np.array([dt.time().hour + dt.time().minute / 60 for dt in demand_dt])
demand = Data(demand_t, demand_gw, demand_integral, f"{season} demand", "tan")
# factor in other 60% that is not electric
# gratuitously reduce primary energy assuming electric efficiency
# by 60%
# Today 40% of energy is electric so 60% is non-electric. But if we electrify
# everything let's assume the 60% chunk is itself reduced to 60%, or 36% of
# the original total. Then the total integral itself is also reduced to 40%+36%
# of the original (76%). But since we're starting from electricity demand integral
# we still have to increase the total
others_integral = demand_integral/0.4*0.6*0.6
total_integral = demand_integral + others_integral
# flat line value in GW will equal integral in GWd since time is 1 day
others_gw = np.array([others_integral for dt in demand_dt])
others = Data(demand_t, others_gw, others_integral, "Transportation,Industry,Heating", "brown")
supply_dt, supply_mw = data[f"{season} solar"]
supply_t = np.array([dt.time().hour + dt.time().minute / 60 for dt in supply_dt])
supply_integral = _integrate_megawatts(supply_mw)
supply_gw = supply_mw/1000
supply = Data(
supply_t, supply_gw, supply_integral, f"{season} solar supply", "green"
)
if nonelectric:
factor = total_integral/supply_integral
else:
        factor = demand_integral / supply_integral
scaled_gw = supply_gw * factor
scaled = Data(
supply_t,
scaled_gw,
supply_integral*factor,
f"{season} required supply",
"green",
"",
0.1,
)
return demand, supply, scaled, others
def add_data(ax, data, x=12, y0=0.0):
line, = ax.plot(
data.time,
data.vals,
"-",
lw=2,
color=data.color,
label=f"{data.label} ({data.integral:.1f} GWd)",
)
fill = ax.fill_between(
data.time, data.vals, y2=y0, alpha=data.opacity, color=data.color, hatch=data.hatch
)
text = ax.text(
x,
data.vals.max()*0.6,
f"{data.label}:\n{data.integral:.1f} GWd",
horizontalalignment="center",
verticalalignment="center",
)
return line, fill, text
def plot_both_scenarios(data):
seasons = ["Summer", "Winter"]
#seasons = ["Winter"]
fig, axs = plt.subplots(
1, len(seasons), figsize=(4 * len(seasons), 4), squeeze=False, dpi=100
)
for season, ax in zip(seasons, axs[0]):
demand, supply, scaled, _others = process(data, season)
add_data(ax, demand)
add_data(ax, supply)
add_data(ax, scaled)
an_demand1 = ax.annotate(
"Area of supply\nmust equal area\nof demand",
xy=(8, 5),
xytext=(4, 50),
arrowprops=dict(arrowstyle="->",facecolor="black", relpos=(0.3,0.5)),
horizontalalignment="center",
verticalalignment="bottom",
)
an_demand2 = ax.annotate(
"Area of supply\nmust equal area\nof demand",
xy=(4, 15),
xytext=(4, 50),
arrowprops=dict(arrowstyle="->",facecolor="black", relpos=(0.3,0.5)),
horizontalalignment="center",
verticalalignment="bottom",
)
#an_demand3 = ax.annotate(
# "Area of supply must\nequal area of demand",
# xy=(10, 40),
# xytext=(1, 40),
# arrowprops=dict(facecolor="black", shrink=0.05),
# horizontalalignment="left",
# verticalalignment="bottom",
#)
if ax is axs[0][0]:
an_storage = ax.annotate(
"Area above demand\ncurve must be handled \nby energy storage\nsystems",
xy=(16, 60),
horizontalalignment="left",
verticalalignment="bottom",
)
peak_req = scaled.vals.max()
storage_bounds = ax.annotate(
"",
xy=(18, 24),
xytext=(18, peak_req),
arrowprops=dict(arrowstyle="<->"),
)
if ax is axs[0][1]:
# winter only
peak_req = scaled.vals.max()
an_cap_req = ax.annotate(
f"Total capacity required\n({peak_req:.1f}) GW",
xy=(14, peak_req),
xytext=(20, peak_req),
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
cpeak = supply.vals.max()
an_cap_now = ax.annotate(
f"Current winter capacity\n({cpeak:.1f}) GW",
xy=(14, cpeak),
xytext=(20, cpeak),
arrowprops=dict(facecolor="black", shrink=0.05),
horizontalalignment="center",
verticalalignment="center",
)
add_axes(ax)
ax.set_title(f"{season}")
#fig.suptitle("Seasonal implications of 100% solar in California", fontsize=14)
# ax.text(1, 5, "W: 2019-12-21\nS: 2019-06-21\nData: CAISO")
#plt.tight_layout()
plt.show()
#plt.savefig("solar-intermittency.png")
#from matplotlib.animation import FFMpegWriter
#writer = FFMpegWriter(fps=1, metadata=dict(artist="Me"), bitrate=1800)
#anim.save("movie.mp4", writer=writer)
# writer = ImageMagickFileWriter()
# anim.save("animation.avi", writer=writer)
def scene1_summer(season, data, showSupply=True):
fig, axs = plt.subplots(1, 1, squeeze=False, dpi=200)
ax = axs[0][0]
demand, supply, scaled, others = process(data, season)
add_data(ax, demand, 3.5)
if showSupply:
add_data(ax, supply, 13)
#add_data(ax, scaled)
#an_demand1 = ax.annotate(
# "Area of supply\nmust equal area\nof demand",
# xy=(8, 5),
# xytext=(4, 30),
# arrowprops=dict(arrowstyle="->",facecolor="black", relpos=(0.3,0.5)),
# horizontalalignment="center",
# verticalalignment="bottom",
#)
#an_demand2 = ax.annotate(
# "Area of supply\nmust equal area\nof demand",
# xy=(4, 15),
# xytext=(4, 30),
# arrowprops=dict(arrowstyle="->",facecolor="black", relpos=(0.3,0.5)),
# horizontalalignment="center",
# verticalalignment="bottom",
#)
ax.set_title(f"{season} electricity in California")
#fig.suptitle("Seasonal implications of 100% solar in California", fontsize=14)
# ax.text(1, 5, "W: 2019-12-21\nS: 2019-06-21\nData: CAISO")
ax.set_ylabel("Power (GW)")
ax.set_xlabel("Time (hour of day)")
ax.grid(alpha=0.3, ls="--")
ax.set_ylim(bottom=0)
ax.set_xlim([0, 24])
ax.set_ylim([0, 40])
    ax.set_xticks(np.arange(0, 25, 3.0))
#!/usr/bin/env python
__author__ = "<NAME>"
__email__ = "mncosta(at)isr(dot)tecnico(dot)ulisboa(dot)pt"
import numpy as np
from sklearn.linear_model import HuberRegressor
import math
from random import randint
import cvxopt as cvx
from RiskPerception.OpticalFlow import getWeightFromOFDistance, calcDistance
from RiskPerception.Objects import getOFWeightFromObjects
from RiskPerception.CONFIG import CVX_SUPRESS_PRINT,\
HUBER_LOSS_EPSILON,\
RANSAC_MINIMUM_DATAPOINTS,\
RANSAC_NUMBER_ITERATIONS, \
RANSAC_MINIMUM_RATIO_INLIERS,\
RANSAC_MINIMUM_ERROR_ANGLE,\
RANSAC_RATIO_INCREASE_ETA,\
ITERATIVE_OBJECT_WEIGHTS_ITERATIONS,\
MAXIMUM_INLIERS_ANGLE,\
EXPONENTIAL_DECAY_NBR_WEIGHTS,\
EXPONENTIAL_DECAY_INITIAL,\
EXPONENTIAL_DECAY_TAU
def l1_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l1-norm optimization problem."""
cvx.solvers.options['show_progress'] = not CVX_SUPRESS_PRINT
# Non-Weighted optimization:
if w_i is None:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(a_i)], [cvx.matrix(b_i)]])
q = cvx.matrix(c_i * -1)
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# Weighted optimization:
else:
# Problem must be formulated as sum |P*x - q|
P = cvx.matrix([[cvx.matrix(np.multiply(a_i, w_i))],
[cvx.matrix(np.multiply(b_i, w_i))]])
q = cvx.matrix(np.multiply(w_i, c_i * -1))
# Solve the l1-norm problem
u = cvx.l1.l1(P, q)
# Get results
x0, y0 = u[0], u[1]
# return resulting point
return (x0, y0)
def l2_norm_optimization(a_i, b_i, c_i, w_i=None):
"""Solve l2-norm optimization problem."""
# Non-Weighted optimization:
if w_i is None:
aux1 = -2 * ((np.sum(np.multiply(b_i, b_i))) * (
np.sum(np.multiply(a_i, a_i))) / float(
np.sum(np.multiply(a_i, b_i))) - (np.sum(np.multiply(a_i, b_i))))
aux2 = 2 * ((np.sum(np.multiply(b_i, b_i))) * (
np.sum(np.multiply(a_i, c_i))) / float(
np.sum(np.multiply(a_i, b_i))) - (np.sum(np.multiply(b_i, c_i))))
x0 = aux2 / float(aux1)
y0 = (-(np.sum(np.multiply(a_i, c_i))) - (
np.sum(np.multiply(a_i, a_i))) * x0) / float(
np.sum(np.multiply(a_i, b_i)))
# Weighted optimization:
else:
aux1 = -2 * ((np.sum(np.multiply(np.multiply(b_i, b_i), w_i))) * (
np.sum(np.multiply(np.multiply(a_i, a_i), w_i))) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))) - (
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))))
aux2 = 2 * ((np.sum(np.multiply(np.multiply(b_i, b_i), w_i))) * (
np.sum(np.multiply(np.multiply(a_i, c_i), w_i))) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i))) - (
np.sum(np.multiply(np.multiply(b_i, c_i), w_i))))
x0 = aux2 / float(aux1)
y0 = (-(np.sum(np.multiply(np.multiply(a_i, c_i), w_i))) - (
np.sum(np.multiply(np.multiply(a_i, a_i), w_i))) * x0) / float(
np.sum(np.multiply(np.multiply(a_i, b_i), w_i)))
# return resulting point
    return (x0, y0)
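# Example: the lines x + y - 2 = 0 and 2x - y - 1 = 0 intersect at (1, 1),
# which the closed-form solution recovers:
#   >>> l2_norm_optimization(np.array([1., 2.]), np.array([1., -1.]),
#   ...                      np.array([-2., -1.]))
#   (1.0, 1.0)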
def huber_loss_optimization(a_i, b_i, c_i, w_i=None):
"""Solve Huber loss optimization problem."""
for k in range(5):
try:
# Non-Weighted optimization:
if w_i is None:
huber = HuberRegressor(fit_intercept=True, alpha=0.0,
max_iter=100, epsilon=HUBER_LOSS_EPSILON)
X = -1 * np.concatenate(
(a_i.reshape(a_i.shape[0], 1),
b_i.reshape(b_i.shape[0], 1)), axis=1)
y = c_i
huber.fit(X, y)
# Get results
x0, y0 = huber.coef_ + np.array([0., 1.]) * huber.intercept_
# Weighted optimization:
else:
huber = HuberRegressor(fit_intercept=True, alpha=0.0,
max_iter=100, epsilon=HUBER_LOSS_EPSILON)
X = -1 * np.concatenate(
(a_i.reshape(a_i.shape[0], 1),
b_i.reshape(b_i.shape[0], 1)), axis=1)
y = c_i
sampleWeight = w_i
huber.fit(X, y, sample_weight=sampleWeight)
# Get results
x0, y0 = huber.coef_ + np.array([0., 1.]) * huber.intercept_
except ValueError:
pass
else:
# return resulting point
return x0, y0
else:
return None, None
def select_subset(OFVectors):
"""Select a subset of a given set."""
subset = np.array([]).reshape(0, 4)
for i in range(RANSAC_MINIMUM_DATAPOINTS):
idx = randint(0, (OFVectors.shape)[0] - 1)
subset = np.vstack((subset, np.array([OFVectors[idx]])))
return subset
def fit_model(subset):
"""Return a solution for a given subset of points."""
# Initialize some empty variables
a_i = np.array([])
b_i = np.array([])
c_i = np.array([])
    # Save the line coefficients of the form a*x + b*y + c = 0 to the variables
for i in range(subset.shape[0]):
a1, b1, c1, d1 = subset[i]
pt1 = (a1, b1)
# So we don't divide by zero
if (a1 - c1) == 0:
continue
a = float(b1 - d1) / float(a1 - c1)
b = -1
c = (b1) - a * a1
denominator = float(a ** 2 + 1)
a_i = np.append(a_i, a / denominator)
b_i = np.append(b_i, b / denominator)
c_i = np.append(c_i, c / denominator)
# Solve a optimization problem with Minimum Square distance as a metric
(x0, y0) = l2_norm_optimization(a_i, b_i, c_i)
# Return FOE
return (x0, y0)
def get_intersect_point(a1, b1, c1, d1, x0, y0):
"""Get the point on the lines that passes through (a1,b1) and (c1,d1) and s closest to the point (x0,y0)."""
a = 0
if (a1 - c1) != 0:
a = float(b1 - d1) / float(a1 - c1)
c = b1 - a * a1
    # Compute the line perpendicular to the OF vector's line that passes through (x0,y0)
a_aux = 0
if a != 0:
a_aux = -1 / a
c_aux = y0 - a_aux * x0
# Get intersection of the two lines
x1 = (c_aux - c) / (a - a_aux)
y1 = a_aux * x1 + c_aux
return (x1, y1)
def find_angle_between_lines(x0, y0, a1, b1, c1, d1):
"""Finds the angle between two lines."""
# Line 1 : line that passes through (x0,y0) and (a1,b1)
# Line 2 : line that passes through (c1,d1) and (a1,b1)
angle1 = 0
angle2 = 0
if (a1 - x0) != 0:
angle1 = float(b1 - y0) / float(a1 - x0)
if (a1 - c1) != 0:
angle2 = float(b1 - d1) / float(a1 - c1)
# Get angle in degrees
angle1 = math.degrees(math.atan(angle1))
angle2 = math.degrees(math.atan(angle2))
ang_diff = angle1 - angle2
# Find angle in the interval [0,180]
if math.fabs(ang_diff) > 180:
ang_diff = ang_diff - 180
# Return angle between the two lines
return ang_diff
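# Example: for a flow vector from (0, 0) to (1, 1) and a candidate FOE at
# (2, 0), the two lines make -45 deg and +45 deg, so the difference is -90:
#   >>> find_angle_between_lines(2, 0, 1, 1, 0, 0)
#   -90.0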
def find_inliers_outliers(x0, y0, OFVectors):
"""Find set of inliers and outliers of a given set of optical flow vectors and the estimated FOE."""
# Initialize some varaiables
inliers = np.array([])
nbr_inlier = 0
# Find inliers with the angle method
# For each vector
for i in range((OFVectors.shape)[0]):
a1, b1, c1, d1 = OFVectors[i]
# Find the angle between the line that passes through (x0,y0) and (a1,b1) and the line that passes through (c1,d1) and (a1,b1)
        ang_diff = find_angle_between_lines(x0, y0, a1, b1, c1, d1)
# If the angle is below a certain treshold consider it a inlier
if -RANSAC_MINIMUM_ERROR_ANGLE < ang_diff < RANSAC_MINIMUM_ERROR_ANGLE:
# Increment number of inliers and add save it
nbr_inlier += 1
inliers = np.append(inliers, i)
# Compute the ratio of inliers to overall number of optical flow vectors
ratioInliersOutliers = float(nbr_inlier) / (OFVectors.shape)[0]
# Return set of inliers and ratio of inliers to overall set
return inliers, ratioInliersOutliers
def RANSAC(OFVectors):
"""Estimate the FOE of a set of optical flow (OF) vectors using RANSAC."""
# Initialize some variables
savedRatio = 0
FOE = (0, 0)
inliersModel = np.array([])
# Repeat iterations for a number of times
for i in range(RANSAC_NUMBER_ITERATIONS):
        # Randomly select an initial subset of OF vectors
subset = select_subset(OFVectors)
# Estimate a FOE for the set of OF vectors
(x0, y0) = fit_model(subset)
# Find the inliers of the set for the estimated FOE
inliers, ratioInliersOutliers = find_inliers_outliers((x0, y0), OFVectors)
# If ratio of inliers is bigger than the previous iterations, save current solution
if savedRatio < ratioInliersOutliers:
savedRatio = ratioInliersOutliers
inliersModel = inliers
FOE = (x0, y0)
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# Return the estimated FOE, the found inliers ratio and the set of inliers
return FOE, savedRatio, inliersModel
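# Usage sketch for RANSAC (assumes the module-level RANSAC_* constants are
# configured and that l2_norm_optimization is defined earlier in this module):
# build synthetic OF vectors radiating away from a known FOE and try to
# recover it. RANSAC draws its subsets with randint, so results vary per run.
def _demo_RANSAC(true_foe=(320.0, 240.0), n_vectors=100):
    rng = np.random.RandomState(42)
    starts = rng.uniform(0.0, 640.0, size=(n_vectors, 2))
    ends = starts + 0.1 * (starts - np.array(true_foe))  # flow away from FOE
    OFVectors = np.hstack((starts, ends))  # rows of (a1, b1, c1, d1)
    FOE, ratio, inliers = RANSAC(OFVectors)
    return FOE, ratio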
def RANSAC_ImprovedModel(OFVectors):
"""Estimate the FOE of a set of optical flow (OF) vectors using a form of RANSAC method."""
# Initialize some variables
FOE = (0, 0)
savedRatio = 0
inliersModel = np.array([])
# Repeat iterations for a number of times
for i in range(RANSAC_NUMBER_ITERATIONS):
# Randomly select initial OF vectors
subset = select_subset(OFVectors)
# Estimate a FOE for the set of OF vectors
(x0, y0) = fit_model(subset)
# Find the inliers of the set for the estimated FOE
inliers, ratioInliersOutliers = find_inliers_outliers((x0, y0),
OFVectors)
        # Initialize some variables
        n_iter = 0
        ratioInliersOutliers_old = 0
        # While the ratio of inliers keeps increasing
        while ((inliers.shape)[0] != 0 and
               ratioInliersOutliers - ratioInliersOutliers_old >
               RANSAC_RATIO_INCREASE_ETA):
            # Cap the number of refinement iterations
            if n_iter > RANSAC_NUMBER_ITERATIONS:
                break
            n_iter += 1
            # Select a new set of OF vectors that are inliers to the estimated FOE
            for j in range((inliers.shape)[0]):
                subset = np.vstack(
                    (subset, np.array([OFVectors[int(inliers[j])]])))
            # Estimate a FOE for the new set of OF vectors
            (x0, y0) = fit_model(subset)
            # Save the previous iteration's ratio of inliers
            ratioInliersOutliers_old = ratioInliersOutliers
            # Find the inliers of the set for the estimated FOE
            inliers, ratioInliersOutliers = find_inliers_outliers((x0, y0),
                                                                  OFVectors)
# If ratio of inliers is bigger than the previous iterations, save current solution
if savedRatio < ratioInliersOutliers:
savedRatio = ratioInliersOutliers
inliersModel = inliers
FOE = (x0, y0)
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# If ratio is acceptable, stop iterating and return the found solution
if savedRatio > RANSAC_MINIMUM_RATIO_INLIERS and RANSAC_MINIMUM_RATIO_INLIERS != 0:
break
# Return the estimated FOE, the found inliers ratio and the set of inliers
return FOE, savedRatio, inliersModel
def vectorOFRightDirection(OFvect, FOE):
    """Return True if the OF vector points away from the FOE, False otherwise."""
# Get points of optical flow vector
a1, b1, c1, d1 = OFvect
# If left side of FOE
if a1 <= FOE[0]:
if c1 <= a1:
return False
# If right side of FOE
else:
if c1 >= a1:
return False
return True
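# Convention check (a sketch, assuming (a1, b1) is the vector's head and
# (c1, d1) its tail, as the comparisons above imply): left of the FOE, a
# vector points away only if its head moved further left than its tail.
def _demo_vectorOFRightDirection():
    foe = (100.0, 100.0)
    assert vectorOFRightDirection((40, 50, 60, 50), foe)      # head moved away
    assert not vectorOFRightDirection((60, 50, 40, 50), foe)  # head moved toward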
def improveOFObjectsWeights(OF, objects, framenbr, FOE, currResult,
                            dist_intervals=None, dist_avg_int=None,
                            dist_max_int=None):
    """Iteratively check and update the weights coming from static objects."""
    staticObjects = objects[objects[:, 0] == str(framenbr)].copy()
    staticObjects[:, 6] = 0
    a_i = np.array([])
    b_i = np.array([])
    c_i = np.array([])
    w_i = np.array([])
    (x0, y0) = currResult
    for i in range((OF.shape)[0]):
        a1, b1, c1, d1 = OF[i]
        # Skip vertical lines so we don't divide by zero
        if (a1 - c1) == 0:
            continue
        a = float(b1 - d1) / float(a1 - c1)
        b = -1
        c = b1 - a * a1
        lengthLine = math.sqrt((a1 - c1) ** 2 + (b1 - d1) ** 2)
        distToFOE = calcDistance((a1, b1), FOE)
        # Find the distance interval this vector falls into
        for j in range(dist_intervals.shape[0] - 1):
            if dist_intervals[j] < distToFOE < dist_intervals[j + 1]:
                break
        distance_weight = (
            getWeightFromOFDistance((lengthLine), (dist_avg_int[j]),
                                    (dist_max_int[j])))
        # Default to full weight; reset on every vector so a previous
        # iteration's value cannot leak into this one
        object_weight = 1
        if getOFWeightFromObjects(objects, (a1, b1), framenbr) != 0:
            for obj in staticObjects:
                if (float(obj[1]) <= float(a1) <= float(obj[3])) and (
                        float(obj[2]) <= float(b1) <= float(obj[4])):
                    if (-MAXIMUM_INLIERS_ANGLE <
                            find_angle_between_lines((x0, y0), (a1, b1, c1, d1)) <
                            MAXIMUM_INLIERS_ANGLE) and \
                            vectorOFRightDirection((a1, b1, c1, d1), FOE):
                        object_weight = 1
                        obj[6] = str(float(obj[6]) + 1)
                    else:
                        object_weight = 0
                        obj[6] = str(float(obj[6]) - 1)
        weight = distance_weight * object_weight
        denominator = float(a ** 2 + 1)
        a_i = np.append(a_i, a / denominator)
        b_i = np.append(b_i, b / denominator)
        c_i = np.append(c_i, c / denominator)
        w_i = np.append(w_i, [weight])
    return a_i, b_i, c_i, w_i, staticObjects
def iterative_improve_on_object_weights(optimization_method, OF, objects,
framenbr, FOE, curr_result,
dist_intervals, dist_avg_int,
dist_max_int):
for i in range(ITERATIVE_OBJECT_WEIGHTS_ITERATIONS):
a_i, b_i, c_i, w_i, staticObjects = \
improveOFObjectsWeights(OF,
objects,
framenbr,
FOE,
curr_result,
dist_intervals=dist_intervals,
dist_avg_int=dist_avg_int,
dist_max_int=dist_max_int)
        (x0, y0) = optimization_method(a_i, b_i, c_i, w_i)
        if x0 is None and y0 is None:
            return curr_result
        # Feed the refined estimate into the next iteration
        curr_result = (x0, y0)
    return curr_result
def negative_exponential_decay(x, initial=None, tau=None):
    """Return the value of the negative exponential f(x) = N0 * e^(-tau * x)."""
    if initial is None:
        initial = EXPONENTIAL_DECAY_INITIAL
    if tau is None:
        tau = EXPONENTIAL_DECAY_TAU
    return initial * math.exp(-1 * tau * x)
def generate_weights(nbr_weights):
"""Generate negative exponential weights."""
weights = np.array([])
for i in range(nbr_weights):
weights = np.append(weights, negative_exponential_decay(i))
return weights
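# Example (a sketch, assuming positive EXPONENTIAL_DECAY_INITIAL and
# EXPONENTIAL_DECAY_TAU): the weights decrease monotonically, so the newest
# point in the history always carries the largest weight.
def _demo_generate_weights():
    w = generate_weights(5)
    assert all(w[i] > w[i + 1] for i in range(len(w) - 1))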
def points_history(points, newPoint):
"""Refresh the points history. Delete the oldest point and add the new point."""
points = np.delete(points, (points.shape)[1] - 1, axis=1)
points = np.insert(points, 0, newPoint, axis=1)
return points
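# Usage sketch: the history is a 2 x N array whose columns are ordered
# newest-first; pushing a point drops the oldest column.
def _demo_points_history():
    pts = np.array([[1.0, 2.0, 3.0],
                    [1.0, 2.0, 3.0]])
    pts = points_history(pts, [9.0, 9.0])
    assert pts.shape == (2, 3)
    assert pts[0, 0] == 9.0 and pts[0, -1] == 2.0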
def initialize_points_history(width, height, nbr_points=None):
    """Initialize the point history memory."""
    if nbr_points is None:
        nbr_points = EXPONENTIAL_DECAY_NBR_WEIGHTS
    points = np.array([[], []])
    # Seed the history with the frame centre repeated nbr_points times
    # (assumption: the frame centre is a neutral starting point, since
    # points_history gradually replaces these columns with real estimates).
    for _ in range(nbr_points):
        points = np.insert(points, 0, [width / 2, height / 2], axis=1)
    return points
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from nose.tools import (assert_equal,
assert_almost_equal,
raises,
ok_,
eq_)
from rsmtool.preprocessor import (FeaturePreprocessor,
FeatureSubsetProcessor,
FeatureSpecsProcessor)
class TestFeaturePreprocessor:
def setUp(self):
self.fpp = FeaturePreprocessor()
def test_select_candidates_with_N_or_more_items(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'candidate': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_select_candidates_with_N_or_more_items_all_included(self):
data = pd.DataFrame({'candidate': ['a'] * 2 + ['b'] * 2 + ['c'] * 2,
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2)
assert_frame_equal(df_included, data)
assert_equal(len(df_excluded), 0)
def test_select_candidates_with_N_or_more_items_all_excluded(self):
data = pd.DataFrame({'candidate': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 4)
assert_frame_equal(df_excluded, data)
assert_equal(len(df_included), 0)
def test_select_candidates_with_N_or_more_items_custom_name(self):
data = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2 + ['c'],
'sc1': [2, 3, 1, 5, 6, 1]})
df_included_expected = pd.DataFrame({'ID': ['a'] * 3 + ['b'] * 2,
'sc1': [2, 3, 1, 5, 6]})
df_excluded_expected = pd.DataFrame({'ID': ['c'],
'sc1': [1]})
(df_included,
df_excluded) = FeaturePreprocessor.select_candidates(data, 2, 'ID')
assert_frame_equal(df_included, df_included_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_rename_no_columns(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'candidate', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2',
'length', 'raw', 'candidate')
assert_array_equal(df.columns,
['spkitemid', 'sc1', 'sc2', 'length', 'raw',
'candidate', 'feature1', 'feature2'])
def test_rename_no_columns_some_values_none(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'feature1', 'feature2'])
def test_rename_no_used_columns_but_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'spkitemid', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2',
'##length##', 'feature1', 'feature2'])
def test_rename_used_columns(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'SR', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', 'SR', None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'raw', 'feature1', 'feature2'])
def test_rename_used_columns_and_unused_columns_with_default_names(self):
df = pd.DataFrame(columns=['id', 'r1', 'r2', 'words', 'raw', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'r1', 'r2', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length',
'##raw##', 'feature1', 'feature2'])
def test_rename_used_columns_with_swapped_names(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'raw', 'words', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [], 'id', 'sc2', 'sc1', 'words', None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc2', 'sc1', '##raw##',
'length', 'feature1', 'feature2'])
def test_rename_used_columns_but_not_features(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'length', 'feature2'])
df = self.fpp.rename_default_columns(df, ['length'], 'id', 'sc1', 'sc2', None, None, None)
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', 'length', 'feature2'])
def test_rename_candidate_column(self):
df = pd.DataFrame(columns=['spkitemid', 'sc1', 'sc2', 'length',
'apptNo', 'feature1', 'feature2'])
df = self.fpp.rename_default_columns(df, [],
'spkitemid', 'sc1', 'sc2', None, None, 'apptNo')
assert_array_equal(df.columns, ['spkitemid', 'sc1', 'sc2', '##length##',
'candidate', 'feature1', 'feature2'])
def test_rename_candidate_named_sc2(self):
df = pd.DataFrame(columns=['id', 'sc1', 'sc2', 'question', 'l1', 'score'])
df_renamed = self.fpp.rename_default_columns(df, [],
'id', 'sc1', None, None, 'score', 'sc2')
assert_array_equal(df_renamed.columns, ['spkitemid', 'sc1',
'candidate', 'question', 'l1', 'raw'])
@raises(KeyError)
def test_check_subgroups_missing_columns(self):
df = pd.DataFrame(columns=['a', 'b', 'c'])
subgroups = ['a', 'd']
FeaturePreprocessor.check_subgroups(df, subgroups)
def test_check_subgroups_nothing_to_replace(self):
df = pd.DataFrame({'a': ['1', '2'],
'b': ['32', '34'],
'd': ['abc', 'def']})
subgroups = ['a', 'd']
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df)
def test_check_subgroups_replace_empty(self):
df = pd.DataFrame({'a': ['1', ''],
'b': [' ', '34'],
'd': ['ab c', ' ']})
subgroups = ['a', 'd']
df_expected = pd.DataFrame({'a': ['1', 'No info'],
'b': [' ', '34'],
'd': ['ab c', 'No info']})
df_out = FeaturePreprocessor.check_subgroups(df, subgroups)
assert_frame_equal(df_out, df_expected)
def test_filter_on_column(self):
bad_df = pd.DataFrame({'spkitemlab': np.arange(1, 9, dtype='int64'),
'sc1': ['00', 'TD', '02', '03'] * 2})
df_filtered_with_zeros = pd.DataFrame({'spkitemlab': [1, 3, 4, 5, 7, 8],
'sc1': [0.0, 2.0, 3.0] * 2})
df_filtered = pd.DataFrame({'spkitemlab': [3, 4, 7, 8], 'sc1': [2.0, 3.0] * 2})
(output_df_with_zeros,
output_excluded_df_with_zeros) = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=False)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
assert_frame_equal(output_df_with_zeros, df_filtered_with_zeros)
assert_frame_equal(output_df, df_filtered)
def test_filter_on_column_all_non_numeric(self):
bad_df = pd.DataFrame({'sc1': ['A', 'I', 'TD', 'TD'] * 2,
'spkitemlab': range(1, 9)})
expected_df_excluded = bad_df.copy()
expected_df_excluded.drop('sc1', axis=1, inplace=True)
df_filtered, df_excluded = self.fpp.filter_on_column(bad_df, 'sc1',
'spkitemlab',
exclude_zeros=True)
ok_(df_filtered.empty)
ok_("sc1" not in df_filtered.columns)
assert_frame_equal(df_excluded, expected_df_excluded, check_dtype=False)
def test_filter_on_column_std_epsilon_zero(self):
        # Test that the function excludes columns whose std is returned as a
        # very low value rather than exactly 0
data = {'id': np.arange(1, 21, dtype='int64'),
'feature_ok': np.arange(1, 21),
'feature_zero_sd': [1.5601] * 20}
bad_df = pd.DataFrame(data=data)
output_df, output_excluded_df = self.fpp.filter_on_column(bad_df,
'feature_zero_sd',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
good_df = bad_df[['id', 'feature_ok']].copy()
assert_frame_equal(output_df, good_df)
ok_(output_excluded_df.empty)
def test_filter_on_column_with_inf(self):
        # Test that the function excludes rows where the feature value is inf
data = pd.DataFrame({'feature_1': [1.5601, 0, 2.33, 11.32],
'feature_ok': np.arange(1, 5)})
data['feature_with_inf'] = 1 / data['feature_1']
data['id'] = np.arange(1, 5, dtype='int64')
bad_df = data[np.isinf(data['feature_with_inf'])].copy()
good_df = data[~np.isinf(data['feature_with_inf'])].copy()
bad_df.reset_index(drop=True, inplace=True)
good_df.reset_index(drop=True, inplace=True)
output_df, output_excluded_df = self.fpp.filter_on_column(data, 'feature_with_inf',
'id',
exclude_zeros=False,
exclude_zero_sd=True)
assert_frame_equal(output_df, good_df)
assert_frame_equal(output_excluded_df, bad_df)
def test_filter_on_flag_column_empty_flag_dictionary(self):
# no flags specified, keep the data frame as is
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 0, 0, 0],
'flag2': [1, 2, 2, 1]})
flag_dict = {}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.5, 1.1, 2.2, 3.6]})
flag_dict = {'flag1': [0.5, 1.1, 2.2, 3.6, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_and_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['a', 'b', 'c', 'd']})
flag_dict = {'flag1': ['a', 'b', 'c', 'd', 'e']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.0, 4.5]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['4', '1', '2', '3.5']})
flag_dict = {'flag1': [0.0, 1.0, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [4.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': ['1', '2', '3.5', '4', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.0']})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.0', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.5]})
flag_dict = {'flag1': ['0.0', '1.0', '2.0', '3.5', 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.0', 2, 3.0]})
flag_dict = {'flag1': [0, 1, 2, 3, 4]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, '1.5', 2, 3.5]})
flag_dict = {'flag1': [0.0, 1.5, 2.0, 3.5, 4.0]}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_int_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0, 1, 2, 3]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_float_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [0.0, 1.0, 2.0, 3.5]})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_str_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['0.0', '1.0', '2.0', '3.5']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_nothing_to_exclude_mixed_type_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 2, 3.5, 'TD']})
flag_dict = {'flag1': [0, 1, 2, 3.0, 3.5, 'TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df)
eq_(len(df_excluded), 0)
def test_filter_on_flag_column_mixed_type_column_mixed_type_dict_filter_preserve_type(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS']})
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS']})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_int_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': [1, 2, 3, 4, 5, 6],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 2, 2, 3, 4, None]}, dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': [2, 3, 5],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': [1, 4, 6],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_float_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.2, 2.1, 2.1, 3.3, 4.2, None]})
flag_dict = {'flag1': [2.1, 4.2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2.1, 2.1, 4.2]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.2, 3.3, None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_str_flag_column_str_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': ['a', 'b', 'b', 'c', 'd', None]})
flag_dict = {'flag1': ['b', 'd']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': ['b', 'b', 'd']})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': ['a', 'c', None]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_float_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2.0, 'TD', 2.0, None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2.0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2.0, 2.0]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_int_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1.5, 2, 2, 'TD', 4, None]},
dtype=object)
flag_dict = {'flag1': [2, 4]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [2, 2, 4]},
dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1.5, 'TD', None]},
dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_with_none_value_in_mixed_type_flag_column_mixed_type_dict(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', None]},
dtype=object)
flag_dict = {'flag1': [1.5, 2, 'TD']}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD']}, dtype=object)
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, None]}, dtype=object)
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_same_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [1, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD'], 'flag2': [0]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [1, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
def test_filter_on_flag_column_two_flags_different_responses(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd', 'e', 'f'],
'sc1': [1, 2, 1, 3, 4, 5],
'feature': [2, 3, 4, 5, 6, 2],
'flag1': [1, 1.5, 2, 3.5, 'TD', 'NS'],
'flag2': [2, 0, 0, 1, 0, 1]})
flag_dict = {'flag1': [1.5, 2, 'TD', 'NS'], 'flag2': [0, 2]}
df_new_expected = pd.DataFrame({'spkitemid': ['b', 'c', 'e'],
'sc1': [2, 1, 4],
'feature': [3, 4, 6],
'flag1': [1.5, 2, 'TD'],
'flag2': [0, 0, 0]})
df_excluded_expected = pd.DataFrame({'spkitemid': ['a', 'd', 'f'],
'sc1': [1, 3, 5],
'feature': [2, 5, 2],
'flag1': [1, 3.5, 'NS'],
'flag2': [2, 1, 1]})
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
assert_frame_equal(df_new, df_new_expected)
assert_frame_equal(df_excluded, df_excluded_expected)
@raises(KeyError)
def test_filter_on_flag_column_missing_columns(self):
df = pd.DataFrame({'spkitemid': ['a', 'b', 'c', 'd'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': ['1', '1', '1', '1'],
'flag2': ['1', '2', '2', '1']})
flag_dict = {'flag3': ['0'], 'flag2': ['1', '2']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(df, flag_dict)
@raises(ValueError)
def test_filter_on_flag_column_nothing_left(self):
bad_df = pd.DataFrame({'spkitemid': ['a1', 'b1', 'c1', 'd1'],
'sc1': [1, 2, 1, 3],
'feature': [2, 3, 4, 5],
'flag1': [1, 0, 20, 14],
'flag2': [1, 1.0, 'TD', '03']})
flag_dict = {'flag1': [1, 0, 14], 'flag2': ['TD']}
df_new, df_excluded = self.fpp.filter_on_flag_columns(bad_df, flag_dict)
def test_remove_outliers(self):
# we want to test that even if we pass in a list of
# integers, we still get the right clamped output
data = [1, 1, 2, 2, 1, 1] * 10 + [10]
ceiling = np.mean(data) + 4 * np.std(data)
clamped_data = FeaturePreprocessor.remove_outliers(data)
assert_almost_equal(clamped_data[-1], ceiling)
def test_generate_feature_names_subset(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
        feature_subset = 'A'
        feature_subset_specs = pd.DataFrame({'Feature': ['col_1', 'col_2', 'col_3'],
                                             'A': [1, 0, 0],
                                             'B': [1, 1, 1]})
        feat_names = self.fpp.generate_feature_names(df,
                                                     reserved_column_names,
                                                     feature_subset_specs,
                                                     feature_subset)
eq_(feat_names, expected)
def test_generate_feature_names_none(self):
reserved_column_names = ['reserved_col1', 'reserved_col2']
expected = ['col_1', 'col_2']
df = pd.DataFrame({'reserved_col1': ['X', 'Y', 'Z'],
'reserved_col2': ['Q', 'R', 'S'],
'col_1': [1, 2, 3],
'col_2': ['A', 'B', 'C']})
feat_names = self.fpp.generate_feature_names(df,
reserved_column_names,
feature_subset_specs=None,
feature_subset=None)
eq_(feat_names, expected)
def test_model_name_builtin_model(self):
model_name = 'LinearRegression'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'BUILTIN')
def test_model_name_skll_model(self):
model_name = 'AdaBoostRegressor'
model_type = self.fpp.check_model_name(model_name)
eq_(model_type, 'SKLL')
@raises(ValueError)
def test_model_name_wrong_name(self):
model_name = 'random_model'
self.fpp.check_model_name(model_name)
def test_trim(self):
values = np.array([1.4, 8.5, 7.4])
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_list(self):
values = [1.4, 8.5, 7.4]
expected = np.array([1.4, 8.4998, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8)
assert_array_equal(actual, expected)
def test_trim_with_custom_tolerance(self):
values = [0.6, 8.4, 7.4]
expected = np.array([0.75, 8.25, 7.4])
actual = FeaturePreprocessor.trim(values, 1, 8, 0.25)
assert_array_equal(actual, expected)
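    # Note on the two trim tests above: trim clamps values to the interval
    # [low - tolerance, high + tolerance]; the default tolerance appears to be
    # 0.4998, which is why the ceiling for high=8 is 8.4998.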
def test_preprocess_feature_fail(self):
np.random.seed(10)
values = np.random.random(size=1000)
        values = np.append(values, np.array([10000000]))
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
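# Note on the discretisation grid built in test_bilmey_tabular: discs_ pairs
# every petal-length interval with every petal-width interval, i.e.
# C(7, 2) * C(6, 2) = 21 * 15 = 315 candidate discretisations, which is what
# the disc_i component of the EXP_TAB keys below indexes into.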
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
           'name': 'golden retriever'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
           'name': 'golden retriever'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
           'name': 'Labrador retriever'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
           'name': 'tennis ball'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
           'name': 'golden retriever'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
           'name': 'Labrador retriever'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
           'name': 'tennis ball'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
           'name': 'Labrador retriever'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
           'name': 'tennis ball'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
           'name': 'golden retriever'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
           'name': 'Labrador retriever'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
           'name': 'tennis ball'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
           'name': 'golden retriever'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
           'name': 'Labrador retriever'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
           'name': 'golden retriever'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
           'name': 'tennis ball'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
           'name': 'golden retriever'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
           'name': 'Labrador retriever'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
           'name': 'Labrador retriever'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
           'name': 'golden retriever'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
           'name': 'tennis ball'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
           'name': 'golden retriever'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
           'name': 'tennis ball'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
           'name': 'golden retriever'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
           'name': 'Labrador retriever'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
           'name': 'tennis ball'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
           'name': 'golden retriever'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
           'name': 'Labrador retriever'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
           'name': 'tennis ball'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&226': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&227': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&228': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&229': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&230': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&231': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&232': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&233': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&234': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&235': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&236': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&237': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&238': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&239': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&240': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&241': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&242': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&243': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&244': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&245': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&246': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&247': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&248': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&249': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&250': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&251': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&252': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&253': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&254': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&255': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&256': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&257': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&258': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&259': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&260': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&261': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&262': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&263': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&264': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&265': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&266': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&267': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&268': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&269': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&270': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&271': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&272': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&273': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&274': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&275': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&276': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&277': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&278': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&279': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&280': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&281': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&282': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&283': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&284': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&285': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&286': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&287': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&288': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&289': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&290': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&291': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&292': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&293': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&294': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&295': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&296': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&297': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&298': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&299': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&300': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&301': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&302': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&303': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&304': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&305': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&306': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&307': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&308': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&309': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&310': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&311': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&312': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&313': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&314': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&1&0': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&1': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&2': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&3': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&4': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&5': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&6': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&7': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&8': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&9': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&10': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&11': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&12': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&13': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&14': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&15': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&16': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&17': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&18': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&19': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&20': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&21': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&22': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&23': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&24': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&25': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&26': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&27': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&28': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&29': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&30': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&31': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&32': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&33': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&34': np.array([-0.26192650167775977, 0.33491141590339474]),
'setosa&1&35': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&36': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&37': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&38': np.array([0.32199975656257585, -0.748229355246375]),
'setosa&1&39': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&40': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&41': np.array([0.43843349141088417, -0.8642740701867918]),
'setosa&1&42': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&43': np.array([-0.7141739659554724, 0.6619819140152877]),
'setosa&1&44': np.array([-0.4446001433508151, 0.6107546840046902]),
'setosa&1&45': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&46': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&47': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&48': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&49': np.array([0.4079256832347186, 0.038455640985860955]),
'setosa&1&50': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&51': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&52': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&53': np.array([0.7749499208750121, -0.814718944080443]),
'setosa&1&54': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&55': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&56': np.array([0.80403091954169, -0.844515250413482]),
'setosa&1&57': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&58': np.array([0.5826506963750848, -0.22335655671229107]),
'setosa&1&59': np.array([0.33108168891715983, 0.13647816746351163]),
'setosa&1&60': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&61': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&62': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&63': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&64': np.array([0.3093950298647913, 0.1140298206733954]),
'setosa&1&65': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&66': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&67': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&68': np.array([0.4933316375690333, -0.5272416708629277]),
'setosa&1&69': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&70': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&71': np.array([0.5041830043657418, -0.5392782673950876]),
'setosa&1&72': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&73': np.array([0.25657760110071476, 0.12592645350389123]),
'setosa&1&74': np.array([0.13717260713320106, 0.3627779907901665]),
'setosa&1&75': np.array([0.0, -0.4756207622944677]),
'setosa&1&76': np.array([0.0, -0.4854334805210761]),
'setosa&1&77': np.array([0.0, 0.16885577975809635]),
'setosa&1&78': np.array([0.0, 0.395805885538554]),
'setosa&1&79': np.array([0.0, 0.2538072707138344]),
'setosa&1&80': np.array([0.0, -0.4756207622944677]),
'setosa&1&81': np.array([0.0, -0.4756207622944677]),
'setosa&1&82': np.array([0.0, -0.4756207622944677]),
'setosa&1&83': np.array([0.0, -0.4756207622944677]),
'setosa&1&84': np.array([0.0, -0.4854334805210761]),
'setosa&1&85': np.array([0.0, -0.4854334805210761]),
'setosa&1&86': np.array([0.0, -0.4854334805210761]),
'setosa&1&87': np.array([0.0, 0.16885577975809635]),
'setosa&1&88': np.array([0.0, 0.16885577975809635]),
'setosa&1&89': np.array([0.0, 0.395805885538554]),
'setosa&1&90': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&91': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&92': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&93': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&94': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&95': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&96': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&97': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&98': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&99': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&100': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&101': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&102': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&103': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&104': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&105': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&106': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&107': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&108': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&109': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&110': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&111': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&112': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&113': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&114': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&115': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&116': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&117': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&118': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&119': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&120': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&121': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&122': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&123': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&124': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&125': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&126': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&127': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&128': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&129': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&130': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&131': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&132': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&133': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&134': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&135': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&136': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&137': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&138': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&139': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&140': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&141': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&142': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&143': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&144': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&145': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&146': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&147': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&148': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&149': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&150': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&151': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&152': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&153': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&154': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&155': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&156': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&157': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&158': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&159': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&160': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&161': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&162': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&163': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&164': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&165': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&166': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&167': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&168': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&169': np.array([-0.4964962439921071, 0.3798215458387346]),
'setosa&1&170': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&171': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&172': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&173': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&174': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&175': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&176': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&177': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&178': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&179': np.array([-0.6718337295341267, 0.6620422637360075]),
'setosa&1&180': np.array([-0.37157553889555184, -0.1221600832023858]),
'setosa&1&181': np.array([-0.2463036871609408, -0.24630368716093934]),
'setosa&1&182': np.array([-0.9105775730167809, 0.6842162738602727]),
'setosa&1&183': | np.array([-0.6718337295341267, 0.6620422637360075]) | numpy.array |
import os
import copy
import math
import errno
import torch
#import trimesh
import skimage
import numpy as np
import urllib.request
import skimage.filters
import matplotlib.colors as colors
from tqdm import tqdm
from PIL import Image
#from inside_mesh import inside_mesh
from torch.utils.data import Dataset
from data_struct import QuadTree, OctTree
from scipy.spatial import cKDTree as spKDTree
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
def get_mgrid(sidelen, dim=2):
'''Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.'''
if isinstance(sidelen, int):
sidelen = dim * (sidelen,)
if dim == 2:
pixel_coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1]], axis=-1)[None, ...].astype(np.float32)
pixel_coords[0, :, :, 0] = pixel_coords[0, :, :, 0] / (sidelen[0] - 1)
pixel_coords[0, :, :, 1] = pixel_coords[0, :, :, 1] / (sidelen[1] - 1)
elif dim == 3:
pixel_coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1], :sidelen[2]], axis=-1)[None, ...].astype(np.float32)
pixel_coords[..., 0] = pixel_coords[..., 0] / max(sidelen[0] - 1, 1)
pixel_coords[..., 1] = pixel_coords[..., 1] / (sidelen[1] - 1)
pixel_coords[..., 2] = pixel_coords[..., 2] / (sidelen[2] - 1)
else:
raise NotImplementedError('Not implemented for dim=%d' % dim)
pixel_coords -= 0.5
pixel_coords *= 2.
pixel_coords = torch.Tensor(pixel_coords).view(-1, dim)
return pixel_coords
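# A minimal usage sketch for get_mgrid (assumes torch is importable; the values
# shown are what the normalization above produces):
#   coords = get_mgrid(3)                    # shape (9, 2); rows run (-1,-1)..(1,1)
#   coords_3d = get_mgrid((2, 3, 4), dim=3)  # shape (24, 3)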
def lin2img(tensor, image_resolution=None):
batch_size, num_samples, channels = tensor.shape
if image_resolution is None:
width = np.sqrt(num_samples).astype(int)
height = width
else:
height = image_resolution[0]
width = image_resolution[1]
return tensor.permute(0, 2, 1).view(batch_size, channels, height, width)
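# Shape sketch for lin2img: a (1, 4096, 3) batch of flattened RGB samples becomes
# a (1, 3, 64, 64) image; with image_resolution=None the image is assumed square
# (side length = sqrt(num_samples)).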
def grads2img(gradients):
mG = gradients.detach().squeeze(0).permute(-2, -1, -3).cpu()
# assumes mG is [row,cols,2]
nRows = mG.shape[0]
nCols = mG.shape[1]
mGr = mG[:, :, 0]
mGc = mG[:, :, 1]
mGa = np.arctan2(mGc, mGr)
mGm = | np.hypot(mGc, mGr) | numpy.hypot |
from nose import SkipTest
from nose.tools import assert_raises, assert_true, assert_equal
import networkx as nx
from networkx.generators.classic import barbell_graph,cycle_graph,path_graph
class TestConvertNumpy(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global np
global np_assert_equal
try:
import numpy as np
np_assert_equal=np.testing.assert_equal
except ImportError:
raise SkipTest('NumPy not available.')
def __init__(self):
self.G1 = barbell_graph(10, 3)
self.G2 = cycle_graph(10, create_using=nx.DiGraph())
self.G3 = self.create_weighted(nx.Graph())
self.G4 = self.create_weighted(nx.DiGraph())
def create_weighted(self, G):
g = cycle_graph(4)
e = g.edges()
source = [u for u,v in e]
dest = [v for u,v in e]
weight = [s+10 for s in source]
ex = zip(source, dest, weight)
G.add_weighted_edges_from(ex)
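# Sketch of the result (edge order is version-dependent): cycle_graph(4) gives
# edges over nodes 0..3 and each (u, v) edge gets weight u + 10,
# e.g. (0, 1, 10) and (2, 3, 12).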
return G
def assert_equal(self, G1, G2):
assert_true( sorted(G1.nodes())==sorted(G2.nodes()) )
assert_true( sorted(G1.edges())==sorted(G2.edges()) )
def identity_conversion(self, G, A, create_using):
GG = nx.from_numpy_matrix(A, create_using=create_using)
self.assert_equal(G, GG)
GW = nx.to_networkx_graph(A, create_using=create_using)
self.assert_equal(G, GW)
GI = create_using.__class__(A)
self.assert_equal(G, GI)
def test_shape(self):
"Conversion from non-square array."
A=np.array([[1,2,3],[4,5,6]])
assert_raises(nx.NetworkXError, nx.from_numpy_matrix, A)
def test_identity_graph_matrix(self):
"Conversion from graph to matrix to graph."
A = nx.to_numpy_matrix(self.G1)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_graph_array(self):
"Conversion from graph to array to graph."
A = nx.to_numpy_matrix(self.G1)
A = np.asarray(A)
self.identity_conversion(self.G1, A, nx.Graph())
def test_identity_digraph_matrix(self):
"""Conversion from digraph to matrix to digraph."""
A = nx.to_numpy_matrix(self.G2)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_digraph_array(self):
"""Conversion from digraph to array to digraph."""
A = nx.to_numpy_matrix(self.G2)
A = np.asarray(A)
self.identity_conversion(self.G2, A, nx.DiGraph())
def test_identity_weighted_graph_matrix(self):
"""Conversion from weighted graph to matrix to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_graph_array(self):
"""Conversion from weighted graph to array to weighted graph."""
A = nx.to_numpy_matrix(self.G3)
A = np.asarray(A)
self.identity_conversion(self.G3, A, nx.Graph())
def test_identity_weighted_digraph_matrix(self):
"""Conversion from weighted digraph to matrix to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_identity_weighted_digraph_array(self):
"""Conversion from weighted digraph to array to weighted digraph."""
A = nx.to_numpy_matrix(self.G4)
A = np.asarray(A)
self.identity_conversion(self.G4, A, nx.DiGraph())
def test_nodelist(self):
"""Conversion from graph to matrix to graph with nodelist."""
P4 = path_graph(4)
P3 = path_graph(3)
nodelist = P3.nodes()
A = nx.to_numpy_matrix(P4, nodelist=nodelist)
GA = nx.Graph(A)
self.assert_equal(GA, P3)
# Make nodelist ambiguous by containing duplicates.
nodelist += [nodelist[0]]
assert_raises(nx.NetworkXError, nx.to_numpy_matrix, P3, nodelist=nodelist)
def test_weight_keyword(self):
WP4 = nx.Graph()
WP4.add_edges_from( (n,n+1,dict(weight=0.5,other=0.3)) for n in range(3) )
P4 = path_graph(4)
A = nx.to_numpy_matrix(P4)
np_assert_equal(A, nx.to_numpy_matrix(WP4,weight=None))
np_assert_equal(0.5*A, nx.to_numpy_matrix(WP4))
np_assert_equal(0.3*A, nx.to_numpy_matrix(WP4,weight='other'))
def test_from_numpy_matrix_type(self):
A=np.matrix([[1]])
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),int)
A=np.matrix([[1]]).astype(float) # builtin float: the np.float alias was removed from NumPy
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),float)
A=np.matrix([[1]]).astype(str) # builtin str: the np.str alias was removed from NumPy
G=nx.from_numpy_matrix(A)
assert_equal(type(G[0][0]['weight']),str)
A= | np.matrix([[1]]) | numpy.matrix |
# Copyright 2021 <NAME>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
"""
import unittest
import numpy as np
import sempler
import sempler.generators
import ges
import ges.utils as utils
from ges.scores.gauss_obs_l0_pen import GaussObsL0Pen
# ---------------------------------------------------------------------
# Tests for the insert operator
class InsertOperatorTests(unittest.TestCase):
true_A = np.array([[0, 0, 1, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 0]])
factorization = [(4, (2, 3)), (3, (2,)), (2, (0, 1)), (0, ()), (1, ())]
true_B = true_A * np.random.uniform(1, 2, size=true_A.shape)
scm = sempler.LGANM(true_B, (0, 0), (0.3, 0.4))
p = len(true_A)
n = 10000
obs_data = scm.sample(n=n)
# ------------------------------------------------------
# Tests
def test_insert_1(self):
# Test behaviour of the ges.insert(x,y,T) function
# Insert should fail on adjacent edges
try:
ges.insert(0, 2, set(), self.true_A)
self.fail("Call to insert should have failed")
except ValueError as e:
print("OK:", e)
# Insert should fail when T contains non-neighbors of y
try:
ges.insert(0, 1, {2}, self.true_A)
self.fail("Call to insert should have failed")
except ValueError as e:
print("OK:", e)
# Insert should fail when T contains adjacents of x
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
try:
ges.insert(3, 2, {4}, A)
self.fail("Call to insert should have failed")
except ValueError as e:
print("OK:", e)
# This should work
true_new_A = A.copy()
true_new_A[3, 2] = 1
new_A = ges.insert(3, 2, set(), A)
self.assertTrue((true_new_A == new_A).all())
# This should work
true_new_A = A.copy()
true_new_A[1, 2] = 1
new_A = ges.insert(1, 2, set(), A)
self.assertTrue((true_new_A == new_A).all())
# This should work
true_new_A = A.copy()
true_new_A[1, 2] = 1
true_new_A[4, 2], true_new_A[2, 4] = 1, 0
new_A = ges.insert(1, 2, {4}, A)
self.assertTrue((true_new_A == new_A).all())
# This should work
true_new_A = A.copy()
true_new_A[1, 0] = 1
new_A = ges.insert(1, 0, set(), A)
self.assertTrue((true_new_A == new_A).all())
def test_insert_2(self):
G = 100
p = 20
for i in range(G):
A = sempler.generators.dag_avg_deg(p, 3, 1, 1)
cpdag = utils.dag_to_cpdag(A)
for x in range(p):
# Can only apply the operator to non-adjacent nodes
adj_x = utils.adj(x, cpdag)
Y = set(range(p)) - adj_x
for y in Y:
for T in utils.subsets(utils.neighbors(y, cpdag) - adj_x):
# print(x,y,T)
output = ges.insert(x, y, T, cpdag)
# Verify the new vstructures
vstructs = utils.vstructures(output)
for t in T:
vstruct = (x, y, t) if x < t else (t, y, x)
self.assertIn(vstruct, vstructs)
# Verify whole connectivity
truth = cpdag.copy()
# Add edge x -> y
truth[x, y] = 1
# Orient t -> y
truth[list(T), y] = 1
truth[y, list(T)] = 0
self.assertTrue((output == truth).all())
print("\nExhaustively checked insert operator on %i CPDAGS" % (i + 1))
def test_valid_insert_operators_1a(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should only be one valid operator, as
# 1. X1 has no neighbors in A, so T0 = {set()}
# 2. na_yx is also an empty set, thus na_yx U T is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(0, 1, A, cache, debug=False)
self.assertEqual(1, len(valid_operators))
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[0, 1] = 1
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_1b(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should only be one valid operator, as
# 1. X0 has no neighbors in A, so T0 = {set()}
# 2. na_yx is also an empty set, thus na_yx U T is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(1, 0, A, cache, debug=False)
self.assertEqual(1, len(valid_operators))
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[1, 0] = 1
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_2a(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should be two valid operators, as T0 = {X4}
# 1. insert(X0,X2,set()) should be valid
# 2. and also insert(X0,X2,{X4}), as na_yx U T = {X4} and is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(0, 2, A, cache, debug=False)
self.assertEqual(2, len(valid_operators))
# Test outcome of insert(0,2,set())
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[0, 2] = 1
self.assertTrue((new_A == true_new_A).all())
# Test outcome of insert(0,2,4)
_, new_A, _, _, _ = valid_operators[1]
true_new_A = A.copy()
true_new_A[0, 2], true_new_A[2, 4] = 1, 0
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_2b(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should only be one valid operator, as
# 1. X0 has no neighbors in A, so T0 = {set()}
# 2. na_yx is also an empty set, thus na_yx U T is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(2, 0, A, cache, debug=False)
self.assertEqual(1, len(valid_operators))
# Test outcome of insert(2,0,set())
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[2, 0] = 1
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_3a(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should be two valid operators, as T0 = {X4}
# 1. insert(X1,X2,set()) should be valid
# 2. and also insert(X1,X2,{X4}), as na_yx U T = {X4} and is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(1, 2, A, cache, debug=False)
self.assertEqual(2, len(valid_operators))
# Test outcome of insert(0,2,set())
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[1, 2] = 1
self.assertTrue((new_A == true_new_A).all())
# Test outcome of insert(1,2,4)
_, new_A, _, _, _ = valid_operators[1]
true_new_A = A.copy()
true_new_A[1, 2], true_new_A[2, 4] = 1, 0
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_3b(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should only be one valid operator, as
# 1. X1 has no neighbors in A, so T0 = {set()}
# 2. na_yx is also an empty set, thus na_yx U T is a clique
# 3. there are no semi-directed paths from y to x
valid_operators = ges.score_valid_insert_operators(2, 1, A, cache, debug=False)
self.assertEqual(1, len(valid_operators))
# Test outcome of insert(2,0,set())
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[2, 1] = 1
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_4a(self):
# Define A and cache
A = np.zeros_like(self.true_A)
A[2, 4], A[4, 2] = 1, 1 # x2 - x4
A[4, 3] = 1 # x4 -> x3
cache = GaussObsL0Pen(self.obs_data)
# there should be one valid operator, as T0 = set(), na_yx = {4}
# 1. insert(X0,X2,set()) should be valid
# 2. na_yx U T = {X4} should be a clique
# 3. the semi-directed path X2-X4->X3 contains one node in na_yx U T
valid_operators = ges.score_valid_insert_operators(3, 2, A, cache, debug=False)
self.assertEqual(1, len(valid_operators))
# Test outcome of insert(3,2,set())
_, new_A, _, _, _ = valid_operators[0]
true_new_A = A.copy()
true_new_A[3, 2] = 1
self.assertTrue((new_A == true_new_A).all())
def test_valid_insert_operators_4b(self):
# Define A and cache
A = | np.zeros_like(self.true_A) | numpy.zeros_like |
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
from tests.op_test import OpTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.op import Operator
paddle.enable_static()
def calculate_momentum_by_numpy(param,
grad,
mu,
velocity,
use_nesterov,
learning_rate,
regularization_method=None,
regularization_coeff=1.0):
if regularization_method == "l2_decay":
grad = grad + regularization_coeff * param
velocity_out = mu * velocity + grad
if use_nesterov:
param_out = param - (grad + velocity_out * mu) * learning_rate
else:
param_out = param - learning_rate * velocity_out
else:
velocity_out = mu * velocity + grad
if use_nesterov:
param_out = param - grad * learning_rate - \
velocity_out * mu * learning_rate
else:
param_out = param - learning_rate * velocity_out
return param_out, velocity_out
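# Worked one-step sketch for the function above (plain momentum, no Nesterov,
# no regularization; the numbers are illustrative): with param=1.0, grad=0.5,
# mu=0.9, velocity=0.2, learning_rate=0.1 we get
# velocity_out = 0.9*0.2 + 0.5 = 0.68 and param_out = 1.0 - 0.1*0.68 = 0.932.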
class TestMomentumOp1(OpTest):
def set_npu(self):
self.__class__.use_custom_device = True
def setUp(self):
self.set_npu()
self.op_type = "momentum"
self.init_dtype()
self.init_case()
param = np.random.random(self.shape).astype(self.dtype)
grad = np.random.random(self.shape).astype(self.dtype)
velocity = np.zeros(self.shape).astype(self.dtype)
learning_rate = np.array([0.001]).astype(np.float32)
mu = 0.0001
self.inputs = {
'Param': param,
'Grad': grad,
'Velocity': velocity,
'LearningRate': learning_rate
}
self.attrs = {'mu': mu, 'use_nesterov': self.use_nesterov}
param_out, velocity_out = calculate_momentum_by_numpy(
param=param,
grad=grad,
mu=mu,
velocity=velocity,
use_nesterov=self.use_nesterov,
learning_rate=learning_rate)
self.outputs = {'ParamOut': param_out, 'VelocityOut': velocity_out}
def init_case(self):
self.shape = (123, 321)
self.use_nesterov = False
def init_dtype(self):
self.dtype = np.float32
def test_check_output(self):
self.check_output_with_place(paddle.CustomPlace('ascend', 0))
class TestMomentumOpFp16(TestMomentumOp1):
def init_dtype(self):
self.dtype = np.float16
def test_check_output(self):
self.check_output(atol=1e-3)
class TestMomentumOp2(TestMomentumOp1):
def init_case(self):
self.shape = (123, 321)
self.use_nesterov = True
class TestMomentumV2(unittest.TestCase):
def test_momentum_dygraph(self):
paddle.disable_static(place=paddle.CustomPlace('ascend', 0))
value = | np.arange(26) | numpy.arange |
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for the von Mises distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
tfd = tfp.distributions
@test_util.run_all_in_graph_and_eager_modes
class _VonMisesTest(object):
def make_tensor(self, x):
x = tf.cast(x, self.dtype)
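# placeholder_with_default lets the same test body run with and without
# static shape information, toggled by self.use_static_shape.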
return tf.compat.v1.placeholder_with_default(
input=x, shape=x.shape if self.use_static_shape else None)
def testVonMisesShape(self):
loc = self.make_tensor([.1] * 5)
concentration = self.make_tensor([.2] * 5)
von_mises = tfd.VonMises(loc=loc, concentration=concentration)
self.assertEqual([
5,
], self.evaluate(von_mises.batch_shape_tensor()))
self.assertAllEqual([], self.evaluate(von_mises.event_shape_tensor()))
if self.use_static_shape:
self.assertEqual(tf.TensorShape([5]), von_mises.batch_shape)
self.assertEqual(tf.TensorShape([]), von_mises.event_shape)
def testInvalidConcentration(self):
with self.assertRaisesOpError("Condition x >= 0"):
loc = self.make_tensor(0.)
concentration = self.make_tensor(-.01)
von_mises = tfd.VonMises(loc, concentration, validate_args=True)
self.evaluate(von_mises.concentration)
def testVonMisesLogPdf(self):
locs_v = .1
concentrations_v = .2
x = np.array([2., 3., 4., 5., 6., 7.])
von_mises = tfd.VonMises(
self.make_tensor(locs_v), self.make_tensor(concentrations_v))
try:
from scipy import stats # pylint:disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn("Skipping scipy-dependent tests")
return
expected_log_prob = stats.vonmises.logpdf(x, concentrations_v, loc=locs_v)
log_prob = von_mises.log_prob(self.make_tensor(x))
self.assertAllClose(expected_log_prob, self.evaluate(log_prob))
def testVonMisesLogPdfUniform(self):
x = np.array([2., 3., 4., 5., 6., 7.])
von_mises = tfd.VonMises(self.make_tensor(.1), self.make_tensor(0.))
log_prob = von_mises.log_prob(self.make_tensor(x))
expected_log_prob = np.array([-np.log(2. * np.pi)] * 6)
self.assertAllClose(expected_log_prob, self.evaluate(log_prob))
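# Why -log(2*pi): the von Mises density is exp(k*cos(x - loc)) / (2*pi*I0(k));
# at concentration k=0 this reduces to 1/(2*pi) because I0(0)=1, so the
# log-density is the constant -log(2*pi) for every x.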
def testVonMisesPdf(self):
locs_v = .1
concentrations_v = .2
x = np.array([2., 3., 4., 5., 6., 7.])
von_mises = tfd.VonMises(
self.make_tensor(locs_v), self.make_tensor(concentrations_v))
prob = von_mises.prob(self.make_tensor(x))
try:
from scipy import stats # pylint:disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn("Skipping scipy-dependent tests")
return
expected_prob = stats.vonmises.pdf(x, concentrations_v, loc=locs_v)
self.assertAllClose(expected_prob, self.evaluate(prob))
def testVonMisesPdfUniform(self):
x = np.array([2., 3., 4., 5., 6., 7.])
von_mises = tfd.VonMises(self.make_tensor(1.), self.make_tensor(0.))
prob = von_mises.prob(self.make_tensor(x))
expected_prob = np.array([1. / (2. * np.pi)] * 6)
self.assertAllClose(expected_prob, self.evaluate(prob))
def testVonMisesCdf(self):
locs_v = np.reshape(np.linspace(-10., 10., 20), [-1, 1, 1])
concentrations_v = np.reshape(np.logspace(-3., 3., 20), [1, -1, 1])
x = np.reshape(np.linspace(-10., 10., 20), [1, 1, -1])
von_mises = tfd.VonMises(
self.make_tensor(locs_v), self.make_tensor(concentrations_v))
cdf = von_mises.cdf(self.make_tensor(x))
try:
from scipy import stats # pylint:disable=g-import-not-at-top
except ImportError:
tf.compat.v1.logging.warn("Skipping scipy-dependent tests")
return
expected_cdf = stats.vonmises.cdf(x, concentrations_v, loc=locs_v)
self.assertAllClose(expected_cdf, self.evaluate(cdf), atol=1e-4, rtol=1e-4)
def testVonMisesCdfUniform(self):
x = | np.linspace(-np.pi, np.pi, 20) | numpy.linspace |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime as dt
from matplotlib.ticker import FormatStrFormatter
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.shapereader as shpreader
from matplotlib.axes import Axes
from cartopy.mpl.geoaxes import GeoAxes
GeoAxes._pcolormesh_patched = Axes.pcolormesh
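# The assignment above is a commonly shared workaround: it swaps cartopy's
# patched pcolormesh for matplotlib's stock implementation on GeoAxes
# (whether it is still needed depends on the installed cartopy version).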
proj_cart = ccrs.PlateCarree(central_longitude=-95)
# TODO: document that counties are plotted by default when metpy is available, and how to add your own map data without relying on built-ins.
# reader = shpreader.Reader('/Users/vannac/Documents/UScounties/UScounties.shp')
# reader = shpreader.Reader('/home/vanna/status_plots/UScounties/UScounties.shp')
# counties = list(reader.geometries())
# COUNTIES = cfeature.ShapelyFeature(counties, ccrs.PlateCarree())
try:
from metpy.plots import USCOUNTIES
county_scales = ['20m', '5m', '500k']
COUNTIES = USCOUNTIES.with_scale(county_scales[0])
except ImportError:
COUNTIES = None
M2KM = 1000.0
COORD_TH = [0.1, 0.8, 0.83, 0.1]
COORD_PLAN = [0.1, 0.1, 0.65, 0.5]
COORD_LON = [0.1, 0.65, 0.65, 0.1]
COORD_LAT = [0.8, 0.1, 0.13, 0.5]
COORD_HIST = [0.8, 0.65, 0.13, 0.1]
xdiv = 0.01
ydiv = 0.01
zdiv = 0.1
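# The COORD_* lists are matplotlib add_axes rectangles in figure fractions
# ([left, bottom, width, height]); xdiv/ydiv (degrees) and zdiv (km) set the
# 2D-histogram bin widths computed in setup_figure below.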
class XlmaPlot(object):
def __init__(self, data, stime, subplot_labels, bkgmap, readtype, **kwargs):
"""
data = a data structure matching the lmatools or pyxlma formats:
1. pyxlma.lmalib.io.lmatools.LMAdata.data for reading with lmatools
2. pyxlma.lmalib.io.read.lma_file.readfile() for a pandas dataframe
with same headers as LYLOUT files.
stime = start time datetime object
readtype = file-read option: "lmatools" or "pandas" dataframe
"""
self.data = data
self.readtype = readtype
if self.readtype == 'pandas':
self.stime = stime
self.subplot_labels = subplot_labels
self.bkgmap = bkgmap
self.majorFormatter = FormatStrFormatter('%.1f')
self.setup_figure(**kwargs)
self.time_height()
self.lon_alt()
self.histogram()
self.plan_view()
self.lat_alt()
def setup_figure(self, **kwargs):
if self.readtype == 'lmatools':
self.datetime = self.data.datetime
self.dt_init = dt.datetime(
self.data.year, self.data.month, self.data.day)
if self.readtype == 'pandas':
self.datetime = self.data.Datetime
self.dt_init = dt.datetime(
self.stime.year, self.stime.month, self.stime.day)
self._subset_data(**kwargs)
self._setup_colors(**kwargs)
self.fig = plt.figure(figsize=(8.5, 11))
self.ax_th = self.fig.add_axes(COORD_TH)
if self.bkgmap == True:
self.ax_plan = self.fig.add_axes(COORD_PLAN, projection=ccrs.PlateCarree())
else:
self.ax_plan = self.fig.add_axes(COORD_PLAN)
self.ax_lon = self.fig.add_axes(COORD_LON)
self.ax_lat = self.fig.add_axes(COORD_LAT)
self.ax_hist = self.fig.add_axes(COORD_HIST)
self.yticks = 5 * np.arange(6)
if 'title' in kwargs.keys():
self.title = kwargs['title']
else:
schi = 'chisqr = ' + str(kwargs['chi2'])
if self.density:
self.title = \
self.tlim[0].strftime('%Y%m%d Source Density, ') + schi
else:
self.title = \
self.tlim[0].strftime('%Y%m%d Sources by Time, ') + schi
self.xbins = int((self.xlim[1] - self.xlim[0]) / xdiv)
self.ybins = int((self.ylim[1] - self.ylim[0]) / ydiv)
self.zbins = int((self.zlim[1] - self.zlim[0]) / zdiv)
self.tbins = 300
def time_height(self):
if self.density:
if self.readtype == 'lmatools':
self.ax_th.hist2d(
self.data.data['time'][self.cond],
self.data.data['alt'][self.cond]/M2KM,
bins=[self.tbins, self.zbins],
density=True, cmap=self.cmap, cmin=0.00001)
if self.readtype == 'pandas':
self.ax_th.hist2d(
self.data['time (UT sec of day)'][self.cond],
self.data['alt(m)'][self.cond]/M2KM,
bins=[self.tbins, self.zbins],
density=True, cmap=self.cmap, cmin=0.00001)
else:
if self.readtype == 'lmatools':
self.ax_th.scatter(
self.data.data['time'][self.cond],
self.data.data['alt'][self.cond]/M2KM, c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
if self.readtype == 'pandas':
self.ax_th.scatter(
self.data['time (UT sec of day)'][self.cond],
self.data['alt(m)'][self.cond]/M2KM, c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
self.ax_th.set_xlabel('Time (UTC)')
self.ax_th.set_ylabel('Altitude (km)')
self.ax_th.set_yticks(self.yticks)
self.ax_th.set_ylim(self.zlim)
self.ax_th.set_title(self.title)
self.ax_th.minorticks_on()
# Now fix the ticks and labels on the time axis
# Ticks
tstep = int(1e6*(self.tlim[1] - self.tlim[0]).total_seconds()/5)
sod_start = (self.tlim[0] - self.dt_init).total_seconds()
xticks = [sod_start + i*tstep*1e-6 for i in range(6)]
self.ax_th.set_xlim(xticks[0], xticks[-1])
self.ax_th.set_xticks(xticks)
# Tick labels
dt1 = self.tlim[0]
dt2 = dt1 + dt.timedelta(microseconds=tstep)
dt3 = dt2 + dt.timedelta(microseconds=tstep)
dt4 = dt3 + dt.timedelta(microseconds=tstep)
dt5 = dt4 + dt.timedelta(microseconds=tstep)
dt6 = dt5 + dt.timedelta(microseconds=tstep)
if tstep < 5000000:
tfmt = '%H:%M:%S.%f'
else:
tfmt = '%H:%M:%S000'
self.ax_th.set_xticklabels([
dt1.strftime(tfmt)[:-3], dt2.strftime(tfmt)[:-3],
dt3.strftime(tfmt)[:-3], dt4.strftime(tfmt)[:-3],
dt5.strftime(tfmt)[:-3], dt6.strftime(tfmt)[:-3]])
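# Note on the formats above: '%f' renders six-digit microseconds, so the [:-3]
# slice truncates labels to milliseconds; the literal '000' in the coarse
# format keeps the label width consistent after the same slice.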
# Subplot letter
if self.subplot_labels == True:
plt.text(0.05, 0.8, '(a)', fontsize='x-large', weight='bold',
horizontalalignment='center', verticalalignment='center',
transform=self.ax_th.transAxes)
def lon_alt(self):
if self.density:
if self.readtype == 'lmatools':
self.ax_lon.hist2d(
self.data.data['lon'][self.cond],
self.data.data['alt'][self.cond]/M2KM,
bins=[self.xbins, self.zbins], density=True, cmap=self.cmap,
cmin=0.00001)
if self.readtype == 'pandas':
self.ax_lon.hist2d(
self.data['lon'][self.cond],
self.data['alt(m)'][self.cond]/M2KM,
bins=[self.xbins, self.zbins], density=True, cmap=self.cmap,
cmin=0.00001)
else:
if self.readtype == 'lmatools':
self.ax_lon.scatter(
self.data.data['lon'][self.cond],
self.data.data['alt'][self.cond]/M2KM, c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
if self.readtype == 'pandas':
self.ax_lon.scatter(
self.data['lon'][self.cond],
self.data['alt(m)'][self.cond]/M2KM, c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
self.ax_lon.set_ylabel('Altitude (km MSL)')
self.ax_lon.set_yticks(self.yticks)
self.ax_lon.set_ylim(self.zlim)
if hasattr(self, 'xlim'):
self.ax_lon.set_xlim(self.xlim)
self.ax_lon.minorticks_on()
# self.ax_lon.xaxis.set_major_formatter(self.majorFormatter)
if self.subplot_labels == True:
plt.text(0.065, 0.80, '(b)', fontsize='x-large', weight='bold',
horizontalalignment='center', verticalalignment='center',
transform=self.ax_lon.transAxes)
def histogram(self):
if self.readtype == 'lmatools':
self.ax_hist.hist(self.data.data['alt'][self.cond]/M2KM,
orientation='horizontal',
density=True, bins=80, range=(0, 20))
if self.readtype == 'pandas':
self.ax_hist.hist(self.data['alt(m)'][self.cond]/M2KM,
orientation='horizontal',
density=True, bins=80, range=(0, 20))
self.ax_hist.set_xticks([0, 0.1, 0.2, 0.3])
self.ax_hist.set_yticks(self.yticks)
self.ax_hist.set_ylim(self.zlim)
self.ax_hist.set_xlim(0, 0.3)
self.ax_hist.set_xlabel('Freq')
self.ax_hist.minorticks_on()
if self.subplot_labels == True:
plt.text(0.30, 0.80, '(c)', fontsize='x-large', weight='bold',
horizontalalignment='center', verticalalignment='center',
transform=self.ax_hist.transAxes)
if self.readtype == 'lmatools':
nsrc = len(self.data.data['alt'][self.cond])
else:
nsrc = len(self.data['alt(m)'][self.cond])
plt.text(0.25, 0.10, str(nsrc) + ' src',
fontsize='small',
horizontalalignment='left', verticalalignment='center',
transform=self.ax_hist.transAxes)
def plan_view(self):
if self.density:
if self.readtype == 'lmatools':
self.ax_plan.hist2d(
self.data.data['lon'][self.cond],
self.data.data['lat'][self.cond],
bins=[self.xbins, self.ybins], density=True, cmap=self.cmap,
cmin=0.00001)
if self.readtype == 'pandas':
self.ax_plan.hist2d(
self.data['lon'][self.cond],
self.data['lat'][self.cond],
bins=[self.xbins, self.ybins], density=True, cmap=self.cmap,
cmin=0.00001)
else:
if self.readtype == 'lmatools':
self.ax_plan.scatter(
self.data.data['lon'][self.cond],
self.data.data['lat'][self.cond], c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
if self.readtype == 'pandas':
self.ax_plan.scatter(
self.data['lon'][self.cond],
self.data['lat'][self.cond], c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
if self.bkgmap == True:
if COUNTIES is not None:
self.ax_plan.add_feature(COUNTIES, facecolor='none', edgecolor='gray')
self.ax_plan.add_feature(cfeature.BORDERS)
self.ax_plan.add_feature(cfeature.STATES.with_scale('10m'))
self.ax_plan.set_xlabel('Longitude (degrees)')
self.ax_plan.set_ylabel('Latitude (degrees)')
if hasattr(self, 'xlim'):
self.ax_plan.set_xlim(self.xlim)
if hasattr(self, 'ylim'):
self.ax_plan.set_ylim(self.ylim)
self.ax_plan.minorticks_on()
self.ax_plan.xaxis.set_major_formatter(self.majorFormatter)
if self.bkgmap == True:
self.ax_plan.set_xticks(self.ax_plan.get_xticks())
self.ax_plan.set_yticks(self.ax_plan.get_yticks())
self.ax_plan.set_extent([self.xlim[0], self.xlim[1],
self.ylim[0], self.ylim[1]])
if self.subplot_labels == True:
plt.text(0.065, 0.95, '(d)', fontsize='x-large', weight='bold',
horizontalalignment='center', verticalalignment='center',
transform=self.ax_plan.transAxes)
def lat_alt(self):
if self.density:
if self.readtype == 'lmatools':
self.ax_lat.hist2d(
self.data.data['alt'][self.cond]/M2KM,
self.data.data['lat'][self.cond],
bins=[self.zbins, self.ybins], density=True, cmap=self.cmap,
cmin=0.00001)
if self.readtype == 'pandas':
self.ax_lat.hist2d(
self.data['alt(m)'][self.cond]/M2KM,
self.data['lat'][self.cond],
bins=[self.zbins, self.ybins], density=True, cmap=self.cmap,
cmin=0.00001)
else:
if self.readtype == 'lmatools':
self.ax_lat.scatter(
self.data.data['alt'][self.cond]/M2KM,
self.data.data['lat'][self.cond], c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
if self.readtype == 'pandas':
self.ax_lat.scatter(
self.data['alt(m)'][self.cond]/M2KM,
self.data['lat'][self.cond], c=self.c,
vmin=self.vmin, vmax=self.vmax, cmap=self.cmap, s=self.s,
marker='o', edgecolors='none')
self.ax_lat.set_xlabel('Altitude (km MSL)')
self.ax_lat.set_xticks(self.yticks)
self.ax_lat.set_xlim(self.zlim)
if hasattr(self, 'ylim'):
self.ax_lat.set_ylim(self.ylim)
self.ax_lat.minorticks_on()
for xlabel_i in self.ax_lat.get_yticklabels():
xlabel_i.set_fontsize(0.0)
xlabel_i.set_visible(False)
if self.subplot_labels == True:
plt.text(0.30, 0.95, '(e)', fontsize='x-large', weight='bold',
horizontalalignment='center', verticalalignment='center',
transform=self.ax_lat.transAxes)
def _subset_data(self, **kwargs):
if self.readtype == 'lmatools':
if 'chi2' in kwargs.keys():
self.cond = self.data.data['chi2'] <= kwargs['chi2']
else:
self.cond = self.data.data['chi2'] <= 5
if 'xlim' in kwargs.keys():
self.xlim = kwargs['xlim']
tmpcond = np.logical_and(self.data.data['lon'] >= self.xlim[0],
self.data.data['lon'] <= self.xlim[1])
self.cond = np.logical_and(self.cond, tmpcond)
if 'ylim' in kwargs.keys():
self.ylim = kwargs['ylim']
tmpcond = np.logical_and(self.data.data['lat'] >= self.ylim[0],
self.data.data['lat'] <= self.ylim[1])
self.cond = np.logical_and(self.cond, tmpcond)
if 'zlim' in kwargs.keys():
self.zlim = kwargs['zlim']
else:
self.zlim = (0, 20)
tmpcond = np.logical_and(self.data.data['alt']/M2KM >= self.zlim[0],
self.data.data['alt']/M2KM <= self.zlim[1])
self.cond = | np.logical_and(self.cond, tmpcond) | numpy.logical_and |
from ..flow.plotting import FlowPlot
from ..flow import transform
from matplotlib.colors import LogNorm, LinearSegmentedColormap
from .conftest import create_linear_data, create_lognormal_data
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import pytest
sns.set(style="white", font_scale=1.2)
sns.set_style("ticks", {"xtick.major.size": 8, "ytick.major.size": 8})
def test_create_linear_data():
data = create_linear_data()
fig, ax = plt.subplots(figsize=(5, 5))
bins = [np.histogram_bin_edges(data.x.values, bins="sqrt"),
| np.histogram_bin_edges(data.y.values, bins="sqrt") | numpy.histogram_bin_edges |
import argparse
import fnmatch
import os
import shutil
import h5py as h5py
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
import sunrgbd
import wrgbd51
from alexnet_model import AlexNet
from basic_utils import Models, RunSteps
from densenet_model import DenseNet
from main import init_save_dirs
from resnet_models import ResNet
from vgg16_model import VGG16Net
def get_rnn_model(params):
if params.net_model == Models.AlexNet:
model_rnn = AlexNet(params)
elif params.net_model == Models.VGGNet16:
model_rnn = VGG16Net(params)
elif params.net_model == Models.ResNet50 or params.net_model == Models.ResNet101:
model_rnn = ResNet(params)
else: # params.net_model == Models.DenseNet121:
model_rnn = DenseNet(params)
return model_rnn
def calc_scores(l123_preds, test_labels, model_rnn):
model_rnn.test_labels = test_labels
avg_res, true_preds, test_size = model_rnn.calc_scores(l123_preds)
conf_mat = confusion_matrix(test_labels, l123_preds)
return avg_res, true_preds, test_size, conf_mat
def show_sunrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
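# Row normalization: cm_sum is (num_ctgs, 1), so each row of conf_mat is
# divided by its own total and a cell reads as "percent of true-class samples
# predicted as the column class"; the diagonal is then per-class recall.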
columns = sunrgbd.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 16}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=16)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=16)
# plt.ylabel('True Label')
# plt.xlabel('Predicted Label')
plt.show()
# plt.savefig('sunrgb_confusion_matrix.eps', format='eps', dpi=1000)
def calc_scores_conf_mat(svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(svm_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
l3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
print('Running Layer-[{}+{}+{}] Confidence Average Fusion...'.format(l1, l2, l3))
print('SVM confidence scores of {}, {} and {} are average fused'.format(l1, l2, l3))
print('SVM confidence average fusion')
l123_avr_confidence = np.mean(np.array([l1_conf_scores, l2_conf_scores, l3_conf_scores]), axis=0)
l123_preds = np.argmax(l123_avr_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Fusion result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path):
model_rnn = get_rnn_model(params)
l1, l2, l3 = 'layer5', 'layer6', 'layer7'
with h5py.File(rgb_svm_path, 'r') as f:
rgb1_conf_scores = np.asarray(f[l1])
rgb2_conf_scores = np.asarray(f[l2])
rgb3_conf_scores = np.asarray(f[l3])
test_labels = np.asarray(f['labels'])
with h5py.File(depth_svm_path, 'r') as f:
depth1_conf_scores = np.asarray(f[l1])
depth2_conf_scores = np.asarray(f[l2])
depth3_conf_scores = np.asarray(f[l3])
rgb_l123_sum_confidence = np.sum(np.array([rgb1_conf_scores, rgb2_conf_scores, rgb3_conf_scores]), axis=0)
depth_l123_sum_confidence = np.sum(np.array([depth1_conf_scores, depth2_conf_scores, depth3_conf_scores]), axis=0)
    print('Weighted Average SVM confidence scores of [RGB({}+{}+{})+Depth({}+{}+{})] are taken'.format(l1, l2, l3, l1, l2, l3))
print('SVMs confidence weighted fusion')
w_rgb, w_depth = model_rnn.calc_modality_weights((rgb_l123_sum_confidence, depth_l123_sum_confidence))
rgbd_l123_wadd_confidence = np.add(rgb_l123_sum_confidence * w_rgb[:, np.newaxis],
depth_l123_sum_confidence * w_depth[:, np.newaxis])
l123_preds = np.argmax(rgbd_l123_wadd_confidence, axis=1)
avg_res, true_preds, test_size, conf_mat = calc_scores(l123_preds, test_labels, model_rnn)
print('Combined Weighted Confidence result: {0:.2f}% ({1}/{2})..'.format(avg_res, true_preds, test_size))
show_sunrgbd_conf_mat(conf_mat)
def sunrgbd_main(params):
root_path = '../../data/sunrgbd/'
svm_conf_paths = root_path + params.features_root + params.proceed_step + '/svm_confidence_scores/'
rgb_svm_path = svm_conf_paths + params.net_model + '_RGB_JPG.hdf5'
depth_svm_path = svm_conf_paths + params.net_model + '_Depth_Colorized_HDF5.hdf5'
if params.data_type == 'rgb':
calc_scores_conf_mat(rgb_svm_path)
elif params.data_type == 'depth':
calc_scores_conf_mat(depth_svm_path)
else:
sunrgbd_combined_scores_conf_mat(rgb_svm_path, depth_svm_path)
def individual_class_scores(total_conf_mat):
num_ctgs = len(total_conf_mat)
cm_sum = np.sum(total_conf_mat, axis=1, keepdims=True)
cm_perc = total_conf_mat / cm_sum.astype(float) * 100
    individual_scores = cm_perc.diagonal()
    categories = wrgbd51.get_class_names(range(num_ctgs))
    for category, category_score in zip(categories, individual_scores):
print(f'{category:<15} {category_score:>10.1f}')
def show_wrgbd_conf_mat(conf_mat):
num_ctgs = len(conf_mat)
cm_sum = np.sum(conf_mat, axis=1, keepdims=True)
cm_perc = conf_mat / cm_sum.astype(float) * 100
columns = wrgbd51.get_class_names(range(num_ctgs))
df_cm = pd.DataFrame(cm_perc, index=columns, columns=columns)
plt.figure(figsize=(20, 15))
sn.set(font_scale=1.4) # for label size
heatmap = sn.heatmap(df_cm, annot=True, annot_kws={"size": 10}, cmap='Oranges', fmt=".1f", vmax=100) # font size
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=12)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=45, ha='right', fontsize=12)
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
def wrgb_scores_conf_mat(params, svm_conf_paths):
model_rnn = get_rnn_model(params)
if params.data_type == 'rgb':
params.proceed_step = RunSteps.FIX_RECURSIVE_NN
data_type_ex = 'crop'
params.data_type = 'crop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'rgb'
else:
params.proceed_step = RunSteps.FINE_RECURSIVE_NN
data_type_ex = 'depthcrop'
params.data_type = 'depthcrop'
l1, l2, l3 = model_rnn.get_best_trio_layers()
params.data_type = 'depths'
all_splits_scores = []
for split in range(1, 11):
conf_file = params.net_model + '_' + data_type_ex + '_split_' + str(split) + '.hdf5'
svm_conf_file_path = svm_conf_paths + conf_file
with h5py.File(svm_conf_file_path, 'r') as f:
l1_conf_scores = np.asarray(f[l1])
l2_conf_scores = np.asarray(f[l2])
        l3_conf_scores = np.asarray(f[l3])
import numpy as np
import math
import scipy
from fractions import Fraction
import itertools
import biotuner
from biotuner.biotuner_utils import *
import matplotlib.pyplot as plt
from numpy import array, zeros, ones, arange, log2, sqrt, diff, concatenate
import pytuning
from math import gcd
from numpy import array, zeros, ones, arange, log2, sqrt, diff, concatenate
from scipy.stats import norm
from scipy.signal import argrelextrema, detrend
import scipy.signal as ss
from pytuning import create_euler_fokker_scale
from collections import Counter
from functools import reduce
from pytuning.utilities import normalize_interval
from pactools import Comodulogram, REFERENCES
'''---------------------------------------------------------Extended peaks-------------------------------------------------------------'''
'''EXTENDED PEAKS from expansions
'''
def EEG_harmonics_mult(peaks, n_harmonics, n_oct_up = 0):
"""
Natural harmonics
This function takes a list of frequency peaks as input and computes the desired number of harmonics
with the formula: x, 2x, 3x ..., nx
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harmonics: int
Number of harmonics to compute
n_oct_up: int
Defaults to 0. Corresponds to the number of octave the peaks are shifted
Returns
-------
multi_harmonics: array
(n_peaks, n_harmonics + 1)
"""
n_harmonics = n_harmonics + 2
multi_harmonics = []
multi_harmonics_rebound = []
for p in peaks:
multi_harmonics_r = []
multi_harm_temp = []
harmonics = []
p = p * (2**n_oct_up)
i = 1
harm_temp = p
while i < n_harmonics:
harm_temp = p * i
harmonics.append(harm_temp)
i+=1
multi_harmonics.append(harmonics)
multi_harmonics = np.array(multi_harmonics)
return multi_harmonics
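# Illustrative example (not part of the original module): a single 10 Hz peak
# with n_harmonics = 3 yields the first four natural multiples, because
# n_harmonics is incremented by 2 internally and the loop runs up to
# n_harmonics - 1.
# >>> EEG_harmonics_mult([10.0], 3)
# array([[10., 20., 30., 40.]])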
def EEG_harmonics_div(peaks, n_harmonics, n_oct_up = 0, mode = 'div'):
"""
Natural sub-harmonics
This function takes a list of frequency peaks as input and computes the desired number of harmonics
with using division:
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harmonics: int
Number of harmonics to compute
n_oct_up: int
Defaults to 0. Corresponds to the number of octave the peaks are shifted
mode: str
Defaults to 'div'.
'div': x, x/2, x/3 ..., x/n
'div_add': x, (x+x/2), (x+x/3), ... (x+x/n)
'div_sub': x, (x-x/2), (x-x/3), ... (x-x/n)
Returns
-------
div_harmonics: array
(n_peaks, n_harmonics + 1)
div_harmonics_bounded: array
(n_peaks, n_harmonics + 1)
"""
n_harmonics = n_harmonics + 2
div_harmonics = []
for p in peaks:
harmonics = []
p = p * (2**n_oct_up)
i = 1
harm_temp = p
while i < n_harmonics:
if mode == 'div':
harm_temp = (p/i)
if mode == 'div_add':
harm_temp = p + (p/i)
if mode == 'div_sub':
harm_temp = p - (p/i)
harmonics.append(harm_temp)
i+=1
div_harmonics.append(harmonics)
div_harmonics = np.array(div_harmonics)
div_harmonics_bounded = div_harmonics.copy()
#Rebound the result between 1 and 2
for i in range(len(div_harmonics_bounded)):
for j in range(len(div_harmonics_bounded[i])):
div_harmonics_bounded[i][j] = rebound(div_harmonics_bounded[i][j])
return div_harmonics, div_harmonics_bounded
def harmonic_fit(peaks, n_harm = 10, bounds = 1, function = 'mult', div_mode = 'div', n_common_harms = 5):
"""
This function computes harmonics of a list of peaks and compares the lists of harmonics pairwise to find fitting
between the harmonic series
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harm: int
Number of harmonics to compute
bounds: int
Minimum distance (in Hz) between two frequencies to consider a fit
function: str
Defaults to 'mult'.
'mult' will use natural harmonics
'div' will use natural sub-harmonics
div_mode: str
Defaults to 'div'. See EEG_harmonics_div function.
    Returns
    -------
    harm_fit: List (float)
        harmonic values that match between pairs of peaks
    harm_list1: List
        positions of the matching harmonics for the first peak of each pair
    harm_list2: List
        positions of the matching harmonics for the second peak of each pair
    harmonics: List (int)
        sorted positions of the fitting harmonics across all pairs
    most_common_harmonics: List (int)
        the n_common_harms harmonic positions that appear in more than one pair
    """
from itertools import combinations
peak_bands = []
for i in range(len(peaks)):
peak_bands.append(i)
if function == 'mult':
multi_harmonics = EEG_harmonics_mult(peaks, n_harm)
elif function == 'div':
multi_harmonics, x = EEG_harmonics_div(peaks, n_harm, mode = div_mode)
elif function == 'exp':
multi_harmonics = []
increments = []
for h in range(n_harm+1):
h += 1
multi_harmonics.append([i**h for i in peaks])
multi_harmonics = np.array(multi_harmonics)
multi_harmonics = np.moveaxis(multi_harmonics, 0, 1)
#print(np.array(multi_harmonics).shape)
list_peaks = list(combinations(peak_bands,2))
#print(list_peaks)
harm_temp = []
harm_list1 = []
harm_list2 = []
harm_list = []
harmonics = []
for i in range(len(list_peaks)):
harms, _, _, d, e, harm_list = compareLists(multi_harmonics[list_peaks[i][0]], multi_harmonics[list_peaks[i][1]], bounds)
harm_temp.append(harms)
harm_list1.append(d)
harm_list2.append(e)
harmonics.append(harm_list)
harm_fit = np.array(harm_temp).squeeze()
harmonics = reduce(lambda x, y: x+y, harmonics)
most_common_harmonics= [h for h, h_count in Counter(harmonics).most_common(n_common_harms) if h_count > 1]
harmonics = list(np.sort(list(set(harmonics))))
if len(peak_bands) > 2:
harm_fit = list(itertools.chain.from_iterable(harm_fit))
harm_fit = [round(num, 3) for num in harm_fit]
harm_fit = list(dict.fromkeys(harm_fit))
harm_fit = list(set(harm_fit))
return harm_fit, harm_list1, harm_list2, harmonics, most_common_harmonics
'''EXTENDED PEAKS from restrictions
'''
def consonance_peaks (peaks, limit):
"""
This function computes consonance (for a given ratio a/b, when a < 2b, consonance corresponds to (a+b)/(a*b)) between peaks
peaks: List (float)
Peaks represent local maximum in a spectrum
limit: float
minimum consonance value to keep associated pairs of peaks
Comparisons with familiar ratios:
Unison-frequency ratio 1:1 yields a value of 2
Octave-frequency ratio 2:1 yields a value of 1.5
Perfect 5th-frequency ratio 3:2 yields a value of 0.833
Perfect 4th-frequency ratio 4:3 yields a value of 0.583
Major 6th-frequency ratio 5:3 yields a value of 0.533
Major 3rd-frequency ratio 5:4 yields a value of 0.45
Minor 3rd-frequency ratio 5:6 yields a value of 0.366
Minor 6th-frequency ratio 5:8 yields a value of 0.325
Major 2nd-frequency ratio 8:9 yields a value of 0.236
Major 7th-frequency ratio 8:15 yields a value of 0.192
Minor 7th-frequency ratio 9:16 yields a value of 0.174
Minor 2nd-frequency ratio 15:16 yields a value of 0.129
Returns
-------
consonance: List (float)
consonance scores for each pairs of consonant peaks
cons_pairs: List of lists (float)
list of lists of each pairs of consonant peaks
cons_peaks: List (float)
list of consonant peaks (no doublons)
cons_tot: float
averaged consonance value for each pairs of peaks
"""
from fractions import Fraction
consonance_ = []
peaks2keep = []
peaks_consonance = []
cons_tot = []
for p1 in peaks:
for p2 in peaks:
peaks2keep_temp = []
p2x = p2
p1x = p1
if p1x > p2x:
while p1x > p2x:
p1x = p1x/2
if p1x < p2x:
while p2x > p1x:
p2x = p2x/2
if p1x < 0.1:
p1x = 0.06
if p2x < 0.1:
p2x = 0.06 #random number to avoid division by 0
ratio = Fraction(p2x/p1x).limit_denominator(1000)
cons_ = (ratio.numerator + ratio.denominator)/(ratio.numerator * ratio.denominator)
if cons_ < 1 :
cons_tot.append(cons_)
            if cons_ > 1 or cons_ < limit:
                cons_ = None
                p2x = None
                p1x = None
if p2x != None:
peaks2keep_temp.extend([p2, p1])
consonance_.append(cons_)
peaks2keep.append(peaks2keep_temp)
#cons_pairs = np.array(peaks2keep)
cons_pairs = [x for x in peaks2keep if x]
#consonance = np.array(consonance_)
consonance = [i for i in consonance_ if i]
cons_peaks = list(itertools.chain(*cons_pairs))
cons_peaks = [np.round(c, 2) for c in cons_peaks]
cons_peaks = list(set(cons_peaks))
#consonance = list(set(consonance))
return consonance, cons_pairs, cons_peaks, np.average(cons_tot)
def multi_consonance(cons_pairs, n_freqs = 5):
"""
Function that keeps the frequencies that are the most consonant with others
Takes pairs of frequencies that are consonant (output of the 'compute consonance' function)
cons_pairs: List of lists (float)
list of lists of each pairs of consonant peaks
n_freqs: int
maximum number of consonant freqs to keep
Returns
-------
freqs_related: List (float)
peaks that are consonant with at least two other peaks, starting with the peak that is
consonant with the maximum number of other peaks
"""
freqs_dup = list(itertools.chain(*cons_pairs))
pairs_temp = list(itertools.chain.from_iterable(cons_pairs))
freqs_nodup = list(dict.fromkeys(pairs_temp))
f_count = []
for f in freqs_nodup:
f_count.append(freqs_dup.count(f))
freqs_related = [x for _,x in sorted(zip(f_count,freqs_nodup))][-(n_freqs):][::-1]
return freqs_related
def consonant_ratios (peaks, limit, sub = False, input_type = 'peaks', metric = 'cons'):
"""
Function that computes integer ratios from peaks with higher consonance
Needs at least two pairs of values
peaks: List (float)
Peaks represent local maximum in a spectrum
limit: float
minimum consonance value to keep associated pairs of peaks
sub: boolean
Defaults to False
When set to True, include ratios a/b when a < b.
Returns
-------
cons_ratios: List (float)
list of consonant ratios
consonance: List (float)
list of associated consonance values
"""
from fractions import Fraction
consonance_ = []
ratios2keep = []
if input_type == 'peaks':
ratios = compute_peak_ratios(peaks, sub = sub)
if input_type == 'ratios':
ratios = peaks
for ratio in ratios:
frac = Fraction(ratio).limit_denominator(1000)
if metric == 'cons':
cons_ = (frac.numerator + frac.denominator)/(frac.numerator * frac.denominator)
if metric == 'harmsim':
cons_ = dyad_similarity(ratio)
if cons_ > limit :
consonance_.append(cons_)
ratios2keep.append(ratio)
#print(ratios2keep)
ratios2keep = np.array(np.round(ratios2keep, 3))
cons_ratios = np.sort(list(set(ratios2keep)))
#cons_ratios = np.array(ratios2keep)
#ratios = []
#ratios = [ratios.append(x) for x in ratios2keep if x not in ratios]
consonance = np.array(consonance_)
consonance = [i for i in consonance if i]
return cons_ratios, consonance
def timepoint_consonance (data, method = 'cons', limit = 0.2, min_notes = 3):
"""
## Function that keeps moments of consonance from multiple time series of peak frequencies
data: List of lists (float)
Axis 0 represents moments in time
Axis 1 represents the sets of frequencies
method: str
Defaults to 'cons'
'cons' will compute pairwise consonance between frequency peaks in the form of (a+b)/(a*b)
'euler' will compute Euler's gradus suavitatis
limit: float
limit of consonance under which the set of frequencies are not retained
When method = 'cons'
--> See consonance_peaks method's doc to refer consonance values to common intervals
When method = 'euler'
--> Major (4:5:6) = 9
Minor (10:12:15) = 9
Major 7th (8:10:12:15) = 10
Minor 7th (10:12:15:18) = 11
Diminish (20:24:29) = 38
min_notes: int
minimum number of consonant frequencies in the chords. Only relevant when method is set to 'cons'.
Returns
-------
chords: List of lists (float)
Axis 0 represents moments in time
Axis 1 represents the sets of consonant frequencies
positions: List (int)
positions on Axis 0
"""
data = np.moveaxis(data, 0, 1)
#print('NAN', np.argwhere(np.isnan(data)))
out = []
positions = []
for count, peaks in enumerate(data):
peaks = [x for x in peaks if x >= 0]
if method == 'cons':
cons, b, peaks_cons, d = consonance_peaks(peaks, limit)
#print(peaks_cons)
out.append(peaks_cons)
if len(list(set(peaks_cons))) >= min_notes:
positions.append(count)
if method == 'euler':
peaks_ = [int(np.round(p, 2)*100) for p in peaks]
#print(peaks_)
eul = euler(*peaks_)
#print(eul)
if eul < limit:
out.append(list(peaks))
positions.append(count)
out = [x for x in out if x != []]
#if method == 'cons':
out = list(out for out,_ in itertools.groupby(out))
chords = [x for x in out if len(x)>=min_notes]
return chords, positions
'''
################################################## PEAKS METRICS ############################################################
'''
#Consonance#
#Input: peaks
def consonance (ratio, limit = 1000):
''' Compute metric of consonance from a single ratio of frequency
ratio: float
limit: int
Defaults to 1000
Maximum value of the denominator of the fraction representing the ratio
'''
ratio = Fraction(float(ratio)).limit_denominator(limit)
cons = (ratio.numerator + ratio.denominator)/(ratio.numerator * ratio.denominator)
return cons
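# Illustrative examples (plain arithmetic from the formula above): the perfect
# fifth 3/2 gives (3 + 2) / (3 * 2) and the octave 2/1 gives (2 + 1) / (2 * 1),
# matching the values listed in the consonance_peaks docstring.
# >>> consonance(3/2)
# 0.8333333333333334
# >>> consonance(2/1)
# 1.5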
def euler(*numbers):
"""
Euler's "gradus suavitatis" (degree of sweetness) function
Return the "degree of sweetness" of a musical interval or chord expressed
as a ratio of frequencies a:b:c, according to Euler's formula
Greater values indicate more dissonance
numbers: List (int)
frequencies
"""
factors = prime_factors(lcm(*reduced_form(*numbers)))
return 1 + sum(p - 1 for p in factors)
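# Illustrative example (assumes reduced_form, lcm and prime_factors from
# biotuner_utils, pulled in by the star import above): for the major triad
# 4:5:6, lcm(4, 5, 6) = 60 = 2 * 2 * 3 * 5, so the gradus suavitatis is
# 1 + 1 + 1 + 2 + 4 = 9, the value quoted in the timepoint_consonance docstring.
# >>> euler(4, 5, 6)
# 9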
#Input: peaks
def tenneyHeight(peaks, avg = True):
"""
Tenney Height is a measure of inharmonicity calculated on two frequencies (a/b) reduced in their simplest form.
It can also be called the log product complexity of a given interval.
peaks: List (float)
frequencies
avg: Boolean
Default to True
When set to True, all tenney heights are averaged
"""
    pairs = getPairs(peaks)
tenney = []
for p in pairs:
try:
frac = Fraction(p[0]/p[1]).limit_denominator(1000)
except ZeroDivisionError:
p[1] = 0.01
frac = Fraction(p[0]/p[1]).limit_denominator(1000)
x = frac.numerator
y = frac.denominator
tenney.append(log2(x*y))
if avg == True:
tenney = np.average(tenney)
return tenney
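# Illustrative example (assumes getPairs from biotuner_utils): the pair
# (440, 660) reduces to 2/3, so its Tenney height is log2(2 * 3) = log2(6).
# >>> tenneyHeight([440, 660])   # ~2.585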
def peaks_to_metrics (peaks, n_harm = 10):
'''
This function computes different metrics on peak frequencies.
peaks: List (float)
Peaks represent local maximum in a spectrum
n_harm: int
Number of harmonics to compute for 'harm_fit' metric
Returns
-------
metrics: dict (float)
Dictionary of values associated to metrics names
metrics_list: List (float)
list of peaks metrics values in the order: 'cons', 'euler', 'tenney', 'harm_fit'
'''
peaks = list(peaks)
metrics = {'cons' : 0, 'euler' : 0, 'tenney': 0, 'harm_fit': 0}
    harm_fit, harm_pos1, harm_pos2, _, _ = harmonic_fit(peaks, n_harm = n_harm)
metrics['harm_pos1'] = harm_pos1
metrics['harm_pos2'] = harm_pos2
metrics['harm_fit'] = len(harm_fit)
a, b, c, metrics['cons'] = consonance_peaks (peaks, 0.1)
peaks_highfreq = [int(p*1000) for p in peaks]
metrics['euler'] = euler(*peaks_highfreq)
metrics['tenney'] = tenneyHeight(peaks_highfreq)
metrics_list = []
for value in metrics.values():
metrics_list.append(value)
return metrics, metrics_list
def metric_denom(ratio):
'''Function that computes the denominator of the normalized ratio
ratio: float
'''
ratio = sp.Rational(ratio).limit_denominator(10000)
normalized_degree = normalize_interval(ratio)
y = int(sp.fraction(normalized_degree)[1])
return y
'''SCALE METRICS'''
'''Metric of harmonic similarity represents the degree of similarity between a scale and the natural harmonic series ###
Implemented from Gill and Purves (2009)'''
def dyad_similarity(ratio):
'''
This function computes the similarity between a dyad of frequencies and the natural harmonic series
ratio: float
frequency ratio
'''
frac = Fraction(float(ratio)).limit_denominator(1000)
x = frac.numerator
y = frac.denominator
z = ((x+y-1)/(x*y))*100
return z
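# Illustrative example: for the perfect fifth 3/2 the proportion of matching
# harmonics is (3 + 2 - 1) / (3 * 2) = 4/6, i.e. about 66.7 % similarity with
# the harmonic series.
# >>> dyad_similarity(3/2)
# 66.66666666666666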
#Input: ratios (list of floats)
def ratios2harmsim (ratios):
'''
This function computes the similarity for each ratio of a list
ratios: List (float)
list of frequency ratios (forming a scale)
Returns
---------
similarity: List (float)
list of percentage of similarity for each ratios
'''
fracs = []
for r in ratios:
fracs.append(Fraction(r).limit_denominator(1000))
sims = []
for f in fracs:
sims.append(dyad_similarity(f.numerator/f.denominator))
similarity = np.array(sims)
return similarity
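# Illustrative example: the fifth (1.5) and the major third (1.25) give
# harmonic similarities of ~66.7 % and 40 % respectively.
# >>> ratios2harmsim([1.5, 1.25])
# array([66.66666667, 40.        ])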
def scale_cons_matrix (scale, function):
'''
This function gives a metric of a scale corresponding to the averaged metric for each pairs of ratios (matrix)
scale: List (float)
function: function
possible functions: dyad_similarity
consonance
metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
for index2 in range(len(scale)):
            if scale[index1] > scale[index2]:  # do not include the diagonal in the computation of the avg. consonance
entry = scale[index1]/scale[index2]
mode_values.append([scale[index1], scale[index2]])
metric_values.append(function(entry))
return np.average(metric_values)
def PyTuning_metrics(scale, maxdenom):
'''
This function computes the scale metrics of the PyTuning library (https://pytuning.readthedocs.io/en/0.7.2/metrics.html)
Smaller values are more consonant
scale: List (float)
List of ratios corresponding to scale steps
maxdenom: int
Maximum value of the denominator for each step's fraction
'''
scale_frac, num, denom = scale2frac(scale, maxdenom)
metrics = pytuning.metrics.all_metrics(scale_frac)
sum_p_q = metrics['sum_p_q']
sum_distinct_intervals = metrics['sum_distinct_intervals']
metric_3 = metrics['metric_3']
sum_p_q_for_all_intervals = metrics['sum_p_q_for_all_intervals']
sum_q_for_all_intervals = metrics['sum_q_for_all_intervals']
return sum_p_q, sum_distinct_intervals, metric_3, sum_p_q_for_all_intervals, sum_q_for_all_intervals
def scale_to_metrics(scale):
'''
This function computes the scale metrics of the PyTuning library and other scale metrics
scale: List (float)
List of ratios corresponding to scale steps
Returns
----------
scale_metrics: dictionary
keys correspond to metrics names
scale_metrics_list: List (float)
List of values corresponding to all computed metrics (in the same order as dictionary)
'''
scale_frac, num, denom = scale2frac(scale, maxdenom=1000)
scale_metrics = pytuning.metrics.all_metrics(scale_frac)
scale_metrics['harm_sim'] = np.round(np.average(ratios2harmsim(scale)), 2)
scale_metrics['matrix_harm_sim'] = scale_cons_matrix(scale, dyad_similarity)
scale_metrics['matrix_cons'] = scale_cons_matrix(scale, consonance)
scale_metrics_list = []
for value in scale_metrics.values():
scale_metrics_list.append(value)
return scale_metrics, scale_metrics_list
def scale_consonance (scale, function, rounding = 4):
'''
Function that gives the average consonance of each scale interval
scale: List (float)
scale to reduce
function: function
function used to compute the consonance between pairs of ratios
Choose between: consonance, dyad_similarity, metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
metric_value = []
for index2 in range(len(scale)):
entry = scale[index1]/scale[index2]
mode_values.append([scale[index1], scale[index2]])
metric_value.append(function(entry))
metric_values.append(np.average(metric_value))
return metric_values
'''
################################################ SCALE CONSTRUCTION ##############################################################
'''
def oct_subdiv(ratio, octave_limit = 0.01365 ,octave = 2 ,n = 5):
'''
N-TET tuning from Generator Interval
This function uses a generator interval to suggest numbers of steps to divide the octave,
so the given interval will be approximately present (octave_limit) in the steps of the N-TET tuning.
ratio: float
ratio that corresponds to the generator_interval
        e.g.: given the fifth (3/2) as generator interval, this function will suggest subdividing the octave into 12, 53, ... steps
octave_limit: float
Defaults to 0.01365 (Pythagorean comma)
approximation of the octave corresponding to the acceptable distance between the ratio of the generator interval after
multiple iterations and the octave value.
octave: int
Defaults to 2
value of the octave
n: int
Defaults to 5
number of suggested octave subdivisions
Returns
-------
Octdiv: List (int)
list of N-TET tunings corresponding to dividing the octave in equal steps
Octvalue: List (float)
list of the approximations of the octave for each N-TET tuning
'''
Octdiv, Octvalue, i = [], [], 1
ratios = []
while len(Octdiv) < n:
ratio_mult = (ratio**i)
while ratio_mult > octave:
ratio_mult = ratio_mult/octave
rescale_ratio = ratio_mult - round(ratio_mult)
ratios.append(ratio_mult)
i+=1
if -octave_limit < rescale_ratio < octave_limit:
Octdiv.append(i-1)
Octvalue.append(ratio_mult)
else:
continue
return Octdiv, Octvalue
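# Illustrative example: with the perfect fifth as generator interval, the first
# two suggested subdivisions are 12 (the usual chromatic scale) and 53
# (Mercator's division), since (3/2)**12 / 2**7 and (3/2)**53 / 2**31 both fall
# within a Pythagorean comma of a pure octave.
# >>> oct_subdiv(3/2, n=2)[0]
# [12, 53]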
def compare_oct_div(Octdiv = 12, Octdiv2 = 53, bounds = 0.005, octave = 2):
'''
Function that compare steps for two N-TET tunings and return matching ratios and corresponding degrees
Octdiv: int
Defaults to 12.
first N-TET tuning number of steps
Octdiv2: int
Defaults to 53.
second N-TET tuning number of steps
bounds: float
Defaults to 0.005
Maximum distance between 1 ratio of Octdiv and 1 ratio of Octdiv2 to consider a match
octave: int
Defaults to 2
value of the octave
Returns
-------
avg_ratios: List (float)
list of ratios corresponding to the shared steps in the two N-TET tunings
shared_steps: List of tuples
the two elements of each tuple corresponds to the scale steps sharing the same interval in the two N-TET tunings
'''
ListOctdiv = []
ListOctdiv2 = []
OctdivSum = 1
OctdivSum2 = 1
i = 1
i2 = 1
while OctdivSum < octave:
OctdivSum =(nth_root(octave, Octdiv))**i
i+=1
ListOctdiv.append(OctdivSum)
while OctdivSum2 < octave:
OctdivSum2 =(nth_root(octave, Octdiv2))**i2
i2+=1
ListOctdiv2.append(OctdivSum2)
shared_steps = []
avg_ratios = []
for i, n in enumerate(ListOctdiv):
for j, harm in enumerate(ListOctdiv2):
if harm-bounds < n < harm+bounds:
shared_steps.append((i+1, j+1))
avg_ratios.append((n+harm)/2)
return avg_ratios, shared_steps
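# Illustrative example: comparing 12-TET and 53-TET, the shared steps include
# (7, 31), the two perfect-fifth approximations 2**(7/12) ~ 1.4983 and
# 2**(31/53) ~ 1.4999, which differ by less than the default bound of 0.005.
# >>> _, shared = compare_oct_div(12, 53)
# >>> (7, 31) in shared
# True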
#Output1: octave subdivisions
#Output2: ratios that led to Output1
def multi_oct_subdiv (peaks, max_sub = 100, octave_limit = 0.01365, octave = 2, n_scales = 10, cons_limit = 0.1):
'''
This function uses the most consonant peaks ratios as input of oct_subdiv function. Each consonant ratio
leads to a list of possible octave subdivisions. These lists are compared and optimal octave subdivisions are
determined.
peaks: List (float)
Peaks represent local maximum in a spectrum
max_sub: int
Defaults to 100.
Maximum number of intervals in N-TET tuning suggestions.
octave_limit: float
        Defaults to 0.01365 (Pythagorean comma).
Approximation of the octave corresponding to the acceptable distance between the ratio of the generator interval after
multiple iterations and the octave value.
octave: int
Defaults to 2.
value of the octave
n_scales: int
Defaults to 10.
Number of N-TET tunings to compute for each generator interval (ratio).
Returns
-------
multi_oct_div: List (int)
List of octave subdivisions that fit with multiple generator intervals.
ratios: List (float)
list of the generator intervals for which at least 1 N-TET tuning match with another generator interval.
'''
import itertools
from collections import Counter
#a, b, pairs, cons = consonance_peaks(peaks, cons_limit)
ratios, cons = consonant_ratios(peaks, cons_limit)
list_oct_div = []
for i in range(len(ratios)):
list_temp, _ = oct_subdiv(ratios[i], octave_limit, octave, n_scales)
list_oct_div.append(list_temp)
counts = Counter(list(itertools.chain(*list_oct_div)))
oct_div_temp = []
for k, v in counts.items():
if v > 1:
oct_div_temp.append(k)
oct_div_temp = np.sort(oct_div_temp)
multi_oct_div = []
for i in range(len(oct_div_temp)):
if oct_div_temp[i] < max_sub:
multi_oct_div.append(oct_div_temp[i])
return multi_oct_div, ratios
def harmonic_tuning (list_harmonics, octave = 2, min_ratio = 1, max_ratio = 2):
'''
Function that computes a tuning based on a list of harmonic positions
list_harmonics: List (int)
harmonic positions to use in the scale construction
octave: int
min_ratio: float
max_ratio: float
'''
ratios = []
for i in list_harmonics:
ratios.append(rebound(1*i, min_ratio, max_ratio, octave))
ratios = list(set(ratios))
ratios = list(np.sort(np.array(ratios)))
return ratios
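# Illustrative sketch (assumes rebound from biotuner_utils folds a value into
# [min_ratio, max_ratio) by octave reduction): harmonics 3, 5 and 7 reduce to
# the just fifth, major third and harmonic seventh.
# >>> harmonic_tuning([3, 5, 7])
# [1.25, 1.5, 1.75]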
def euler_fokker_scale(intervals, n = 1):
'''
Function that takes as input a series of intervals and derives a Euler Fokker Genera scale
intervals: List (float)
n: int
Defaults to 1
number of times the interval is used in the scale generation
'''
multiplicities = [n for x in intervals]
scale = create_euler_fokker_scale(intervals, multiplicities)
return scale
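# Illustrative example (the exact output format is pytuning's
# create_euler_fokker_scale, which returns sympy rationals; the values below
# are an assumption based on its documented behaviour): the genus [3, 5] with
# multiplicity 1 should give the just scale 1, 5/4, 3/2, 15/8, 2.
# >>> euler_fokker_scale([3, 5])
# [1, 5/4, 3/2, 15/8, 2]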
def generator_interval_tuning (interval = 3/2, steps = 12, octave = 2):
'''
Function that takes a generator interval and derives a tuning based on its stacking.
interval: float
Generator interval
steps: int
Defaults to 12 (12-TET for interval 3/2)
Number of steps in the scale
octave: int
Defaults to 2
Value of the octave
'''
scale = []
for s in range(steps):
s += 1
degree = interval**s
while degree > octave:
degree = degree/octave
scale.append(degree)
return sorted(scale)
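# Illustrative example: stacking twelve fifths with octave reduction yields the
# 12-step Pythagorean chromatic scale; its smallest degree,
# (3/2)**12 / 2**7 ~ 1.0136, sits a Pythagorean comma above the unison.
# >>> scale = generator_interval_tuning(3/2, steps=12)
# >>> len(scale)
# 12
# >>> min(scale)   # ~1.0136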
#Function that takes two ratios as input (the boundaries of the tuning range).
#The mediant corresponds to the interval where small and large steps are equal.
def tuning_range_to_MOS (frac1, frac2, octave = 2, max_denom_in = 100, max_denom_out = 100):
gen1 = octave**(frac1)
gen2 = octave**(frac2)
a = Fraction(frac1).limit_denominator(max_denom_in).numerator
b = Fraction(frac1).limit_denominator(max_denom_in).denominator
c = Fraction(frac2).limit_denominator(max_denom_in).numerator
d = Fraction(frac2).limit_denominator(max_denom_in).denominator
print(a, b, c, d)
mediant = (a+c)/(b+d)
mediant_frac = sp.Rational((a+c)/(b+d)).limit_denominator(max_denom_out)
gen_interval = octave**(mediant)
gen_interval_frac = sp.Rational(octave**(mediant)).limit_denominator(max_denom_out)
MOS_signature = [d, b]
invert_MOS_signature = [b, d]
return mediant, mediant_frac, gen_interval, gen_interval_frac, MOS_signature, invert_MOS_signature
#def tuning_embedding ()
def stern_brocot_to_generator_interval (ratio, octave = 2):
gen_interval = octave**(ratio)
return gen_interval
def gen_interval_to_stern_brocot (gen):
root_ratio = log2(gen)
return root_ratio
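# Illustrative example: the two helpers are inverses of each other, mapping a
# generator interval to its size in octaves (a Stern-Brocot ratio) and back.
# >>> gen_interval_to_stern_brocot(1.5)            # ~0.585 octaves
# >>> stern_brocot_to_generator_interval(0.585)    # ~1.5, the fifth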
#Dissonance
def dissmeasure(fvec, amp, model='min'):
"""
Given a list of partials in fvec, with amplitudes in amp, this routine
calculates the dissonance by summing the roughness of every sine pair
based on a model of Plomp-Levelt's roughness curve.
The older model (model='product') was based on the product of the two
amplitudes, but the newer model (model='min') is based on the minimum
of the two amplitudes, since this matches the beat frequency amplitude.
"""
# Sort by frequency
sort_idx = np.argsort(fvec)
am_sorted = np.asarray(amp)[sort_idx]
fr_sorted = np.asarray(fvec)[sort_idx]
# Used to stretch dissonance curve for different freqs:
Dstar = 0.24 # Point of maximum dissonance
S1 = 0.0207
S2 = 18.96
C1 = 5
C2 = -5
# Plomp-Levelt roughness curve:
A1 = -3.51
A2 = -5.75
# Generate all combinations of frequency components
idx = np.transpose(np.triu_indices(len(fr_sorted), 1))
fr_pairs = fr_sorted[idx]
am_pairs = am_sorted[idx]
Fmin = fr_pairs[:, 0]
S = Dstar / (S1 * Fmin + S2)
Fdif = fr_pairs[:, 1] - fr_pairs[:, 0]
if model == 'min':
a = np.amin(am_pairs, axis=1)
elif model == 'product':
a = np.prod(am_pairs, axis=1) # Older model
else:
raise ValueError('model should be "min" or "product"')
SFdif = S * Fdif
D = np.sum(a * (C1 * np.exp(A1 * SFdif) + C2 * np.exp(A2 * SFdif)))
return D
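# Illustrative example (approximate values derived from the curve constants
# above): for two pure tones of equal amplitude near 440 Hz, a semitone is far
# rougher than a perfect fifth.
# >>> dissmeasure([440.0, 466.16], [1.0, 1.0])   # ~0.90
# >>> dissmeasure([440.0, 660.0], [1.0, 1.0])    # ~0.007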
#Input: peaks and amplitudes
def diss_curve (freqs, amps, denom=1000, max_ratio=2, euler_comp = True, method = 'min', plot = True, n_tet_grid = None):
'''
This function computes the dissonance curve and related metrics for a given set of frequencies (freqs) and amplitudes (amps)
freqs: List (float)
list of frequencies associated with spectral peaks
amps: List (float)
list of amplitudes associated with freqs (must be same lenght)
denom: int
Defaults to 1000.
Highest value for the denominator of each interval
max_ratio: int
Defaults to 2.
Value of the maximum ratio
Set to 2 for a span of 1 octave
Set to 4 for a span of 2 octaves
Set to 8 for a span of 3 octaves
Set to 2**n for a span of n octaves
    euler_comp: Boolean
Defaults to True
When set to True, compute the Euler Gradus Suavitatis for the derived scale
method: str
Defaults to 'min'
Can be set to 'min' or 'product'. Refer to dissmeasure function for more information.
plot: boolean
Defaults to True
When set to True, a plot of the dissonance curve will be generated
n_tet_grid: int
Defaults to None
When an integer is given, dotted lines will be add to the plot a steps of the given N-TET scale
Returns
-------
intervals: List of tuples
Each tuple corresponds to the numerator and the denominator of each scale step ratio
ratios: List (float)
list of ratios that constitute the scale
euler_score: int
value of consonance of the scale
diss: float
value of averaged dissonance of the total curve
dyad_sims: List (float)
list of dyad similarities for each ratio of the scale
'''
from numpy import array, linspace, empty, concatenate
from scipy.signal import argrelextrema
from fractions import Fraction
freqs = np.array(freqs)
r_low = 1
alpharange = max_ratio
n = 1000
diss = empty(n)
a = concatenate((amps, amps))
for i, alpha in enumerate(linspace(r_low, alpharange, n)):
f = concatenate((freqs, alpha*freqs))
d = dissmeasure(f, a, method)
diss[i] = d
diss_minima = argrelextrema(diss, np.less)
intervals = []
for d in range(len(diss_minima[0])):
frac = Fraction(diss_minima[0][d]/(n/(max_ratio-1))+1).limit_denominator(denom)
frac = (frac.numerator, frac.denominator)
intervals.append(frac)
intervals.append((2, 1))
ratios = [i[0]/i[1] for i in intervals]
ratios_sim = [np.round(r, 2) for r in ratios] #round ratios for similarity measures of harmonic series
#print(ratios_sim)
dyad_sims = ratios2harmsim(ratios[:-1]) # compute dyads similarities with natural harmonic series
a = 1
    ratios_euler = [a] + ratios
    ratios_euler = [int(round(num, 2) * 1000) for num in ratios_euler]
#print(ratios_euler)
euler_score = None
if euler_comp == True:
euler_score = euler(*ratios_euler)
        euler_score = euler_score / len(diss_minima[0])
else:
euler_score = 'NaN'
if plot == True:
plt.figure(figsize=(14, 6))
plt.plot(linspace(r_low, alpharange, len(diss)), diss)
plt.xscale('linear')
plt.xlim(r_low, alpharange)
try:
plt.text(1.9, 1.5, 'Euler = '+str(int(euler_score)), horizontalalignment = 'center',
verticalalignment='center', fontsize = 16)
except:
pass
for n, d in intervals:
plt.axvline(n/d, color='silver')
# Plot N-TET grid
        if n_tet_grid is not None:
n_tet = NTET_ratios(n_tet_grid, max_ratio = max_ratio)
for n in n_tet :
plt.axvline(n, color='red', linestyle = '--')
# Plot scale ticks
plt.minorticks_off()
plt.xticks([n/d for n, d in intervals],
['{}/{}'.format(n, d) for n, d in intervals], fontsize = 13)
plt.yticks(fontsize = 13)
plt.tight_layout()
plt.show()
return intervals, ratios, euler_score, np.average(diss), dyad_sims
'''Harmonic Entropy'''
def compute_harmonic_entropy_domain_integral(ratios, ratio_interval, spread=0.01, min_tol=1e-15):
# The first step is to pre-sort the ratios to speed up computation
ind = np.argsort(ratios)
weight_ratios = ratios[ind]
centers = (weight_ratios[:-1] + weight_ratios[1:]) / 2
ratio_interval = array(ratio_interval)
N = len(ratio_interval)
HE = zeros(N)
for i, x in enumerate(ratio_interval):
P = diff(concatenate(([0], norm.cdf(log2(centers), loc=log2(x), scale=spread), [1])))
ind = P > min_tol
HE[i] = -np.sum(P[ind] * log2(P[ind]))
return weight_ratios, HE
def compute_harmonic_entropy_simple_weights(numerators, denominators, ratio_interval, spread=0.01, min_tol=1e-15):
# The first step is to pre-sort the ratios to speed up computation
ratios = numerators / denominators
ind = np.argsort(ratios)
numerators = numerators[ind]
denominators = denominators[ind]
weight_ratios = ratios[ind]
ratio_interval = array(ratio_interval)
N = len(ratio_interval)
HE = zeros(N)
for i, x in enumerate(ratio_interval):
P = norm.pdf(log2(weight_ratios), loc=log2(x), scale=spread) / sqrt(numerators * denominators)
ind = P > min_tol
P = P[ind]
P /= np.sum(P)
HE[i] = -np.sum(P * log2(P))
return weight_ratios, HE
def harmonic_entropy (ratios, res = 0.001, spread = 0.01, plot_entropy = True, plot_tenney = False, octave = 2):
'''
Harmonic entropy is a measure of the uncertainty in pitch perception, and it provides a physical correlate of tonalness,
one aspect of the psychoacoustic concept of dissonance (Sethares). High tonalness corresponds to low entropy and low tonalness
corresponds to high entropy.
ratios: List (float)
ratios between each pairs of frequency peaks
res: float
Defaults to 0.001
resolution of the ratio steps
spread: float
Default to 0.01
plot_entropy: boolean
Defaults to True
When set to True, plot the harmonic entropy curve
plot_tenney: boolean
Defaults to False
When set to True, plot the tenney heights (y-axis) across ratios (x-axis)
octave: int
Defaults to 2
Value of the maximum interval ratio
Returns
----------
HE_minima: List (float)
List of ratios corresponding to minima of the harmonic entropy curve
HE: float
Value of the averaged harmonic entropy
'''
fracs, numerators, denominators = scale2frac(ratios)
ratios = numerators / denominators
#print(ratios)
#ratios = np.interp(ratios, (ratios.min(), ratios.max()), (1, 10))
bendetti_heights = numerators * denominators
tenney_heights = log2(bendetti_heights)
ind = np.argsort(tenney_heights) # first, sort by Tenney height to make things more efficient
bendetti_heights = bendetti_heights[ind]
tenney_heights = tenney_heights[ind]
numerators = numerators[ind]
denominators = denominators[ind]
#ratios = ratios[ind]
if plot_tenney == True:
fig = plt.figure(figsize=(10, 4), dpi=150)
ax = fig.add_subplot(111)
# ax.scatter(ratios, 2**tenney_heights, s=1)
ax.scatter(ratios, tenney_heights, s=1, alpha=.2)
# ax.scatter(ratios[:200], tenney_heights[:200], s=1, color='r')
plt.show()
# Next, we need to ensure a distance `d` between adjacent ratios
M = len(bendetti_heights)
delta = 0.00001
indices = ones(M, dtype=bool)
for i in range(M - 2):
ind = abs(ratios[i + 1:] - ratios[i]) > delta
indices[i + 1:] = indices[i + 1:] * ind
bendetti_heights = bendetti_heights[indices]
tenney_heights = tenney_heights[indices]
numerators = numerators[indices]
denominators = denominators[indices]
ratios = ratios[indices]
M = len(tenney_heights)
#print(M)
#print('hello')
x_ratios = arange(1, octave, res)
_, HE = compute_harmonic_entropy_domain_integral(ratios, x_ratios, spread=spread)
#_, HE = compute_harmonic_entropy_simple_weights(numerators, denominators, x_ratios, spread=0.01)
ind = argrelextrema(HE, np.less)
HE_minima = (x_ratios[ind], HE[ind])
if plot_entropy == True:
fig = plt.figure(figsize=(10, 4), dpi=150)
ax = fig.add_subplot(111)
# ax.plot(weight_ratios, log2(pdf))
ax.plot(x_ratios, HE)
# ax.plot(x_ratios, HE_simple)
ax.scatter(HE_minima[0], HE_minima[1], color='k', s=4)
ax.set_xlim(1, octave)
plt.show()
return HE_minima, np.average(HE)
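# Illustrative usage sketch (assumes scale2frac from biotuner_utils): feed a
# set of interval ratios and read off the entropy minima, which typically fall
# on simple ratios such as 4/3 and 3/2.
# >>> ratios = [1.2, 1.25, 1.333, 1.5, 1.6, 1.666]
# >>> HE_minima, avg_HE = harmonic_entropy(ratios, plot_entropy=False)
# >>> # HE_minima[0] holds the ratios at the local minima of the curve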
'''Scale reduction'''
def scale_reduction (scale, mode_n_steps, function, rounding = 4):
'''
Function that reduces the number of steps in a scale according to the consonance between pairs of ratios
scale: List (float)
scale to reduce
mode_n_steps: int
number of steps of the reduced scale
function: function
function used to compute the consonance between pairs of ratios
Choose between: consonance, dyad_similarity, metric_denom
'''
metric_values = []
mode_values = []
for index1 in range(len(scale)):
for index2 in range(len(scale)):
            if scale[index1] > scale[index2]:  # do not include the diagonal in the computation of the avg. consonance
entry = scale[index1]/scale[index2]
#print(entry_value, scale[index1], scale[index2])
mode_values.append([scale[index1], scale[index2]])
#if function == metric_denom:
# metric_values.append(int(function(sp.Rational(entry).limit_denominator(1000))))
#else:
metric_values.append(function(entry))
if function == metric_denom:
cons_ratios = [x for _, x in sorted(zip(metric_values, mode_values))]
else:
cons_ratios = [x for _, x in sorted(zip(metric_values, mode_values))][::-1]
i = 0
mode_ = []
mode_out = []
while len(mode_out) < mode_n_steps:
cons_temp = cons_ratios[i]
mode_.append(cons_temp)
mode_out_temp = [item for sublist in mode_ for item in sublist]
mode_out_temp = [np.round(x, rounding) for x in mode_out_temp]
mode_out = sorted(set(mode_out_temp), key = mode_out_temp.index)[0:mode_n_steps]
i +=1
mode_metric = []
for index1 in range(len(mode_out)):
for index2 in range(len(mode_out)):
if mode_out[index1] > mode_out[index2]:
entry = mode_out[index1]/mode_out[index2]
#if function == metric_denom:
# mode_metric.append(int(function(sp.Rational(entry).limit_denominator(1000))))
#else:
mode_metric.append(function(entry))
return np.average(metric_values), mode_out, np.average(mode_metric)
'''------------------------------------------------------Peaks extraction--------------------------------------------------------------'''
import emd
from PyEMD import EMD, EEMD
from scipy.signal import butter, lfilter
import colorednoise as cn
#PEAKS FUNCTIONS
#HH1D_weightAVG (Hilbert-Huang 1D): takes the average of all the instantaneous frequencies weighted by power
#HH1D_max: takes the frequency bin that has the maximum power value
def compute_peaks_ts (data, peaks_function = 'EMD', FREQ_BANDS = None, precision = 0.25, sf = 1000, max_freq = 80):
alphaband = [[7, 12]]
    if FREQ_BANDS is None:
        FREQ_BANDS = [[2, 3.55], [3.55, 7.15], [7.15, 14.3], [14.3, 28.55], [28.55, 49.4]]
if peaks_function == 'EEMD':
IMFs = EMD_eeg(data)[1:6]
if peaks_function == 'EMD':
data = np.interp(data, (data.min(), data.max()), (0, +1))
IMFs = emd.sift.sift(data)
#IMFs = emd.sift.ensemble_sift(data)
IMFs = np.moveaxis(IMFs, 0, 1)[1:6]
try:
peaks_temp = []
amps_temp = []
for imf in range(len(IMFs)):
p, a = compute_peak(IMFs[imf], precision = precision, average = 'median')
#print(p)
peaks_temp.append(p)
amps_temp.append(a)
peaks_temp = np.flip(peaks_temp)
amps_temp = np.flip(amps_temp)
except:
pass
if peaks_function == 'HH1D_max':
IMFs = EMD_eeg(data)
IMFs = np.moveaxis(IMFs, 0, 1)
IP, IF, IA = emd.spectra.frequency_transform(IMFs[:, 1:6], sf, 'nht')
precision_hh = precision*2
low = 1
high = max_freq
steps = int((high-low)/precision_hh)
edges, bins = emd.spectra.define_hist_bins(low, high, steps, 'log')
# Compute the 1d Hilbert-Huang transform (power over carrier frequency)
spec = emd.spectra.hilberthuang_1d(IF, IA, edges)
spec = np.moveaxis(spec, 0, 1)
peaks_temp = []
amps_temp = []
for e, i in enumerate(spec):
max_power = np.argmax(i)
peaks_temp.append(bins[max_power])
amps_temp.append(spec[e][max_power])
peaks_temp = np.flip(peaks_temp)
amps_temp = np.flip(amps_temp)
#if peaks_function == 'HH1D_weightAVG':
if peaks_function == 'adapt':
p, a = compute_peaks_raw(data, alphaband, precision = precision, average = 'median')
FREQ_BANDS = alpha2bands(p)
peaks_temp, amps_temp = compute_peaks_raw(data, FREQ_BANDS, precision = precision, average = 'median')
if peaks_function == 'fixed':
peaks_temp, amps_temp = compute_peaks_raw(data, FREQ_BANDS, precision = precision, average = 'median')
peaks = np.array(peaks_temp)
amps = np.array(amps_temp)
return peaks, amps
def extract_all_peaks (data, sf, precision, max_freq = None):
    if max_freq is None:
max_freq = sf/2
mult = 1/precision
nperseg = sf*mult
nfft = nperseg
freqs, psd = scipy.signal.welch(data, sf, nfft = nfft, nperseg = nperseg, average = 'median')
psd = 10. * np.log10(psd)
indexes = ss.find_peaks(psd, height=None, threshold=None, distance=10, prominence=None, width=2, wlen=None, rel_height=0.5, plateau_size=None)
peaks = []
amps = []
for i in indexes[0]:
peaks.append(freqs[i])
amps.append(psd[i])
peaks = np.around(np.array(peaks), 5)
peaks = list(peaks)
peaks = [p for p in peaks if p<=max_freq]
return peaks, amps
def harmonic_peaks_fit (peaks, amps, min_freq = 0.5, max_freq = 30, min_harms = 2, harm_limit = 128):
n_total = []
harm_ = []
harm_peaks = []
max_n = []
max_peaks = []
max_amps = []
harmonics = []
harmonic_peaks = []
harm_peaks_fit = []
for p, a in zip(peaks, amps):
n = 0
harm_temp = []
harm_peaks_temp = []
if p < max_freq and p > min_freq:
for p2 in peaks:
if p2 == p:
ratio = 0.1 #arbitrary value to set ratio value to non integer
if p2 > p:
ratio = p2/p
harm = ratio
if p2 < p:
ratio = p/p2
harm = -ratio
if ratio.is_integer():
if harm <= harm_limit:
n += 1
harm_temp.append(harm)
if p not in harm_peaks_temp:
harm_peaks_temp.append(p)
if p2 not in harm_peaks_temp:
harm_peaks_temp.append(p2)
n_total.append(n)
harm_.append(harm_temp)
harm_peaks.append(harm_peaks_temp)
if n >= min_harms:
max_n.append(n)
max_peaks.append(p)
max_amps.append(a)
#print(harm_temp)
harmonics.append(harm_temp)
harmonic_peaks.append(harm_peaks)
harm_peaks_fit.append([p, harm_temp, harm_peaks_temp])
for i in range(len(harm_peaks_fit)):
harm_peaks_fit[i][2] = sorted(harm_peaks_fit[i][2])
max_n = np.array(max_n)
max_peaks = np.array(max_peaks)
max_amps = np.array(max_amps)
harmonics = np.array(harmonics)
#print(harmonics.shape)
harmonic_peaks = np.array(harmonic_peaks)
#harm_peaks_fit = np.array(harm_peaks_fit)
#max_indexes = np.argsort(n_total)[-10:]
return max_n, max_peaks, max_amps, harmonics, harmonic_peaks, harm_peaks_fit
def cepstrum(signal, sample_freq, plot_cepstrum = False, min_freq=1.5, max_freq=80):
windowed_signal = signal
dt = 1/sample_freq
freq_vector = np.fft.rfftfreq(len(windowed_signal), d=dt)
X = np.fft.rfft(windowed_signal)
log_X = np.log(np.abs(X))
    cepstrum = np.fft.rfft(log_X)
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from utils.histograms import make_eta_range, make_phi_range
import utils.transformation as utf
def delta_phi(phi1, phi2):
result = phi1 - phi2
while result > np.pi:
result -= 2 * np.pi
while result <= -np.pi:
result += 2 * np.pi
return result
def vdelta_phi(x1, x2):
return np.vectorize(delta_phi)(x1, x2)
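# Illustrative example: delta_phi wraps the angular difference into (-pi, pi],
# so two angles on either side of the +/-pi branch cut stay close.
# >>> round(delta_phi(np.pi - 0.1, -np.pi + 0.1), 6)
# -0.2
# >>> vdelta_phi(np.array([0.5]), np.array([0.1]))   # ~array([0.4])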
# PARAMETERS
input_path = "./tracksReais_04_06_2019"
output_path = "/data/track-ml/output/"
extra_track_info = 7
max_num_hits = 28
num_fine_eta_bins = 88
num_fine_phi_bins = 63
num_fine_rho_bins = 120
num_coarse_eta_bins = 22 # 8.8/22 = 0.4
num_coarse_phi_bins = 13 # 6.28/13 = 0.48
hit_size = 6 # 3 space points + 3 technical information
plt.ioff()
# Open the input file
tracks = np.genfromtxt(input_path, delimiter=",")
print(tracks.shape)
# Separate the input tracks in different parts
indexes, vertices, momenta, hits = utf.parts_from_tracks(tracks)
print("Hits", hits.shape)
global_X = np.empty(0)
global_Y = np.empty(0)
global_eta = np.empty(0)
global_phi = np.empty(0)
global_rho = np.empty(0)
global_centered_eta = np.empty(0)
global_centered_phi = np.empty(0)
num_hits = []
eta_bins = np.linspace(-4.4, 4.4, num_fine_eta_bins + 1)
phi_bins = np.linspace(-np.pi, np.pi, num_fine_phi_bins + 1)
rho_bins = np.linspace(0, 1200, num_fine_rho_bins + 1)
delta_bins = np.linspace(-2.0, 2.0, 40 + 1)
# Loop over particles
for i in range(hits.shape[0]):
# for i in range(100):
if i % 100 == 0:
print(i)
the_hits = hits[i]
reshaped_hits = the_hits.reshape(max_num_hits, 6)
X = reshaped_hits[:, 0]
Y = reshaped_hits[:, 1]
Z = reshaped_hits[:, 2]
X = np.trim_zeros(X)
Y = np.trim_zeros(Y)
Z = np.trim_zeros(Z)
assert X.size == Y.size and X.size == Z.size and Y.size == Z.size
if X.size < 1:
continue
num_hits.append(X.size)
global_X = np.append(global_X, X)
global_Y = np.append(global_Y, Y)
global_rho = np.append(global_rho, np.sqrt(X * X + Y * Y))
eta = -np.log(np.tan(np.arctan2(np.sqrt(X * X + Y * Y), Z) / 2))
centered_eta = eta - eta.mean(axis=0)
global_eta = np.append(global_eta, eta)
global_centered_eta = np.append(global_centered_eta, centered_eta)
# phi = np.arctan2(Y,X)
    ### Compute a proper circular mean of phi: average unit vectors in the complex plane.
imagY = Y * (0 + 1j)
XY_vector = (X + imagY) / (np.abs(X + imagY))
average_XY_vector = XY_vector.mean(axis=0)
phi = np.angle(XY_vector)
    average_phi = np.angle(average_XY_vector)
"""Data utils functions for pre-processing and data loading."""
import os
import pickle as pkl
import sys
import networkx as nx
import numpy as np
import scipy.sparse as sp
import torch
from scipy import sparse
import logging
def load_data(args, datapath):
## Load data
data = load_data_lp(args.dataset, args.use_feats, datapath)
adj = data['adj_train']
## TAKES a lot of time
if args.node_cluster == 1:
task = 'nc'
else:
task = 'lp'
cached_dir = os.path.join('/root/tmp', task, args.dataset,
f"seed{args.split_seed}-val{args.val_prop}-test{args.test_prop}")
if not os.path.isdir(cached_dir):
logging.info(f"Caching at `{cached_dir}`randomly masked edges")
os.makedirs(cached_dir, exist_ok=True)
adj_train, train_edges, train_edges_false, val_edges, val_edges_false, test_edges, test_edges_false = mask_edges(
adj, args.val_prop, args.test_prop, args.split_seed
)
if args.val_prop + args.test_prop > 0:
torch.save(val_edges, os.path.join(cached_dir, 'val_edges.pth'))
torch.save(val_edges_false, os.path.join(cached_dir, 'val_edges_false.pth'))
torch.save(test_edges, os.path.join(cached_dir, 'test_edges.pth'))
torch.save(test_edges_false, os.path.join(cached_dir, 'test_edges_false.pth'))
torch.save(train_edges, os.path.join(cached_dir, 'train_edges.pth'))
torch.save(train_edges_false, os.path.join(cached_dir, 'train_edges_false.pth'))
sparse.save_npz(os.path.join(cached_dir, "adj_train.npz"), adj_train)
st0 = np.random.get_state()
np.save(os.path.join(cached_dir, 'np_state.npy'), st0)
else:
logging.info(f"Loading from `{cached_dir}` randomly masked edges")
if args.val_prop + args.test_prop > 0:
val_edges = torch.load(os.path.join(cached_dir, 'val_edges.pth'))
val_edges_false = torch.load(os.path.join(cached_dir, 'val_edges_false.pth'))
test_edges = torch.load(os.path.join(cached_dir, 'test_edges.pth'))
test_edges_false = torch.load(os.path.join(cached_dir, 'test_edges_false.pth'))
adj_train = sparse.load_npz(os.path.join(cached_dir, "adj_train.npz"))
train_edges = torch.load(os.path.join(cached_dir, 'train_edges.pth'))
train_edges_false = torch.load(os.path.join(cached_dir, 'train_edges_false.pth'))
st0 = np.load(os.path.join(cached_dir, 'np_state.npy'))
        np.random.set_state(st0)
#!/usr/bin/env @PYTHON_EXECUTABLE@
"""
Description: Viewer and exporter for Siconos mechanics-IO HDF5 files based on VTK.
"""
# Lighter imports before command line parsing
from __future__ import print_function
import sys
import os
import json
import getopt
import math
import traceback
import vtk
from vtk.util.vtkAlgorithm import VTKPythonAlgorithmBase
from vtk.numpy_interface import dataset_adapter as dsa
import h5py
# Exports from this module
__all__ = ['VView', 'VViewOptions', 'VExportOptions', 'VViewConfig']
if hasattr(math, 'inf'):
infinity = math.inf
else:
infinity = float('inf')
## Persistent configuration
class VViewConfig(dict):
    def __init__(self, d=None, filename=None):
        # Avoid a mutable default argument: build the default dict per call.
        if d is None:
            d = {'background_color': [0., 0., 0.], 'window_size': [600, 600]}
        super(self.__class__, self).__init__(d)
self.should_save_config = True
if filename is not None:
self.filename = filename
else:
self.filename = os.path.join(os.environ['HOME'], '.config',
'siconos_vview.json')
def load_configuration(self):
if os.path.exists(self.filename):
try:
self.update(json.load(open(self.filename)))
print('Loaded configuration from ', self.filename)
for k in self:
print(' ', k,': ', self[k])
self.should_save_config = True
        except Exception:
self.should_save_config = False
print("Warning: Error loading configuration `{}'".format(self.filename))
def save_configuration(self, force=False):
if not force and not self.should_save_config:
return
try:
if not os.path.exists(os.path.join(os.environ['HOME'], '.config')):
os.mkdir(os.path.join(os.environ['HOME'], '.config'))
json.dump(self, open(self.filename,'w'))
        except Exception:
print("Error saving configuration `{}'".format(self.filename))
class VViewOptions(object):
def __init__(self):
self.min_time = None
self.max_time = None
self.cf_scale_factor = 1
self.normalcone_ratio = 1
self.time_scale_factor = 1
self.advance_by_time = None
self.frames_per_second = 25
self.cf_disable = False
if hasattr(vtk.vtkPolyDataMapper(), 'ImmediateModeRenderingOff'):
self.imr = False
else:
# vtk 8
self.imr = True
self.depth_peeling = True
self.maximum_number_of_peels = 100
self.occlusion_ratio = 0.1
self.global_filter = False
self.initial_camera = [None] * 5
self.visible_mode = 'all'
self.export = False
self.gen_para_script = False
self.with_edges = False
self.with_random_color = True
self.with_charts= 0
self.depth_2d=0.1
self.verbose=0
## Print usage information
def usage(self, long=False):
print(__doc__); print()
print('Usage: {0} [OPTION]... <HDF5>'
.format(os.path.split(sys.argv[0])[1]))
print()
if not long:
print("""[--help] [--tmin=<float value>] [--tmax=<float value>]
[--cf-scale=<float value>] [--no-cf] [--imr] [--global-filter]
[--no-depth-peeling] [--maximum-number-of-peels=<int value>]
[--occlusion-ratio=<float value>]
[--normalcone-ratio = <float value>]
[--advance=<'fps' or float value>] [--fps=float value]
[--camera=x,y,z] [--lookat=x,y,z] [--up=x,y,z] [--clipping=near,far] [--ortho=scale]
[--with-charts=<int value>]
[--visible=all,avatars,contactors] [--with-edges] [--verbose]
""")
else:
print("""Options:
--help
display this message
--version
display version information
--tmin= value
set the time lower bound for visualization
--tmax= value
set the time upper bound for visualization
--cf-scale= value (default : 1.0 )
rescale the arrow representing the contact forces by the value.
the normal cone and the contact points are also rescaled
--no-cf
do not display contact forces
--imr
    immediate-mode-rendering, uses less memory at the price of
slower rendering
--global-filter (default : off)
With export mode, concatenates all blocks in a big vtkPolyData.
This option is for when the number of objects is huge.
With vview, the display is done with only one vtk
actor. Note that global-filter use a vtkCompositeDataGeometryFilter
which is slow.
--no-depth-peeling (default : on)
do not use vtk depth peeling
--maximum-number-of-peels= value
maximum number of peels when depth peeling is on
--occlusion-ratio= value
occlusion-ratio when depth peeling is on
--normalcone-ratio = value (default : 1.0 )
    introduce a ratio between the representation of the contact
    force arrows, the normal cone and the contact points. Useful
    when the contact forces are small with respect to the
    characteristic dimension
--advance= value or 'fps'
automatically advance time during recording (default : don't
advance)
--fps= value
frames per second of generated video (default 25)
--camera=x,y,z
initial position of the camera (default=above looking down)
--lookat=x,y,z
initial direction to look (default=center of bounding box)
--up=x,y,z
initial up direction of the camera (default=y-axis)
--ortho=scale
start in ortho mode with given parallel scale
(default=perspective)
--with-charts=value
display convergence charts
--visible=all
all: view all contactors and avatars
avatars: view only avatar if an avatar is defined (for each
object) contactors: ignore avatars, view only contactors where
avatars are contactors with collision_group=-1
--with-edges
add edges in the rendering (experimental for primitives)
--with-fixed-color
use fixed color defined in the config file
--depth-2d=<value>
specify a depth for 2D objects
--verbose=<int verbose_level>
""")
def parse(self):
## Parse command line
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help', 'version',
'dat', 'tmin=', 'tmax=',
'no-cf', 'imr', 'global-filter',
'no-depth-peeling',
'maximum-number-of-peels=',
'occlusion-ratio=',
'cf-scale=', 'normalcone-ratio=',
'advance=', 'fps=',
'camera=', 'lookat=', 'up=', 'clipping=', 'ortho=', 'visible=',
'with-edges', 'with-fixed-color', 'with-charts=', 'depth-2d=', 'verbose='])
self.configure(opts, args)
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
self.usage()
exit(2)
def configure(self, opts, args):
for o, a in opts:
if o == '--help':
self.usage(long=True)
exit(0)
elif o == '--version':
print('{0} @SICONOS_VERSION@'.format(
os.path.split(sys.argv[0])[1]))
exit(0)
elif o == '--tmin':
self.min_time = float(a)
elif o == '--tmax':
self.max_time = float(a)
elif o == '--cf-scale':
self.cf_scale_factor = float(a)
elif o == '--no-cf':
self.cf_disable = True
elif o == '--imr':
self.imr = True
elif o == '--no-depth-peeling':
self.depth_peeling=False
elif o == '--maximum-number-of-peels':
self.maximum_number_of_peels = int(a)
elif o == '--occlusion-ratio':
self.occlusion_ratio = float(a)
elif o == '--global-filter':
self.global_filter = True
elif o == '--normalcone-ratio':
self.normalcone_ratio = float(a)
elif o == '--advance':
if 'fps' in a:
self.advance_by_time = \
eval(a, {'fps': 1.0 / self.frames_per_second})
else:
self.advance_by_time = float(a)
elif o == '--fps':
self.frames_per_second = int(a)
            elif o == '--camera':
                self.initial_camera[0] = list(map(float, a.split(',')))
            elif o == '--lookat':
                self.initial_camera[1] = list(map(float, a.split(',')))
            elif o == '--up':
                self.initial_camera[2] = list(map(float, a.split(',')))
            elif o == '--clipping':
                self.initial_camera[4] = list(map(float, a.split(',')))
elif o == '--ortho':
self.initial_camera[3] = float(a)
elif o == '--with-charts':
self.with_charts = int(a)
elif o == '--depth-2d':
self.depth_2d = float(a)
elif o == '--visible':
self.visible_mode = a
elif o == '--with-edges':
self.with_edges = True
elif o == '--with-fixed-color':
self.with_random_color = False
elif o == '--verbose':
self.verbose = int(a)
if self.verbose >=1:
self.display()
if self.frames_per_second == 0:
self.frames_per_second = 25
if len(args) > 0:
self.io_filename = args[0]
else:
self.usage()
exit(1)
def display(self):
display_str = \
"""[io.VViewOptions] Display vview options:
min_time : {0}
max_time : {1}
cf_disable : {2}
cf_scale_factor : {3}
normalcone_ratio : {4}
time_scale_factor : {5}
advance_by_time : {6}
frames_per_second : {7}
imr : {8}
depth_peeling : {9}
maximum_number_of_peels : {10}
occlusion_ratio : {11}
global_filter : {12}
initial_camera : {13}
visible_mode : {14}
export : {15}
gen_para_script : {16}
with_edges : {17}
with_random_color : {18}
with_charts : {19}
depth_2d : {20}
verbose : {21}""".format(self.min_time,
self.max_time,
self.cf_disable,
self.cf_scale_factor,
self.normalcone_ratio,
self.time_scale_factor,
self.advance_by_time,
self.frames_per_second,
self.imr,
self.depth_peeling,
self.maximum_number_of_peels,
self.occlusion_ratio,
self.global_filter,
self.initial_camera,
self.visible_mode,
self.export,
self.gen_para_script,
self.with_edges,
self.with_random_color,
self.with_charts,
self.depth_2d,
self.verbose
)
print(display_str)
# self.cf_scale_factor = 1
# self.normalcone_ratio = 1
# self.time_scale_factor = 1
# self.advance_by_time = None
# self.frames_per_second = 25
# self.cf_disable = False
# if hasattr(vtk.vtkPolyDataMapper(), 'ImmediateModeRenderingOff'):
# self.imr = False
# else:
# # vtk 8
# self.imr = True
# self.depth_peeling = True
# self.maximum_number_of_peels = 100
# self.occlusion_ratio = 0.1
# self.global_filter = False
# self.initial_camera = [None] * 5
# self.visible_mode = 'all'
# self.export = False
# self.gen_para_script = False
# self.with_edges = False
# self.with_random_color = True
# self.with_charts= 0
# self.depth=0.1
# self.verbose=0
class VExportOptions(VViewOptions):
def __init__(self):
super(VExportOptions, self).__init__()
self.export = True
self.ascii_mode = False
self.start_step = 0
self.end_step = None
self.stride = 1
self.nprocs = 1
self.gen_para_script = False
def usage(self, long=False):
print(__doc__); print()
print('Usage: {0} [--help] [--version] [--ascii] <HDF5>'
.format(os.path.split(sys.argv[0])[1]))
if long:
print()
print("""Options:
--help display this message
--version display version information
--global-filter one vtp file per time step
--start-step=n integer, set the first simulation time step
number (default: 0)
--end-step=n integer, set the last simulation time step
number (default: None)
--stride=n integer, export every n-th simulation time step
(default: 1)
--ascii export file in ascii format
--gen-para-script=n generate a GNU parallel command for n processes
""")
def parse(self):
## Parse command line
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help', 'version', 'ascii',
'start-step=', 'end-step=',
'stride=', 'global-filter',
'gen-para-script=',
'depth-2d=', 'verbose='])
self.configure(opts, args)
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
self.usage()
exit(2)
def configure(self, opts, args):
for o, a in opts:
if o == '--help':
self.usage(long=True)
exit(0)
if o == '--version':
print('{0} @SICONOS_VERSION@'.format(
os.path.split(sys.argv[0])[1]))
exit(0)
if o == '--global-filter':
self.global_filter = True
if o == '--start-step':
self.start_step = int(a)
if o == '--end-step':
self.end_step = int(a)
if o == '--stride':
self.stride = int(a)
if o == '--gen-para-script':
self.gen_para_script = True
self.nprocs = int(a)
if o == '--ascii':
self.ascii_mode = True
if o == '--depth-2d':
self.depth_2d = float(a)
if o == '--verbose':
self.verbose = int(a)
if self.verbose >=1:
self.display()
if len(args) > 0:
self.io_filename = args[0]
else:
self.usage()
exit(1)
class VRawDataExportOptions(VViewOptions):
def __init__(self, io_filename = None):
super(VRawDataExportOptions, self).__init__()
self.export = True
self._export_position = True
self._export_velocity = True
self._export_cf = False
self._export_velocity_in_absolute_frame = False
self.start_step = 0
self.end_step = None
self.stride = 1
self.io_filename = io_filename
def usage(self, long=False):
print(__doc__); print()
print('Usage: {0} [--help] <HDF5>'
.format(os.path.split(sys.argv[0])[1]))
if long:
print()
print("""Options:
--help display this message
--version display version information
--start-step=n integer, set the first simulation time step
number (default: 0)
--end-step=n integer, set the last simulation time step
number (default: None)
--stride=n integer, set export time step/simulation time step
(default: 1)
--no-export-position do not export positions
--no-export-velocity do not export velocities
--export-cf export contact friction data
--export-velocity-in-absolute-frame export velocities in the absolute frame
""")
def parse(self):
## Parse command line
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], '',
['help', 'version', 'ascii',
'start-step=', 'end-step=',
'stride=',
'no-export-position',
'no-export-velocity',
'export-cf',
'export-velocity-in-absolute-frame'])
self.configure(opts, args)
except getopt.GetoptError as err:
sys.stderr.write('{0}\n'.format(str(err)))
self.usage()
exit(2)
def configure(self, opts, args):
for o, a in opts:
if o == '--help':
self.usage(long=True)
exit(0)
if o == '--version':
print('{0} @SICONOS_VERSION@'.format(
os.path.split(sys.argv[0])[1]))
exit(0)
if o == '--start-step':
self.start_step = int(a)
if o == '--end-step':
self.end_step = int(a)
if o == '--stride':
self.stride = int(a)
if o == '--no-export-position':
self._export_position = False
if o == '--no-export-velocity':
self._export_velocity = False
if o == '--export-cf':
self._export_cf = True
if o == '--export-velocity-in-absolute-frame':
self._export_velocity_in_absolute_frame = True
if self.io_filename is None:
if len(args) > 0 :
self.io_filename = args[0]
else:
self.usage()
exit(1)
## Utilities
def add_compatiblity_methods(obj):
"""
Add missing methods in previous VTK versions.
"""
if hasattr(obj, 'SetInput'):
obj.SetInputData = obj.SetInput
if hasattr(obj, 'AddInput'):
obj.AddInputData = obj.AddInput
def random_color():
r = random.uniform(0.1, 0.9)
g = random.uniform(0.1, 0.9)
b = random.uniform(0.1, 0.9)
return r, g, b
class Quaternion():
def __init__(self, *args):
import vtk
self._vtkmath = vtk.vtkMath()
self._data = vtk.vtkQuaternion[float](*args)
def __mul__(self, q):
r = Quaternion()
self._vtkmath.MultiplyQuaternion(self._data, q._data, r._data)
return r
def __getitem__(self, i):
return self._data[i]
def conjugate(self):
r = Quaternion((self[0], self[1], self[2], self[3]))
r._data.Conjugate()
return r
def rotate(self, v):
pv = Quaternion((0, v[0], v[1], v[2]))
rv = self * pv * self.conjugate()
# assert(rv[0] == 0)
return [rv[1], rv[2], rv[3]]
def axisAngle(self):
r = [0, 0, 0]
a = self._data.GetRotationAngleAndAxis(r)
return r, a
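# Minimal usage sketch for Quaternion (components are (w, x, y, z); the
# values below are arbitrary):
#   q = Quaternion((math.cos(math.pi/4), 0., 0., math.sin(math.pi/4)))
#   q.rotate([1., 0., 0.])       # rotation of pi/2 about z: ~[0., 1., 0.]
#   axis, angle = q.axisAngle()  # angle in radians; converted to degrees
#                                # where it is consumed below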
class InputObserver():
def __init__(self, vview, times=None, slider_repres=None):
self.vview = vview
self._opacity = 1.0
self._opacity_static = 1.0
self._opacity_contact = 0.4
self._current_id = vtk.vtkIdTypeArray()
self._renderer = vview.renderer
self._renderer_window = vview.renderer_window
self._image_counter = 0
self._view_cycle = -1
self._recording = False
self._times = None
if times is None or len(times)==0:
return
self._times = times
self._stimes = set(times)
self._time_step = (max(self._stimes) - min(self._stimes)) \
/ len(self._stimes)
self._time = min(times)
if slider_repres is None:
return
self._slider_repres = slider_repres
def update(self):
self.vview.print_verbose('InputObserver - update at time', self._time)
self.vview.io_reader.SetTime(self._time)
if self._times is None:
self.vview.renderer_window.Render()
return
if not self.vview.opts.cf_disable:
self.vview.io_reader.Update()
for mu in self.vview.io_reader._mu_coefs:
self.vview.contact_posa[mu].Update()
self.vview.contact_posb[mu].Update()
self.vview.contact_pos_force[mu].Update()
self.vview.contact_pos_norm[mu].Update()
self.vview.set_dynamic_actors_visibility(self.vview.io_reader._time)
self.vview.set_static_actors_visibility(self.vview.io_reader._time)
pos_data = self.vview.io_reader.pos_data
self.vview.set_position(pos_data)
self._slider_repres.SetValue(self.vview.io_reader._time)
self._current_id.SetNumberOfValues(1)
self._current_id.SetValue(0, self.vview.io_reader._index)
if self.vview.opts.with_charts:
self.vview.iter_plot.SetSelection(self._current_id)
self.vview.prec_plot.SetSelection(self._current_id)
self.vview.renderer_window.Render()
def set_opacity(self):
for instance, actors in self.vview.dynamic_actors.items():
for actor,_,_ in actors:
actor.GetProperty().SetOpacity(self._opacity)
def set_opacity_static(self):
for instance, actors in self.vview.static_actors.items():
for actor,_,_ in actors:
actor.GetProperty().SetOpacity(self._opacity_static)
def set_opacity_contact(self):
for mu in self.vview.io_reader._mu_coefs:
self.vview.cactor[mu].GetProperty().SetOpacity(self._opacity_contact)
self.vview.gactor[mu].GetProperty().SetOpacity(self._opacity_contact)
self.vview.clactor[mu].GetProperty().SetOpacity(self._opacity_contact)
self.vview.sactora[mu].GetProperty().SetOpacity(self._opacity_contact)
self.vview.sactorb[mu].GetProperty().SetOpacity(self._opacity_contact)
def key(self, obj, event):
key = obj.GetKeySym()
self.vview.print_verbose('InputObserver - key', key)
key_recognized = True
if key == 'r':
self.vview.reload()
self._slider_repres.SetMinimumValue(self.vview.min_time)
self._slider_repres.SetMaximumValue(self.vview.max_time)
self.update()
elif key == 'p':
self._image_counter += 1
self.vview.image_maker.Update()
self.vview.writer.SetFileName(
'vview-{0}.png'.format(self._image_counter))
self.vview.writer.Write()
elif key == 'Up':
self._time_step = self._time_step * 2.
self._time += self._time_step
elif key == 'Down':
self._time_step = self._time_step / 2.
self._time -= self._time_step
elif key == 'Left':
self._time -= self._time_step
elif key == 'Right':
self._time += self._time_step
elif key == 't':
print('Decrease the opacity of bodies')
self._opacity -= .1
self.set_opacity()
elif key == 'T':
print('Increase the opacity of bodies')
self._opacity += .1
self.set_opacity()
elif key == 'y':
print('Decrease the opacity of static bodies')
self._opacity_static -= .1
self.set_opacity_static()
elif key == 'Y':
print('Increase the opacity of static bodies')
self._opacity_static += .1
self.set_opacity_static()
elif key == 'u':
print('Decrease the opacity of contact elements')
self._opacity_contact -= .1
self.set_opacity_contact()
elif key == 'U':
print('Increase the opacity of contact elements')
self._opacity_contact += .1
self.set_opacity_contact()
elif key == 'c':
print('camera position:', self._renderer.GetActiveCamera().GetPosition())
print('camera focal point', self._renderer.GetActiveCamera().GetFocalPoint())
print('camera clipping range', self._renderer.GetActiveCamera().GetClippingRange())
print('camera up vector', self._renderer.GetActiveCamera().GetViewUp())
if self._renderer.GetActiveCamera().GetParallelProjection() != 0:
print('camera parallel scale', self._renderer.GetActiveCamera().GetParallelScale())
elif key == 'o':
self._renderer.GetActiveCamera().SetParallelProjection(
1 - self._renderer.GetActiveCamera().GetParallelProjection())
elif key == 'v':
# Cycle through some useful views
dist = norm(self._renderer.GetActiveCamera().GetPosition())
# dist2 = norm([numpy.sqrt(dist**2)/3]*2)
d3 = norm([numpy.sqrt(dist**2) / 3] * 3)
self._view_cycle += 1
if self._view_cycle == 0:
print('Left')
self._renderer.GetActiveCamera().SetPosition(
dist, 0, 0)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(0, 0, 1)
elif self._view_cycle == 1:
print('Right')
self._renderer.GetActiveCamera().SetPosition(
0, dist, 0)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(0, 0, 1)
elif self._view_cycle == 2:
print('Top')
self._renderer.GetActiveCamera().SetPosition(
0, 0, dist)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(1, 0, 0)
else: # Corner
print('Corner')
self._renderer.GetActiveCamera().SetPosition(
d3, d3, d3)
self._renderer.GetActiveCamera().SetFocalPoint(0, 0, 0)
self._renderer.GetActiveCamera().SetViewUp(
-1, -1, 1)
self._view_cycle = -1
self._renderer.ResetCameraClippingRange()
elif key == 'C':
self._renderer.ResetCameraClippingRange()
elif key == 's':
self.toggle_recording(True)
elif key == 'e':
# Note: 'e' also has the effect of "ending" the program, due to
# the default behaviour of vtkInteractorStyle; see the class
# documentation.
self.toggle_recording(False)
# else:
# self.vview.print_verbose('InputObserver - key not recognized')
# key_recognized=False
# if(key_recognized):
self.update()
def time(self, obj, event):
slider_repres = obj.GetRepresentation()
self._time = slider_repres.GetValue()
self.update()
def toggle_recording(self, recording):
self.vview.print_verbose('InputObserver - toggle recording')
if recording and not self._recording:
fps = 25
self._timer_id = (self.vview.interactor_renderer
.CreateRepeatingTimer(1000//fps))
self._recording = True
self.vview.recorder.Start()
elif self._recording and not recording:
self.vview.interactor_renderer.DestroyTimer(self._timer_id)
self._timer_id = None
self._recording = False
self.vview.recorder.End()
# observer on 2D chart
def iter_plot_observer(self, obj, event):
if self.vview.iter_plot.GetSelection() is not None:
# just one selection at the moment!
if self.vview.iter_plot.GetSelection().GetMaxId() >= 0:
self._time = self._times[
self.vview.iter_plot.GetSelection().GetValue(0)]
# -> recompute index ...
self.update()
def prec_plot_observer(self, obj, event):
if self.vview.prec_plot.GetSelection() is not None:
# just one selection at the moment!
if self.vview.prec_plot.GetSelection().GetMaxId() >= 0:
self._time = self._times[
self.vview.prec_plot.GetSelection().GetValue(0)]
# -> recompute index ...
self.update()
def recorder_observer(self, obj, event):
if self._recording:
if self.vview.opts.advance_by_time is not None:
self._time += self.vview.opts.advance_by_time
self.vview.slwsc.SetEnabled(False) # Scale slider
self.vview.xslwsc.SetEnabled(False) # Time scale slider
# slider_widget.SetEnabled(False) # Time slider (TODO video options)
# widget.SetEnabled(False) # Axis widget
self.update()
self.vview.image_maker.Modified()
self.vview.recorder.Write()
if self.vview.opts.advance_by_time is not None:
self.vview.slwsc.SetEnabled(True)
self.vview.xslwsc.SetEnabled(True)
# slider_widget.SetEnabled(True)
# widget.SetEnabled(True) # Axis widget
# End video if done
if self._time >= max(self._times):
self.toggle_recording(False)
class CellConnector(vtk.vtkProgrammableFilter):
"""
Programmable filter that copies per-instance data arrays onto every cell of its input.
"""
def __init__(self, instance, data_names, data_sizes):
vtk.vtkProgrammableFilter.__init__(self)
self._instance = instance
self._data_names = data_names
self._data_sizes = data_sizes
self.SetExecuteMethod(self.method)
self._datas = [numpy.zeros(s) for s in data_sizes]
self._vtk_datas = [None]*len(data_sizes)
self._index = list(enumerate(data_names))
for i, data_name in self._index:
self._vtk_datas[i] = vtk.vtkFloatArray()
self._vtk_datas[i].SetName(data_name)
self._vtk_datas[i].SetNumberOfComponents(data_sizes[i])
def method(self):
input = self.GetInput()
output = self.GetOutput()
output.ShallowCopy(input)
ncells = output.GetNumberOfCells()
for i, data_name in self._index:
self._vtk_datas[i].SetNumberOfTuples(ncells)
if output.GetCellData().GetArray(data_name) is None:
output.GetCellData().AddArray(self._vtk_datas[i])
data = self._datas[i]
data_t = data[0:self._data_sizes[i]]
for c in range(ncells):
output.GetCellData().GetArray(data_name).SetTuple(c, data_t)
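# Usage sketch (mirrors what VView.init_contactor does later in this
# file); the _datas buffers are filled by the set_* helpers and copied
# onto every cell at each Update:
#   cc = CellConnector(instid, data_names=['velocity(linear)'],
#                      data_sizes=[3])
#   cc.SetInputConnection(transformer.GetOutputPort())
#   cc._datas[0][:] = [vx, vy, vz]  # hypothetical velocity components
#   cc.Update()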
def makeConvexSourceClass():
class UnstructuredGridSource(vtk.vtkProgrammableSource):
def GetOutputPort(self):
# 3: UnstructuredGridOutput for vtkProgrammableSource
return vtk.vtkProgrammableSource.GetOutputPort(self, 3)
class ConvexSource(UnstructuredGridSource):
def __init__(self, convex, points):
self._convex = convex
self._points = points
self.SetExecuteMethod(self.method)
def method(self):
output = self.GetUnstructuredGridOutput()
output.Allocate(1, 1)
output.InsertNextCell(
self._convex.GetCellType(), self._convex.GetPointIds())
output.SetPoints(self._points)
return ConvexSource
# Attempt at a vtk reader. Only halfway there: the reading part is ok,
# but the output is only used in vview and export through python
# members.
class IOReader(VTKPythonAlgorithmBase):
def __init__(self):
VTKPythonAlgorithmBase.__init__(self,
nInputPorts=0,
nOutputPorts=1,
outputType='vtkPolyData')
self._io = None
self._with_contact_forces = False
self.cf_data = None
self.time = 0
self.timestep = 0
self.points = vtk.vtkPoints()
def RequestInformation(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_STEPS(),
self._times,
len(self._times))
info.Set(vtk.vtkStreamingDemandDrivenPipeline.TIME_RANGE(),
[self._times[0], self._times[-1]], 2)
return 1
def RequestData(self, request, inInfo, outInfo):
info = outInfo.GetInformationObject(0)
output = vtk.vtkPolyData.GetData(outInfo)
output.SetPoints(self.points)
# The time step requested
t = info.Get(vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP())
id_t = max(0, numpy.searchsorted(self._times, t, side='right') - 1)
if id_t < len(self._indices)-1:
self._id_t_m = list(range(self._indices[id_t],
self._indices[id_t+1]))
else:
self._id_t_m = [self._indices[id_t]]
self._time = self._times[id_t]
self._index = id_t
self.pos_data = self._idpos_data[self._id_t_m, :]
self.velo_data = self._ivelo_data[self._id_t_m, :]
static_id_t = max(0, numpy.searchsorted(self._static_times, t, side='right') - 1)
if static_id_t < len(self._static_indices)-1:
self._static_id_t_m = list(range(self._static_indices[static_id_t],
self._static_indices[static_id_t+1]))
else:
self._static_id_t_m = [self._static_indices[static_id_t]]
self.pos_static_data = self._ispos_data[self._static_id_t_m, :]
vtk_pos_data = dsa.numpyTovtkDataArray(self.pos_data)
vtk_pos_data.SetName('pos_data')
vtk_velo_data = dsa.numpyTovtkDataArray(self.velo_data)
vtk_velo_data.SetName('velo_data')
vtk_points_data = dsa.numpyTovtkDataArray(self.pos_data[:, 2:5])
self.points.SetData(vtk_points_data)
output.GetPointData().AddArray(vtk_velo_data)
try:
if self._with_contact_forces:
ncfindices = len(self._cf_indices)
id_t_cf = min(numpy.searchsorted(self._cf_times, t,
side='right'),
ncfindices-1)
# Check the duration between t and the last impact.
# If it is larger than the current time step, we consider
# that there is no contact (rebound).
# The current time step is the max of the slider timestep
# and the simulation timestep
ctimestep = max(self.timestep, self._avg_timestep)
if (id_t_cf > 0 and abs(t-self._cf_times[id_t_cf-1])
<= ctimestep):
if id_t_cf < ncfindices-1:
self._id_t_m_cf = list(range(self._cf_indices[id_t_cf-1],
self._cf_indices[id_t_cf]))
self.cf_data = self._icf_data[self._id_t_m_cf, :]
else:
self.cf_data = self._icf_data[self._cf_indices[
id_t_cf]:, :]
self._cf_time = self._cf_times[id_t_cf]
vtk_cf_data = dsa.numpyTovtkDataArray(self.cf_data)
vtk_cf_data.SetName('cf_data')
output.GetFieldData().AddArray(vtk_cf_data)
else:
# there are no contact forces at this time
self.cf_data = None
vtk_cf_data = dsa.numpyTovtkDataArray(numpy.array([]))
vtk_cf_data.SetName('cf_data')
output.GetFieldData().AddArray(vtk_cf_data)
if self.cf_data is not None:
self.contact = True
data = self.cf_data
for mu in self._mu_coefs:
imu = numpy.where(
abs(data[:, 1] - mu) < 1e-15)[0]
#dom_imu = None
#dom_imu = numpy.where(
# self._dom_data[:,-1] == data[id_f[imu],-1]
#)[0]
if len(imu) > 0:
self.cpa_at_time[mu] = data[
imu, 2:5]
self.cpb_at_time[mu] = data[
imu, 5:8]
self.cn_at_time[mu] = - data[
imu, 8:11]
self.cf_at_time[mu] = data[
imu, 11:14]
if data[imu, :].shape[1] > 26:
self.ids_at_time[mu] = data[
imu, 23:26].astype(int)
else:
self.ids_at_time[mu] = None
else:
# for mu in self._mu_coefs:
# self.cpa_at_time[mu] = [[nan, nan, nan]]
# self.cpb_at_time[mu] = [[nan, nan, nan]]
# self.cn_at_time[mu] = [[nan, nan, nan]]
# self.cf_at_time[mu] = [[nan, nan, nan]]
# self.ids_at_time[mu] = None
pass
if self._with_contact_forces:
for mu in self._mu_coefs:
self.cpa[mu] = numpy_support.numpy_to_vtk(
self.cpa_at_time[mu])
self.cpa[mu].SetName('contact_positions_a')
self.cpb[mu] = numpy_support.numpy_to_vtk(
self.cpb_at_time[mu])
self.cpb[mu].SetName('contact_positions_b')
self.cn[mu] = numpy_support.numpy_to_vtk(
self.cn_at_time[mu])
self.cn[mu].SetName('contact_normals')
self.cf[mu] = numpy_support.numpy_to_vtk(
self.cf_at_time[mu])
self.cf[mu].SetName('contact_forces')
# field info for vview (should go in point data)
self._contact_field[mu].AddArray(self.cpa[mu])
self._contact_field[mu].AddArray(self.cpb[mu])
self._contact_field[mu].AddArray(self.cn[mu])
self._contact_field[mu].AddArray(self.cf[mu])
# contact points
self._points[mu].SetData(self.cpa[mu])
self._output[mu].GetPointData().AddArray(self.cpb[mu])
self._output[mu].GetPointData().AddArray(self.cn[mu])
self._output[mu].GetPointData().AddArray(self.cf[mu])
if self.ids_at_time[mu] is not None:
self.ids[mu] = numpy_support.numpy_to_vtk(
self.ids_at_time[mu])
self.ids[mu].SetName('ids')
self._contact_field[mu].AddArray(self.ids[mu])
self._output[mu].GetPointData().AddArray(self.ids[mu])
dsa_ids = numpy.unique(self.ids_at_time[mu][:, 1])
dsb_ids = numpy.unique(self.ids_at_time[mu][:, 2])
_i, _i, dsa_pos_ids = numpy.intersect1d(
self.pos_data[:, 1],
dsa_ids, return_indices=True)
_i, _i, dsb_pos_ids = numpy.intersect1d(
self.pos_data[:, 1],
dsb_ids, return_indices=True)
# objects a & b translations
obj_pos_a = self.pos_data[dsa_pos_ids, 2:5]
obj_pos_b = self.pos_data[dsb_pos_ids, 2:5]
self._all_objs_pos[mu] = numpy.vstack((obj_pos_a,
obj_pos_b))
self._all_objs_pos_vtk[mu] = numpy_support.numpy_to_vtk(
self._all_objs_pos[mu])
self._objs_points[mu].SetData(self._all_objs_pos_vtk[mu])
self._objs_output[mu].GetPointData().AddArray(self.cn[mu])
self._objs_output[mu].GetPointData().AddArray(self.cf[mu])
#if dom_imu is not None:
# self.dom_at_time[mu] = self._dom_data[
# dom_imu, 1]
# self.dom[mu] = numpy_support.numpy_to_vtk(
# self.dom_at_time[mu])
# self.dom[mu].SetName('domains')
# self._contact_field[mu].AddArray(self.dom[mu])
except Exception:
traceback.print_exc()
return 1
def SetIO(self, io):
self._io = io
self._ispos_data = self._io.static_data()
self._idpos_data = self._io.dynamic_data()
try:
self._idom_data = self._io.domains_data()
except ValueError:
self._idom_data = None
self._icf_data = self._io.contact_forces_data()
self._isolv_data = self._io.solver_data()
self._ivelo_data = self._io.velocities_data()
self._spos_data = self._ispos_data[:, :]
# all times as hdf5 slice
self._raw_times = self._idpos_data[:, 0]
# build time steps
self._times, self._indices = numpy.unique(self._raw_times,
return_index=True)
# all times as hdf5 slice for static objects
self._static_raw_times = self._ispos_data[:, 0]
# build time steps for static objects
self._static_times, self._static_indices = numpy.unique(self._static_raw_times,
return_index=True)
dcf = self._times[1:]-self._times[:-1]
self._avg_timestep = numpy.mean(dcf)
self._min_timestep = numpy.min(dcf)
# self._times.sort()
# self._indices = ?
# we assume times must be sorted
# assert all(self._times[i] <= self._times[i+1]
# for i in range(len(self._times)-1))
# if self._with_contact_forces:
# assert all(self._cf_times[i] <= self._cf_times[i+1]
# for i in range(len(self._cf_times)-1))
self.Modified()
return 1
def SetTime(self, time):
self.GetOutputInformation(0).Set(
vtk.vtkStreamingDemandDrivenPipeline.UPDATE_TIME_STEP(),
time)
self.timestep = abs(self.time-time)
self.time = time
# with a True pipeline: self.Modified()
# but as the consumers (VView class, export function) are
# not (yet) vtk filters, the Update is needed here
self.Update()
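# Typical driving pattern (as used by the VView class below):
#   reader = IOReader()
#   reader.SetIO(io)               # binds the simulation io object
#   reader.ContactForcesOn()       # or ContactForcesOff()
#   reader.SetTime(t)              # triggers RequestData for time t
#   positions = reader.pos_data    # dynamic positions at that time step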
# contact forces provider
def ContactForcesOn(self):
self._cf_raw_times = self._icf_data[:, 0]
self._cf_times, self._cf_indices = numpy.unique(self._cf_raw_times,
return_index=True)
self._mu_coefs = numpy.unique(self._icf_data[:, 1],
return_index=False)
self.cpa_at_time = dict()
self.cpa = dict()
self.cpb_at_time = dict()
self.cpb = dict()
self.cf_at_time = dict()
self.cf = dict()
self.cn_at_time = dict()
self.cn = dict()
self.ids_at_time = dict()
self.ids = dict()
self.dom_at_time = dict() if self._idom_data is not None else None
self.dom = dict()
self._all_objs_pos = dict()
self._all_objs_pos_vtk = dict()
self._points = dict()
self._contact_field = dict()
self._output = dict()
self._objs_points = dict()
self._objs_output = dict()
for mu in self._mu_coefs:
# the contact points
self._points[mu] = vtk.vtkPoints()
self._contact_field[mu] = vtk.vtkPointData()
self._output[mu] = vtk.vtkPolyData()
self._output[mu].SetPoints(self._points[mu])
self._output[mu].SetFieldData(self._contact_field[mu])
# the objects translations
self._objs_points[mu] = vtk.vtkPoints()
self._objs_output[mu] = vtk.vtkPolyData()
self._objs_output[mu].SetPoints(self._objs_points[mu])
self._with_contact_forces = True
self.Update()
def ContactForcesOff(self):
self._with_contact_forces = False
self.Update()
def ExportOn(self):
self._export = True
def ExportOff(self):
self._export = False
# Read file and open VTK interaction window
class VView(object):
def __init__(self, io, options, config=None):
self.opts = options
self.config = config if config is not None else VViewConfig()
self.gui_initialized = False
self.io = io
self.refs = []
self.refs_attrs = []
self.shape = dict()
self.pos = dict()
self.mass = dict()
self.inertia = dict()
self.contact_posa = dict()
self.contact_posb = dict()
self.contact_pos_force = dict()
self.contact_pos_norm = dict()
self.cone = dict()
self.cone_glyph = dict()
self.cmapper = dict()
self.cLUT = dict()
self.cactor = dict()
self.arrow = dict()
self.cylinder = dict()
self.sphere = dict()
self.arrow_glyph = dict()
self.gmapper = dict()
self.gactor = dict()
self.ctransform = dict()
self.cylinder_glyph = dict()
self.clmapper = dict()
self.sphere_glypha = dict()
self.sphere_glyphb = dict()
self.smappera = dict()
self.smapperb = dict()
self.sactora = dict()
self.sactorb = dict()
self.clactor = dict()
self.cell_connectors = dict()
self.times_of_birth = dict()
self.times_of_death = dict()
self.min_time = self.opts.min_time
self.max_time = self.opts.max_time
self.transforms = dict()
self.transformers = dict()
self.offsets = dict()
self.io_reader = IOReader()
self.io_reader.SetIO(io=self.io)
if self.opts.cf_disable:
self.io_reader.ContactForcesOff()
else:
self.io_reader.ContactForcesOn()
if self.opts.export:
self.io_reader.ExportOn()
else:
self.io_reader.ExportOff()
def print_verbose(self, *args, **kwargs):
if self.opts.verbose:
print('[io.vview]', *args, **kwargs)
def print_verbose_level(self, level, *args, **kwargs):
if level <= self.opts.verbose:
print('[io.vview]', *args, **kwargs)
def reload(self):
if self.opts.cf_disable:
self.io_reader.ContactForcesOff()
else:
self.io_reader._time = min(self.io_reader._times[:])
for mu in self.io_reader._mu_coefs:
self.contact_posa[mu].SetInputData(self.io_reader._output[mu])
self.contact_posa[mu].Update()
self.contact_posb[mu].SetInputData(self.io_reader._output[mu])
self.contact_posb[mu].Update()
self.contact_pos_force[mu].Update()
self.contact_pos_norm[mu].Update()
self.min_time = self.io_reader._times[0]
self.max_time = self.io_reader._times[-1]
self.set_dynamic_actors_visibility(self.time0)
def init_contact_pos(self, mu):
self.print_verbose_level(2,'contact positions', mu)
self.contact_posa[mu] = vtk.vtkDataObjectToDataSetFilter()
self.contact_posb[mu] = vtk.vtkDataObjectToDataSetFilter()
add_compatiblity_methods(self.contact_posa[mu])
add_compatiblity_methods(self.contact_posb[mu])
self.contact_pos_force[mu] = vtk.vtkFieldDataToAttributeDataFilter()
self.contact_pos_norm[mu] = vtk.vtkFieldDataToAttributeDataFilter()
self.contact_posa[mu].SetDataSetTypeToPolyData()
self.contact_posa[mu].SetPointComponent(0, "contact_positions_a", 0)
self.contact_posa[mu].SetPointComponent(1, "contact_positions_a", 1)
self.contact_posa[mu].SetPointComponent(2, "contact_positions_a", 2)
self.contact_posb[mu].SetDataSetTypeToPolyData()
self.contact_posb[mu].SetPointComponent(0, "contact_positions_b", 0)
self.contact_posb[mu].SetPointComponent(1, "contact_positions_b", 1)
self.contact_posb[mu].SetPointComponent(2, "contact_positions_b", 2)
self.contact_pos_force[mu].SetInputConnection(
self.contact_posa[mu].GetOutputPort())
self.contact_pos_force[mu].SetInputFieldToDataObjectField()
self.contact_pos_force[mu].SetOutputAttributeDataToPointData()
self.contact_pos_force[mu].SetVectorComponent(0, "contact_forces", 0)
self.contact_pos_force[mu].SetVectorComponent(1, "contact_forces", 1)
self.contact_pos_force[mu].SetVectorComponent(2, "contact_forces", 2)
self.contact_pos_norm[mu].SetInputConnection(
self.contact_posa[mu].GetOutputPort())
self.contact_pos_norm[mu].SetInputFieldToDataObjectField()
self.contact_pos_norm[mu].SetOutputAttributeDataToPointData()
self.contact_pos_norm[mu].SetVectorComponent(0, "contact_normals", 0)
self.contact_pos_norm[mu].SetVectorComponent(1, "contact_normals", 1)
self.contact_pos_norm[mu].SetVectorComponent(2, "contact_normals", 2)
# if self.cf_prov.dom_at_time is not None:
# self.contact_pos_norm[mu].SetScalarComponent(0, "domains", 0)
def init_cf_sources(self, mu, transform):
self.print_verbose_level(2,'contact sources', mu)
self.cf_collector.AddInputData(self.io_reader._output[mu])
self.cf_collector.AddInputData(self.io_reader._objs_output[mu])
self.contact_posa[mu].SetInputData(self.io_reader._output[mu])
self.contact_posa[mu].Update()
self.contact_posb[mu].SetInputData(self.io_reader._output[mu])
self.contact_posb[mu].Update()
self.contact_pos_force[mu].Update()
self.contact_pos_norm[mu].Update()
self.cone[mu] = vtk.vtkConeSource()
self.cone[mu].SetResolution(40)
self.cone[mu].SetRadius(mu)  # the cone radius equals the friction coefficient
self.cone_glyph[mu] = vtk.vtkGlyph3D()
self.cone_glyph[mu].SetSourceTransform(transform)
self.cone_glyph[mu].SetInputConnection(self.contact_pos_norm[mu].GetOutputPort())
self.cone_glyph[mu].SetSourceConnection(self.cone[mu].GetOutputPort())
self.cone_glyph[mu]._scale_fact = self.opts.normalcone_ratio
self.cone_glyph[mu].SetScaleFactor(
self.cone_glyph[mu]._scale_fact *self.opts.cf_scale_factor)
self.cone_glyph[mu].SetVectorModeToUseVector()
self.cone_glyph[mu].SetInputArrayToProcess(1, 0, 0, 0, 'contact_normals')
self.cone_glyph[mu].OrientOn()
# Don't allow scalar to affect size of glyph
self.cone_glyph[mu].SetScaleModeToDataScalingOff()
# Allow scalar to affect color of glyph
self.cone_glyph[mu].SetColorModeToColorByScalar()
self.cmapper[mu] = vtk.vtkPolyDataMapper()
if not self.opts.imr:
self.cmapper[mu].ImmediateModeRenderingOff()
self.cmapper[mu].SetInputConnection(self.cone_glyph[mu].GetOutputPort())
# Random color map, up to 256 domains
self.cLUT[mu] = vtk.vtkLookupTable()
self.cLUT[mu].SetNumberOfColors(256)
self.cLUT[mu].Build()
for i in range(256):
self.cLUT[mu].SetTableValue(i, *random_color())
self.cLUT[mu].SetTableRange(0, 255)
# By default don't allow scalars to have an effect
self.cmapper[mu].ScalarVisibilityOff()
# If domain information is available, we turn on the color
# table and turn on scalars
if self.io_reader.dom_at_time is not None:
self.cmapper[mu].SetLookupTable(self.cLUT[mu])
self.cmapper[mu].SetColorModeToMapScalars()
self.cmapper[mu].SetScalarModeToUsePointData()
self.cmapper[mu].SetScalarRange(0,255)
self.cmapper[mu].ScalarVisibilityOn()
self.cactor[mu] = vtk.vtkActor()
self.cactor[mu].GetProperty().SetOpacity(self.config.get('contact_opacity', 0.4))
self.cactor[mu].GetProperty().SetColor(0, 0, 1)
self.cactor[mu].SetMapper(self.cmapper[mu])
self.arrow[mu] = vtk.vtkArrowSource()
self.arrow[mu].SetTipResolution(40)
self.arrow[mu].SetShaftResolution(40)
self.cylinder[mu] = vtk.vtkCylinderSource()
self.cylinder[mu].SetRadius(.01)
self.cylinder[mu].SetHeight(1)
self.sphere[mu] = vtk.vtkSphereSource()
# 1. scale = (scalar value of that particular data index);
# 2. denominator = Range[1] - Range[0];
# 3. scale = (scale < Range[0] ? Range[0] : (scale > Range[1] ? Range[1] : scale));
# 4. scale = (scale - Range[0]) / denominator;
# 5. scale *= scaleFactor;
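# Worked example of the clamped scaling described above: with the
# Range [0, .01] set below and scale factor 5 * cf_scale_factor, a
# force vector of norm 0.004 gives
#   scale = (0.004 - 0) / (0.01 - 0) * 5 * cf_scale_factor
#         = 2 * cf_scale_factor,
# and any norm above 0.01 is clamped to the full factor.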
self.arrow_glyph[mu] = vtk.vtkGlyph3D()
self.arrow_glyph[mu].SetInputConnection(
self.contact_pos_force[mu].GetOutputPort())
self.arrow_glyph[mu].SetSourceConnection(self.arrow[mu].GetOutputPort())
self.arrow_glyph[mu].ScalingOn()
self.arrow_glyph[mu].SetScaleModeToScaleByVector()
self.arrow_glyph[mu].SetRange(0, .01)
self.arrow_glyph[mu].ClampingOn()
self.arrow_glyph[mu]._scale_fact = 5
self.arrow_glyph[mu].SetScaleFactor(
self.arrow_glyph[mu]._scale_fact * self.opts.cf_scale_factor)
self.arrow_glyph[mu].SetVectorModeToUseVector()
self.arrow_glyph[mu].SetInputArrayToProcess(1, 0, 0, 0, 'contact_forces')
self.arrow_glyph[mu].SetInputArrayToProcess(3, 0, 0, 0, 'contact_forces')
self.arrow_glyph[mu].OrientOn()
self.gmapper[mu] = vtk.vtkPolyDataMapper()
if not self.opts.imr:
self.gmapper[mu].ImmediateModeRenderingOff()
self.gmapper[mu].SetInputConnection(self.arrow_glyph[mu].GetOutputPort())
self.gmapper[mu].SetScalarModeToUsePointFieldData()
self.gmapper[mu].SetColorModeToMapScalars()
self.gmapper[mu].ScalarVisibilityOn()
self.gmapper[mu].SelectColorArray('contact_forces')
# gmapper.SetScalarRange(contact_pos_force.GetOutput().GetPointData().GetArray('contact_forces').GetRange())
self.gactor[mu] = vtk.vtkActor()
self.gactor[mu].SetMapper(self.gmapper[mu])
self.ctransform[mu] = vtk.vtkTransform()
self.ctransform[mu].Translate(-0.5, 0, 0)
self.ctransform[mu].RotateWXYZ(90, 0, 0, 1)
self.cylinder_glyph[mu] = vtk.vtkGlyph3D()
self.cylinder_glyph[mu].SetSourceTransform(self.ctransform[mu])
self.cylinder_glyph[mu].SetInputConnection(
self.contact_pos_norm[mu].GetOutputPort())
self.cylinder_glyph[mu].SetSourceConnection(self.cylinder[mu].GetOutputPort())
self.cylinder_glyph[mu].SetVectorModeToUseVector()
self.cylinder_glyph[mu].SetInputArrayToProcess(1, 0, 0, 0, 'contact_normals')
self.cylinder_glyph[mu].OrientOn()
self.cylinder_glyph[mu]._scale_fact = self.opts.normalcone_ratio
self.cylinder_glyph[mu].SetScaleFactor(
self.cylinder_glyph[mu]._scale_fact * self.opts.cf_scale_factor)
self.clmapper[mu] = vtk.vtkPolyDataMapper()
if not self.opts.imr:
self.clmapper[mu].ImmediateModeRenderingOff()
self.clmapper[mu].SetInputConnection(self.cylinder_glyph[mu].GetOutputPort())
self.sphere_glypha[mu] = vtk.vtkGlyph3D()
self.sphere_glypha[mu].SetInputConnection(self.contact_posa[mu].GetOutputPort())
self.sphere_glypha[mu].SetSourceConnection(self.sphere[mu].GetOutputPort())
self.sphere_glypha[mu].ScalingOn()
# self.sphere_glypha[mu].SetScaleModeToScaleByVector()
# self.sphere_glypha[mu].SetRange(-0.5, 2)
# self.sphere_glypha[mu].ClampingOn()
self.sphere_glypha[mu]._scale_fact = .1 * self.opts.normalcone_ratio
self.sphere_glypha[mu].SetScaleFactor(
self.sphere_glypha[mu]._scale_fact * self.opts.cf_scale_factor)
# self.sphere_glypha[mu].SetVectorModeToUseVector()
self.sphere_glyphb[mu] = vtk.vtkGlyph3D()
self.sphere_glyphb[mu].SetInputConnection(self.contact_posb[mu].GetOutputPort())
self.sphere_glyphb[mu].SetSourceConnection(self.sphere[mu].GetOutputPort())
self.sphere_glyphb[mu].ScalingOn()
# self.sphere_glyphb[mu].SetScaleModeToScaleByVector()
# self.sphere_glyphb[mu].SetRange(-0.5, 2)
# self.sphere_glyphb[mu].ClampingOn()
self.sphere_glyphb[mu]._scale_fact = .1 * self.opts.normalcone_ratio
self.sphere_glyphb[mu].SetScaleFactor(
self.sphere_glyphb[mu]._scale_fact * self.opts.cf_scale_factor)
# self.sphere_glyphb[mu].SetVectorModeToUseVector()
# self.sphere_glyphb[mu].SetInputArrayToProcess(1, 0, 0, 0, 'contact_normals')
# self.sphere_glyph.OrientOn()
self.smappera[mu] = vtk.vtkPolyDataMapper()
if not self.opts.imr:
self.smappera[mu].ImmediateModeRenderingOff()
self.smappera[mu].SetInputConnection(self.sphere_glypha[mu].GetOutputPort())
self.smapperb[mu] = vtk.vtkPolyDataMapper()
if not self.opts.imr:
self.smapperb[mu].ImmediateModeRenderingOff()
self.smapperb[mu].SetInputConnection(self.sphere_glyphb[mu].GetOutputPort())
# self.cmapper.SetScalarModeToUsePointFieldData()
# self.cmapper.SetColorModeToMapScalars()
# self.cmapper.ScalarVisibilityOn()
# self.cmapper.SelectColorArray('contact_normals')
# self.gmapper.SetScalarRange(contact_pos_force.GetOutput().GetPointData().GetArray('contact_forces').GetRange())
self.clactor[mu] = vtk.vtkActor()
# cactor.GetProperty().SetOpacity(0.4)
self.clactor[mu].GetProperty().SetColor(1, 0, 0)
self.clactor[mu].SetMapper(self.clmapper[mu])
self.sactora[mu] = vtk.vtkActor()
self.sactora[mu].GetProperty().SetColor(1, 0, 0)
self.sactora[mu].SetMapper(self.smappera[mu])
self.sactorb[mu] = vtk.vtkActor()
self.sactorb[mu].GetProperty().SetColor(0, 1, 0)
self.sactorb[mu].SetMapper(self.smapperb[mu])
def init_shape(self, shape_name):
self.print_verbose_level(2,'init_shape', shape_name)
shape_type = (self.io.shapes()[shape_name].attrs['type'])
try:
# work-around h5py unicode bug
# https://github.com/h5py/h5py/issues/379
shape_type = shape_type.decode('utf-8')
except AttributeError:
pass
scale = None
if 'scale' in self.io.shapes()[shape_name].attrs:
scale = self.io.shapes()[shape_name].attrs['scale']
ConvexSource = makeConvexSourceClass()
if shape_type in ['vtp', 'stl']:
with io_tmpfile() as tmpf:
## fix compatibility with h5py version: to be removed in the future
if h5py.version.version_tuple.major >= 3:
tmpf[0].write((self.io.shapes()[shape_name][:][0]).decode('utf-8'))
else:
tmpf[0].write(str(self.io.shapes()[shape_name][:][0]))
tmpf[0].flush()
reader = self.vtk_reader[shape_type]()
reader.SetFileName(tmpf[1])
reader.Update()
self.readers[shape_name] = reader
# an attempt at smooth rendering, but it does not work here
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(reader.GetOutputPort())
normals.SetFeatureAngle(60.0)
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputConnection(normals.GetOutputPort())
mapper.ScalarVisibilityOff()
# delayed (see the one in brep)
# note: "lambda: mapper" would capture the variable itself (late
# binding); the one-shot generator (x for x in [mapper]) captures
# the current value.
self.mappers[shape_name] = (x for x in [mapper])
elif shape_type in ['brep']:
# try to find an associated shape
if 'associated_shape' in self.io.shapes()[shape_name].attrs:
associated_shape = \
self.io.shapes()[shape_name].\
attrs['associated_shape']
# delayed
self.mappers[shape_name] = (x for x in
[mappers[associated_shape]()])
else:
if 'brep' in self.io.shapes()[shape_name].attrs:
brep = self.io.shapes()[shape_name].attrs['brep']
else:
brep = shape_name
reader = brep_reader(str(self.io.shapes()[brep][:][0]),
self.io.shapes()[brep].attrs['occ_indx'])
self.readers[shape_name] = reader
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputConnection(reader.GetOutputPort())
self.mappers[shape_name] = (x for x in [mapper])
elif shape_type in ['stp', 'step', 'igs', 'iges']:
# try to find an associated shape
if 'associated_shape' in self.io.shapes()[shape_name].attrs:
associated_shape = \
self.io.shapes()[shape_name].\
attrs['associated_shape']
# delayed
self.mappers[shape_name] = (
x for x in [mappers[associated_shape]()])
else:
with io_tmpfile(
debug=True,
suffix='.{0}'.format(shape_type),
contents=str(self.io.shapes()[shape_name][:][0])) as tmpf:
shape = occ_load_file(tmpf[1])
# whole shape
reader = topods_shape_reader(shape)
self.readers[shape_name] = reader
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputConnection(reader.GetOutputPort())
self.mappers[shape_name] = (x for x in [mapper])
# subparts
faces, edges = occ_topo_list(shape)
for i, f in enumerate(faces):
shape_indx = ('Face', shape_name, i)
reader = topods_shape_reader(f)
self.readers[shape_indx] = reader
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputConnection(reader.GetOutputPort())
self.mappers[shape_indx] = (x for x in [mapper])
for i, e in enumerate(edges):
shape_indx = ('Edge', shape_name, i)
reader = topods_shape_reader(e)
self.readers[shape_indx] = reader
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputConnection(reader.GetOutputPort())
self.mappers[shape_indx] = (x for x in [mapper])
elif shape_type == 'heightmap':
points = vtk.vtkPoints()
shape = self.io.shapes()[shape_name]
extents = list(shape.attrs['rect']) + [numpy.max(shape) - numpy.min(shape)]
# Data points are adjusted to center tangentially, but
# vertical position is left alone; i.e., supports
# non-zero-centered data. User must use contactor
# translation to compensate if desired, or simply adjust
# data itself to desired origin.
for x,d in enumerate(shape):
for y,v in enumerate(d):
points.InsertNextPoint(
float(x) / (shape.shape[0]-1) * extents[0] - extents[0]/2,
float(y) / (shape.shape[1]-1) * extents[1] - extents[1]/2,
v)
polydata = vtk.vtkPolyData()
polydata.SetPoints(points)
delaunay = vtk.vtkDelaunay2D()
delaunay.SetInputData(polydata)
delaunay.Update()
self.datasets[shape_name] = polydata
mapper = vtk.vtkPolyDataMapper()
if not self.opts.imr:
mapper.ImmediateModeRenderingOff()
mapper.SetInputConnection(delaunay.GetOutputPort())
add_compatiblity_methods(mapper)
self.mappers[shape_name] = None
self.mappers[shape_name] = (x for x in [mapper])
elif shape_type == 'convex':
# a convex shape
points = vtk.vtkPoints()
convex = vtk.vtkConvexPointSet()
data = self.io.shapes()[shape_name][:]
if self.io.dimension() == 3:
convex.GetPointIds().SetNumberOfIds(data.shape[0])
for id_, vertice in enumerate(data):
points.InsertNextPoint(vertice[0], vertice[1], vertice[2])
convex.GetPointIds().SetId(id_, id_)
elif self.io.dimension() == 2:
number_of_vertices = data.shape[0]
convex.GetPointIds().SetNumberOfIds(data.shape[0]*2)
for id_, vertice in enumerate(data):
points.InsertNextPoint(vertice[0], vertice[1], - self.opts.depth_2d/2.0)
convex.GetPointIds().SetId(id_, id_)
points.InsertNextPoint(vertice[0], vertice[1], + self.opts.depth_2d/2.0)
convex.GetPointIds().SetId(id_+number_of_vertices, id_+number_of_vertices)
source = ConvexSource(convex, points)
self.readers[shape_name] = source
# not a source!
self.datasets[shape_name] = source.GetUnstructuredGridOutput()
mapper = vtk.vtkDataSetMapper()
add_compatiblity_methods(mapper)
mapper.SetInputData(source.GetUnstructuredGridOutput())
self.mappers[shape_name] = (x for x in [mapper])
else:
assert shape_type == 'primitive'
primitive = self.io.shapes()[shape_name].attrs['primitive']
attrs = self.io.shapes()[shape_name][:][0]
if primitive == 'Sphere':
source = vtk.vtkSphereSource()
source.SetRadius(attrs[0])
source.SetThetaResolution(15)
source.SetPhiResolution(15)
elif primitive == 'Cone':
source = vtk.vtkConeSource()
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
source.SetResolution(15)
source.SetDirection(0, 1, 0) # needed
elif primitive == 'Cylinder':
source = vtk.vtkCylinderSource()
source.SetResolution(15)
source.SetRadius(attrs[0])
source.SetHeight(attrs[1])
# source.SetDirection(0,1,0)
elif primitive == 'Box':
source = vtk.vtkCubeSource()
source.SetXLength(attrs[0])
source.SetYLength(attrs[1])
source.SetZLength(attrs[2])
elif primitive == 'Capsule':
sphere1 = vtk.vtkSphereSource()
sphere1.SetRadius(attrs[0])
sphere1.SetCenter(0, attrs[1] / 2, 0)
sphere1.SetThetaResolution(15)
sphere1.SetPhiResolution(15)
sphere1.Update()
sphere2 = vtk.vtkSphereSource()
sphere2.SetRadius(attrs[0])
sphere2.SetCenter(0, -attrs[1] / 2, 0)
sphere2.SetThetaResolution(15)
sphere2.SetPhiResolution(15)
sphere2.Update()
cylinder = vtk.vtkCylinderSource()
cylinder.SetRadius(attrs[0])
cylinder.SetHeight(attrs[1])
cylinder.SetResolution(15)
cylinder.Update()
data = vtk.vtkMultiBlockDataSet()
data.SetNumberOfBlocks(3)
data.SetBlock(0, sphere1.GetOutput())
data.SetBlock(1, sphere2.GetOutput())
data.SetBlock(2, cylinder.GetOutput())
source = vtk.vtkMultiBlockDataGroupFilter()
add_compatiblity_methods(source)
source.AddInputData(data)
elif primitive == 'Disk':
source = vtk.vtkCylinderSource()
source.SetResolution(100)
source.SetRadius(attrs[0])
source.SetHeight(self.opts.depth_2d)
elif primitive == 'Box2d':
source = vtk.vtkCubeSource()
source.SetXLength(attrs[0])
source.SetYLength(attrs[1])
source.SetZLength(self.opts.depth_2d)
self.readers[shape_name] = source
mapper = vtk.vtkCompositePolyDataMapper()
if not self.opts.imr:
mapper.ImmediateModeRenderingOff()
mapper.SetInputConnection(source.GetOutputPort())
self.mappers[shape_name] = (x for x in [mapper])
if self.opts.with_edges:
mapper_edge = vtk.vtkCompositePolyDataMapper()
if not self.opts.imr:
mapper_edge.ImmediateModeRenderingOff()
mapper_edge.SetInputConnection(source.GetOutputPort())
self.mappers_edges[shape_name] = (y for y in [mapper_edge])
def init_shapes(self):
self.print_verbose_level(1,'init_shapes')
for shape_name in self.io.shapes():
self.init_shape(shape_name)
for shape_name in self.mappers.keys():
if shape_name not in self.unfrozen_mappers:
self.unfrozen_mappers[shape_name] = next(self.mappers[shape_name])
if self.opts.with_edges:
for shape_name in self.mappers_edges.keys():
if shape_name not in self.unfrozen_mappers_edges:
self.unfrozen_mappers_edges[shape_name] = next(self.mappers_edges[shape_name])
def init_contactor(self, contactor_instance_name, instance, instid):
self.print_verbose_level(2,'init_contactor', contactor_instance_name)
contactor = instance[contactor_instance_name]
contact_shape_indx = None
if 'shape_name' not in contactor.attrs:
print("Warning: old format: ctr.name must be ctr.shape_name for contact {0}".format(contactor_instance_name))
shape_attr_name='name'
else:
shape_attr_name='shape_name'
if 'group' in contactor.attrs:
collision_group = contactor.attrs['group']
else:
collision_group = -1
if 'type' in contactor.attrs:
contact_type = contactor.attrs['type']
contact_index = contactor.attrs['contact_index']
contact_shape_indx = (contact_type, contactor.attrs[shape_attr_name],
contact_index)
else:
contact_shape_indx = contactor.attrs[shape_attr_name]
try:
# work-around h5py unicode bug
# https://github.com/h5py/h5py/issues/379
contact_shape_indx = contact_shape_indx.decode('utf-8')
except AttributeError:
pass
if not (self.opts.global_filter or self.opts.export):
actor = vtk.vtkActor()
if self.opts.with_edges:
actor_edge = vtk.vtkActor()
if instance.attrs.get('mass', 0) > 0:
# objects that may move
self.dynamic_actors[instid].append((actor, contact_shape_indx,
collision_group))
actor.GetProperty().SetOpacity(
self.config.get('dynamic_opacity', 0.7))
actor.GetProperty().SetColor(
instance.attrs.get('color',
self.config.get('dynamic_bodies_color', [0.3,0.3,0.3])))
if self.opts.with_edges:
self.dynamic_actors[instid].append((actor_edge, contact_shape_indx,
collision_group))
actor_edge.GetProperty().SetOpacity(
self.config.get('dynamic_opacity', 1.0))
actor_edge.GetProperty().SetRepresentationToWireframe()
else:
# objects that are not supposed to move
self.static_actors[instid].append((actor, contact_shape_indx,
collision_group))
actor.GetProperty().SetOpacity(
self.config.get('static_opacity', 1.0))
actor.GetProperty().SetColor(
instance.attrs.get('color',
self.config.get('static_bodies_color', [0.5,0.5,0.5])))
if self.opts.with_random_color :
actor.GetProperty().SetColor(random_color())
if self.opts.with_edges:
actor_edge.GetProperty().SetColor(random_color())
actor.SetMapper(self.unfrozen_mappers[contact_shape_indx])
if self.opts.with_edges:
actor_edge.SetMapper(self.unfrozen_mappers_edges[contact_shape_indx])
if not (self.opts.global_filter or self.opts.export):
self.renderer.AddActor(actor)
if self.opts.with_edges:
self.renderer.AddActor(actor_edge)
transform = vtk.vtkTransform()
transformer = vtk.vtkTransformFilter()
if contact_shape_indx in self.readers:
transformer.SetInputConnection(
self.readers[contact_shape_indx].GetOutputPort())
else:
transformer.SetInputData(self.datasets[contact_shape_indx])
if isinstance(contact_shape_indx, tuple):
contact_shape_name = contact_shape_indx[1]
else:
contact_shape_name = contact_shape_indx
if 'scale' in self.io.shapes()[contact_shape_name].attrs:
scale = self.io.shapes()[contact_shape_name].attrs['scale']
scale_transform = vtk.vtkTransform()
scale_transform.Scale(scale, scale, scale)
scale_transform.SetInput(transform)
transformer.SetTransform(scale_transform)
if not (self.opts.global_filter or self.opts.export):
actor.SetUserTransform(scale_transform)
if self.opts.with_edges:
actor_edge.SetUserTransform(scale_transform)
else:
transformer.SetTransform(transform)
if not (self.opts.global_filter or self.opts.export):
actor.SetUserTransform(transform)
if self.opts.with_edges:
actor_edge.SetUserTransform(transform)
self.transformers[contact_shape_indx] = transformer
self.transforms[instid].append(transform)
if 'center_of_mass' in instance.attrs:
center_of_mass = instance.\
attrs['center_of_mass'].astype(float)
else:
center_of_mass = [0., 0., 0.]
offset_orientation = contactor.attrs['orientation'].astype(float)
# For a disk, we change the orientation offset since vtkCylinderSource
# is directed along the y-axis by default. The disk shape is invariant
# under rotations about the z-axis, so the stored orientation can
# simply be overwritten.
try:
if self.io.shapes()[contact_shape_name].attrs['primitive'] == 'Disk':
offset_orientation = [math.cos(pi/4.0), math.sin(pi/4.0), 0., 0.]
except Exception:
pass
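# (the quaternion [cos(pi/4), sin(pi/4), 0, 0] encodes a rotation of
# pi/2 about the x-axis, which maps the cylinder's y-axis onto the
# z-axis)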
self.offsets[instid].append(
(numpy.subtract(contactor.attrs['translation'].astype(float),
center_of_mass),
offset_orientation))
self.cell_connectors[instid] = CellConnector(
instid,
data_names=['instance', 'translation',
'velocity(linear)','velocity(angular)',
'kinetic_energy'],
data_sizes=[1, 3, 3, 3, 1])
self.cell_connectors[instid].SetInputConnection(
transformer.GetOutputPort())
self.objects_collector.AddInputConnection(
self.cell_connectors[instid].GetOutputPort())
self.cell_connectors[instid].Update()
def init_instance(self, instance_name):
self.print_verbose_level(2,'init_instance', instance_name)
instance = self.io.instances()[instance_name]
instid = int(instance.attrs['id'])
self.transforms[instid] = []
self.offsets[instid] = []
if 'time_of_birth' in instance.attrs:
self.times_of_birth[instid] = instance.attrs['time_of_birth']
if 'time_of_death' in instance.attrs:
self.times_of_death[instid] = instance.attrs['time_of_death']
if 'mass' in instance.attrs:
# a dynamic instance
self.mass[instid] = instance.attrs['mass']
if 'inertia' in instance.attrs:
inertia = instance.attrs['inertia']
if self.io.dimension() ==3 :
if len(inertia.shape) > 1 and inertia.shape[0] == inertia.shape[1] == 3:
self.inertia[instid] = inertia
else:
self.inertia[instid] = numpy.zeros((3, 3))
self.inertia[instid][0, 0] = inertia[0]
self.inertia[instid][1, 1] = inertia[1]
self.inertia[instid][2, 2] = inertia[2]
elif self.io.dimension() ==2 :
self.inertia[instid] = inertia
else:
if self.io.dimension() ==3 :
self.inertia[instid] = numpy.eye(3)
elif self.io.dimension() ==2 :
self.inertia[instid] = 1.0
else:
pass
if instid >= 0:
self.dynamic_actors[instid] = list()
else:
self.static_actors[instid] = list()
for contactor_instance_name in instance:
self.init_contactor(contactor_instance_name, instance, instid)
def init_instances(self):
self.print_verbose_level(1,'init_instances')
for instance_name in self.io.instances():
self.init_instance(instance_name)
# this sets the position for all transforms associated with an instance
def set_position_i(self, instance, q0, q1, q2, q3, q4, q5, q6):
# all objects are set to a nan position at startup,
# so they are invisible
if (numpy.any(numpy.isnan([q0, q1, q2, q3, q4, q5, q6]))
or numpy.any(numpy.isinf([q0, q1, q2, q3, q4, q5, q6]))):
print('Bad position for object number', int(instance),' :', q0, q1, q2, q3, q4, q5, q6)
else:
q = Quaternion((q3, q4, q5, q6))
for transform, offset in zip(self.transforms[instance],
self.offsets[instance]):
p = q.rotate(offset[0])
r = q * Quaternion(offset[1])
transform.Identity()
transform.Translate(q0 + p[0], q1 + p[1], q2 + p[2])
axis, angle = r.axisAngle()
transform.RotateWXYZ(angle * 180. / pi,
axis[0],
axis[1],
axis[2])
def set_position(self, data):
self.print_verbose_level(2,'set_position')
self.set_position_v(data[:, 1],
data[:, 2],
data[:, 3],
data[:, 4],
data[:, 5],
data[:, 6],
data[:, 7],
data[:, 8])
def build_set_functions(self, cc=None):
if cc is None: cc = self.cell_connectors
# numpy vectorization works on column vectors for each argument
self.set_position_v = numpy.vectorize(self.set_position_i)
# here numpy vectorization is used with a column vector and a
# scalar for the time argument
self.set_visibility_v = numpy.vectorize(self.set_dynamic_instance_visibility)
self.set_visibility_static_v = numpy.vectorize(self.set_static_instance_visibility)
def set_velocity(instance, v0, v1, v2, v3, v4, v5):
if instance in cc:
cc[instance]._datas[2][:] = [v0, v1, v2]
cc[instance]._datas[3][:] = [v3, v4, v5]
if self.io.dimension() == 3 :
cc[instance]._datas[4][:] = \
0.5*(self.mass[instance]*(v0*v0+v1*v1+v2*v2) +
numpy.dot([v3, v4, v5],
numpy.dot(self.inertia[instance],
[v3, v4, v5])))
elif self.io.dimension() == 2 :
kinetic_energy= 0.5*(self.mass[instance]*(v0*v0+v1*v1+v2*v2) +
self.inertia[instance]*(v5*v5))
#print('velo', instance, v0, v1, v2, v3, v4, v5)
#print('mass', self.mass[instance])
#print('inertia', self.inertia[instance])
#print('kinetic_energy', kinetic_energy)
cc[instance]._datas[4][:] = kinetic_energy
self.set_velocity_v = numpy.vectorize(set_velocity)
def set_translation(instance, x0, x1, x2 ):
if instance in cc:
cc[instance]._datas[1][:] = [x0, x1, x2]
self.set_translation_v = numpy.vectorize(set_translation)
def set_instance(instance):
if instance in cc:
cc[instance]._datas[0][:] = [instance]
self.set_instance_v = numpy.vectorize(set_instance)
# set visibility for all actors associated to a dynamic instance
def set_dynamic_instance_visibility(self, instance, time):
tob = self.times_of_birth.get(instance, -1)
tod = self.times_of_death.get(instance, infinity)
has_avatar = False
if self.opts.visible_mode=='avatars' or self.opts.visible_mode=='contactors':
for actor, index, group in self.dynamic_actors[instance]:
if group==-1:
has_avatar = True
break
if (tob <= time and tod >= time):
for actor, index, group in self.dynamic_actors[instance]:
if not has_avatar or self.opts.visible_mode == 'all':
actor.VisibilityOn()
elif self.opts.visible_mode == 'avatars' and group == -1 and has_avatar:
actor.VisibilityOn()
elif self.opts.visible_mode == 'contactors' and group != -1 and has_avatar:
actor.VisibilityOn()
else:
actor.VisibilityOff()
else:
for actor, index, group in self.dynamic_actors[instance]:
actor.VisibilityOff()
# set visibility for all actors associated to a static instance
def set_static_instance_visibility(self, instance, time):
tob = self.times_of_birth.get(instance, -1)
tod = self.times_of_death.get(instance, infinity)
has_avatar = False
if self.opts.visible_mode=='avatars' or self.opts.visible_mode=='contactors':
for actor, index, group in self.static_actors[instance]:
if group==-1:
has_avatar = True
break
if (tob <= time and tod >= time):
for actor, index, group in self.static_actors[instance]:
if not has_avatar or self.opts.visible_mode == 'all':
actor.VisibilityOn()
elif self.opts.visible_mode == 'avatars' and group == -1 and has_avatar:
actor.VisibilityOn()
elif self.opts.visible_mode == 'contactors' and group != -1 and has_avatar:
actor.VisibilityOn()
else:
actor.VisibilityOff()
else:
for actor, index, group in self.static_actors[instance]:
actor.VisibilityOff()
def set_dynamic_actors_visibility(self, time):
self.set_visibility_v(list(self.dynamic_actors.keys()), time)
def set_static_actors_visibility(self, time):
self.set_visibility_static_v(list(self.static_actors.keys()), time)
# callback maker for scale manipulation
def make_scale_observer(self, glyphs):
def scale_observer(obj, event):
slider_repres = obj.GetRepresentation()
scale_at_pos = slider_repres.GetValue()
for glyph in glyphs:
for k in glyph:
glyph[k].SetScaleFactor(
scale_at_pos * glyph[k]._scale_fact)
return scale_observer
# callback maker for time scale manipulation
def make_time_scale_observer(self, time_slider_repres, time_observer):
delta_time = self.max_time - self.min_time
def time_scale_observer(obj, event):
slider_repres = obj.GetRepresentation()
time_scale_at_pos = 1. - slider_repres.GetValue()
current_time = time_observer._time
shift = (current_time - self.min_time) / delta_time
xmin_time = self.min_time + time_scale_at_pos / 2. * delta_time
xmax_time = self.max_time - time_scale_at_pos / 2. * delta_time
xdelta_time = xmax_time - xmin_time
new_mintime = max(self.min_time, current_time - xdelta_time)
new_maxtime = min(self.max_time, current_time + xdelta_time)
time_slider_repres.SetMinimumValue(new_mintime)
time_slider_repres.SetMaximumValue(new_maxtime)
return time_scale_observer
# make a slider widget and its representation
def make_slider(self, title, observer, interactor,
startvalue, minvalue, maxvalue, cx1, cy1, cx2, cy2):
slider_repres = vtk.vtkSliderRepresentation2D()
slider_repres.SetMinimumValue(
minvalue - (maxvalue - minvalue) / 100)
slider_repres.SetMaximumValue(
maxvalue + (maxvalue - minvalue) / 100)
slider_repres.SetValue(startvalue)
slider_repres.SetTitleText(title)
slider_repres.GetPoint1Coordinate().\
SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint1Coordinate().SetValue(cx1, cy1)
slider_repres.GetPoint2Coordinate().\
SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint2Coordinate().SetValue(cx2, cy2)
slider_repres.SetSliderLength(0.02)
slider_repres.SetSliderWidth(0.03)
slider_repres.SetEndCapLength(0.01)
slider_repres.SetEndCapWidth(0.03)
slider_repres.SetTubeWidth(0.005)
slider_repres.SetLabelFormat('%f')
slider_repres.SetTitleHeight(0.02)
slider_repres.SetLabelHeight(0.02)
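        # Widget colors are the inverse of the background for contrast; when
        # the background is near mid-grey the inverse is too similar, so fall
        # back to plain white.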
        background_color = self.config.get('background_color', [.0,.0,.0])
        reverse_background_color = numpy.ones(3) - background_color
        if (numpy.linalg.norm(background_color - reverse_background_color) < 0.2):
            reverse_background_color = numpy.ones(3)
        slider_repres.GetSliderProperty().SetColor(*reverse_background_color)
        slider_repres.GetTitleProperty().SetColor(*reverse_background_color)
        slider_repres.GetLabelProperty().SetColor(*reverse_background_color)
        slider_repres.GetTubeProperty().SetColor(*reverse_background_color)
        slider_repres.GetCapProperty().SetColor(*reverse_background_color)
slider_widget = vtk.vtkSliderWidget()
slider_widget.SetInteractor(interactor)
slider_widget.SetRepresentation(slider_repres)
slider_widget.KeyPressActivationOff()
slider_widget.SetAnimationModeToAnimate()
slider_widget.SetEnabled(True)
slider_widget.AddObserver('InteractionEvent', observer)
return slider_widget, slider_repres
def setup_initial_position(self):
if self.opts.export:
# For time_of_birth specifications with export mode:
# a 0 scale for objects whose existence is deferred.
# The correct transform will be set in set_position when
            # the object appears in pos_data.
            # One has to disable vtkMath generic warnings in order to avoid
            # plenty of 'Unable to factor linear system' messages.
vtk.vtkMath.GlobalWarningDisplayOff()
for instance_name in self.io.instances():
instance = self.io.instances()[instance_name]
instid = int(instance.attrs['id'])
for transform in self.transforms[instid]:
transform.Scale(0, 0, 0)
self.time0 = None
if len(self.io_reader._times) > 0:
# Positions at first time step
self.time0 = self.io_reader._times[0]
self.io_reader.SetTime(self.time0)
#self.pos_t0 = dsa.WrapDataObject(self.io_reader.GetOutputDataObject(0).GetFieldData().GetArray('pos_data'))
self.pos_t0 = [self.io_reader.pos_data]
else:
            # this is for the case where the simulation has not been run and
            # time does not exist
self.time0 = 0
self.id_t0 = None
self.pos_t0 = numpy.array([
numpy.hstack(([0.,
self.io.instances()[k].attrs['id']]
,self.io.instances()[k].attrs['translation']
,self.io.instances()[k].attrs['orientation']))
for k in self.io.instances()
if self.io.instances()[k].attrs['id'] >= 0])
if numpy.shape(self.io_reader._spos_data)[0] > 0:
self.set_position(self.io_reader._spos_data)
# print('self.io_reader._spos_data', self.io_reader._spos_data)
# static objects are always visible
#for instance, actors in self.static_actors.items():
# for actor,_,_ in actors:
# actor.VisibilityOn()
self.set_position(*self.pos_t0)
self.set_static_actors_visibility(self.time0)
self.set_dynamic_actors_visibility(self.time0)
def setup_vtk_renderer(self):
        self.print_verbose_level(1,'setup_vtk_renderer')
self.renderer_window.AddRenderer(self.renderer)
self.interactor_renderer.SetRenderWindow(self.renderer_window)
self.interactor_renderer.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# http://www.itk.org/Wiki/VTK/Depth_Peeling
if self.opts.depth_peeling:
# Use a render window with alpha bits (as initial value is 0 (false) ):
self.renderer_window.SetAlphaBitPlanes(1)
# Force to not pick a framebuffer with a multisample buffer ( as initial
# value is 8):
self.renderer_window.SetMultiSamples(0)
# Choose to use depth peeling (if supported) (initial value is 0
# (false) )
self.renderer.SetUseDepthPeeling(1)
# Set depth peeling parameters.
self.renderer.SetMaximumNumberOfPeels(
self.opts.maximum_number_of_peels)
# Set the occlusion ratio (initial value is 0.0, exact image)
self.renderer.SetOcclusionRatio(self.opts.occlusion_ratio)
# Set the initial camera position and orientation if specified
if self.opts.initial_camera[0] is not None:
self.renderer.GetActiveCamera().SetPosition(*self.opts.initial_camera[0])
if self.opts.initial_camera[1] is not None:
self.renderer.GetActiveCamera().SetFocalPoint(*self.opts.initial_camera[1])
if self.opts.initial_camera[2] is not None:
self.renderer.GetActiveCamera().SetViewUp(*self.opts.initial_camera[2])
if self.opts.initial_camera[4] is not None:
self.renderer.GetActiveCamera().SetClippingRange(*self.opts.initial_camera[4])
else:
self.renderer.ResetCameraClippingRange()
if self.opts.initial_camera[3] is not None:
self.renderer.GetActiveCamera().ParallelProjectionOn()
self.renderer.GetActiveCamera().SetParallelScale(
self.opts.initial_camera[3])
self.image_maker = vtk.vtkWindowToImageFilter()
self.image_maker.SetInput(self.renderer_window)
self.recorder = vtk.vtkOggTheoraWriter()
self.recorder.SetQuality(2)
self.recorder.SetRate(self.opts.frames_per_second)
self.recorder.SetFileName(os.path.splitext(self.opts.io_filename)[0]+'.avi')
self.recorder.SetInputConnection(self.image_maker.GetOutputPort())
self.writer = vtk.vtkPNGWriter()
self.writer.SetInputConnection(self.image_maker.GetOutputPort())
# Create a vtkLight, and set the light parameters.
light = vtk.vtkLight()
light.SetFocalPoint(0, 0, 0)
light.SetPosition(0, 0, 500)
# light.SetLightTypeToHeadlight()
self.renderer.AddLight(light)
hlight = vtk.vtkLight()
hlight.SetFocalPoint(0, 0, 0)
# hlight.SetPosition(0, 0, 500)
hlight.SetLightTypeToHeadlight()
self.renderer.AddLight(hlight)
self.renderer.SetBackground(*self.config.get('background_color', [.0,.0,.0]))
self.renderer_window.SetSize(*self.config['window_size'])
self.renderer_window.SetWindowName('vview: ' + self.opts.io_filename)
def setup_charts(self):
self.print_verbose_level(1,'setup_charts')
        # Warning! numpy_support offers a view on the numpy array;
        # the numpy array must not be garbage collected!
nxtime = self.io_reader._isolv_data[:, 0]
nxiters = self.io_reader._isolv_data[:, 1]
nprecs = self.io_reader._isolv_data[:, 2]
xtime = numpy_support.numpy_to_vtk(nxtime)
xiters = numpy_support.numpy_to_vtk(nxiters)
xprecs = numpy_support.numpy_to_vtk(nprecs)
xtime.SetName('time')
xiters.SetName('iterations')
xprecs.SetName('precisions')
table = vtk.vtkTable()
table.AddColumn(xtime)
table.AddColumn(xiters)
table.AddColumn(xprecs)
# table.Dump()
tview_iter = vtk.vtkContextView()
tview_prec = vtk.vtkContextView()
chart_iter = vtk.vtkChartXY()
chart_prec = vtk.vtkChartXY()
tview_iter.GetScene().AddItem(chart_iter)
tview_prec.GetScene().AddItem(chart_prec)
self.iter_plot = chart_iter.AddPlot(vtk.vtkChart.LINE)
self.iter_plot.SetLabel('Solver iterations')
self.iter_plot.GetXAxis().SetTitle('time')
self.iter_plot.GetYAxis().SetTitle('iterations')
self.prec_plot = chart_prec.AddPlot(vtk.vtkChart.LINE)
self.prec_plot.SetLabel('Solver precisions')
self.prec_plot.GetXAxis().SetTitle('time')
self.prec_plot.GetYAxis().SetTitle('precisions')
add_compatiblity_methods(self.iter_plot)
add_compatiblity_methods(self.prec_plot)
self.iter_plot.SetInputData(table, 'time', 'iterations')
self.prec_plot.SetInputData(table, 'time', 'precisions')
self.iter_plot.SetWidth(5.0)
self.prec_plot.SetWidth(5.0)
self.iter_plot.SetColor(0, 255, 0, 255)
self.prec_plot.SetColor(0, 255, 0, 255)
tview_iter.GetInteractor().AddObserver('RightButtonReleaseEvent',
self.input_observer.iter_plot_observer)
tview_prec.GetInteractor().AddObserver('RightButtonReleaseEvent',
self.input_observer.prec_plot_observer)
tview_iter.GetRenderer().GetRenderWindow().SetSize(600, 200)
tview_prec.GetRenderer().GetRenderWindow().SetSize(600, 200)
tview_iter.GetInteractor().Initialize()
# tview_iter.GetInteractor().Start()
tview_iter.GetRenderer().SetBackground(.9, .9, .9)
tview_iter.GetRenderer().Render()
tview_prec.GetInteractor().Initialize()
# tview_prec.GetInteractor().Start()
tview_prec.GetRenderer().SetBackground(.9, .9, .9)
tview_prec.GetRenderer().Render()
self.tview_iter = tview_iter
self.tview_prec = tview_prec
def setup_sliders(self, times):
self.print_verbose_level(1,'setup_sliders')
if len(times) > 0:
slider_repres = vtk.vtkSliderRepresentation2D()
if self.min_time is None:
self.min_time = times[0]
if self.max_time is None:
self.max_time = times[len(times) - 1]
slider_repres.SetMinimumValue(self.min_time)
slider_repres.SetMaximumValue(self.max_time)
slider_repres.SetValue(self.min_time)
slider_repres.SetTitleText("time")
slider_repres.GetPoint1Coordinate(
).SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint1Coordinate().SetValue(0.4, 0.9)
slider_repres.GetPoint2Coordinate(
).SetCoordinateSystemToNormalizedDisplay()
slider_repres.GetPoint2Coordinate().SetValue(0.9, 0.9)
slider_repres.SetSliderLength(0.02)
slider_repres.SetSliderWidth(0.03)
slider_repres.SetEndCapLength(0.01)
slider_repres.SetEndCapWidth(0.03)
slider_repres.SetTubeWidth(0.005)
slider_repres.SetLabelFormat("%3.4lf")
slider_repres.SetTitleHeight(0.02)
slider_repres.SetLabelHeight(0.02)
            background_color = self.config.get('background_color', [.0,.0,.0])
            reverse_background_color = numpy.ones(3) - background_color
            if (numpy.linalg.norm(background_color - reverse_background_color) < 0.2):
                reverse_background_color = numpy.ones(3)
            slider_repres.GetSliderProperty().SetColor(*reverse_background_color)
            slider_repres.GetTitleProperty().SetColor(*reverse_background_color)
            slider_repres.GetLabelProperty().SetColor(*reverse_background_color)
            slider_repres.GetTubeProperty().SetColor(*reverse_background_color)
            slider_repres.GetCapProperty().SetColor(*reverse_background_color)
slider_widget = vtk.vtkSliderWidget()
self.slider_widget = slider_widget
slider_widget.SetInteractor(self.interactor_renderer)
slider_widget.SetRepresentation(slider_repres)
slider_widget.KeyPressActivationOff()
slider_widget.SetAnimationModeToAnimate()
slider_widget.SetEnabled(True)
self.input_observer = InputObserver(self, times, slider_repres)
slider_widget.AddObserver("InteractionEvent", self.input_observer.time)
else:
self.input_observer = InputObserver(self)
self.interactor_renderer.AddObserver('KeyPressEvent', self.input_observer.key)
self.interactor_renderer.AddObserver(
'TimerEvent', self.input_observer.recorder_observer)
if self.io.contact_forces_data().shape[0] > 0:
self.slwsc, self.slrepsc = self.make_slider(
'CF scale',
self.make_scale_observer([self.cone_glyph, self.cylinder_glyph,
self.sphere_glypha, self.sphere_glyphb,
self.arrow_glyph]),
self.interactor_renderer,
self.opts.cf_scale_factor, self.opts.cf_scale_factor -
self.opts.cf_scale_factor / 2,
self.opts.cf_scale_factor + self.opts.cf_scale_factor / 2,
0.03, 0.03, 0.03, 0.7)
if len(times) > 0:
self.xslwsc, self.xslrepsc = self.make_slider(
'Time scale',
self.make_time_scale_observer(slider_repres,
self.input_observer),
self.interactor_renderer,
self.opts.time_scale_factor, self.opts.time_scale_factor -
self.opts.time_scale_factor / 2,
self.opts.time_scale_factor + self.opts.time_scale_factor / 2,
0.1, 0.9, 0.3, 0.9)
def setup_axes(self):
self.print_verbose_level(1,'setup_axes')
# display coordinates axes
self.axes = vtk.vtkAxesActor()
self.axes.SetTotalLength(1.0, 1.0, 1.0)
self.widget = vtk.vtkOrientationMarkerWidget()
# self.widget.SetOutlineColor( 0.9300, 0.5700, 0.1300 )
self.widget.SetOrientationMarker(self.axes)
self.widget.SetInteractor(self.interactor_renderer)
# self.widget.SetViewport( 0.0, 0.0, 40.0, 40.0 );
self.widget.SetEnabled(True)
self.widget.InteractiveOn()
# this should be extracted from the VView class
def export(self):
self.print_verbose('export start')
times = self.io_reader._times[
self.opts.start_step:self.opts.end_step:self.opts.stride]
ntime = len(times)
if self.opts.gen_para_script:
# just the generation of a parallel command
options_str = ''
if self.opts.ascii_mode:
options_str += '--ascii '
if self.opts.global_filter:
options_str += '--global-filter '
if self.opts.depth_2d != 0.1:
options_str += ' --depth-2d='+str(self.opts.depth_2d)
ntimes_proc = int(ntime / self.opts.nprocs)
s = ''
for i in range(self.opts.nprocs):
s += '{0}/{1} '.format(ntimes_proc*i, ntimes_proc*(i+1))
print('#!/bin/sh')
print('parallel --verbose', sys.argv[0], self.opts.io_filename,
options_str, '--start-step={//} --end-step={/} :::', s)
else:
# export
big_data_writer = vtk.vtkXMLMultiBlockDataWriter()
add_compatiblity_methods(big_data_writer)
big_data_writer.SetInputConnection(self.big_data_collector.GetOutputPort())
if self.opts.ascii_mode:
big_data_writer.SetDataModeToAscii()
k = self.opts.start_step
packet = int(ntime/100)+1
for time in times:
k = k + self.opts.stride
if (k % packet == 0):
sys.stdout.write('.')
self.io_reader.SetTime(time)
spos_data = self.io_reader.pos_static_data
#print('spos_data at time 1' , time, spos_data)
if spos_data.size > 0:
self.set_position_v(spos_data[:, 1], spos_data[:, 2],
spos_data[:, 3],
spos_data[:, 4], spos_data[:, 5],
spos_data[:, 6],
spos_data[:, 7], spos_data[:, 8])
pos_data = self.io_reader.pos_data
velo_data = self.io_reader.velo_data
self.set_position_v(
pos_data[:, 1], pos_data[:, 2], pos_data[:, 3],
pos_data[:, 4], pos_data[:, 5], pos_data[:, 6],
pos_data[:, 7], pos_data[:, 8])
self.set_velocity_v(
velo_data[:, 1],
velo_data[:, 2],
velo_data[:, 3],
velo_data[:, 4],
velo_data[:, 5],
velo_data[:, 6],
velo_data[:, 7])
self.set_translation_v(
pos_data[:, 1],
pos_data[:, 2],
pos_data[:, 3],
pos_data[:, 4],
)
self.set_instance_v(pos_data[:, 1])
big_data_writer.SetFileName('{0}-{1}.{2}'.format(
os.path.splitext(os.path.basename(self.opts.io_filename))[0],
k, big_data_writer.GetDefaultFileExtension()))
if self.opts.global_filter:
self.big_data_geometry_filter.Update()
else:
self.big_data_collector.Update()
big_data_writer.Write()
big_data_writer.Write()
def export_raw_data(self):
times = self.io_reader._times[
self.opts.start_step:self.opts.end_step:self.opts.stride]
ntime = len(times)
export_2d = False
        if self.io.dimension() == 2:
            export_2d = True
            print('We export raw data for 2D objects')
# export
k = self.opts.start_step
packet = int(ntime/100)+1
# ######## position output ########
# nvalue = ndyna*7+1
# position_output = numpy.empty((ntime,nvalue))
# #print('position_output shape', numpy.shape(position_output))
# position_output[:,0] = times[:]
position_output = {}
velocity_output = {}
velocity_absolute_output = {}
for time in times:
k = k + self.opts.stride
if (k % packet == 0):
sys.stdout.write('.')
self.io_reader.SetTime(time)
pos_data = self.io_reader.pos_data
velo_data = self.io_reader.velo_data
ndyna=pos_data.shape[0]
for i in range(ndyna):
bdy_id = int(pos_data[i,1])
######## position output ########
if self.opts._export_position :
nvalue=pos_data.shape[1]
position_output_bdy = position_output.get(bdy_id)
if position_output_bdy is None:
position_output[bdy_id] = []
position_output_body = position_output[bdy_id]
position_output_body.append([])
position_output_body[-1].append(time)
if export_2d:
                        data_2d = [pos_data[i,2],pos_data[i,3],numpy.arccos(pos_data[i,5]/2.0)]
position_output_body[-1].extend(data_2d)
#position_output_body[-1].extend(pos_data[i,2:nvalue])
else:
position_output_body[-1].extend(pos_data[i,2:nvalue])
position_output_body[-1].append(bdy_id)
######## velocity output ########
if self.opts._export_velocity :
nvalue=velo_data.shape[1]
velocity_output_bdy = velocity_output.get(bdy_id)
if velocity_output_bdy is None:
velocity_output[bdy_id] = []
velocity_output_body = velocity_output[bdy_id]
velocity_output_body.append([])
velocity_output_body[-1].append(time)
velocity_output_body[-1].extend(velo_data[i,2:nvalue])
velocity_output_body[-1].append(bdy_id)
######## velocity in absolute frame output ########
if self.opts._export_velocity_in_absolute_frame :
nvalue=velo_data.shape[1]
[q1,q2,q3,q4] = pos_data[i,5:9]
q = Quaternion((q1, q2, q3, q4))
velo = q.rotate(velo_data[i,5:8])
velocity_absolute_output_bdy = velocity_absolute_output.get(bdy_id)
if velocity_absolute_output_bdy is None:
velocity_absolute_output[bdy_id] = []
velocity_absolute_output_body = velocity_absolute_output[bdy_id]
velocity_absolute_output_body.append([])
velocity_absolute_output_body[-1].append(time)
velocity_absolute_output_body[-1].extend(velo_data[i,2:5])
velocity_absolute_output_body[-1].extend(velo[:])
velocity_absolute_output_body[-1].append(bdy_id)
for bdy_id in position_output.keys():
            output = numpy.array(position_output[bdy_id])
import os
import re
import glob
import json
import torch
import random
import asyncio
import warnings
import itertools
import torchaudio
import numpy as np
#import asyncstdlib as a
from pathlib import Path
from tqdm import tqdm
from PIL import Image as PILImage
#from fastcache import clru_cache
from functools import lru_cache
#from async_lru import alru_cache
from itertools import cycle, islice, chain
from einops import rearrange, repeat
from collections import defaultdict
from tabulate import tabulate
from termcolor import colored
#from cachetools import cached, LRUCache, func
import multiprocessing as mp
import torch.utils.data as data
import torch.nn.functional as F
from torchvision.transforms import (
InterpolationMode, Compose, Resize, CenterCrop, ToTensor, Normalize
)
from .audio import (
make_transform, _extract_kaldi_spectrogram
)
from .image import make_clip_image_transform as make_image_transform
from clip import tokenize
def print_label_dist(cfg, echo, label_counts, label_map, ncol=30):
def short_name(x):
if len(x) > 15:
return x[:13] + ".."
return x
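    # Layout note: flatten to [name, count, name, count, ...], pad to a
    # multiple of ncol, then re-slice column-wise so tabulate prints
    # ncol//2 (category, #) pairs per row.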
data = list(itertools.chain(*[
[short_name(label_map[i]), int(v)] for i, v in enumerate(label_counts)
]))
total_num_instances = sum(data[1::2])
data.extend([None] * (ncol - (len(data) % ncol)))
data = itertools.zip_longest(*[data[i::ncol] for i in range(ncol)])
table = tabulate(
data,
headers=["category", "#"] * (ncol // 2),
tablefmt="pipe",
numalign="right",
stralign="center",
)
msg = colored(table, "cyan")
echo(f"Distribution of instances among all {len(label_map)} categories:\n{msg}")
class AudiosetNpz(data.Dataset):
""" `__getitem__' loads raw file from disk. The same inputs as AudiosetSrc.
"""
def __init__(self, cfg, data_name, train, label_map, weighted):
data_path = f"{cfg.data_root}/{data_name}.csv"
assert os.path.isfile(data_path), f"{data_path} is not a file."
self.cfg = cfg
self.label_map = label_map
self.num_label = len(label_map)
label_counts = np.zeros(self.num_label)
self.dataset = list()
with open(data_path, "r") as fr:
for iline, line in enumerate(fr):
record = json.loads(line)
if cfg.cat_label:
self._cat_label(record)
self.dataset.append(record)
if not train and iline + 1 == cfg.eval_samples:
break
if weighted: # save label distribution
for category in record["labels"]:
label_counts[
self.label_map[category][0]
] += 1
self.length = len(self.dataset)
if weighted: # compute sample weight
lid2label = {v[0]: re.sub(f"^{cfg.prompt}", "", v[1]).strip() for _, v in label_map.items()}
print_label_dist(cfg, print, label_counts, lid2label, ncol=18)
self.sample_weights = np.zeros(self.length)
label_counts = 1000.0 / (label_counts + 1.)
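            # Inverse-frequency weighting with add-one smoothing: each label's
            # weight is 1000 / (count + 1), and a sample's weight sums the
            # weights of its labels, so clips with rare labels are drawn more
            # often by a weighted sampler.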
for i, record in enumerate(self.dataset):
for category in record["labels"]:
self.sample_weights[i] += label_counts[
self.label_map[category][0]
]
self.audio_norms = cfg.audio.norms
# compatible with AudioSet and Balanced AudioSet
self.aclip_key = "aclip_128"
self.frame_key = "frame_224"
self.train = train
acfg = cfg.audio
self.transform_image = make_image_transform(cfg.resolution)
self.transform_audio, self.transform_fbank = make_transform(acfg)
def _shuffle(self):
pass
def _cat_label(self, record):
categories = record["labels"]
label_text = [re.sub(
f"^{self.cfg.prompt}", "", self.label_map[category][1]
).strip() for category in categories]
label_text = self.cfg.prompt + " " + ", ".join(label_text)
record["captions"] = [label_text]
record["captions_bpe"] = tokenize(
record["captions"], as_list=True
) # add bpe captions
def _image2numpy(self, fname):
if fname is not None:
try:
images = np.load(fname)
images = [images[key] for key in images.files if len(images[key]) != 0]
idx = np.random.choice(len(images), 1)[0] if self.train else int(np.ceil(len(images) / 2)) - 1
image = images[idx]
except Exception as e:
h = w = self.cfg.resolution
image = PILImage.fromarray(
(np.random.rand(h, w, 3) * 256).astype(np.uint8)
)
warnings.warn(f"use random image instead because `{e}` {fname}.")
image = self.transform_image(image).cpu().numpy()
else:
image = np.array([[[1]]])
return image
def _process_item(self, index):
akey = self.aclip_key
fkey = self.frame_key
sub_dir = self.dataset[index]["dir"]
name = self.dataset[index]["id"]
aclip = self.dataset[index][akey]
frame = images = self.dataset[index][fkey]
categories = self.dataset[index]["labels"]
sub_dir = "" if len(sub_dir) == 0 else f"{sub_dir}/"
aclip_file = f"{self.cfg.data_root}/{sub_dir}{akey}/{name}.{aclip}"
frame_file = f"{self.cfg.data_root}/{sub_dir}{fkey}/{name}.{frame}" if self.cfg.imagine else None
if not self.cfg.clf: # dummy
if self.cfg.cat_label:
record = self.dataset[index]
label, text, text_int = 0, record["captions"][0], record["captions_bpe"][0]
else:
ict = np.random.choice(len(categories), 1)[0] if self.train else 0
category = categories[ict]
label, text, text_int = self.label_map[category]
else: # classification task
label_set = set([self.label_map[category][0] for category in categories])
label = [1 if i in label_set else 0 for i in range(self.num_label)]
text_int = [0] # TODO concatenate all text pieces
item = {"text": text_int, "name": name}
return item, label, aclip_file, frame_file
def __getitem__(self, index):
item, label, aclip_file, frame_file = self._process_item(index)
max_audio_len = self.cfg.max_audio_len
audio = np.load(aclip_file)["flag"] # (..., time, freq): `flag' is used as the key accidentally
image = self._image2numpy(frame_file)
npad = max_audio_len - audio.shape[0]
if npad > 0:
audio = np.pad(audio, ((0, npad), (0, 0)), "constant", constant_values=(0., 0.))
if not self.cfg.audio.eval_norms and len(self.audio_norms) == 2:
mean, std = self.audio_norms
audio = (audio - mean) / std
#if self.train and self.transform_fbank is not None:
if not self.cfg.audio.eval_norms and self.train and self.transform_fbank is not None:
audio = self.transform_fbank(audio)
image = image[None]
audio = audio[None]
item.update({"image": image, "audio": audio, "label": label})
return item
def __len__(self):
return self.length
class AudiosetSrc(data.Dataset):
""" `__getitem__' loads raw file from disk.
"""
def __init__(self, cfg, data_name, train, label_map, weighted, filter_set=None, external_text=None):
data_path = f"{cfg.data_root}/{data_name}.csv"
assert os.path.isfile(data_path), f"{data_path} is not a file."
self.cfg = cfg
self.label_map = label_map
self.num_label = len(label_map)
label_counts = np.zeros(self.num_label)
self.dataset = list()
with open(data_path, "r") as fr:
for iline, line in enumerate(fr):
record = json.loads(line)
if filter_set is not None and record["id"] not in filter_set:
continue # let us skip this sample
if external_text is not None:
self._add_text(record, external_text)
elif cfg.cat_label:
self._cat_label(record)
self.dataset.append(record)
if not train and iline + 1 == cfg.eval_samples:
break
if weighted: # save label distribution
for category in record["labels"]:
label_counts[
self.label_map[category][0]
] += 1
self.length = len(self.dataset)
if weighted: # compute sample weight
lid2label = {v[0]: re.sub(f"^{cfg.prompt}", "", v[1]).strip() for _, v in label_map.items()}
print_label_dist(cfg, print, label_counts, lid2label, ncol=18)
self.sample_weights = np.zeros(self.length)
label_counts = 1000.0 / (label_counts + 1.)
for i, record in enumerate(self.dataset):
for category in record["labels"]:
self.sample_weights[i] += label_counts[
self.label_map[category][0]
]
self.audio_norms = cfg.audio.norms
# compatible with AudioSet and Balanced AudioSet
self.aclip_key = "clip" if "clip" in self.dataset[0] else "aclip"
self.frame_key = cfg.frame_key
self.train = train
acfg = cfg.audio
self.transform_image = make_image_transform(cfg.resolution)
self.transform_audio, self.transform_fbank = make_transform(acfg)
self.kaldi_params = {
"htk_compat": True,
"use_energy": False,
"window_type": 'hanning',
"num_mel_bins": acfg.num_mel_bins,
"dither": 0.0,
"frame_shift": 10
}
def _shuffle(self):
pass
def _add_text(self, record, external_text):
record["captions"] = external_text.get(
record["id"], [-1]
)
def _cat_label(self, record):
categories = record["labels"]
label_text = [re.sub(
f"^{self.cfg.prompt}", "", self.label_map[category][1]
).strip() for category in categories]
label_text = self.cfg.prompt + " " + ", ".join(label_text)
record["captions"] = [label_text]
record["captions_bpe"] = tokenize(
record["captions"], as_list=True
) # add bpe captions
def _process_item(self, index):
akey = self.aclip_key
fkey = self.frame_key
sub_dir = self.dataset[index]["dir"]
name = self.dataset[index]["id"]
aclip = self.dataset[index][akey][0]
frame = images = self.dataset[index][fkey]
categories = self.dataset[index]["labels"]
sub_dir = "" if len(sub_dir) == 0 else f"{sub_dir}/"
aclip_file = f"{self.cfg.data_root}/{sub_dir}{akey}/{name}.{aclip}"
frame_file = frame_emb_file = None
if self.cfg.imagine:
if isinstance(frame, str):
frame_file = f"{self.cfg.data_root}/{sub_dir}{fkey}/{name}.{frame}"
else:
idx = np.random.choice(len(images), 1)[0] if self.train else int(np.ceil(len(images) / 2)) - 1
frame_file = f"{self.cfg.data_root}/{sub_dir}{fkey}/{name}.{images[idx]}"
if self.cfg.frame_emb is not None:
frame_emb_file = f"{self.cfg.data_root}/{self.cfg.frame_emb}/{name}.{images[idx].rsplit('.', 1)[0]}.npz"
if not self.cfg.clf:
if self.cfg.text_emb is not None:
caption_indice = self.dataset[index]["captions"] # list of caption id
ict = np.random.choice(len(caption_indice), 1)[0] if self.train else 0
label, text, text_int = 0, "", caption_indice[ict]
text_int = f"{self.cfg.data_root}/caption/{self.cfg.text_emb}/{text_int}.npz"
elif self.cfg.cat_label:
record = self.dataset[index]
label, text, text_int = 0, record["captions"][0], record["captions_bpe"][0]
else:
ict = np.random.choice(len(categories), 1)[0] if self.train else 0
category = categories[ict]
label, text, text_int = self.label_map[category]
else: # classification task
label_set = set([self.label_map[category][0] for category in categories])
label = [1 if i in label_set else 0 for i in range(self.num_label)]
text_int = [0] # TODO concatenate all text pieces
item = {"text": text_int, "name": name}
return item, label, aclip_file, frame_file, frame_emb_file
def blocking_io(self, fname):
try:
image = np.load(fname)["v"]
except Exception as e:
image = np.random.rand(self.cfg.embed_dim).astype("float32")
warnings.warn(f"use random image instead because `{e}` {fname}.")
return image
#@a.lru_cache(maxsize=100000)
async def _async_text2embed(self, fname):
#print(self._text2embed.cache_info())
# https://docs.python.org/3/library/asyncio-eventloop.html#executing-code-in-thread-or-process-pools
loop = asyncio.get_running_loop()
image = await loop.run_in_executor(None, self.blocking_io, fname)
return image
#@lru_cache(maxsize=100000)
#@clru_cache(maxsize=100000)
#@func.lru_cache(maxsize=100000)
#@cached(cache=LRUCache(maxsize=100000))
def _text2embed(self, fname):
#print(self._text2embed.cache_info())
        # cache does not work in multi-worker mode (i.e., DataLoader num_workers > 0)
# see https://discuss.pytorch.org/t/dataloader-re-initialize-dataset-after-each-iteration/32658
# and https://discuss.pytorch.org/t/dataloader-resets-dataset-state/27960
try:
image = np.load(fname)["v"]
except Exception as e:
image = np.random.rand(self.cfg.embed_dim).astype("float32")
warnings.warn(f"use random image instead because `{e}` {fname}.")
return image
def _image2embed(self, fname):
try:
image = np.load(fname)["v"]
except Exception as e:
image = np.random.rand(self.cfg.embed_dim).astype("float32")
warnings.warn(f"use random image instead because `{e}` {fname}.")
return image
def _image2numpy(self, fname):
if fname is not None:
try:
if fname.endswith(".npz"):
images = np.load(fname)
images = [images[key] for key in images.files if len(images[key]) != 0]
idx = np.random.choice(len(images), 1)[0] if self.train else int(np.ceil(len(images) / 2)) - 1
image = images[idx]
else:
image = PILImage.open(fname)
image = self.transform_image(image).cpu().numpy()
except Exception as e:
h = w = self.cfg.resolution
image = PILImage.fromarray(
(np.random.rand(h, w, 3) * 256).astype(np.uint8)
)
warnings.warn(f"use random image instead because `{e}` {fname}.")
image = self.transform_image(image).cpu().numpy()
else:
image = np.array([[[1]]])
return image
def _audio2numpy_clf(self, aclip_file, label):
wf, sr = torchaudio.load(aclip_file)
wf = wf[:1] #wf.mean(0, keepdim=True)
wf = wf - wf.mean()
sampler = np.random if self.cfg.np_rnd else random
#if self.train and sampler.random() < self.cfg.mixup_rate:
if not self.cfg.audio.eval_norms and self.train and sampler.random() < self.cfg.mixup_rate:
idx_mix = sampler.randint(0, self.length if self.cfg.np_rnd else self.length - 1)
_, label_mix, aclip_file, _, _ = self._process_item(idx_mix)
wf_mix, _ = torchaudio.load(aclip_file)
wf_mix = wf_mix[:1] #wf_mix.mean(0, keepdim=True)
wf_mix = wf_mix - wf_mix.mean()
wf_len = wf.shape[1]
wf_mix = wf_mix[:, :wf_len]
npad = wf_len - wf_mix.shape[1]
if npad > 0:
wf_mix = F.pad(wf_mix, (0, npad), mode='constant', value=0.)
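            # mixup-style blending: the weight below is drawn from Beta(10, 10),
            # which is symmetric and concentrated near 0.5, so the two waveforms
            # are mixed in roughly equal proportions.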
            lambd = np.random.beta(10, 10)
'''
----------------------------------------------------------
@file: collision_avoidance.py
@date: June 10, 2020
@author: <NAME>
@e-mail: <EMAIL>
@brief: Implementation of collision avoidance algorithm
@version: 1.0
@licence: Open source
----------------------------------------------------------
'''
import math
import os
import sys
import time
import numpy as np
import rospy
from geometry_msgs.msg import Pose2D, Vector3
from std_msgs.msg import Float64, Float32MultiArray, String
from usv_perception.msg import obstacles_list
# Class definition for easy debugging
class Color():
RED = "\033[1;31m"
BLUE = "\033[1;34m"
CYAN = "\033[1;36m"
GREEN = "\033[0;32m"
RESET = "\033[0;0m"
BOLD = "\033[;1m"
REVERSE = "\033[;7m"
class Obstacle:
def __init__(self):
        self.x = 0
self.y = 0
self.radius = 0
self.teta = 0
self.alpha = 0
self.collision_flag = 0
self.past_collision_flag = 0
self.total_radius = 0
class Boat:
def __init__(self, radius=0):
self.radius = radius
self.ned_x = 0
self.ned_y = 0
self.yaw = 0
self.u = 0
self.v = 0
self.vel = 0
self.bearing = 0
class CollisionAvoidance:
def __init__(self, exp_offset=0, safety_radius=0, u_max=0, u_min=0,
exp_gain=0, chi_psi=0, r_max=0, obstacle_mode=0):
self.safety_radius = safety_radius
self.u_max = u_max
self.u_min = u_min
self.chi_psi = chi_psi
self.exp_gain = exp_gain
self.exp_offset = exp_offset
self.r_max = r_max
self.obstacle_mode = obstacle_mode
self.obs_list = []
self.vel_list = []
self.u_psi = 0
self.u_r = 0
self.boat = Boat()
for i in range(0,21,1):
obstacle = Obstacle()
self.obs_list.append(obstacle)
def avoid(self, ak, x1, y1, input_list, boat):
'''
@name: avoid
@brief: If there is an impending collision, returns the velocity and
angle to avoid.
@param: x1: x coordinate of the path starting-waypoint
y1: y coordinate of the path starting-waypoint
ak: angle from NED reference frame to path
                input_list: incoming obstacle list
boat: boat class structure
@return: bearing: bearing to avoid obstacles
velocity: velocity to avoid obstacles
'''
nearest_obs = []
self.vel_list = []
self.boat = boat
vel_nedx,vel_nedy = self.body_to_ned(self.boat.u,self.boat.v,0,0)
#print("self.u: " + str(self.boat.u))
#print("self.boat.v: " + str(self.boat.v))
#print("vel_nedx: " + str(vel_nedx))
#print("vel_nedy " + str(vel_nedy))
#print("ak: ", str(ak))
vel_ppx,vel_ppy = self.ned_to_pp(ak,0,0,vel_nedx,vel_nedy)
#ppx,ppy = self.ned_to_pp(ak,x1,y1,self.boat.ned_x,self.boat.ned_y)
self.check_obstacles(input_list)
for i in range(0,len(self.obs_list),1):
sys.stdout.write(Color.CYAN)
print("obstacle"+str(i))
sys.stdout.write(Color.RESET)
print("obsx: " + str(self.obs_list[i].x))
print("obsy: " + str(self.obs_list[i].y))
print("obsradius: " + str(self.obs_list[i].radius))
self.obs_list[i].total_radius = self.boat.radius + self.safety_radius + self.obs_list[i].radius
collision, distance = self.get_collision(0, 0, vel_ppy, vel_ppx,i)
#print("distance: " + str(distance))
if collision:
#u_obs = np.amin(u_obstacle)
avoid_distance = self.calculate_avoid_distance(self.boat.u, self.boat.v, i)
nearest_obs.append(avoid_distance - distance)
#print("avoid_distance: " + str(avoid_distance))
#print("distance: " + str(distance))
else:
nearest_obs.append(0)
#self.vel_list.append(self.vel)
if len(nearest_obs) > 0:
print('nearest_obs max: ' + str(np.max(nearest_obs)))
if np.max(nearest_obs)>0:
index = nearest_obs.index(np.max(nearest_obs))
if np.max(nearest_obs) > 0 and self.obs_list[index].alpha > 0:
self.boat.vel = np.min(self.vel_list)
sys.stdout.write(Color.BOLD)
print('index: ' + str(index))
sys.stdout.write(Color.RESET)
ppx,ppy = self.ned_to_pp(ak, x1, y1, self.boat.ned_x, self.boat.ned_y)
obs_ppx, obs_ppy = self.get_obstacle( ak, x1, y1, index)
self.dodge(vel_ppx, vel_ppy, ppx, ppy, obs_ppx, obs_ppy, index)
else:
#rospy.loginfo("nearest_obs: " + str(nearest_obs[index]))
sys.stdout.write(Color.BLUE)
print ('free')
sys.stdout.write(Color.RESET)
#sys.stdout.write(Color.BOLD)
#print("yaw: " + str(self.yaw))
#print("bearing: " + str(self.bearing))
#sys.stdout.write(Color.RESET)
else:
sys.stdout.write(Color.BLUE)
print ('no obstacles')
sys.stdout.write(Color.RESET)
print('vel:' + str(self.boat.vel))
return self.boat.bearing, self.boat.vel
def check_obstacles(self, input_list):
'''
@name: check_obstacles
        @brief: Receives incoming obstacles and checks if they must be merged.
        @param: input_list: incoming obstacle list
@return: --
'''
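        # Pairwise merge pass: obstacles that already overlap are skipped,
        # while obstacles whose gap is smaller than twice the boat's safety
        # envelope (boat radius + safety radius) are merged, since the boat
        # cannot pass between them; after each merge the scan restarts from
        # i = 0 until no pair qualifies.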
sys.stdout.write(Color.RED)
print("Check Obstacles:")
sys.stdout.write(Color.RESET)
for i in range(0,len(input_list),1):
#self.obstacle = Obstacle()
self.obs_list[i].x = input_list[i]['X']
# Negative y to compensate Lidar reference frame
self.obs_list[i].y = -input_list[i]['Y']
self.obs_list[i].radius = input_list[i]['radius']
print("self.obs_list[" + str(i)+ "].x: " + str(self.obs_list[i].x))
print("self.obs_list[i].y: " + str(self.obs_list[i].y))
print("self.obs_list[i].radius: " + str(self.obs_list[i].radius))
print("self.obs_list[i].collision_flag: " + str(self.obs_list[i].collision_flag))
i = 0
j = 0
while i < (len(self.obs_list)-1):
j = i + 1
while j < len(self.obs_list):
x = pow(self.obs_list[i].x-self.obs_list[j].x, 2)
y = pow(self.obs_list[i].y-self.obs_list[j].y, 2)
radius = self.obs_list[i].radius + self.obs_list[j].radius
distance_centers = pow(x+y, 0.5)
distance = distance_centers - radius
#print("distance between i:" + str(i)+ " and j:" + str(j) + " = "+ str(distance))
#print("boat distance: " + str((self.boat.radius + self.safety_radius)*2))
if distance < 0:
j = j + 1
elif distance <= (self.boat.radius + self.safety_radius)*2:
x,y,radius = self.merge_obstacles(i,j, distance_centers)
print("self.obs_list[i].y: " + str(self.obs_list[i].y))
print("self.obs_list[j].y: " + str(self.obs_list[j].y))
self.obs_list[i].x = x
self.obs_list[i].y = y
self.obs_list[i].radius = radius
print("self.obs_list[ij].y: " + str(self.obs_list[i].y))
print("self.obs_list[ij].radius: " + str(self.obs_list[i].radius))
self.obs_list[j].x = x
#print("self.obs_list[i].x: " + str(self.obs_list[j].x))
self.obs_list[j].y = y
self.obs_list[j].radius = radius
i = 0
else:
j = j + 1
sys.stdout.write(Color.RED)
#print("Obstacle the same")
sys.stdout.write(Color.RESET)
#print("i: " + str(i))
#print("j: " + str(j))
i = i + 1
#print("done j")
#print(self.obs_list)
return self.obs_list
def merge_obstacles(self, i, j, distance_centers):
'''
@name: merge_obstacles
@brief: Calculates new obstacle center and radius for merged obstacles.
@param: i: first obstacle index
j: second obstacle index
distance_centers: distance of obstacles centers
@return: x: merged obstacle center x
y: merged obstacle center y
radius: merged obstacle radius
'''
# calculate centroid
x = (self.obs_list[i].x + self.obs_list[j].x)/2
y = (self.obs_list[i].y + self.obs_list[j].y)/2
        # calculate radius
        max_radius = max(self.obs_list[i].radius, self.obs_list[j].radius)
radius = distance_centers/2 + max_radius
sys.stdout.write(Color.RED)
print("Merged obstacle:" + str(radius))
sys.stdout.write(Color.RESET)
return(x,y,radius)
def get_collision(self, ppx, ppy, vel_ppy, vel_ppx, i):
'''
@name: get_collision
@brief: Calculates if there is an impending collision with an obstacle.
@param: ppx: boat parallel path position x
ppy: boat parallel path position y
vel_ppy: boat parallel path velocity y
vel_ppx: boat parallel path velocity x
i: obstacle index
@return: collision: 1 = collision 0 = non-collision
distance: distance to obstacle
'''
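        # Collision-cone test: alpha is the half-angle of the cone subtended by
        # the inflated obstacle and beta the angle between the velocity vector
        # and the line of sight to the obstacle center; a collision is
        # predicted when the velocity lies inside the cone (beta <= alpha) or
        # while a previous avoidance maneuver is still active (collision_flag).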
collision = 0
#print("Total Radius: " + str(total_radius))
x_pow = pow(self.obs_list[i].x - ppx,2)
y_pow = pow(self.obs_list[i].y - ppy,2)
distance = pow((x_pow + y_pow),0.5)
distance_free = distance - self.obs_list[i].total_radius
print("Distance_free: " + str(distance_free))
if distance < self.obs_list[i].total_radius:
rospy.logwarn("CRASH")
alpha_params = (self.obs_list[i].total_radius/distance)
alpha = math.asin(alpha_params)
beta = math.atan2(vel_ppy,vel_ppx)-math.atan2(self.obs_list[i].y-ppy,self.obs_list[i].x-ppx)
if beta > math.pi:
beta = beta - 2*math.pi
if beta < - math.pi:
beta = beta + 2*math.pi
beta = abs(beta)
if beta <= alpha or 1 == self.obs_list[i].collision_flag:
#print('beta: ' + str(beta))
#print('alpha: ' + str(alpha))
print("COLLISION")
collision = 1
self.obs_list[i].collision_flag = 1
self.calculate_avoid_angle(ppy, distance, ppx, i)
#self.get_velocity(distance_free, i)
else:
#self.obs_list[i].collision_flag = 0
            self.obs_list[i].teta = 0
self.get_velocity(distance_free, i)
return collision, distance
def calculate_avoid_angle(self, ppy, distance, ppx, i):
'''
@name: calculate_avoid_angle
@brief: Calculates angle needed to avoid obstacle
        @param: ppy: boat y coordinate in path reference frame
                distance: distance from center of boat to center of obstacle
                ppx: boat x coordinate in path reference frame
                i: obstacle index
@return: --
'''
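        # Geometry sketch: tangent = sqrt(d^2 - R^2) is the length of the
        # tangent line from the boat to the inflated obstacle circle (right
        # triangle boat / circle center / tangency point); teta below is the
        # additional heading change needed so the course clears the circle.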
#print("ppx: " + str(ppx) + " obs: " + str(self.obs_list[i].x))
#print("ppy: " + str(ppy) + " obs: " + str(self.obs_list[i].y))
self.obs_list[i].total_radius = self.obs_list[i].total_radius + .30
tangent_param = abs((distance - self.obs_list[i].total_radius) * (distance + self.obs_list[i].total_radius))
#print("distance: " + str(distance))
tangent = pow(tangent_param, 0.5)
#print("tangent: " + str(tangent))
teta = math.atan2(self.obs_list[i].total_radius,tangent)
#print("teta: " + str(teta))
gamma1 = math.asin(abs(ppy-self.obs_list[i].y)/distance)
#print("gamma1: " + str(gamma1))
gamma = ((math.pi/2) - teta) + gamma1
#print("gamma: " + str(gamma))
self.obs_list[i].alpha = (math.pi/2) - gamma
print("alpha: " + str(self.obs_list[i].alpha))
hb = abs(ppy-self.obs_list[i].y)/math.cos(self.obs_list[i].alpha)
#print("hb: " + str(hb))
b = self.obs_list[i].total_radius - hb
#print("i: " + str(i))
print("b: " + str(b))
self.obs_list[i].teta = math.atan2(b,tangent)
print("teta: " + str(self.obs_list[i].teta))
if self.obs_list[i].alpha < 0.0:
self.obs_list[i].collision_flag = 0
sys.stdout.write(Color.BOLD)
print("Collision flag off")
sys.stdout.write(Color.RESET)
def get_velocity(self, distance_free, i):
'''
@name: get_velocity
@brief: Calculates velocity needed to avoid obstacle
@param: distance_free: distance to collision
                i: obstacle index
@return: --
'''
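        # Two logistic gates in [0, 1]: u_r_obs grows with the free distance
        # (slow down near the obstacle) and u_psi_obs shrinks with the required
        # turn angle (slow down for sharp avoidance turns); the surge speed then
        # interpolates between u_min and u_max using the smallest (most
        # conservative) of these gates and the stored u_psi / u_r values.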
u_r_obs = 1/(1 + math.exp(-self.exp_gain*(distance_free*(1/5) - self.exp_offset)))
u_psi_obs = 1/(1 + math.exp(self.exp_gain*(abs(self.obs_list[i].teta)*self.chi_psi -self.exp_offset)))
#print("u_r_obs: " + str( u_r_obs))
#print("u_psi_obs" + str(u_psi_obs))
#print("Vel chosen: " + str(np.min([self.u_psi, self.u_r, u_r_obs, u_psi_obs])))
self.vel_list.append((self.u_max - self.u_min)*np.min([self.u_psi, self.u_r, u_r_obs, u_psi_obs]) + self.u_min)
def calculate_avoid_distance(self, vel_ppx, vel_ppy, i):
'''
@name: calculate_avoid_distance
        @brief: Calculates the distance at which it is necessary to leave the
                path to avoid the obstacle
        @param: vel_ppx: boat velocity x in path reference frame
                vel_ppy: boat velocity y in path reference frame
                i: obstacle index
        @return: avoid_distance: distance at which it is necessary to
                 leave the path to avoid the obstacle
'''
time = (self.obs_list[i].teta/self.r_max) + 3
#print("time: " + str(time))
eucledian_vel = pow((pow(vel_ppx,2) + pow(vel_ppy,2)),0.5)
#print("vel: " + str(eucledian_vel))
#print("self.boat.vel: " + str(self.boat.vel))
#avoid_distance = time * eucledian_vel + total_radius +.3
avoid_distance = time * self.boat.vel + self.obs_list[i].total_radius +.3 #+.5
return (avoid_distance)
def get_obstacle(self, ak, x1, y1, i):
'''
@name: get_obstacle
        @brief: Gets obstacle coordinates in parallel path reference frame
        @param: ak: coordinate frame angle difference
                x1: starting x coordinate
                y1: starting y coordinate
                i: obstacle index
        @return: obs_ppx: obstacle x in parallel path reference frame
                 obs_ppy: obstacle y in parallel path reference frame
'''
# NED obstacles
if (self.obstacle_mode == 0):
obs_ppx,obs_ppy = self.ned_to_pp(ak,x1,y1,self.obs_list[i].x,self.obs_list[i].y)
# Body obstacles
if (self.obstacle_mode == 1):
obs_nedx, obs_nedy = self.body_to_ned(self.obs_list[i].x, self.obs_list[i].y, self.boat.ned_x, self.boat.ned_y)
obs_ppx,obs_ppy = self.ned_to_pp(ak, x1, y1, obs_nedx, obs_nedy)
return(obs_ppx, obs_ppy)
def dodge(self, vel_ppx, vel_ppy , ppx, ppy, obs_ppx, obs_ppy, i):
'''
@name: dodge
        @brief: Chooses the avoidance side and computes the bearing needed
                to avoid the obstacle
        @param: vel_ppx: boat velocity x in path reference frame
                vel_ppy: boat velocity y in path reference frame
                ppx: boat x coordinate in path reference frame
                ppy: boat y coordinate in path reference frame
                obs_ppx: obstacle x coordinate in path reference frame
                obs_ppy: obstacle y coordinate in path reference frame
i: obstacle index
@return: --
'''
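        # Side selection: compare the y-components of the unit velocity vector
        # and of the unit line-of-sight vector to the obstacle, both expressed
        # in the path frame; the boat steers toward the tangent on the side the
        # velocity already favors (-teta = left, +teta = right).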
eucledian_vel = pow((pow(vel_ppx,2) + pow(vel_ppy,2)),0.5)
        # Euclidean velocity must be nonzero to avoid a division-by-zero error
if eucledian_vel != 0:
#print("vel x: " + str(vel_ppx))
#print("vel y: " + str(vel_ppy))
#print("obs_y: " + str(obs_y))
            # obstacle dead ahead; this special case avoids shaky behavior
if abs(self.obs_list[i].y) < 0.1:
angle_difference = self.boat.bearing - self.boat.yaw
#print("angle diference: " + str(angle_difference))
if 0.1 > abs(angle_difference) or 0 > (angle_difference):
self.boat.bearing = self.boat.yaw - self.obs_list[i].teta
sys.stdout.write(Color.RED)
print("center left -")
sys.stdout.write(Color.RESET)
else:
self.boat.bearing = self.boat.yaw + self.obs_list[i].teta
sys.stdout.write(Color.GREEN)
print("center right +")
sys.stdout.write(Color.RESET)
else:
eucledian_pos = pow((pow(obs_ppx - ppx,2) + pow(obs_ppy - ppy,2)),0.5)
#print("eucledian_vel " + str(eucledian_vel))
#print("eucladian_pos: " + str(eucledian_pos))
unit_vely = vel_ppy/eucledian_vel
unit_posy = (obs_ppy - ppy)/eucledian_pos
#print("unit_vely " + str(unit_vely))
#print("unit_posy: " + str(unit_posy))
if unit_vely <= unit_posy:
self.boat.bearing = self.boat.yaw - self.obs_list[i].teta
sys.stdout.write(Color.RED)
print("left -")
sys.stdout.write(Color.RESET)
'''
if (abs(self.avoid_angle) > (math.pi/2)):
self.avoid_angle = -math.pi/2
'''
else:
self.boat.bearing = self.boat.yaw + self.obs_list[i].teta
sys.stdout.write(Color.GREEN)
print("right +")
sys.stdout.write(Color.RESET)
'''
if (abs(self.avoid_angle) > (math.pi/3)):
self.avoid_angle = math.pi/2
'''
'''
if unit_vely <= unit_posy:
self.teta = -self.teta
else:
self.teta = self.teta
'''
def body_to_ned(self, x2, y2, offsetx, offsety):
'''
@name: body_to_ned
@brief: Coordinate transformation between body and NED reference frames.
@param: x2: target x coordinate in body reference frame
y2: target y coordinate in body reference frame
offsetx: offset x in ned reference frame
offsety: offset y in ned reference frame
@return: ned_x2: target x coordinate in ned reference frame
ned_y2: target y coordinate in ned reference frame
'''
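        # Planar rotation by the yaw angle followed by a translation:
        #   [ned_x2]   [cos(yaw) -sin(yaw)] [x2]   [offsetx]
        #   [ned_y2] = [sin(yaw)  cos(yaw)] [y2] + [offsety]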
p = np.array([x2, y2])
J = np.array([[math.cos(self.boat.yaw), -1*math.sin(self.boat.yaw)],
[math.sin(self.boat.yaw), math.cos(self.boat.yaw)]])
n = J.dot(p)
ned_x2 = n[0] + offsetx
ned_y2 = n[1] + offsety
return (ned_x2, ned_y2)
def ned_to_pp(self, ak, ned_x1, ned_y1, ned_x2, ned_y2):
'''
        @name: ned_to_pp
@brief: Coordinate transformation between NED and body reference frames.
@param: ak: angle difference from ned to parallel path
ned_x1: origin of parallel path x coordinate in ned reference
frame
ned_y1: origin of parallel path y coordinate in ned reference
frame
ned_x2: target x coordinate in ned reference frame
ned_y2: target y coordinate in ned reference frame
@return: pp_x2: target x coordinate in parallel path reference frame
pp_y2: target y coordinate in parallel path reference frame
'''
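        # Inverse transform: translate by the path origin, then rotate by -ak
        # (hence the matrix inversion below) to express the point in the
        # parallel path frame.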
n = np.array([ned_x2 - ned_x1, ned_y2 - ned_y1])
J = np.array([[math.cos(ak), -1*math.sin(ak)],
[math.sin(ak), math.cos(ak)]])
        J = np.linalg.inv(J)
import plotly.graph_objects as go
import plotly.express as px
import numpy as np
import matplotlib.pyplot as plt
def pair_to_arr(p):
return p.birth(), p.death(), p.dim()
def process_pairs(ps, remove_zeros=True):
a = []
for p in ps:
a.append([*pair_to_arr(p)])
a = np.array(a)
lens = np.abs(a[:,1] - a[:,0])
if remove_zeros:
a = a[lens > 0]
return a
def essential_pair_filter(a, tub=np.inf):
"""
tub - upper bound on what will be displayed
"""
return np.max(np.abs(a), axis=1) >= tub
def non_essential_pair_filter(a, tub=np.inf):
"""
tub - upper bound on what will be displayed
"""
lens = np.abs(a[:,1] - a[:,0])
f = lens < tub
return f
def PD_uncertainty(ps, uncertainty_length = 0.5, remove_zeros=True, show_legend=True, tmax=0.0, tmin=0.0, **kwargs):
fig, ax = plt.subplots(**kwargs)
a = process_pairs(ps, remove_zeros)
dims = np.array(a[:,2], dtype=int) # homology dimension
cs=plt.get_cmap('Set1')(dims) # set colors
issublevel = a[0,0] < a[0,1]
eps = essential_pair_filter(a)
tmax = np.max((tmax, np.ma.masked_invalid(a[:,:2]).max()))
tmin = np.min((tmin, np.ma.masked_invalid(a[:,:2]).min()))
if tmax == tmin:
# handle identical tmax, tmin
tmax = tmax*1.1 + 1.0
span = tmax - tmin
inf_to = tmax + span/10
minf_to = tmin - span/10
# set axes
xbnds = (minf_to -span/20, inf_to+span/20)
ybnds = (minf_to-span/20, inf_to+span/20)
ax.set_xlim(xbnds)
ax.set_ylim(ybnds)
ax.set_aspect('equal')
# add visual lines
ax.plot(xbnds, ybnds, '--k')
    x_array = np.linspace(*xbnds, 10)
#!/usr/bin/python -u
import argparse
import numpy as np
import scipy.spatial
import data
parser = argparse.ArgumentParser(description='Evaluate embeddings via album-level distance metrics')
parser.add_argument('--data', '-d', required=True, help='Training data directory')
parser.add_argument('--file', '-f', required=True, help='Embeddings file (numpy format)')
parser.add_argument('--type', type=int, default=1, help='Distance type')
args = parser.parse_args()
train_songs = data.load_songs_info(args.data)
embeddings = np.load(args.file)
print('Distance type: {}'.format(args.type))
print("Loaded embeddings of shape {} from file '{}'".format(np.shape(embeddings), args.file))
def album_distance_1(embeddings, songs1, songs2):
x = 0
for song1 in songs1:
for song2 in songs2:
x += scipy.spatial.distance.cosine(embeddings[song1], embeddings[song2])
return x / (len(songs1) * len(songs2))
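# album_distance_1 above averages the cosine distance over all cross-album song
# pairs (average-linkage style); album_distance_2 below instead compares the
# two album centroids (mean embeddings), presumably with a single cosine
# distance, which is cheaper and less sensitive to outlier tracks.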
def album_distance_2(embeddings, songs1, songs2):
x1 = np.mean(embeddings[songs1], axis=0)
    x2 = np.mean(embeddings[songs2], axis=0)
import numpy as np
import pandas as pd
from sklearn.metrics.pairwise import cosine_distances
from analysis.data_parsing.word_vectorizer import WordVectorizer
from analysis.data_parsing.word_data_parser import WordDataParser
class TextInterestManager:
"""used to get an interest-vector from text sources (word, text).
interest-vectors are described using a pd.DataFrame instance (where vector's ones are only numeric fields)
usage example:
wv = WordVectorizer('../data/model.bin')
trans = TextInterestManager('../data/interest_data/interest_groups.csv', wv)
    text = '''прекрасный рыцарь спасет принцессу от любящего читать дракона'''
    (in English: "a fine knight will save the princess from a dragon that loves to read")
print('original text is:\n', text, '\ntrained high-meanings:\n')
trans.print_interest(trans.text_to_interest(text), head=20)
"""
def __init__(self, df, vectorizer: WordVectorizer = None):
if isinstance(df, str):
df = pd.read_csv(df, index_col=0)
        self.description = np.array(df['description'])
"""timeresp_test.py - test time response functions"""
from copy import copy
from distutils.version import StrictVersion
import numpy as np
import pytest
import scipy as sp
import control as ct
from control import StateSpace, TransferFunction, c2d, isctime, ss2tf, tf2ss
from control.exception import slycot_check
from control.tests.conftest import slycotonly
from control.timeresp import (_default_time_vector, _ideal_tfinal_and_dt,
forced_response, impulse_response,
initial_response, step_info, step_response)
class TSys:
"""Struct of test system"""
def __init__(self, sys=None, call_kwargs=None):
self.sys = sys
self.kwargs = call_kwargs if call_kwargs else {}
def __repr__(self):
"""Show system when debugging"""
return self.sys.__repr__()
class TestTimeresp:
@pytest.fixture
def tsystem(self, request):
"""Define some test systems"""
"""continuous"""
A = np.array([[1., -2.], [3., -4.]])
B = np.array([[5.], [7.]])
C = np.array([[6., 8.]])
D = np.array([[9.]])
siso_ss1 = TSys(StateSpace(A, B, C, D, 0))
siso_ss1.t = np.linspace(0, 1, 10)
siso_ss1.ystep = np.array([9., 17.6457, 24.7072, 30.4855, 35.2234,
39.1165, 42.3227, 44.9694, 47.1599,
48.9776])
siso_ss1.X0 = np.array([[.5], [1.]])
siso_ss1.yinitial = np.array([11., 8.1494, 5.9361, 4.2258, 2.9118,
1.9092, 1.1508, 0.5833, 0.1645, -0.1391])
ss1 = siso_ss1.sys
"""D=0, continuous"""
siso_ss2 = TSys(StateSpace(ss1.A, ss1.B, ss1.C, 0, 0))
siso_ss2.t = siso_ss1.t
siso_ss2.ystep = siso_ss1.ystep - 9
        siso_ss2.yinitial = siso_ss1.yinitial - 9
siso_ss2.yimpulse = np.array([86., 70.1808, 57.3753, 46.9975, 38.5766,
31.7344, 26.1668, 21.6292, 17.9245,
14.8945])
"""System with unspecified timebase"""
siso_ss2_dtnone = TSys(StateSpace(ss1.A, ss1.B, ss1.C, 0, None))
siso_ss2_dtnone.t = np.arange(0, 10, 1.)
siso_ss2_dtnone.ystep = np.array([0., 86., -72., 230., -360., 806.,
-1512., 3110., -6120., 12326.])
siso_tf1 = TSys(TransferFunction([1], [1, 2, 1], 0))
siso_tf2 = copy(siso_ss1)
siso_tf2.sys = ss2tf(siso_ss1.sys)
"""MIMO system, contains ``siso_ss1`` twice"""
mimo_ss1 = copy(siso_ss1)
A = np.zeros((4, 4))
A[:2, :2] = siso_ss1.sys.A
A[2:, 2:] = siso_ss1.sys.A
B = np.zeros((4, 2))
B[:2, :1] = siso_ss1.sys.B
B[2:, 1:] = siso_ss1.sys.B
C = np.zeros((2, 4))
C[:1, :2] = siso_ss1.sys.C
C[1:, 2:] = siso_ss1.sys.C
D = np.zeros((2, 2))
D[:1, :1] = siso_ss1.sys.D
D[1:, 1:] = siso_ss1.sys.D
mimo_ss1.sys = StateSpace(A, B, C, D)
"""MIMO system, contains ``siso_ss2`` twice"""
mimo_ss2 = copy(siso_ss2)
A = np.zeros((4, 4))
A[:2, :2] = siso_ss2.sys.A
A[2:, 2:] = siso_ss2.sys.A
B = np.zeros((4, 2))
B[:2, :1] = siso_ss2.sys.B
B[2:, 1:] = siso_ss2.sys.B
C = np.zeros((2, 4))
C[:1, :2] = siso_ss2.sys.C
C[1:, 2:] = siso_ss2.sys.C
D = np.zeros((2, 2))
mimo_ss2.sys = StateSpace(A, B, C, D, 0)
"""discrete"""
siso_dtf0 = TSys(TransferFunction([1.], [1., 0.], 1.))
siso_dtf0.t = np.arange(4)
siso_dtf0.yimpulse = [0., 1., 0., 0.]
siso_dtf1 = TSys(TransferFunction([1], [1, 1, 0.25], True))
siso_dtf1.t = np.arange(0, 5, 1)
siso_dtf1.ystep = np.array([0. , 0. , 1. , 0. , 0.75])
siso_dtf2 = TSys(TransferFunction([1], [1, 1, 0.25], 0.2))
siso_dtf2.t = np.arange(0, 5, 0.2)
siso_dtf2.ystep = np.array(
[0. , 0. , 1. , 0. , 0.75 , 0.25 ,
0.5625, 0.375 , 0.4844, 0.4219, 0.457 , 0.4375,
0.4482, 0.4424, 0.4456, 0.4438, 0.4448, 0.4443,
0.4445, 0.4444, 0.4445, 0.4444, 0.4445, 0.4444,
0.4444])
"""Time step which leads to rounding errors for time vector length"""
num = [-0.10966442, 0.12431949]
den = [1., -1.86789511, 0.88255018]
dt = 0.12493963338370018
siso_dtf3 = TSys(TransferFunction(num, den, dt))
siso_dtf3.t = np.linspace(0, 9*dt, 10)
siso_dtf3.ystep = np.array(
[ 0. , -0.1097, -0.1902, -0.2438, -0.2729,
-0.2799, -0.2674, -0.2377, -0.1934, -0.1368])
"""dtf1 converted statically, because Slycot and Scipy produce
different realizations, wich means different initial condtions,"""
siso_dss1 = copy(siso_dtf1)
siso_dss1.sys = StateSpace([[-1., -0.25],
[ 1., 0.]],
[[1.],
[0.]],
[[0., 1.]],
[[0.]],
True)
siso_dss1.X0 = [0.5, 1.]
siso_dss1.yinitial = np.array([1., 0.5, -0.75, 0.625, -0.4375])
siso_dss2 = copy(siso_dtf2)
siso_dss2.sys = tf2ss(siso_dtf2.sys)
mimo_dss1 = TSys(StateSpace(ss1.A, ss1.B, ss1.C, ss1.D, True))
mimo_dss1.t = np.arange(0, 5, 0.2)
mimo_dss2 = copy(mimo_ss1)
mimo_dss2.sys = c2d(mimo_ss1.sys, mimo_ss1.t[1]-mimo_ss1.t[0])
mimo_tf2 = copy(mimo_ss2)
tf_ = ss2tf(siso_ss2.sys)
mimo_tf2.sys = TransferFunction(
[[tf_.num[0][0], [0]], [[0], tf_.num[0][0]]],
[[tf_.den[0][0], [1]], [[1], tf_.den[0][0]]],
0)
mimo_dtf1 = copy(siso_dtf1)
tf_ = siso_dtf1.sys
mimo_dtf1.sys = TransferFunction(
[[tf_.num[0][0], [0]], [[0], tf_.num[0][0]]],
[[tf_.den[0][0], [1]], [[1], tf_.den[0][0]]],
True)
# for pole cancellation tests
pole_cancellation = TSys(TransferFunction(
[1.067e+05, 5.791e+04],
[10.67, 1.067e+05, 5.791e+04]))
no_pole_cancellation = TSys(TransferFunction(
[1.881e+06],
[188.1, 1.881e+06]))
# System Type 1 - Step response not stationary: G(s)=1/s(s+1)
siso_tf_type1 = TSys(TransferFunction(1, [1, 1, 0]))
siso_tf_type1.step_info = {
'RiseTime': np.NaN,
'SettlingTime': np.NaN,
'SettlingMin': np.NaN,
'SettlingMax': np.NaN,
'Overshoot': np.NaN,
'Undershoot': np.NaN,
'Peak': np.Inf,
'PeakTime': np.Inf,
'SteadyStateValue': np.NaN}
# SISO under shoot response and positive final value
# G(s)=(-s+1)/(s²+s+1)
siso_tf_kpos = TSys(TransferFunction([-1, 1], [1, 1, 1]))
siso_tf_kpos.step_info = {
'RiseTime': 1.242,
'SettlingTime': 9.110,
'SettlingMin': 0.90,
'SettlingMax': 1.208,
'Overshoot': 20.840,
'Undershoot': 28.0,
'Peak': 1.208,
'PeakTime': 4.282,
'SteadyStateValue': 1.0}
# SISO under shoot response and negative final value
# k=-1 G(s)=-(-s+1)/(s²+s+1)
siso_tf_kneg = TSys(TransferFunction([1, -1], [1, 1, 1]))
siso_tf_kneg.step_info = {
'RiseTime': 1.242,
'SettlingTime': 9.110,
'SettlingMin': -1.208,
'SettlingMax': -0.90,
'Overshoot': 20.840,
'Undershoot': 28.0,
'Peak': 1.208,
'PeakTime': 4.282,
'SteadyStateValue': -1.0}
siso_tf_asymptotic_from_neg1 = TSys(TransferFunction([-1, 1], [1, 1]))
siso_tf_asymptotic_from_neg1.step_info = {
'RiseTime': 2.197,
'SettlingTime': 4.605,
'SettlingMin': 0.9,
'SettlingMax': 1.0,
'Overshoot': 0,
'Undershoot': 100.0,
'Peak': 1.0,
'PeakTime': 0.0,
'SteadyStateValue': 1.0}
siso_tf_asymptotic_from_neg1.kwargs = {
'step_info': {'T': np.arange(0, 5, 1e-3)}}
# example from matlab online help
# https://www.mathworks.com/help/control/ref/stepinfo.html
siso_tf_step_matlab = TSys(TransferFunction([1, 5, 5],
[1, 1.65, 5, 6.5, 2]))
siso_tf_step_matlab.step_info = {
'RiseTime': 3.8456,
'SettlingTime': 27.9762,
'SettlingMin': 2.0689,
'SettlingMax': 2.6873,
'Overshoot': 7.4915,
'Undershoot': 0,
'Peak': 2.6873,
'PeakTime': 8.0530,
'SteadyStateValue': 2.5}
A = [[0.68, -0.34],
[0.34, 0.68]]
B = [[0.18, -0.05],
[0.04, 0.11]]
C = [[0, -1.53],
[-1.12, -1.10]]
D = [[0, 0],
[0.06, -0.37]]
mimo_ss_step_matlab = TSys(StateSpace(A, B, C, D, 0.2))
mimo_ss_step_matlab.kwargs['step_info'] = {'T': 4.6}
mimo_ss_step_matlab.step_info = [[
{'RiseTime': 0.6000,
'SettlingTime': 3.0000,
'SettlingMin': -0.5999,
'SettlingMax': -0.4689,
'Overshoot': 15.5072,
'Undershoot': 0.,
'Peak': 0.5999,
'PeakTime': 1.4000,
'SteadyStateValue': -0.5193},
{'RiseTime': 0.,
'SettlingTime': 3.6000,
'SettlingMin': -0.2797,
'SettlingMax': -0.1043,
'Overshoot': 118.9918,
'Undershoot': 0,
'Peak': 0.2797,
'PeakTime': .6000,
'SteadyStateValue': -0.1277}],
[{'RiseTime': 0.4000,
'SettlingTime': 2.8000,
'SettlingMin': -0.6724,
'SettlingMax': -0.5188,
'Overshoot': 24.6476,
'Undershoot': 11.1224,
'Peak': 0.6724,
'PeakTime': 1,
'SteadyStateValue': -0.5394},
{'RiseTime': 0.0000, # (*)
'SettlingTime': 3.4000,
'SettlingMin': -0.4350, # (*)
'SettlingMax': -0.1485,
'Overshoot': 132.0170,
'Undershoot': 0.,
'Peak': 0.4350,
'PeakTime': .2,
'SteadyStateValue': -0.1875}]]
# (*): MATLAB gives 0.4 for RiseTime and -0.1034 for
# SettlingMin, but it is unclear what 10% and 90% of
# the steady state response mean, when the step for
# this channel does not start at 0.
siso_ss_step_matlab = copy(mimo_ss_step_matlab)
siso_ss_step_matlab.sys = siso_ss_step_matlab.sys[1, 0]
siso_ss_step_matlab.step_info = siso_ss_step_matlab.step_info[1][0]
Ta = [[siso_tf_kpos, siso_tf_kneg, siso_tf_step_matlab],
[siso_tf_step_matlab, siso_tf_kpos, siso_tf_kneg]]
mimo_tf_step_info = TSys(TransferFunction(
[[Ti.sys.num[0][0] for Ti in Tr] for Tr in Ta],
[[Ti.sys.den[0][0] for Ti in Tr] for Tr in Ta]))
mimo_tf_step_info.step_info = [[Ti.step_info for Ti in Tr]
for Tr in Ta]
# enforce enough sample points for all channels (they have different
# characteristics)
mimo_tf_step_info.kwargs['step_info'] = {'T_num': 2000}
systems = locals()
if isinstance(request.param, str):
return systems[request.param]
else:
return [systems[sys] for sys in request.param]
@pytest.mark.parametrize(
"kwargs",
[{},
{'X0': 0},
{'X0': np.array([0, 0])},
{'X0': 0, 'return_x': True},
])
@pytest.mark.parametrize("tsystem", ["siso_ss1"], indirect=True)
def test_step_response_siso(self, tsystem, kwargs):
"""Test SISO system step response"""
sys = tsystem.sys
t = tsystem.t
yref = tsystem.ystep
# SISO call
out = step_response(sys, T=t, **kwargs)
tout, yout = out[:2]
assert len(out) == (3 if ('return_x', True) in kwargs.items() else 2)
np.testing.assert_array_almost_equal(tout, t)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
@pytest.mark.parametrize("tsystem", ["mimo_ss1"], indirect=True)
def test_step_response_mimo(self, tsystem):
"""Test MIMO system, which contains ``siso_ss1`` twice."""
sys = tsystem.sys
t = tsystem.t
yref = tsystem.ystep
_t, y_00 = step_response(sys, T=t, input=0, output=0)
_t, y_11 = step_response(sys, T=t, input=1, output=1)
np.testing.assert_array_almost_equal(y_00, yref, decimal=4)
np.testing.assert_array_almost_equal(y_11, yref, decimal=4)
@pytest.mark.parametrize("tsystem", ["mimo_ss1"], indirect=True)
def test_step_response_return(self, tsystem):
"""Verify continuous and discrete time use same return conventions."""
sysc = tsystem.sys
sysd = c2d(sysc, 1) # discrete time system
Tvec = np.linspace(0, 10, 11) # make sure to use integer times 0..10
Tc, youtc = step_response(sysc, Tvec, input=0)
Td, youtd = step_response(sysd, Tvec, input=0)
np.testing.assert_array_equal(Tc.shape, Td.shape)
np.testing.assert_array_equal(youtc.shape, youtd.shape)
@pytest.mark.parametrize("dt", [0, 1], ids=["continuous", "discrete"])
def test_step_nostates(self, dt):
"""Constant system, continuous and discrete time.
gh-374 "Bug in step_response()"
"""
sys = TransferFunction([1], [1], dt)
t, y = step_response(sys)
np.testing.assert_allclose(y, np.ones(len(t)))
def assert_step_info_match(self, sys, info, info_ref):
"""Assert reasonable step_info accuracy."""
if sys.isdtime(strict=True):
dt = sys.dt
else:
_, dt = _ideal_tfinal_and_dt(sys, is_step=True)
for k in ['RiseTime', 'SettlingTime', 'PeakTime']:
np.testing.assert_allclose(info[k], info_ref[k], atol=dt,
err_msg=f"{k} does not match")
for k in ['Overshoot', 'Undershoot', 'Peak', 'SteadyStateValue']:
np.testing.assert_allclose(info[k], info_ref[k], rtol=5e-3,
err_msg=f"{k} does not match")
# steep gradient right after RiseTime
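# For fast-rising references, a one-sample offset can change the sampled
# value by up to roughly (0.8 * |yfinal| / RiseTime) * dt (the 10%-90%
# rise slope times the sample period), so SettlingMin/SettlingMax are
# only compared tightly when the reference extremum clearly exceeds that
# band, as computed below.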
absrefinf = np.abs(info_ref['SteadyStateValue'])
if info_ref['RiseTime'] > 0:
y_next_sample_max = 0.8*absrefinf/info_ref['RiseTime']*dt
else:
y_next_sample_max = 0
for k in ['SettlingMin', 'SettlingMax']:
if (np.abs(info_ref[k]) - 0.9 * absrefinf) > y_next_sample_max:
# local min/max peak well after signal has risen
np.testing.assert_allclose(info[k], info_ref[k], rtol=1e-3)
@pytest.mark.parametrize(
"yfinal", [True, False], ids=["yfinal", "no yfinal"])
@pytest.mark.parametrize(
"systype, time_2d",
[("ltisys", False),
("time response", False),
("time response", True),
],
ids=["ltisys", "time response (n,)", "time response (1,n)"])
@pytest.mark.parametrize(
"tsystem",
["siso_tf_step_matlab",
"siso_ss_step_matlab",
"siso_tf_kpos",
"siso_tf_kneg",
"siso_tf_type1",
"siso_tf_asymptotic_from_neg1"],
indirect=["tsystem"])
def test_step_info(self, tsystem, systype, time_2d, yfinal):
"""Test step info for SISO systems."""
step_info_kwargs = tsystem.kwargs.get('step_info', {})
if systype == "time response":
# simulate long enough for steady state value
tfinal = 3 * tsystem.step_info['SettlingTime']
if np.isnan(tfinal):
pytest.skip("test system does not settle")
t, y = step_response(tsystem.sys, T=tfinal, T_num=5000)
sysdata = y
step_info_kwargs['T'] = t[np.newaxis, :] if time_2d else t
else:
sysdata = tsystem.sys
if yfinal:
step_info_kwargs['yfinal'] = tsystem.step_info['SteadyStateValue']
info = step_info(sysdata, **step_info_kwargs)
self.assert_step_info_match(tsystem.sys, info, tsystem.step_info)
@pytest.mark.parametrize(
"yfinal", [True, False], ids=["yfinal", "no_yfinal"])
@pytest.mark.parametrize(
"systype", ["ltisys", "time response"])
@pytest.mark.parametrize(
"tsystem",
['mimo_ss_step_matlab',
pytest.param('mimo_tf_step_info', marks=slycotonly)],
indirect=["tsystem"])
def test_step_info_mimo(self, tsystem, systype, yfinal):
"""Test step info for MIMO systems."""
step_info_kwargs = tsystem.kwargs.get('step_info', {})
if systype == "time response":
tfinal = 3 * max([S['SettlingTime']
for Srow in tsystem.step_info for S in Srow])
t, y = step_response(tsystem.sys, T=tfinal, T_num=5000)
sysdata = y
step_info_kwargs['T'] = t
else:
sysdata = tsystem.sys
if yfinal:
step_info_kwargs['yfinal'] = [[S['SteadyStateValue']
for S in Srow]
for Srow in tsystem.step_info]
info_dict = step_info(sysdata, **step_info_kwargs)
for i, row in enumerate(info_dict):
for j, info in enumerate(row):
self.assert_step_info_match(tsystem.sys,
info, tsystem.step_info[i][j])
def test_step_info_invalid(self):
"""Call step_info with invalid parameters."""
with pytest.raises(ValueError, match="time series data convention"):
step_info(["not numeric data"])
with pytest.raises(ValueError, match="time series data convention"):
step_info(np.ones((10, 15))) # invalid shape
with pytest.raises(ValueError, match="matching time vector"):
step_info(np.ones(15), T=np.linspace(0, 1, 20)) # time too long
with pytest.raises(ValueError, match="matching time vector"):
step_info(np.ones((2, 2, 15))) # no time vector
@pytest.mark.parametrize("tsystem",
[("no_pole_cancellation", "pole_cancellation")],
indirect=True)
def test_step_pole_cancellation(self, tsystem):
# confirm that pole-zero cancellation doesn't perturb results
# https://github.com/python-control/python-control/issues/440
step_info_no_cancellation = step_info(tsystem[0].sys)
step_info_cancellation = step_info(tsystem[1].sys)
self.assert_step_info_match(tsystem[0].sys,
step_info_no_cancellation,
step_info_cancellation)
@pytest.mark.parametrize(
"tsystem, kwargs",
[("siso_ss2", {}),
("siso_ss2", {'X0': 0}),
("siso_ss2", {'X0': np.array([0, 0])}),
("siso_ss2", {'X0': 0, 'return_x': True}),
("siso_dtf0", {})],
indirect=["tsystem"])
def test_impulse_response_siso(self, tsystem, kwargs):
"""Test impulse response of SISO systems."""
sys = tsystem.sys
t = tsystem.t
yref = tsystem.yimpulse
out = impulse_response(sys, T=t, **kwargs)
tout, yout = out[:2]
assert len(out) == (3 if ('return_x', True) in kwargs.items() else 2)
np.testing.assert_array_almost_equal(tout, t)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
@pytest.mark.parametrize("tsystem", ["mimo_ss2"], indirect=True)
def test_impulse_response_mimo(self, tsystem):
""""Test impulse response of MIMO systems."""
sys = tsystem.sys
t = tsystem.t
yref = tsystem.yimpulse
_t, y_00 = impulse_response(sys, T=t, input=0, output=0)
np.testing.assert_array_almost_equal(y_00, yref, decimal=4)
_t, y_11 = impulse_response(sys, T=t, input=1, output=1)
np.testing.assert_array_almost_equal(y_11, yref, decimal=4)
yref_notrim = np.zeros((2, len(t)))
yref_notrim[:1, :] = yref
_t, yy = impulse_response(sys, T=t, input=0)
np.testing.assert_array_almost_equal(yy[:,0,:], yref_notrim, decimal=4)
@pytest.mark.skipif(StrictVersion(sp.__version__) < "1.3",
reason="requires SciPy 1.3 or greater")
@pytest.mark.parametrize("tsystem", ["siso_tf1"], indirect=True)
def test_discrete_time_impulse(self, tsystem):
# discrete time impulse sampled version should match cont time
dt = 0.1
t = np.arange(0, 3, dt)
sys = tsystem.sys
sysdt = sys.sample(dt, 'impulse')
np.testing.assert_array_almost_equal(impulse_response(sys, t)[1],
impulse_response(sysdt, t)[1])
@pytest.mark.parametrize("tsystem", ["siso_ss1"], indirect=True)
def test_impulse_response_warnD(self, tsystem):
"""Test warning about direct feedthrough"""
with pytest.warns(UserWarning, match="System has direct feedthrough"):
_ = impulse_response(tsystem.sys, tsystem.t)
@pytest.mark.parametrize(
"kwargs",
[{},
{'X0': 0},
{'X0': np.array([0.5, 1])},
{'X0': np.array([[0.5], [1]])},
{'X0': np.array([0.5, 1]), 'return_x': True},
])
@pytest.mark.parametrize("tsystem", ["siso_ss1"], indirect=True)
def test_initial_response(self, tsystem, kwargs):
"""Test initial response of SISO system"""
sys = tsystem.sys
t = tsystem.t
x0 = kwargs.get('X0', 0)
yref = tsystem.yinitial if np.any(x0) else np.zeros_like(t)
out = initial_response(sys, T=t, **kwargs)
tout, yout = out[:2]
assert len(out) == (3 if ('return_x', True) in kwargs.items() else 2)
np.testing.assert_array_almost_equal(tout, t)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
@pytest.mark.parametrize("tsystem", ["mimo_ss1"], indirect=True)
def test_initial_response_mimo(self, tsystem):
"""Test initial response of MIMO system"""
sys = tsystem.sys
t = tsystem.t
x0 = np.array([[.5], [1.], [.5], [1.]])
yref = tsystem.yinitial
yref_notrim = np.broadcast_to(yref, (2, len(t)))
_t, y_00 = initial_response(sys, T=t, X0=x0, input=0, output=0)
np.testing.assert_array_almost_equal(y_00, yref, decimal=4)
_t, y_11 = initial_response(sys, T=t, X0=x0, input=0, output=1)
np.testing.assert_array_almost_equal(y_11, yref, decimal=4)
_t, yy = initial_response(sys, T=t, X0=x0)
np.testing.assert_array_almost_equal(yy, yref_notrim, decimal=4)
@pytest.mark.parametrize("tsystem",
["siso_ss1", "siso_tf2"],
indirect=True)
def test_forced_response_step(self, tsystem):
"""Test forced response of SISO systems as step response"""
sys = tsystem.sys
t = tsystem.t
u = np.ones_like(t, dtype=float)
yref = tsystem.ystep
tout, yout = forced_response(sys, t, u)
np.testing.assert_array_almost_equal(tout, t)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
@pytest.mark.parametrize("u",
[np.zeros((10,), dtype=float),
0] # special algorithm
)
@pytest.mark.parametrize("tsystem", ["siso_ss1", "siso_tf2"],
indirect=True)
def test_forced_response_initial(self, tsystem, u):
"""Test forced response of SISO system as intitial response."""
sys = tsystem.sys
t = tsystem.t
x0 = tsystem.X0
yref = tsystem.yinitial
if isinstance(sys, StateSpace):
tout, yout = forced_response(sys, t, u, X0=x0)
np.testing.assert_array_almost_equal(tout, t)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
else:
with pytest.warns(UserWarning, match="Non-zero initial condition "
"given for transfer function"):
tout, yout = forced_response(sys, t, u, X0=x0)
@pytest.mark.parametrize("tsystem, useT",
[("mimo_ss1", True),
("mimo_dss2", True),
("mimo_dss2", False)],
indirect=["tsystem"])
def test_forced_response_mimo(self, tsystem, useT):
"""Test forced response of MIMO system"""
# first system: initial value, second system: step response
sys = tsystem.sys
t = tsystem.t
u = np.array([[0., 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1., 1, 1, 1, 1, 1, 1, 1, 1, 1]])
x0 = np.array([[.5], [1], [0], [0]])
yref = np.vstack([tsystem.yinitial, tsystem.ystep])
if useT:
_t, yout = forced_response(sys, t, u, x0)
else:
_t, yout = forced_response(sys, U=u, X0=x0)
np.testing.assert_array_almost_equal(yout, yref, decimal=4)
@pytest.mark.usefixtures("editsdefaults")
def test_forced_response_legacy(self):
"""Make sure that the legacy version of forced_response works."""
# Define a system for testing
sys = ct.rss(2, 1, 1)
T = np.linspace(0, 10, 10)
U = np.sin(T)
ct.config.use_legacy_defaults("0.8.4")
# forced_response returns x by default
t, y = ct.step_response(sys, T)
t, y, x = ct.forced_response(sys, T, U)
ct.config.use_legacy_defaults("0.9.0")
# forced_response returns input/output by default
t, y = ct.step_response(sys, T)
t, y = ct.forced_response(sys, T, U)
t, y, x = ct.forced_response(sys, T, U, return_x=True)
@pytest.mark.parametrize(
"tsystem, fr_kwargs, refattr",
[pytest.param("siso_ss1",
{'T': np.linspace(0, 1, 10)}, 'yinitial',
id="ctime no U"),
pytest.param("siso_dss1",
{'T': np.arange(0, 5, 1,)}, 'yinitial',
id="dt=True, no U"),
pytest.param("siso_dtf1",
{'U': np.ones(5,)}, 'ystep',
id="dt=True, no T"),
pytest.param("siso_dtf2",
{'U': np.ones(25,)}, 'ystep',
id="dt=0.2, no T"),
pytest.param("siso_ss2_dtnone",
{'U': np.ones(10,)}, 'ystep',
id="dt=None, no T"),
pytest.param("siso_dtf3",
{'U': np.ones(10,)}, 'ystep',
id="dt with rounding error, no T"),
],
indirect=["tsystem"])
def test_forced_response_T_U(self, tsystem, fr_kwargs, refattr):
"""Test documented forced_response behavior for parameters T and U."""
if refattr == 'yinitial':
fr_kwargs['X0'] = tsystem.X0
t, y = forced_response(tsystem.sys, **fr_kwargs)
np.testing.assert_allclose(t, tsystem.t)
np.testing.assert_allclose(y, getattr(tsystem, refattr),
rtol=1e-3, atol=1e-5)
@pytest.mark.parametrize("tsystem", ["siso_ss1"], indirect=True)
def test_forced_response_invalid_c(self, tsystem):
"""Test invalid parameters."""
with pytest.raises(TypeError,
match="StateSpace.*or.*TransferFunction"):
forced_response("not a system")
with pytest.raises(ValueError, match="T.*is mandatory for continuous"):
forced_response(tsystem.sys)
with pytest.raises(ValueError, match="time values must be equally "
"spaced"):
forced_response(tsystem.sys, [0, 0.1, 0.12, 0.4])
@pytest.mark.parametrize("tsystem", ["siso_dss2"], indirect=True)
def test_forced_response_invalid_d(self, tsystem):
"""Test invalid parameters dtime with sys.dt > 0."""
with pytest.raises(ValueError, match="can't both be zero"):
forced_response(tsystem.sys)
with pytest.raises(ValueError, match="must have same elements"):
forced_response(tsystem.sys,
T=tsystem.t, U=np.random.randn(1, 12))
with pytest.raises(ValueError, match="must have same elements"):
forced_response(tsystem.sys,
T=tsystem.t, U=np.random.randn(12))
with pytest.raises(ValueError, match="must match sampling time"):
forced_response(tsystem.sys, T=tsystem.t*0.9)
with pytest.raises(ValueError, match="must be multiples of "
"sampling time"):
forced_response(tsystem.sys, T=tsystem.t*1.1)
# but this is ok
forced_response(tsystem.sys, T=tsystem.t*2)
@pytest.mark.parametrize("u, x0, xtrue",
[(np.zeros((10,)),
np.array([2., 3.]),
np.vstack([np.linspace(2, 5, 10),
np.full((10,), 3)])),
(np.ones((10,)),
np.array([0., 0.]),
np.vstack([0.5 * np.linspace(0, 1, 10)**2,
np.linspace(0, 1, 10)])),
(np.linspace(0, 1, 10),
np.array([0., 0.]),
np.vstack([np.linspace(0, 1, 10)**3 / 6.,
np.linspace(0, 1, 10)**2 / 2.]))],
ids=["zeros", "ones", "linear"])
def test_lsim_double_integrator(self, u, x0, xtrue):
"""Test forced response of double integrator"""
# Note: scipy.signal.lsim fails if A is not invertible
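# Double integrator dynamics: x1' = x2, x2' = u, y = x1. The xtrue
# parametrizations above are the exact integrals of u, e.g. for u = 1
# and x0 = 0: x2(t) = t and x1(t) = t**2 / 2 (the "ones" case).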
A = np.array([[0., 1.],
[0., 0.]])
B = np.array([[0.],
[1.]])
C = np.array([[1., 0.]])
D = 0.
sys = StateSpace(A, B, C, D)
t = np.linspace(0, 1, 10)
_t, yout, xout = forced_response(sys, t, u, x0, return_x=True)
np.testing.assert_array_almost_equal(xout, xtrue, decimal=6)
ytrue = np.squeeze(np.asarray(C.dot(xtrue)))
np.testing.assert_array_almost_equal(yout, ytrue, decimal=6)
@slycotonly
def test_step_robustness(self):
"Test robustness os step_response against denomiantors: gh-240"
# Create 2 input, 2 output system
num = [[[0], [1]], [[1], [0]]]
den1 = [[[1], [1,1]], [[1, 4], [1]]]
sys1 = TransferFunction(num, den1)
den2 = [[[1], [1e-10, 1, 1]], [[1, 4], [1]]] # slight perturbation
sys2 = TransferFunction(num, den2)
t1, y1 = step_response(sys1, input=0, T=2, T_num=100)
t2, y2 = step_response(sys2, input=0, T=2, T_num=100)
np.testing.assert_array_almost_equal(y1, y2)
@pytest.mark.parametrize(
"tfsys, tfinal",
[(TransferFunction(1, [1, .5]), 13.81551), # pole at 0.5
(TransferFunction(1, [1, .5]).sample(.1), 25), # discrete pole at 0.5
(TransferFunction(1, [1, .5, 0]), 25)]) # poles at 0.5 and 0
def test_auto_generated_time_vector_tfinal(self, tfsys, tfinal):
"""Confirm a TF with a pole at p simulates for tfinal seconds"""
ideal_tfinal, ideal_dt = _ideal_tfinal_and_dt(tfsys)
np.testing.assert_allclose(ideal_tfinal, tfinal, rtol=1e-4)
T = _default_time_vector(tfsys)
np.testing.assert_allclose(T[-1], tfinal, atol=0.5*ideal_dt)
@pytest.mark.parametrize("wn, zeta", [(10, 0), (100, 0), (100, .1)])
def test_auto_generated_time_vector_dt_cont1(self, wn, zeta):
"""Confirm a TF with a natural frequency of wn rad/s gets a
dt of 1/(ratio*wn)"""
dtref = 0.25133 / wn
tfsys = TransferFunction(1, [1, 2*zeta*wn, wn**2])
np.testing.assert_almost_equal(_ideal_tfinal_and_dt(tfsys)[1], dtref,
decimal=5)
def test_auto_generated_time_vector_dt_cont2(self):
"""A sampled tf keeps its dt"""
wn = 100
zeta = .1
tfsys = TransferFunction(1, [1, 2*zeta*wn, wn**2]).sample(.1)
tfinal, dt = _ideal_tfinal_and_dt(tfsys)
np.testing.assert_almost_equal(dt, .1)
T, _ = initial_response(tfsys)
np.testing.assert_almost_equal(np.diff(T[:2]), [.1])
def test_default_timevector_long(self):
"""Test long time vector"""
# TF with fast oscillations simulates only 5000 time steps
# even with long tfinal
wn = 100
tfsys = TransferFunction(1, [1, 0, wn**2])
tout = _default_time_vector(tfsys, tfinal=100)
assert len(tout) == 5000
@pytest.mark.parametrize("fun", [step_response,
impulse_response,
initial_response])
def test_default_timevector_functions_c(self, fun):
"""Test that functions can calculate the time vector automatically"""
sys = TransferFunction(1, [1, .5, 0])
_tfinal, _dt = _ideal_tfinal_and_dt(sys)
# test impose number of time steps
tout, _ = fun(sys, T_num=10)
assert len(tout) == 10
# test impose final time
tout, _ = fun(sys, T=100.)
np.testing.assert_allclose(tout[-1], 100., atol=0.5*_dt)
@pytest.mark.parametrize("fun", [step_response,
impulse_response,
initial_response])
@pytest.mark.parametrize("dt", [0.1, 0.112])
def test_default_timevector_functions_d(self, fun, dt):
"""Test that functions can calculate the time vector automatically"""
sys = TransferFunction(1, [1, .5, 0], dt)
# test impose number of time steps is ignored with dt given
tout, _ = fun(sys, T_num=15)
assert len(tout) != 15
# test impose final time
tout, _ = fun(sys, 100)
np.testing.assert_allclose(tout[-1], 100., atol=0.5*dt)
@pytest.mark.parametrize("tsystem",
["siso_ss2", # continuous
"siso_tf1",
"siso_dss1", # unspecified sampling time
"siso_dtf1",
"siso_dss2", # matching timebase
"siso_dtf2",
"siso_ss2_dtnone", # undetermined timebase
"mimo_ss2", # MIMO
pytest.param("mimo_tf2", marks=slycotonly),
"mimo_dss1",
pytest.param("mimo_dtf1", marks=slycotonly),
],
indirect=True)
@pytest.mark.parametrize("fun", [step_response,
impulse_response,
initial_response,
forced_response])
@pytest.mark.parametrize("squeeze", [None, True, False])
def test_time_vector(self, tsystem, fun, squeeze, matarrayout):
"""Test time vector handling and correct output convention
gh-239, gh-295
"""
sys = tsystem.sys
kw = {}
if hasattr(tsystem, "t"):
t = tsystem.t
kw['T'] = t
if fun == forced_response:
kw['U'] = np.vstack([np.sin(t) for i in range(sys.ninputs)])
elif fun == forced_response and isctime(sys, strict=True):
pytest.skip("No continuous forced_response without time vector.")
if hasattr(sys, "nstates"):
kw['X0'] = np.arange(sys.nstates) + 1
if sys.ninputs > 1 and fun in [step_response, impulse_response]:
kw['input'] = 1
if squeeze is not None:
kw['squeeze'] = squeeze
out = fun(sys, **kw)
tout, yout = out[:2]
assert tout.ndim == 1
if hasattr(tsystem, 't'):
# tout should always match t, which has shape (n, )
np.testing.assert_allclose(tout, tsystem.t)
elif fun == forced_response and sys.dt in [None, True]:
np.testing.assert_allclose(np.diff(tout), 1.)
if squeeze is False or not sys.issiso():
assert yout.shape[0] == sys.noutputs
assert yout.shape[-1] == tout.shape[0]
else:
assert yout.shape == tout.shape
if sys.isdtime(strict=True) and sys.dt is not True and not \
np.isclose(sys.dt, 0.5):
kw['T'] = np.arange(0, 5, 0.5)
"""
Stimulation protocols to run on the opsin models
* Neuro-engineering stimuli: ``step``, ``sinusoid``, ``chirp``, ``ramp``, ``delta``
* Opsin-specific protocols: ``rectifier``, ``shortPulse``, ``recovery``.
* The ``custom`` protocol can be used with arbitrary interpolation functions
"""
from __future__ import print_function, division
import warnings
import logging
import os
import abc
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl # for tick locators
from scipy.interpolate import InterpolatedUnivariateSpline as spline
# from scipy.optimize import curve_fit
from lmfit import Parameters
from pyrho.parameters import *
from pyrho.parameters import PyRhOobject, smallSignalAnalysis
from pyrho.utilities import * # times2cycles, cycles2times, plotLight, round_sig, expDecay, biExpDecay, findPeaks
from pyrho.expdata import * # import loadData
from pyrho.fitting import (fitFV, errFV, fitfV, errfV,
getRecoveryPeaks, fitRecovery)
from pyrho.models import *
from pyrho.simulators import * # For characterise()
from pyrho.config import *
from pyrho import config
__all__ = ['protocols', 'selectProtocol', 'characterise']
logger = logging.getLogger(__name__)
class Protocol(PyRhOobject): # , metaclass=ABCMeta
"""Common base class for all protocols."""
__metaclass__ = abc.ABCMeta
protocol = None
nRuns = None
Dt_delay = None
cycles = None
Dt_total = None
dt = None
phis = None
Vs = None
def __init__(self, params=None, saveData=True):
if params is None:
params = protParams[self.protocol]
self.RhO = None
self.dataTag = ""
self.saveData = saveData
self.plotPeakRecovery = False
self.plotStateVars = False
self.plotKinetics = False
self.setParams(params)
self.prepare()
self.t_start, self.t_end = 0, self.Dt_total
self.phi_ts = None
self.lam = 470 # Default wavelength [nm]
self.PD = None
self.Ifig = None
def __str__(self):
return self.protocol
def __repr__(self):
return "<PyRhO {} Protocol object (nRuns={}, nPhis={}, nVs={})>".format(self.protocol, self.nRuns, self.nPhis, self.nVs)
def __iter__(self):
"""Iterator to return the pulse sequence for the next trial."""
self.run = 0
self.phiInd = 0
self.vInd = 0
return self
def __next__(self):
"""Iterator to return the pulse sequence for the next trial."""
self.run += 1
if self.run > self.nRuns:
raise StopIteration
return self.getRunCycles(self.run - 1)
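# Example (sketch, assuming `prot` is a configured protocol instance):
#     for cycles, Dt_delay in prot:
#         ...  # one (cycles, Dt_delay) pair per run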
def prepare(self):
"""Function to set-up additional variables and make parameters
consistent after any changes.
"""
if np.isscalar(self.cycles): # Only 'on' duration specified
Dt_on = self.cycles
if hasattr(self, 'Dt_total'):
Dt_off = self.Dt_total - Dt_on - self.Dt_delay
else:
Dt_off = 0
self.cycles = np.asarray([[Dt_on, Dt_off]])
elif isinstance(self.cycles, (list, tuple, np.ndarray)):
if np.isscalar(self.cycles[0]):
self.cycles = [self.cycles] # Assume only one pulse
else:
raise TypeError('Unexpected type for cycles - expected a list or array!')
self.cycles = np.asarray(self.cycles)
self.nPulses = self.cycles.shape[0]
self.pulses, self.Dt_total = cycles2times(self.cycles, self.Dt_delay)
self.Dt_delays = np.array([pulse[0] for pulse in self.pulses], copy=True) # pulses[:,0] # Delay Durations #self.Dt_delays = np.array([self.Dt_delay] * self.nRuns)
self.Dt_ons = np.array(self.cycles[:, 0]) # self.Dt_ons = np.array([cycle[0] for cycle in self.cycles])
self.Dt_offs = np.array(self.cycles[:, 1]) # self.Dt_offs = np.array([cycle[1] for cycle in self.cycles])
if np.isscalar(self.phis):
self.phis = [self.phis] # np.asarray([self.phis])
self.phis.sort(reverse=True)
self.nPhis = len(self.phis)
if np.isscalar(self.Vs):
self.Vs = [self.Vs] # np.asarray([self.Vs])
self.Vs.sort(reverse=True)
self.nVs = len(self.Vs)
self.extraPrep()
return
def extraPrep(self):
pass
def genContainer(self):
return [[[None for v in range(self.nVs)]
for p in range(self.nPhis)]
for r in range(self.nRuns)]
def getShortestPeriod(self):
# min(self.Dt_delay, min(min(self.cycles)))
return np.amin(self.cycles[self.cycles.nonzero()])
def finish(self, PC, RhO):
pass
def getRunCycles(self, run):
return (self.cycles, self.Dt_delay)
def genPulseSet(self, genPulse=None):
"""Function to generate a set of spline functions to phi(t) simulations."""
if genPulse is None: # Default to square pulse generator
genPulse = self.genPulse
phi_ts = [[[None for pulse in range(self.nPulses)] for phi in range(self.nPhis)] for run in range(self.nRuns)]
for run in range(self.nRuns):
cycles, Dt_delay = self.getRunCycles(run)
pulses, Dt_total = cycles2times(cycles, Dt_delay)
for phiInd, phi in enumerate(self.phis):
for pInd, pulse in enumerate(pulses):
phi_ts[run][phiInd][pInd] = genPulse(run, phi, pulse)
self.phi_ts = phi_ts
return phi_ts
def genPulse(self, run, phi, pulse):
"""Default interpolation function for square pulses."""
pStart, pEnd = pulse
phi_t = spline([pStart, pEnd], [phi, phi], k=1, ext=1)
return phi_t
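# Example (sketch): a square pulse of amplitude 1e16 ph./mm^2/s between
# 5 and 15 ms. With k=1 and ext=1, the spline is constant inside
# [pStart, pEnd] and zero outside:
#     phi_t = self.genPulse(run=0, phi=1e16, pulse=(5, 15))
#     phi_t(10.)  # -> 1e16
#     phi_t(20.)  # -> 0.0 (ext=1 zeroes values outside the pulse)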
def genPlottingStimuli(self, genPulse=None, vInd=0):
"""Redraw stimulus functions in case data has been realigned."""
if genPulse is None:
genPulse = self.genPulse
# # for Dt_delay in len(self.Dt_delays):
# # self.Dt_delays -= self.PD.trials[run][phiInd][vInd]
phi_ts = [[[None for pulse in range(self.nPulses)] for phi in range(self.nPhis)] for run in range(self.nRuns)]
for run in range(self.nRuns):
#cycles, Dt_delay = self.getRunCycles(run)
#pulses, Dt_total = cycles2times(cycles, Dt_delay)
for phiInd, phi in enumerate(self.phis):
pc = self.PD.trials[run][phiInd][vInd]
# if pc.pulseAligned:
for p, pulse in enumerate(pc.pulses):
phi_ts[run][phiInd][p] = genPulse(run, pc.phi, pulse)
#self.phi_ts = self.genPulseSet()
return phi_ts
def getStimArray(self, run, phiInd, dt): # phi_ts, Dt_delay, cycles, dt):
"""Return a stimulus array (not spline) with the same sampling rate as
the photocurrent.
"""
cycles, Dt_delay = self.getRunCycles(run)
phi_ts = self.phi_ts[run][phiInd][:]
nPulses = cycles.shape[0]
assert(len(phi_ts) == nPulses)
#start, end = RhO.t[0], RhO.t[0]+Dt_delay #start, end = 0.00, Dt_delay
start, end = 0, Dt_delay
nSteps = int(round(((end-start)/dt)+1))
t = np.linspace(start, end, nSteps, endpoint=True)
phi_tV = np.zeros_like(t)
#_idx_pulses_ = np.empty([0,2],dtype=int) # Light on and off indexes for each pulse
for p in range(nPulses):
start = end
Dt_on, Dt_off = cycles[p, 0], cycles[p, 1]
end = start + Dt_on + Dt_off
nSteps = int(round(((end-start)/dt)+1))
tPulse = np.linspace(start, end, nSteps, endpoint=True)
phi_t = phi_ts[p]
phiPulse = phi_t(tPulse) # -tPulse[0] # Align time vector to 0 for phi_t to work properly
#onInd = len(t) - 1 # Start of on-phase
#offInd = onInd + int(round(Dt_on/dt))
#_idx_pulses_ = np.vstack((_idx_pulses_, [onInd,offInd]))
#t = np.r_[t, tPulse[1:]]
phi_tV = np.r_[phi_tV, phiPulse[1:]]
phi_tV[np.ma.where(phi_tV < 0)] = 0 # Safeguard for negative phi values
return phi_tV #, t, _idx_pulses_
def plot(self, plotStateVars=False):
"""Plot protocol."""
self.Ifig = plt.figure()
self.createLayout(self.Ifig)
self.PD.plot(self.axI)
self.addAnnotations()
self.plotExtras()
self.plotStateVars = plotStateVars
# TODO: Try producing animated state figures
# https://jakevdp.github.io/blog/2013/05/28/a-simple-animation-the-magic-triangle/
#animateStates = True
if self.plotStateVars:
#RhO = self.RhO
for run in range(self.nRuns):
for phiInd in range(self.nPhis):
for vInd in range(self.nVs):
pc = self.PD.trials[run][phiInd][vInd]
fileName = '{}States{}s-{}-{}-{}'.format(self.protocol, pc.nStates, run, phiInd, vInd)
#RhO.plotStates(pc.t, pc.states, pc.pulses, RhO.stateLabels, phi, pc._idx_peaks_, fileName)
logger.info('Plotting states to: {}'.format(fileName))
pc.plotStates(name=fileName)
plt.figure(self.Ifig.number)
plt.sca(self.axI)
self.axI.set_xlim(self.PD.t_start, self.PD.t_end)
# if addTitles:
# figTitle = self.genTitle()
# plt.title(figTitle) #'Photocurrent through time'
#self.Ifig.tight_layout()
plt.tight_layout()
plt.show()
figName = os.path.join(config.fDir, self.protocol+self.dataTag+"."+config.saveFigFormat)
logger.info("Saving figure for {} protocol to {} as {}".format(self.protocol, figName, config.saveFigFormat))
#externalLegend = False
#if externalLegend:
# self.Ifig.savefig(figName, bbox_extra_artists=(lgd,), bbox_inches='tight', format=config.saveFigFormat) # Use this to save figures when legend is beside the plot
#else:
self.Ifig.savefig(figName, format=config.saveFigFormat)
return
def createLayout(self, Ifig=None, vInd=0):
"""Create axes for protocols with multiple subplots."""
if Ifig is None:
Ifig = plt.figure()
self.addStimulus = config.addStimulus
#phi_ts = self.genPlottingStimuli()
# Default layout
self.axI = Ifig.add_subplot(111)
plt.sca(self.axI)
#plotLight(self.pulses, self.axI)
# TODO: Refactor multiple getLineProps
def getLineProps(self, run, vInd, phiInd):
colours = config.colours
styles = config.styles
if config.verbose > 1 and (self.nRuns > len(colours) or len(self.phis) > len(colours) or len(self.Vs) > len(colours)):
warnings.warn("Warning: only {} line colours are available!".format(len(colours)))
if config.verbose > 0 and self.nRuns > 1 and len(self.phis) > 1 and len(self.Vs) > 1:
warnings.warn("Warning: Too many changing variables for one plot!")
if config.verbose > 2:
print("Run=#{}/{}; phiInd=#{}/{}; vInd=#{}/{}".format(run, self.nRuns, phiInd, len(self.phis), vInd, len(self.Vs)))
if self.nRuns > 1:
col = colours[run % len(colours)]
if len(self.phis) > 1:
style = styles[phiInd % len(styles)]
elif len(self.Vs) > 1:
style = styles[vInd % len(styles)]
else:
style = '-'
else:
if len(self.Vs) > 1:
col = colours[vInd % len(colours)]
if len(self.phis) > 1:
style = styles[phiInd % len(styles)]
else:
style = '-'
else:
if len(self.phis) > 1:
col = colours[phiInd % len(colours)]
style = '-'
else:
col = 'b' # colours[0]
style = '-' # styles[0]
return col, style
def plotExtras(self):
pass
def addAnnotations(self):
pass
def plotStimulus(self, phi_ts, t_start, pulses, t_end, ax=None, light='shade', col=None, style=None):
nPulses = pulses.shape[0]
assert(nPulses == len(phi_ts))
nPoints = 10 * int(round((t_end - t_start) / self.dt)) + 1
t = np.linspace(t_start, t_end, nPoints)
if ax is None:
#fig = plt.figure()
ax = plt.gca()
else:
#plt.figure(fig.number)
plt.sca(ax)
if col is None:
for p in range(nPulses):
plt.plot(t, phi_ts[p](t))
else:
if style is None:
style = '-'
for p in range(nPulses):
plt.plot(t, phi_ts[p](t), color=col, linestyle=style)
if light == 'spectral':
plotLight(pulses, ax=ax, light='spectral', lam=self.lam)
else:
plotLight(pulses, ax=ax, light=light)
plt.xlabel(r'$\mathrm{Time\ [ms]}$')
plt.xlim((t_start, t_end))
plt.ylabel(r'$\mathrm{\phi\ [ph./mm^{2}/s]}$')
return ax
class protCustom(Protocol):
"""Present a time-varying stimulus defined by a spline function."""
# Class attributes
protocol = 'custom'
squarePulse = False
# custPulseGenerator = None
phi_ft = None
def extraPrep(self):
"""Function to set-up additional variables and make parameters
consistent after any changes.
"""
self.nRuns = 1 # nRuns ### TODO: Reconsider this...
#self.custPulseGenerator = self.phi_ft
if not hasattr(self, 'phi_ts') or self.phi_ts is None:
#self.phi_ts = self.genPulseSet()
#self.genPulseSet(self.custPulseGenerator)
self.genPulseSet(self.phi_ft)
def createLayout(self, Ifig=None, vInd=0):
if Ifig is None:
Ifig = plt.figure()
self.addStimulus = config.addStimulus
if self.addStimulus:
# self.genPlottingStimuli(self.custPulseGenerator)
phi_ts = self.genPlottingStimuli(self.phi_ft)
gsStim = plt.GridSpec(4, 1)
self.axS = Ifig.add_subplot(gsStim[0, :]) # Stimulus axes
self.axI = Ifig.add_subplot(gsStim[1:, :], sharex=self.axS) # Photocurrent axes
pc = self.PD.trials[0][0][0]
plotLight(pc.pulses, ax=self.axS, light='spectral', lam=470, alpha=0.2)
for run in range(self.nRuns):
for phiInd in range(self.nPhis):
pc = self.PD.trials[run][phiInd][vInd]
col, style = self.getLineProps(run, vInd, phiInd)
self.plotStimulus(phi_ts[run][phiInd], pc.t_start,
self.pulses, pc.t_end, self.axS,
light=None, col=col, style=style) #light='spectral'
plt.setp(self.axS.get_xticklabels(), visible=False)
self.axS.set_xlabel('')
else:
self.axI = Ifig.add_subplot(111)
def plotExtras(self):
pass
class protStep(Protocol):
"""Present a step (Heaviside) pulse."""
protocol = 'step'
squarePulse = True
nRuns = 1
def extraPrep(self):
"""Function to set-up additional variables and make parameters
consistent after any changes.
"""
self.nRuns = 1
self.phi_ts = self.genPulseSet()
def addAnnotations(self):
self.axI.get_xaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
self.axI.get_yaxis().set_minor_locator(mpl.ticker.AutoMinorLocator())
self.axI.grid(b=True, which='minor', axis='both', linewidth=.2)
self.axI.grid(b=True, which='major', axis='both', linewidth=1)
class protSinusoid(Protocol):
"""Present oscillating stimuli over a range of frequencies to find the
resonant frequency.
"""
protocol = 'sinusoid'
squarePulse = False
startOn = False
phi0 = 0
def extraPrep(self):
"""Function to set-up additional variables and make parameters
consistent after any changes.
"""
self.fs = np.sort(np.array(self.fs)) # Frequencies [Hz]
self.ws = 2 * np.pi * self.fs / 1000 # Frequencies [rads/ms] (scaled from /s to /ms)
self.sr = max(10000, int(round(10*max(self.fs)))) # Nyquist frequency - sampling rate (10*f) >= 2*f >= 10/ms
#self.dt = 1000/self.sr # dt is set by simulator but used for plotting
self.nRuns = len(self.ws)
if (1000)/min(self.fs) > min(self.Dt_ons):
warnings.warn('Warning: The period of the lowest frequency is longer than the stimulation time!')
if isinstance(self.phi0, (int, float, complex)):
self.phi0 = np.ones(self.nRuns) * self.phi0
elif isinstance(self.phi0, (list, tuple, np.ndarray)):
if len(self.phi0) != self.nRuns:
self.phi0 = np.ones(self.nRuns) * self.phi0[0]
else:
warnings.warn('Unexpected data type for phi0: {}'.format(type(self.phi0)))
assert(len(self.phi0) == self.nRuns)
self.t_start, self.t_end = 0, self.Dt_total
self.phi_ts = self.genPulseSet()
self.runLabels = [r'$f={}\mathrm{{Hz}}$ '.format(round_sig(f, 3)) for f in self.fs]
def getShortestPeriod(self):
return 1000/self.sr # dt [ms]
def genPulse(self, run, phi, pulse):
pStart, pEnd = pulse
Dt_on = pEnd - pStart
t = np.linspace(0.0, Dt_on, int(round((Dt_on*self.sr/1000))+1), endpoint=True) # Create smooth series of time points to interpolate between
if self.startOn: # Generalise to phase offset
phi_t = spline(pStart + t, self.phi0[run] + 0.5*phi*(1+np.cos(self.ws[run]*t)), ext=1, k=5)
else:
phi_t = spline(pStart + t, self.phi0[run] + 0.5*phi*(1-np.cos(self.ws[run]*t)), ext=1, k=5)
return phi_t
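# The raised cosine above oscillates between phi0 and phi0 + phi:
# startOn=True starts the cycle at the maximum (phi0 + phi), whereas
# startOn=False starts at the baseline phi0.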
def createLayout(self, Ifig=None, vInd=0):
if Ifig is None:
Ifig = plt.figure()
self.addStimulus = config.addStimulus
if self.nRuns > 1: #len(phis) > 1:
gsSin = plt.GridSpec(2, 3)
self.axIp = Ifig.add_subplot(gsSin[0, -1])
self.axIss = Ifig.add_subplot(gsSin[1, -1], sharex=self.axIp)
self.axI = Ifig.add_subplot(gsSin[:, :-1])
else:
self.addStimulus = config.addStimulus
if self.addStimulus:
phi_ts = self.genPlottingStimuli()
gsStim = plt.GridSpec(4, 1)
self.axS = Ifig.add_subplot(gsStim[0, :]) # Stimulus axes
self.axI = Ifig.add_subplot(gsStim[1:, :], sharex=self.axS) # Photocurrent axes
for run in range(self.nRuns):
for phiInd in range(self.nPhis):
pc = self.PD.trials[run][phiInd][vInd]
col, style = self.getLineProps(run, vInd, phiInd)
self.plotStimulus(phi_ts[run][phiInd], pc.t_start, pc.pulses, pc.t_end, self.axS, light='spectral', col=col, style=style)
plt.setp(self.axS.get_xticklabels(), visible=False)
self.axS.set_xlabel('') # plt.xlabel('')
self.axS.set_ylim(self.phi0[0], max(self.phis)) # phi0[r]
if max(self.phis) / min(self.phis) >= 100:
self.axS.set_yscale('log') # plt.yscale('log')
else:
self.axI = Ifig.add_subplot(111)
def plotExtras(self):
splineOrder = 2 # [1,5]
trim = 0.1
transEndInd = int(round((self.Dt_delays[0] + self.Dt_ons[0] * trim) / self.dt))
if self.nRuns > 1:
#plt.figure(Ifig.number)
#axI.legend().set_visible(False)
#if len(self.phis) > 1:
fstars = np.zeros((self.nPhis, self.nVs))
for phiInd, phiOn in enumerate(self.phis): # TODO: These loops need reconsidering...!!!
for vInd, V in enumerate(self.Vs):
Ipeaks = np.zeros(self.nRuns)
for run in range(self.nRuns):
PC = self.PD.trials[run][phiInd][vInd]
Ipeaks[run] = abs(PC.I_peak_) # Maximum absolute value over all peaks from that trial
Ip = self.PD.trials[np.argmax(Ipeaks)][phiInd][vInd].I_peak_
col, style = self.getLineProps(run, vInd, phiInd)
self.axIp.plot(self.fs, Ipeaks, 'x', color=col)
try:
intIp = spline(self.fs, Ipeaks, k=splineOrder)
#nPoints = 10*int(round(abs(np.log10(self.fs[-1])-np.log10(self.fs[0]))+1))
fsmooth = np.logspace(np.log10(self.fs[0]),
np.log10(self.fs[-1]), num=1001)
self.axIp.plot(fsmooth, intIp(fsmooth))
except Exception:
if config.verbose > 0:
print('Unable to plot spline for current peaks!')
fstar_p = self.fs[np.argmax(Ipeaks)]
fstars[phiInd, vInd] = fstar_p
Ap = max(Ipeaks)
#fpLabel = r'$f^*_{{peak}}={}$ $\mathrm{{[Hz]}}$'.format(round_sig(fstar_p,3))
self.axIp.plot(fstar_p, Ap, '*', markersize=10)
#axIp.annotate(fpLabel, xy=(fstar_p,Ap), xytext=(0.7, 0.9), textcoords='axes fraction', arrowprops={'arrowstyle':'->','color':'black'})
self.axIp.set_xscale('log')
self.axIp.set_ylabel(r'$|A|_{peak}$ $\mathrm{[nA]}$')
if config.addTitles:
#self.axIp.set_title('$\mathrm{|Amplitude|_{peak}\ vs.\ frequency}.\ f^*:=arg\,max_f(|A|)$')
self.axIp.set_title(r'$f^*:=arg\,max_f(|A|_{peak})$')
#axIp.set_aspect('auto')
# Calculate the time to allow for transition effects from the period of fstar_p
# buffer = 3
# fstar_p = max(max(fstars))
# transD = buffer * np.ceil(1000/fstar_p) # [ms]
# transEndInd = round((self.Dt_delays[0]+transD)/self.dt)
# if transEndInd >= (self.Dt_ons[0])/self.dt: # If transition period is greater than the on period
# transEndInd = round((self.Dt_delays[0]+self.Dt_ons[0]/2)/self.dt) # Take the second half of the data
tTransEnd = transEndInd * self.dt #ts[0][0][0]
self.axI.axvline(x=tTransEnd, linestyle=':', color='k')
arrow = {'arrowstyle': '<->', 'color': 'black', 'shrinkA': 0, 'shrinkB': 0}
for phiInd, phiOn in enumerate(self.phis): # TODO: These loops need reconsidering...!!!
for vInd, V in enumerate(self.Vs):
PC = self.PD.trials[np.argmax(Ipeaks)][phiInd][vInd]
onBegInd, onEndInd = PC._idx_pulses_[0]
t = PC.t
self.axI.annotate('', xy=(tTransEnd, Ip), xytext=(t[onEndInd], Ip),
arrowprops=arrow)
for phiInd, phiOn in enumerate(self.phis):
for vInd, V in enumerate(self.Vs):
Iabs = np.zeros(self.nRuns) # [None for r in range(nRuns)]
for run in range(self.nRuns):
PC = self.PD.trials[run][phiInd][vInd]
onBegInd, onEndInd = PC._idx_pulses_[0]
t = PC.t # t = ts[run][phiInd][vInd]
I_RhO = PC.I # I_RhO = Is[run][phiInd][vInd]
#transEndInd = np.searchsorted(t,Dt_delay+transD,side="left") # Add one since upper bound is not included in slice
#if transEndInd >= len(t): # If transition period is greater than the on period
# transEndInd = round(len(t[onBegInd:onEndInd+1])/2) # Take the second half of the data
#print(fstar_p,'Hz --> ',transD,'ms;', transEndInd,':',onEndInd+1)
I_zone = I_RhO[transEndInd:onEndInd+1]
try:
maxV = max(I_zone)
except ValueError:
maxV = 0.0
try:
minV = min(I_zone)
except ValueError:
minV = 0.0
Iabs[run] = abs(maxV-minV)
#axI.axvline(x=t[transEndInd],linestyle=':',color='k')
#axI.annotate('Search zone', xy=(t[transEndInd], min(I_RhO)), xytext=(t[onEndInd], min(I_RhO)), arrowprops={'arrowstyle':'<->','color':'black'})
col, style = self.getLineProps(run, vInd, phiInd) # TODO: Modify to match colours correctly
self.axIss.plot(self.fs, Iabs, 'x', color=col)
try:
intIss = spline(self.fs, Iabs, k=splineOrder)
#fsmooth = np.logspace(self.fs[0], self.fs[-1], 100)
self.axIss.plot(fsmooth, intIss(fsmooth))
except Exception:
if config.verbose > 0:
print('Unable to plot spline for current steady-states!')
fstar_abs = self.fs[np.argmax(Iabs)]
fstars[phiInd,vInd] = fstar_abs
Aabs = max(Iabs)
fabsLabel = r'$f^*_{{res}}={}$ $\mathrm{{[Hz]}}$'.format(round_sig(fstar_abs,3))
self.axIss.plot(fstar_abs, Aabs, '*', markersize=10, label=fabsLabel)
self.axIss.legend(loc='best')
#axIss.annotate(fabsLabel, xy=(fstar_abs,Aabs), xytext=(0.7, 0.9), textcoords='axes fraction', arrowprops={'arrowstyle':'->','color':'black'})
if config.verbose > 0:
print('Resonant frequency (phi={}; V={}) = {} Hz'.format(phiOn, V, fstar_abs))
self.axIss.set_xscale('log')
self.axIss.set_xlabel(r'$f$ $\mathrm{[Hz]}$')
self.axIss.set_ylabel(r'$|A|_{ss}$ $\mathrm{[nA]}$')
if config.addTitles:
#axIss.set_title('$\mathrm{|Amplitude|_{ss}\ vs.\ frequency}.\ f^*:=arg\,max_f(|A|)$')
self.axIss.set_title(r'$f^*:=arg\,max_f(|A|_{ss})$')
plt.tight_layout()
self.fstars = fstars
if len(self.phis) > 1: # Multiple light amplitudes
#for i, phi0 in enumerate(self.phi0):
fstarAfig = plt.figure()
for vInd, V in enumerate(self.Vs):
if self.phi0[0] > 0: # phi0[r]
plt.plot(np.array(self.phis), fstars[:, vInd])  # NOTE: call truncated in source; arguments after `phis` are an assumption
import unittest
import hail as hl
import hail.expr.aggregators as agg
from subprocess import DEVNULL, call as syscall
import numpy as np
from struct import unpack
import hail.utils as utils
from hail.linalg import BlockMatrix
from math import sqrt
from .utils import resource, doctest_resource, startTestHailContext, stopTestHailContext
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
class Tests(unittest.TestCase):
_dataset = None
def get_dataset(self):
if Tests._dataset is None:
Tests._dataset = hl.split_multi_hts(hl.import_vcf(resource('sample.vcf')))
return Tests._dataset
def test_ibd(self):
dataset = self.get_dataset()
def plinkify(ds, min=None, max=None):
vcf = utils.new_temp_file(prefix="plink", suffix="vcf")
plinkpath = utils.new_temp_file(prefix="plink")
hl.export_vcf(ds, vcf)
threshold_string = "{} {}".format("--min {}".format(min) if min else "",
"--max {}".format(max) if max else "")
plink_command = "plink --double-id --allow-extra-chr --vcf {} --genome full --out {} {}" \
.format(utils.uri_path(vcf),
utils.uri_path(plinkpath),
threshold_string)
result_file = utils.uri_path(plinkpath + ".genome")
syscall(plink_command, shell=True, stdout=DEVNULL, stderr=DEVNULL)
### format of .genome file is:
# _, fid1, iid1, fid2, iid2, rt, ez, z0, z1, z2, pihat, phe,
# dst, ppc, ratio, ibs0, ibs1, ibs2, homhom, hethet (+ separated)
### format of ibd is:
# i (iid1), j (iid2), ibd: {Z0, Z1, Z2, PI_HAT}, ibs0, ibs1, ibs2
results = {}
with open(result_file) as f:
f.readline()
for line in f:
row = line.strip().split()
results[(row[1], row[3])] = (list(map(float, row[6:10])),
list(map(int, row[14:17])))
return results
def compare(ds, min=None, max=None):
plink_results = plinkify(ds, min, max)
hail_results = hl.identity_by_descent(ds, min=min, max=max).collect()
for row in hail_results:
key = (row.i, row.j)
self.assertAlmostEqual(plink_results[key][0][0], row.ibd.Z0, places=4)
self.assertAlmostEqual(plink_results[key][0][1], row.ibd.Z1, places=4)
self.assertAlmostEqual(plink_results[key][0][2], row.ibd.Z2, places=4)
self.assertAlmostEqual(plink_results[key][0][3], row.ibd.PI_HAT, places=4)
self.assertEqual(plink_results[key][1][0], row.ibs0)
self.assertEqual(plink_results[key][1][1], row.ibs1)
self.assertEqual(plink_results[key][1][2], row.ibs2)
compare(dataset)
compare(dataset, min=0.0, max=1.0)
dataset = dataset.annotate_rows(dummy_maf=0.01)
hl.identity_by_descent(dataset, dataset['dummy_maf'], min=0.0, max=1.0)
hl.identity_by_descent(dataset, hl.float32(dataset['dummy_maf']), min=0.0, max=1.0)
def test_impute_sex_same_as_plink(self):
ds = hl.import_vcf(resource('x-chromosome.vcf'))
sex = hl.impute_sex(ds.GT, include_par=True)
vcf_file = utils.uri_path(utils.new_temp_file(prefix="plink", suffix="vcf"))
out_file = utils.uri_path(utils.new_temp_file(prefix="plink"))
hl.export_vcf(ds, vcf_file)
utils.run_command(["plink", "--vcf", vcf_file, "--const-fid",
"--check-sex", "--silent", "--out", out_file])
plink_sex = hl.import_table(out_file + '.sexcheck',
delimiter=' +',
types={'SNPSEX': hl.tint32,
'F': hl.tfloat64})
plink_sex = plink_sex.select('IID', 'SNPSEX', 'F')
plink_sex = plink_sex.select(
s=plink_sex.IID,
is_female=hl.cond(plink_sex.SNPSEX == 2,
True,
hl.cond(plink_sex.SNPSEX == 1,
False,
hl.null(hl.tbool))),
f_stat=plink_sex.F).key_by('s')
sex = sex.select('is_female', 'f_stat')
self.assertTrue(plink_sex._same(sex.select_globals(), tolerance=1e-3))
ds = ds.annotate_rows(aaf=(agg.call_stats(ds.GT, ds.alleles)).AF[1])
self.assertTrue(hl.impute_sex(ds.GT)._same(hl.impute_sex(ds.GT, aaf='aaf')))
def test_linreg(self):
phenos = hl.import_table(resource('regressionLinear.pheno'),
types={'Pheno': hl.tfloat64},
key='Sample')
covs = hl.import_table(resource('regressionLinear.cov'),
types={'Cov1': hl.tfloat64, 'Cov2': hl.tfloat64},
key='Sample')
mt = hl.import_vcf(resource('regressionLinear.vcf'))
mt = mt.annotate_cols(pheno=phenos[mt.s].Pheno, cov=covs[mt.s])
mt = mt.annotate_entries(x = mt.GT.n_alt_alleles()).cache()
t1 = hl.linear_regression(
y=mt.pheno, x=mt.GT.n_alt_alleles(), covariates=[mt.cov.Cov1, mt.cov.Cov2 + 1 - 1]).rows()
t1 = t1.select(p=t1.linreg.p_value)
t2 = hl.linear_regression(
y=mt.pheno, x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t2 = t2.select(p=t2.linreg.p_value)
t3 = hl.linear_regression(
y=[mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t3 = t3.select(p=t3.linreg.p_value[0])
t4 = hl.linear_regression(
y=[mt.pheno, mt.pheno], x=mt.x, covariates=[mt.cov.Cov1, mt.cov.Cov2]).rows()
t4a = t4.select(p=t4.linreg.p_value[0])
t4b = t4.select(p=t4.linreg.p_value[1])
self.assertTrue(t1._same(t2))
self.assertTrue(t1._same(t3))
self.assertTrue(t1._same(t4a))
self.assertTrue(t1._same(t4b))
def test_linear_regression_with_two_cov(self):
covariates = hl.import_table(resource('regressionLinear.cov'),
key='Sample',
types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
pheno = hl.import_table(resource('regressionLinear.pheno'),
key='Sample',
missing='0',
types={'Pheno': hl.tfloat})
mt = hl.import_vcf(resource('regressionLinear.vcf'))
mt = hl.linear_regression(y=pheno[mt.s].Pheno,
x=mt.GT.n_alt_alleles(),
covariates=list(covariates[mt.s].values()))
results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
self.assertAlmostEqual(results[1].beta, -0.28589421, places=6)
self.assertAlmostEqual(results[1].standard_error, 1.2739153, places=6)
self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
self.assertAlmostEqual(results[2].beta, -0.5417647, places=6)
self.assertAlmostEqual(results[2].standard_error, 0.3350599, places=6)
self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
self.assertAlmostEqual(results[3].beta, 1.07367185, places=6)
self.assertAlmostEqual(results[3].standard_error, 0.6764348, places=6)
self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
self.assertTrue(np.isnan(results[6].standard_error))
self.assertTrue(np.isnan(results[6].t_stat))
self.assertTrue(np.isnan(results[6].p_value))
self.assertTrue(np.isnan(results[7].standard_error))
self.assertTrue(np.isnan(results[8].standard_error))
self.assertTrue(np.isnan(results[9].standard_error))
self.assertTrue(np.isnan(results[10].standard_error))
def test_linear_regression_with_two_cov_pl(self):
covariates = hl.import_table(resource('regressionLinear.cov'),
key='Sample',
types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
pheno = hl.import_table(resource('regressionLinear.pheno'),
key='Sample',
missing='0',
types={'Pheno': hl.tfloat})
mt = hl.import_vcf(resource('regressionLinear.vcf'))
mt = hl.linear_regression(y=pheno[mt.s].Pheno,
x=hl.pl_dosage(mt.PL),
covariates=list(covariates[mt.s].values()))
results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
self.assertAlmostEqual(results[1].beta, -0.29166985, places=6)
self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=6)
self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
self.assertAlmostEqual(results[2].beta, -0.5499320, places=6)
self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=6)
self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
self.assertAlmostEqual(results[3].beta, 1.09536219, places=6)
self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=6)
self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
def test_linear_regression_with_two_cov_dosage(self):
covariates = hl.import_table(resource('regressionLinear.cov'),
key='Sample',
types={'Cov1': hl.tfloat, 'Cov2': hl.tfloat})
pheno = hl.import_table(resource('regressionLinear.pheno'),
key='Sample',
missing='0',
types={'Pheno': hl.tfloat})
mt = hl.import_gen(resource('regressionLinear.gen'), sample_file=resource('regressionLinear.sample'))
mt = hl.linear_regression(y=pheno[mt.s].Pheno,
x=hl.gp_dosage(mt.GP),
covariates=list(covariates[mt.s].values()))
results = dict(mt.aggregate_rows(hl.agg.collect((mt.locus.position, mt.linreg))))
self.assertAlmostEqual(results[1].beta, -0.29166985, places=4)
self.assertAlmostEqual(results[1].standard_error, 1.2996510, places=4)
self.assertAlmostEqual(results[1].t_stat, -0.22442167, places=6)
self.assertAlmostEqual(results[1].p_value, 0.84327106, places=6)
self.assertAlmostEqual(results[2].beta, -0.5499320, places=4)
self.assertAlmostEqual(results[2].standard_error, 0.3401110, places=4)
self.assertAlmostEqual(results[2].t_stat, -1.616919, places=6)
self.assertAlmostEqual(results[2].p_value, 0.24728705, places=6)
self.assertAlmostEqual(results[3].beta, 1.09536219, places=4)
self.assertAlmostEqual(results[3].standard_error, 0.6901002, places=4)
self.assertAlmostEqual(results[3].t_stat, 1.5872510, places=6)
self.assertAlmostEqual(results[3].p_value, 0.2533675, places=6)
self.assertTrue(np.isnan(results[6].standard_error))
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter, FormatStrFormatter
from matplotlib.path import Path
import matplotlib.patches as patches
##############################################
# PLOTTING FUNCTIONS FOR WIDGETS
##############################################
def fcn_FDEM_InductionSpherePlaneWidget(xtx,ytx,ztx,m,orient,x0,y0,z0,a,sig,mur,xrx,yrx,zrx,logf,Comp,Phase):
sig = 10**sig
f = 10**logf
fvec = np.logspace(0, 8, 41)
import numpy as np
import unittest
import discretize
from discretize.utils import volume_average
from numpy.testing import assert_array_equal, assert_allclose
class TestVolumeAverage(unittest.TestCase):
def test_tensor_to_tensor(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
h1s = []
h2s = []
for i in range(3):
print(f"Tensor to Tensor {i+1}D: ", end="")
h1s.append(h1)
h2s.append(h2)
mesh1 = discretize.TensorMesh(h1s)
mesh2 = discretize.TensorMesh(h2s)
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
out1 = volume_average(mesh1, mesh2, in_put, out_put)
assert_array_equal(out1, out_put)
out2 = volume_average(mesh1, mesh2, in_put)
assert_allclose(out1, out2)
Av = volume_average(mesh1, mesh2)
out3 = Av @ in_put
assert_allclose(out1, out3)
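# Volume averaging conserves the integral of the model over the domain,
# so the mesh-weighted totals computed below should agree.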
vol1 = np.sum(mesh1.vol * in_put)
vol2 = np.sum(mesh2.vol * out3)
print(vol1, vol2)
self.assertAlmostEqual(vol1, vol2)
def test_tree_to_tree(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
h1s = [h1]
h2s = [h2]
insert_1 = [0.25]
insert_2 = [0.75]
for i in range(1, 3):
print(f"Tree to Tree {i+1}D: ", end="")
h1s.append(h1)
h2s.append(h2)
insert_1.append(0.25)
insert_2.append(0.75)
mesh1 = discretize.TreeMesh(h1s)
mesh1.insert_cells([insert_1], [4])
mesh2 = discretize.TreeMesh(h2s)
mesh2.insert_cells([insert_2], [4])
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
out1 = volume_average(mesh1, mesh2, in_put, out_put)
assert_array_equal(out1, out_put)
out2 = volume_average(mesh1, mesh2, in_put)
assert_allclose(out1, out2)
Av = volume_average(mesh1, mesh2)
out3 = Av @ in_put
assert_allclose(out1, out3)
vol1 = np.sum(mesh1.vol * in_put)
vol2 = np.sum(mesh2.vol * out3)
print(vol1, vol2)
self.assertAlmostEqual(vol1, vol2)
def test_tree_to_tensor(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
h1s = [h1]
h2s = [h2]
insert_1 = [0.25]
for i in range(1, 3):
print(f"Tree to Tensor {i+1}D: ", end="")
h1s.append(h1)
h2s.append(h2)
insert_1.append(0.25)
mesh1 = discretize.TreeMesh(h1s)
mesh1.insert_cells([insert_1], [4])
mesh2 = discretize.TensorMesh(h2s)
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
out1 = volume_average(mesh1, mesh2, in_put, out_put)
assert_array_equal(out1, out_put)
out2 = volume_average(mesh1, mesh2, in_put)
assert_allclose(out1, out2)
Av = volume_average(mesh1, mesh2)
out3 = Av @ in_put
assert_allclose(out1, out3)
vol1 = np.sum(mesh1.vol * in_put)
vol2 = np.sum(mesh2.vol * out3)
print(vol1, vol2)
self.assertAlmostEqual(vol1, vol2)
def test_tensor_to_tree(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
h1s = [h1]
h2s = [h2]
insert_2 = [0.75]
for i in range(1, 3):
print(f"Tensor to Tree {i+1}D: ", end="")
h1s.append(h1)
h2s.append(h2)
insert_2.append(0.75)
mesh1 = discretize.TensorMesh(h1s)
mesh2 = discretize.TreeMesh(h2s)
mesh2.insert_cells([insert_2], [4])
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
out1 = volume_average(mesh1, mesh2, in_put, out_put)
assert_array_equal(out1, out_put)
out2 = volume_average(mesh1, mesh2, in_put)
assert_allclose(out1, out2)
Av = volume_average(mesh1, mesh2)
out3 = Av @ in_put
assert_allclose(out1, out3)
vol1 = np.sum(mesh1.vol * in_put)
vol2 = np.sum(mesh2.vol * out3)
print(vol1, vol2)
self.assertAlmostEqual(vol1, vol2)
def test_errors(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h2 = np.random.rand(16)
h2 /= h2.sum()
mesh1D = discretize.TensorMesh([h1])
mesh2D = discretize.TensorMesh([h1, h1])
mesh3D = discretize.TensorMesh([h1, h1, h1])
hr = np.r_[1, 1, 0.5]
hz = np.r_[2, 1]
meshCyl = discretize.CylMesh([hr, 1, hz], np.r_[0.0, 0.0, 0.0])
mesh2 = discretize.TreeMesh([h2, h2])
mesh2.insert_cells([0.75, 0.75], [4])
with self.assertRaises(TypeError):
# Gives a wrong typed object to the function
volume_average(mesh1D, h1)
with self.assertRaises(NotImplementedError):
# Gives a wrong typed mesh
volume_average(meshCyl, mesh2)
with self.assertRaises(ValueError):
# Gives mismatching mesh dimensions
volume_average(mesh2D, mesh3D)
model1 = np.random.randn(mesh2D.nC)
bad_model1 = np.random.randn(3)
bad_model2 = np.random.rand(1)
# gives input values with incorrect lengths
with self.assertRaises(ValueError):
volume_average(mesh2D, mesh2, bad_model1)
with self.assertRaises(ValueError):
volume_average(mesh2D, mesh2, model1, bad_model2)
def test_tree_to_tree_same_base(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h1s = [h1]
insert_1 = [0.25]
insert_2 = [0.75]
for i in range(1, 3):
print(f"Tree to Tree {i+1}D: same base", end="")
h1s.append(h1)
insert_1.append(0.25)
insert_2.append(0.75)
mesh1 = discretize.TreeMesh(h1s)
mesh1.insert_cells([insert_1], [4])
mesh2 = discretize.TreeMesh(h1s)
mesh2.insert_cells([insert_2], [4])
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# test the three ways of calling...
out1 = volume_average(mesh1, mesh2, in_put, out_put)
assert_array_equal(out1, out_put)
out2 = volume_average(mesh1, mesh2, in_put)
assert_allclose(out1, out2)
Av = volume_average(mesh1, mesh2)
out3 = Av @ in_put
assert_allclose(out1, out3)
vol1 = np.sum(mesh1.vol * in_put)
vol2 = np.sum(mesh2.vol * out3)
print(vol1, vol2)
self.assertAlmostEqual(vol1, vol2)
def test_tree_to_tensor_same_base(self):
h1 = np.random.rand(16)
h1 /= h1.sum()
h1s = [h1]
insert_1 = [0.25]
for i in range(1, 3):
print(f"Tree to Tensor {i+1}D same base: ", end="")
h1s.append(h1)
insert_1.append(0.25)
mesh1 = discretize.TreeMesh(h1s)
mesh1.insert_cells([insert_1], [4])
mesh2 = discretize.TensorMesh(h1s)
in_put = np.random.rand(mesh1.nC)
out_put = np.empty(mesh2.nC)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
# Built-ins
from collections import OrderedDict, defaultdict
import sys, datetime, copy, warnings
# External
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
from scipy.stats import entropy, mannwhitneyu
from scipy.spatial.distance import squareform, pdist
from itertools import combinations
# soothsayer_utils
from soothsayer_utils import assert_acceptable_arguments, is_symmetrical, is_graph, is_nonstring_iterable, dict_build, dict_filter, is_dict, is_dict_like, is_color, is_number, write_object, format_memory, format_header, check_packages
try:
from . import __version__
except ImportError:
__version__ = "ImportError: attempted relative import with no known parent package"
# ensemble_networkx
from ensemble_networkx import Symmetric, condensed_to_dense
# ==========
# Conversion
# ==========
# Polar to cartesian coordinates
def polar_to_cartesian(r, theta):
x = r * np.cos(theta)
y = r * np.sin(theta)
return(x, y)
# Cartesian to polar coordinates
def cartesian_to_polar(x, y):
r = np.sqrt(x**2 + y**2)
theta = np.arctan2(y, x)
return(r, theta)
# =============
# Normalization
# =============
# Normalize MinMax
def normalize_minmax(x, feature_range=(0,1)):
"""
Adapted from the following source:
* https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html
"""
x_std = (x - x.min())/(x.max() - x.min())
return x_std * (feature_range[1] - feature_range[0]) + feature_range[0]
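# Example (illustrative): `Hive.compile` below uses this helper to map raw node
# positions into the (inner_radius, outer_radius) band. For instance:
#
# normalize_minmax(pd.Series([1, 4, 9]), feature_range=(1, 800))
# # -> 1.0, 300.625, 800.0 since (4-1)/(9-1) * (800-1) + 1 = 300.625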
# =======================================================
# Hive
# =======================================================
class Hive(object):
def __init__(self, data, name=None, node_type=None, edge_type=None, axis_type=None, description=None, tol=1e-10):
"""
Hive plots for undirected networks
Hive plots:
Should only be used with 2-3 axes unless the nodes are intelligently ordered, because the arcs will overlap.
Notes:
* Does not store the networkx graph, to avoid overusing memory; use .to_networkx to generate it in real time.
Usage:
import soothsayer_utils as syu
import ensemble_networkx as enx
import hive_networkx as hx
# Load data
X, y, colors = syu.get_iris_data(["X", "y", "colors"])
n, m = X.shape
# Get association matrix (n,n)
method = "pearson"
df_sim = X.T.corr(method=method)
ratio = 0.382
number_of_edges = int((n**2 - n)/2)
number_of_edges_negative = int(ratio*number_of_edges)
# Make a fraction (`ratio`) of the edges negative to showcase edge coloring (not statistically meaningful at all)
for a, b in zip(np.random.RandomState(0).randint(low=0, high=149, size=number_of_edges_negative), np.random.RandomState(1).randint(low=0, high=149, size=number_of_edges_negative)):
if a != b:
df_sim.values[a,b] = df_sim.values[b,a] = df_sim.values[a,b]*-1
# Create a Symmetric object from the association matrix
sym_iris = enx.Symmetric(data=df_sim, node_type="iris sample", edge_type=method, name="iris", association="network")
# ====================================
# Symmetric(Name:iris, dtype: float64)
# ====================================
# * Number of nodes (iris sample): 150
# * Number of edges (correlation): 11175
# * Association: network
# * Memory: 174.609 KB
# --------------------------------
# | Weights
# --------------------------------
# (iris_1, iris_0) 0.995999
# (iris_0, iris_2) 0.999974
# (iris_3, iris_0) 0.998168
# (iris_0, iris_4) 0.999347
# (iris_0, iris_5) 0.999586
# ...
# (iris_148, iris_146) 0.988469
# (iris_149, iris_146) 0.986481
# (iris_147, iris_148) 0.995708
# (iris_149, iris_147) 0.994460
# (iris_149, iris_148) 0.999916
# Create NetworkX graph from the Symmetric object
graph_iris = sym_iris.to_networkx()
# Create Hive
hive = hx.Hive(graph_iris, axis_type="species")
# Organize nodes by species for each axis
number_of_query_nodes = 3
axis_nodes = OrderedDict()
for species, _y in y.groupby(y):
axis_nodes[species] = _y.index[:number_of_query_nodes]
# Make sure each node is specific to a single axis (not the fastest way, but the easiest to understand)
nodelist = list()
for name_axis, nodes in axis_nodes.items():
nodelist += nodes.tolist()
assert pd.Index(nodelist).value_counts().max() == 1, "Each node must be on only one axis"
# Add axis for each species
node_styles = dict(zip(['setosa', 'versicolor', 'virginica'], ["o", "p", "D"]))
for name_axis, nodes in axis_nodes.items():
hive.add_axis(name_axis, nodes, sizes=150, colors=colors[nodes], split_axis=True, node_style=node_styles[name_axis])
hive.compile()
# ===============================
# Hive(Name:iris, dtype: float64)
# ===============================
# * Number of nodes (iris sample): 150
# * Number of edges (pearson): 11175
# * Axes (species): ['setosa', 'versicolor', 'virginica']
# * Memory: 174.609 KB
# * Compiled: True
# ---------------------------
# | Axes
# ---------------------------
# 0. setosa (3) [iris_0, iris_1, iris_2]
# 1. versicolor (3) [iris_50, iris_51, iris_52]
# 2. virginica (3) [iris_100, iris_101, iris_102]
# Plot Hive
color_negative, color_positive = ('#278198', '#dc3a23')
edge_colors = hive.weights.map(lambda w: {True:color_negative, False:color_positive}[w < 0])
legend = dict(zip(["Positive", "Negative"], [color_positive, color_negative]))
fig, axes = hive.plot(func_edgeweight=lambda w: (w**10), edge_colors=edge_colors, style="light", show_node_labels=True, title="Iris", legend=legend)
"""
# Placeholders
self.nodes_in_hive = None
self.edges_in_hive = None
self.weights = None
# self.graph = None
self.name = name
self.node_type = node_type
self.edge_type = edge_type
# Propagate
if isinstance(data, pd.DataFrame):
data = self._from_pandas_adjacency(data, name, node_type, edge_type, tol) # -> Symmetric
if isinstance(data, Symmetric):
self._from_symmetric(data, name, node_type, edge_type)
if all([
(self.nodes_in_hive is None),
(self.edges_in_hive is None),
(self.weights is None),
]):
assert is_graph(data), "`data` must be either a pd.DataFrame adjacency, a Symmetric, or a networkx graph object" # Last resort, use this if Symmetric isn't provided
self._from_networkx(data)
# Initialize
self.axes = OrderedDict()
self.node_mapping_ = OrderedDict()
self.compiled = False
self.axis_type = axis_type
self.description = description
self.version = __version__
self.number_of_nodes_ = None
self.memory = self.weights.memory_usage()
self.__synthesized__ = datetime.datetime.utcnow()
def _from_pandas_adjacency(self, data, name, node_type, edge_type, tol):
# Convert pd.DataFrame into a Symmetric object
assert isinstance(data, pd.DataFrame), "Must be a 2-dimensional pandas DataFrame object"
assert is_symmetrical(data, tol=tol), "DataFrame must be symmetrical. Please force symmetry with (X + X.T)/2"
return Symmetric(data=data, name=name, node_type=node_type, edge_type=edge_type, association="network", nans_ok=False, tol=tol)
def _from_symmetric(self, data, name, node_type, edge_type):
# Propagate information from Symmetric
if name is None:
self.name = data.name
if node_type is None:
self.node_type = data.node_type
if edge_type is None:
self.edge_type = data.edge_type
self.nodes_in_hive = data.nodes
self.edges_in_hive = data.edges
self.weights = data.weights
# return data.to_networkx()
def _from_networkx(self, graph):
# Propagate information from graph
for attr in ["name", "node_type", "edge_type"]:
if getattr(self, attr) is None:
if attr in graph.graph:
value = graph.graph[attr]
if bool(value):
setattr(self, attr, value)
# if self.graph is None:
# self.graph = graph
if self.nodes_in_hive is None:
self.nodes_in_hive = pd.Index(sorted(graph.nodes()))
if (self.edges_in_hive is None) or (self.weights is None):
self.weights = dict()
for edge_data in graph.edges(data=True):
edge = frozenset(edge_data[:-1])
weight = edge_data[-1]["weight"]
self.weights[edge] = weight
self.weights = pd.Series(self.weights, name="Weights")#.sort_index()
self.edges_in_hive = pd.Index(self.weights.index, name="Edges")
# Built-ins
def __repr__(self):
pad = 4
header = format_header("Hive(Name:{}, dtype: {})".format(self.name, self.weights.dtype),line_character="=")
n = len(header.split("\n")[0])
fields = [
header,
pad*" " + "* Number of nodes ({}): {}".format(self.node_type, len(self.nodes_in_hive)),
pad*" " + "* Number of edges ({}): {}".format(self.edge_type, len(self.edges_in_hive)),
pad*" " + "* Axes ({}): {}".format(self.axis_type, list(self.axes.keys())),
pad*" " + "* Memory: {}".format(format_memory(self.memory)),
pad*" " + "* Compiled: {}".format(self.compiled),
]
if self.compiled:
for field in map(lambda line:pad*" " + line, format_header("| Axes", "-", n=n-pad).split("\n")):
fields.append(field)
for field in map(lambda line: pad*" " + str(line), repr(self.axes_preview_).split("\n")[:-1]):
fields.append(field)
return "\n".join(fields)
def __call__(self, name_axis=None):
return self.get_axis_data(name_axis=name_axis)
# def __getitem__(self, key):
# return self.weights[key]
# Add axis to HivePlot
def add_axis(self, name_axis, nodes, sizes=None, colors=None, split_axis:bool=False, node_style="o", scatter_kws=dict()):
"""
Add or update axis
nodes: Can be either an iterable of nodes or a dict-like with node positions {node:position}
"""
# Initialize axis container
self.axes[name_axis] = defaultdict(dict)
self.axes[name_axis]["colors"] = None
self.axes[name_axis]["sizes"] = None
self.axes[name_axis]["split_axis"] = split_axis
self.axes[name_axis]["node_style"] = node_style
self.axes[name_axis]["scatter_kws"] = scatter_kws
# Assign (preliminary) node positions
if is_nonstring_iterable(nodes) and not isinstance(nodes, pd.Series):
nodes = pd.Series(np.arange(len(nodes)), index=nodes)
if is_dict(nodes):
nodes = pd.Series(nodes)
nodes = nodes.sort_values()
assert set(nodes.index) <= set(self.nodes_in_hive), "All nodes in axis should be in the Hive and they aren't..."
# Set values
self.axes[name_axis]["node_positions"] = pd.Series(nodes, name=(name_axis, "node_positions"))
self.axes[name_axis]["nodes"] = pd.Index(nodes.index, name=(name_axis, "nodes"))
self.axes[name_axis]["number_of_nodes"] = nodes.size
# Group node with axis
self.node_mapping_.update(dict_build([(name_axis, self.axes[name_axis]["nodes"])]))
# Assign component colors
if colors is None:
colors = "white"
if is_color(colors):
colors = dict_build([(colors, self.axes[name_axis]["nodes"])])
if is_dict(colors):
colors = pd.Series(colors)
if not is_color(colors):
if is_nonstring_iterable(colors) and not isinstance(colors, pd.Series):
colors = pd.Series(colors, index=self.axes[name_axis]["nodes"])
self.axes[name_axis]["colors"] = pd.Series(colors[self.axes[name_axis]["nodes"]], name=(name_axis, "node_colors"))
# Assign component sizes
if sizes is None:
sizes = 100
if is_number(sizes):
sizes = dict_build([(sizes, self.axes[name_axis]["nodes"])])
if is_dict(sizes):
sizes = pd.Series(sizes)
self.axes[name_axis]["sizes"] = pd.Series(sizes[nodes.index], name=(name_axis, "node_sizes"))
# Compile the data for plotting
def compile(self, axes_theta_degrees=None, split_theta_degree=None, inner_radius=None, theta_center=90, axis_normalize=True, axis_maximum=1000):
"""
inner_radius should be similar units to axis_maximum
"""
number_of_axes = len(self.axes)
if split_theta_degree is None:
split_theta_degree = (360/number_of_axes)*0.16180339887
self.split_theta_degree = split_theta_degree
self.axis_maximum = axis_maximum
if inner_radius is None:
if axis_normalize:
inner_radius = (1/5)*self.axis_maximum
else:
inner_radius = 3
self.inner_radius = inner_radius
self.outer_radius = self.axis_maximum - self.inner_radius
self.theta_center = theta_center
# Adjust all of the node_positions
for i, query_axis in enumerate(self.axes):
# If the axis is normalized, force everything between the minimum position and the `outer_radius` (that is, axis_maximum - inner_radius); this ensures the axis_maximum is actually what is defined
if axis_normalize:
node_positions = self.axes[query_axis]["node_positions"]
self.axes[query_axis]["node_positions_normalized"] = normalize_minmax(node_positions, feature_range=(min(node_positions), self.outer_radius) )
else:
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions"].copy()
# Offset the node positions by the inner radius
self.axes[query_axis]["node_positions_normalized"] = self.axes[query_axis]["node_positions_normalized"] + self.inner_radius
# Axis thetas
if axes_theta_degrees is not None:
assert hasattr(axes_theta_degrees, "__iter__"), "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
assert len(axes_theta_degrees) == number_of_axes, "`axes_theta_degrees` must be either None or an iterable of {} angles in degrees".format(number_of_axes)
if axes_theta_degrees is None:
axes_theta_degrees = list()
for i in range(number_of_axes):
theta_add = (360/number_of_axes)*i
axes_theta_degrees.append(theta_add)
# Adjust all of the axes angles
for i, query_axis in enumerate(self.axes):
# If the axis is in single mode
theta_add = axes_theta_degrees[i] #(360/number_of_axes)*i
if not self.axes[query_axis]["split_axis"]:
# If the query axis is the first then the `theta_add` will be 0
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add])
else:
self.axes[query_axis]["theta"] = np.array([self.theta_center + theta_add - split_theta_degree,
self.theta_center + theta_add + split_theta_degree])
self.axes[query_axis]["theta"] = np.deg2rad(self.axes[query_axis]["theta"])
self.axes_theta_degrees_ = dict(zip(self.axes.keys(), axes_theta_degrees))
# Nodes
self.nodes_ = list()
for axes_data in self.axes.values():
self.nodes_ += list(axes_data["nodes"])
assert len(self.nodes_) == len(set(self.nodes_)), "Axes cannot contain duplicate nodes"
self.number_of_nodes_ = len(self.nodes_)
# Edges
self.edges_ = list(map(frozenset, combinations(self.nodes_, r=2)))
self.number_of_edges_ = len(self.edges_)
# Axes
self.number_of_axes_ = number_of_axes
self.axes_preview_ = pd.Series(dict(zip(self.axes.keys(), map(lambda data:list(data["nodes"]), self.axes.values()))), name="Axes preview")
self.axes_preview_.index = self.axes_preview_.index.map(lambda name_axis: "{}. {} ({})".format(self.axes_preview_.index.get_loc(name_axis), name_axis, len(self.axes_preview_[name_axis])))
# Compile
self.compiled = True
def _get_quadrant_info(self, theta_representative):
# 0/360
if theta_representative in np.deg2rad([0,360]):
horizontalalignment = "left"
verticalalignment = "center"
quadrant = 0
# 90
if theta_representative == np.deg2rad(90):
horizontalalignment = "center"
verticalalignment = "bottom"
quadrant = 90
# 180
if theta_representative == np.deg2rad(180):
horizontalalignment = "right"
verticalalignment = "center"
quadrant = 180
# 270
if theta_representative == np.deg2rad(270):
horizontalalignment = "center"
verticalalignment = "top"
quadrant = 270
# Quadrant 1
if np.deg2rad(0) < theta_representative < np.deg2rad(90):
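# The dump truncates the original here; the following body is a minimal sketch
# of the quadrant-1 branch (an assumption, not the original code), anchoring
# labels away from the plot center as the axis-aligned cases above do:
horizontalalignment = "left"
verticalalignment = "bottom"
quadrant = 1
# ... analogous branches would follow for quadrants 2-4, ending with
# return horizontalalignment, verticalalignment, quadrant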
import numpy as np
import sys
class Softmax():
def __init__(self, D, C, learning_rate=1e-3):
self.W = 0.001 * np.random.randn(D, C)
self.lr = learning_rate
def get_weights(self):
return self.W
def predict(self, X):
scores = np.dot(X, self.W)
pred = np.argmax(scores, axis=1)
return pred
def update_weights(self, grad):
self.W += -self.lr * grad
def vectorized_loss(self, X, y, reg):
"""
W - Weights (D x C) : (784 x 10)
X - Training images (N x D) : (120 x 784)
y - Training labels (N, ) : (120, )
reg - Regularizing constant
"""
dW = np.zeros_like(self.W)
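# The dump truncates the original here. A minimal sketch of how a vectorized
# softmax loss/gradient typically continues (an assumption, not the original):
#
# num_train = X.shape[0]
# scores = X.dot(self.W)
# scores -= scores.max(axis=1, keepdims=True)  # shift for numerical stability
# probs = np.exp(scores)
# probs /= probs.sum(axis=1, keepdims=True)  # row-wise softmax probabilities
# loss = -np.log(probs[np.arange(num_train), y]).mean() + reg * np.sum(self.W**2)
# dscores = probs
# dscores[np.arange(num_train), y] -= 1  # subtract 1 at the true class
# dW = X.T.dot(dscores) / num_train + 2 * reg * self.W  # data grad + L2 term
# return loss, dW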
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .data import Data
from .. import backend as bkd
from .. import config
from ..utils import get_num_args, run_if_all_none
class PDE(Data):
"""ODE or time-independent PDE solver.
Args:
geometry: Instance of ``Geometry``.
pde: A global PDE or a list of PDEs. ``None`` if no global PDE.
bcs: A boundary condition or a list of boundary conditions. Use ``[]`` if no
boundary condition.
num_domain (int): The number of training points sampled inside the domain.
num_boundary (int): The number of training points sampled on the boundary.
train_distribution (string): The distribution to sample training points. One of
the following: "uniform" (equispaced grid), "pseudo" (pseudorandom), "LHS"
(Latin hypercube sampling), "Halton" (Halton sequence), "Hammersley"
(Hammersley sequence), or "Sobol" (Sobol sequence).
anchors: A Numpy array of training points, in addition to the `num_domain` and
`num_boundary` sampled points.
exclusions: A Numpy array of points to be excluded for training.
solution: The reference solution.
num_test: The number of points sampled inside the domain for testing. The testing
points on the boundary are the same set of points used for training. If
``None``, then the training points will be used for testing.
auxiliary_var_function: A function that inputs `train_x` or `test_x` and outputs
auxiliary variables.
Warning:
The testing points include points inside the domain and points on the boundary,
and they may not have the same density, and thus the entire testing points may
not be uniformly distributed. As a result, if you have a reference solution
(`solution`) and would like to compute a metric such as
.. code-block:: python
Model.compile(metrics=["l2 relative error"])
then the metric may not be very accurate. To better compute a metric, you can
sample the points manually, and then use ``Model.predict()`` to predict the
solution on these points and compute the metric:
.. code-block:: python
x = geom.uniform_points(num, boundary=True)
y_true = ...
y_pred = model.predict(x)
error = dde.metrics.l2_relative_error(y_true, y_pred)
Attributes:
train_x_all: A Numpy array of all points for training. `train_x_all` is
unordered, and does not have duplication.
train_x: A Numpy array of the points fed into the network for training.
`train_x` is constructed from `train_x_all`, ordered from BCs to PDE, and
may have duplicate points.
train_x_bc: A Numpy array of the training points for BCs. `train_x_bc` is
constructed from `train_x_all` at the first step of training, by default it
won't be updated when `train_x_all` changes. To update `train_x_bc`, set it
to `None` and call `bc_points`, and then update the loss function by
``model.compile()``.
num_bcs (list): `num_bcs[i]` is the number of points for `bcs[i]`.
test_x: A Numpy array of the points fed into the network for testing, ordered
from BCs to PDE. The BC points are exactly the same points in `train_x_bc`.
train_aux_vars: Auxiliary variables that associate with `train_x`.
test_aux_vars: Auxiliary variables that associate with `test_x`.
"""
def __init__(
self,
geometry,
pde,
bcs,
num_domain=0,
num_boundary=0,
train_distribution="Sobol",
anchors=None,
exclusions=None,
solution=None,
num_test=None,
auxiliary_var_function=None,
):
self.geom = geometry
self.pde = pde
self.bcs = bcs if isinstance(bcs, (list, tuple)) else [bcs]
self.num_domain = num_domain
self.num_boundary = num_boundary
if train_distribution not in [
"uniform",
"pseudo",
"LHS",
"Halton",
"Hammersley",
"Sobol",
]:
raise ValueError(
"train_distribution == {} is not available choices.".format(
train_distribution
)
)
self.train_distribution = train_distribution
self.anchors = None if anchors is None else anchors.astype(config.real(np))
self.exclusions = exclusions
self.soln = solution
self.num_test = num_test
self.auxiliary_var_fn = auxiliary_var_function
# TODO: train_x_all is used for PDE losses. It is better to add train_x_pde explicitly.
self.train_x_all = None
self.train_x, self.train_y = None, None
self.train_x_bc = None
self.num_bcs = None
self.test_x, self.test_y = None, None
self.train_aux_vars, self.test_aux_vars = None, None
self.train_next_batch()
self.test()
def losses(self, targets, outputs, loss, model):
f = []
if self.pde is not None:
if get_num_args(self.pde) == 2:
f = self.pde(model.net.inputs, outputs)
elif get_num_args(self.pde) == 3:
if self.auxiliary_var_fn is None:
raise ValueError("Auxiliary variable function not defined.")
f = self.pde(model.net.inputs, outputs, model.net.auxiliary_vars)
if not isinstance(f, (list, tuple)):
f = [f]
if not isinstance(loss, (list, tuple)):
loss = [loss] * (len(f) + len(self.bcs))
elif len(loss) != len(f) + len(self.bcs):
raise ValueError(
"There are {} errors, but only {} losses.".format(
len(f) + len(self.bcs), len(loss)
)
)
bcs_start = np.cumsum([0] + self.num_bcs)
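# The dump truncates the original here. DeepXDE's published implementation
# continues roughly as below (paraphrased from memory, so treat it as an
# assumption): PDE residuals are matched against zeros, then each BC slice
# [bcs_start[i], bcs_start[i+1]) contributes its own error term.
#
# error_f = [fi[bcs_start[-1]:] for fi in f]
# losses = [loss[i](bkd.zeros_like(error), error)
#           for i, error in enumerate(error_f)]  # PDE residual losses
# for i, bc in enumerate(self.bcs):
#     beg, end = bcs_start[i], bcs_start[i + 1]
#     error = bc.error(self.train_x, model.net.inputs, outputs, beg, end)
#     losses.append(loss[len(error_f) + i](bkd.zeros_like(error), error))
# return losses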
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
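# Example (illustrative): for the centrosymmetric group P -1, a reflection
# (h, k, l) is equivalent to (-h, -k, -l), and with all translations zero the
# phase factors are unity:
#
# hkls, phases = space_groups['P -1'].symmetryEquivalentMillerIndices(
#     N.array([1, 2, 3]))
# # hkls -> [[ 1, 2, 3], [-1, -2, -3]]; phases -> [1.+0.j, 1.+0.j]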
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
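# NOTE (illustrative): F-centred groups such as F m m 2 repeat the base
# operations for each of the three centring vectors (0,1/2,1/2),
# (1/2,0,1/2) and (1/2,1/2,0), expanding four point-group operations into
# the sixteen triples above.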
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
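# NOTE (illustrative): the d-glides of F d d 2 carry quarter translations,
# so combining them with the centring vectors produces numerators over a
# denominator of 4 (e.g. (1/4,3/4,3/4)); sums are stored unreduced rather
# than folded back into [0, 1).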
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
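# NOTE (illustrative): body-centred (I) groups such as I m m 2 use the
# single centring vector (1/2, 1/2, 1/2), doubling the four base
# operations to the eight triples above.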
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
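# NOTE (illustrative): P m m m is symmorphic -- all eight operations of the
# mmm point group appear with zero translation, unlike the glide- and
# screw-containing groups that follow.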
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
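# NOTE (illustrative): the ':2' suffix denotes the origin-choice-2 setting
# of the International Tables. In these centrosymmetric lists the second
# four operations are the first four composed with inversion, so rotations
# and translation numerators appear negated; (0,-1/2,-1/2) is equivalent to
# (0,1/2,1/2) modulo the lattice. A hypothetical reduction into [0, 1):
#     frac = (1.0 * trans_num / trans_den) % 1.0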
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
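# NOTE (illustrative): each group is registered under both its IT number
# and its Hermann-Mauguin symbol, so the two lookups alias one object:
#     assert space_groups[68] is space_groups['C c c a :2']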
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
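# Note: for face-centred (F) groups such as 'F m m m' above, the eight
# point operations are repeated with each of the three centering
# translations (0,1/2,1/2), (1/2,0,1/2) and (1/2,1/2,0), giving 32
# operations in total.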
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(71, 'I m m m', transformations)
space_groups[71] = sg
space_groups['I m m m'] = sg
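# Note: body-centred (I) groups such as 'I m m m' above list each point
# operation twice, once as-is and once shifted by the centering
# translation (1/2,1/2,1/2).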
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(72, 'I b a m', transformations)
space_groups[72] = sg
space_groups['I b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(73, 'I b c a', transformations)
space_groups[73] = sg
space_groups['I b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(74, 'I m m a', transformations)
space_groups[74] = sg
space_groups['I m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(75, 'P 4', transformations)
space_groups[75] = sg
space_groups['P 4'] = sg
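# --- Illustrative sketch, not part of the generated table ---
# Assuming N is NumPy under Python 3, this hypothetical helper shows how a
# single (rot, trans_num, trans_den) entry would be applied to a fractional
# coordinate: x' = rot.x + trans_num/trans_den.
def _apply_symmetry_op(rot, trans_num, trans_den, point):
    # True division of the integer arrays yields the fractional
    # translation vector element-wise.
    return N.dot(rot, point) + trans_num / trans_den

# Example (commented out to leave module state untouched): the fourfold
# rotation of 'P 4' maps (x, y, z) to (-y, x, z).
# _apply_symmetry_op(*transformations[1], N.array([0.1, 0.2, 0.3]))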
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(76, 'P 41', transformations)
space_groups[76] = sg
space_groups['P 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(77, 'P 42', transformations)
space_groups[77] = sg
space_groups['P 42'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(78, 'P 43', transformations)
space_groups[78] = sg
space_groups['P 43'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(79, 'I 4', transformations)
space_groups[79] = sg
space_groups['I 4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(80, 'I 41', transformations)
space_groups[80] = sg
space_groups['I 41'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(81, 'P -4', transformations)
space_groups[81] = sg
space_groups['P -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(82, 'I -4', transformations)
space_groups[82] = sg
space_groups['I -4'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(83, 'P 4/m', transformations)
space_groups[83] = sg
space_groups['P 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(84, 'P 42/m', transformations)
space_groups[84] = sg
space_groups['P 42/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(85, 'P 4/n :2', transformations)
space_groups[85] = sg
space_groups['P 4/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(86, 'P 42/n :2', transformations)
space_groups[86] = sg
space_groups['P 42/n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(87, 'I 4/m', transformations)
space_groups[87] = sg
space_groups['I 4/m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(88, 'I 41/a :2', transformations)
space_groups[88] = sg
space_groups['I 41/a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(89, 'P 4 2 2', transformations)
space_groups[89] = sg
space_groups['P 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(90, 'P 4 21 2', transformations)
space_groups[90] = sg
space_groups['P 4 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(91, 'P 41 2 2', transformations)
space_groups[91] = sg
space_groups['P 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(92, 'P 41 21 2', transformations)
space_groups[92] = sg
space_groups['P 41 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(93, 'P 42 2 2', transformations)
space_groups[93] = sg
space_groups['P 42 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(94, 'P 42 21 2', transformations)
space_groups[94] = sg
space_groups['P 42 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,3])
trans_den = N.array([1,1,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(95, 'P 43 2 2', transformations)
space_groups[95] = sg
space_groups['P 43 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(96, 'P 43 21 2', transformations)
space_groups[96] = sg
space_groups['P 43 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(97, 'I 4 2 2', transformations)
space_groups[97] = sg
space_groups['I 4 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(98, 'I 41 2 2', transformations)
space_groups[98] = sg
space_groups['I 41 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(99, 'P 4 m m', transformations)
space_groups[99] = sg
space_groups['P 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(100, 'P 4 b m', transformations)
space_groups[100] = sg
space_groups['P 4 b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(101, 'P 42 c m', transformations)
space_groups[101] = sg
space_groups['P 42 c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(102, 'P 42 n m', transformations)
space_groups[102] = sg
space_groups['P 42 n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(103, 'P 4 c c', transformations)
space_groups[103] = sg
space_groups['P 4 c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(104, 'P 4 n c', transformations)
space_groups[104] = sg
space_groups['P 4 n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(105, 'P 42 m c', transformations)
space_groups[105] = sg
space_groups['P 42 m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(106, 'P 42 b c', transformations)
space_groups[106] = sg
space_groups['P 42 b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(107, 'I 4 m m', transformations)
space_groups[107] = sg
space_groups['I 4 m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(108, 'I 4 c m', transformations)
space_groups[108] = sg
space_groups['I 4 c m'] = sg
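# Note, observed from the data: translations obtained by combining a glide
# or screw component with the I-centring vector (1/2, 1/2, 1/2) are stored
# unreduced, e.g. trans_num/trans_den = (1/2, 1/2, 1) in 'I 4 c m' above,
# which equals (1/2, 1/2, 0) modulo a lattice translation.  A minimal
# sketch (illustrative only) for wrapping such a translation into [0, 1):
def reduce_translation(trans_num, trans_den):
    # Elementwise num/den, wrapped into the half-open interval [0, 1).
    return (N.array(trans_num, dtype=float)/N.array(trans_den, dtype=float)) % 1.0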
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(109, 'I 41 m d', transformations)
space_groups[109] = sg
space_groups['I 41 m d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(110, 'I 41 c d', transformations)
space_groups[110] = sg
space_groups['I 41 c d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(111, 'P -4 2 m', transformations)
space_groups[111] = sg
space_groups['P -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(112, 'P -4 2 c', transformations)
space_groups[112] = sg
space_groups['P -4 2 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(113, 'P -4 21 m', transformations)
space_groups[113] = sg
space_groups['P -4 21 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(114, 'P -4 21 c', transformations)
space_groups[114] = sg
space_groups['P -4 21 c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(115, 'P -4 m 2', transformations)
space_groups[115] = sg
space_groups['P -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(116, 'P -4 c 2', transformations)
space_groups[116] = sg
space_groups['P -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(117, 'P -4 b 2', transformations)
space_groups[117] = sg
space_groups['P -4 b 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(118, 'P -4 n 2', transformations)
space_groups[118] = sg
space_groups['P -4 n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(119, 'I -4 m 2', transformations)
space_groups[119] = sg
space_groups['I -4 m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(120, 'I -4 c 2', transformations)
space_groups[120] = sg
space_groups['I -4 c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(121, 'I -4 2 m', transformations)
space_groups[121] = sg
space_groups['I -4 2 m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,3])
trans_den = N.array([2,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,5])
trans_den = N.array([1,2,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(122, 'I -4 2 d', transformations)
space_groups[122] = sg
space_groups['I -4 2 d'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(123, 'P 4/m m m', transformations)
space_groups[123] = sg
space_groups['P 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(124, 'P 4/m c c', transformations)
space_groups[124] = sg
space_groups['P 4/m c c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(125, 'P 4/n b m :2', transformations)
space_groups[125] = sg
space_groups['P 4/n b m :2'] = sg
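# Note on naming: the ':2' suffix in 'P 4/n b m :2' is the International
# Tables origin-choice marker (origin choice 2, with the origin at a centre
# of inversion).  In these settings some translations are stored with
# negative components, e.g. trans_num/trans_den = (-1/2, 0, 0) above; these
# are equivalent to their positive counterparts modulo lattice translations
# and wrap correctly under the mod-1 reduction sketched after 'I 4 c m'.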
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(126, 'P 4/n n c :2', transformations)
space_groups[126] = sg
space_groups['P 4/n n c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(127, 'P 4/m b m', transformations)
space_groups[127] = sg
space_groups['P 4/m b m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(128, 'P 4/m n c', transformations)
space_groups[128] = sg
space_groups['P 4/m n c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(129, 'P 4/n m m :2', transformations)
space_groups[129] = sg
space_groups['P 4/n m m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(130, 'P 4/n c c :2', transformations)
space_groups[130] = sg
space_groups['P 4/n c c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(131, 'P 42/m m c', transformations)
space_groups[131] = sg
space_groups['P 42/m m c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(132, 'P 42/m c m', transformations)
space_groups[132] = sg
space_groups['P 42/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(133, 'P 42/n b c :2', transformations)
space_groups[133] = sg
space_groups['P 42/n b c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(134, 'P 42/n n m :2', transformations)
space_groups[134] = sg
space_groups['P 42/n n m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(135, 'P 42/m b c', transformations)
space_groups[135] = sg
space_groups['P 42/m b c'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(136, 'P 42/m n m', transformations)
space_groups[136] = sg
space_groups['P 42/m n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(137, 'P 42/n m c :2', transformations)
space_groups[137] = sg
space_groups['P 42/n m c :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(138, 'P 42/n c m :2', transformations)
space_groups[138] = sg
space_groups['P 42/n c m :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(139, 'I 4/m m m', transformations)
space_groups[139] = sg
space_groups['I 4/m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(140, 'I 4/m c m', transformations)
space_groups[140] = sg
space_groups['I 4/m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(141, 'I 41/a m d :2', transformations)
space_groups[141] = sg
space_groups['I 41/a m d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-3,-3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,5,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,5,5])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,-1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,0,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,-1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,1,0,1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(142, 'I 41/a c d :2', transformations)
space_groups[142] = sg
space_groups['I 41/a c d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(143, 'P 3', transformations)
space_groups[143] = sg
space_groups['P 3'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(144, 'P 31', transformations)
space_groups[144] = sg
space_groups['P 31'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([0,-1,0,1,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,2])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,1,0,-1,0,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,3])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(145, 'P 32', transformations)
space_groups[145] = sg
space_groups['P 32'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = | N.array([1,1,1]) | numpy.array |
import copy
import math
import datetime
import numpy as np
from kbdiffdi.features import feature
import warnings
warnings.filterwarnings('ignore')  # np.warnings was removed in NumPy 1.24+; use the stdlib module
class FFDI(object):
def __init__(self):
self.KBDI = None
self.prcp = None
self.temp = None
self.wind = None
self.rel_hum = None
def fit(self, initKBDI, initprcp, inittemp, initwind, initrelhum):
self.KBDI = initKBDI
self.prcp = initprcp
self.temp = inittemp
self.wind = initwind
self.rel_hum = initrelhum
x = self.calculate_sig_rain_event()
x_lim = self.calc_x_lim()
DF = self.griffith_drought_factor(x, x_lim)
FFDI = self.forest_fire_danger_index(DF)
return FFDI, DF
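    # Hedged usage sketch (the argument objects below are assumptions about the
    # kbdiffdi feature wrappers, which are not defined in this file):
    #   model = FFDI()
    #   ffdi_grid, drought_factor = model.fit(kbdi, prcp, temp, wind, rh)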
def calculate_sig_rain_event(self):
"""
See Finkele et al. 2006 and Lucas 2010 for a detailed explanation. Basically, in order to calculate
the drought factor, a calculation of "significant rainfall events" during the past
20 days needs to be calculated. A rainfall event is defined as a set of consecutive days
each with rainfall above 2 mm. the rainfall event amount P (not be confused with daily precip)
is the sum of the rainfall within the event. A rainfall event can be a single rain day.
Event age N (not to be confused with days since last rain) is defined as the number of
days since the day with the largest daily rainfall amount within the rain event.
Precip data should be in millimeters
"""
#daysagolist = []
#rainsumlist = []
n = 0
        window = 20 # look at the past 20 days, counting today as day 0, so the oldest day considered is 19 days ago (20 days in total)
x_3d_arr = None
while n < len(self.prcp.data):
if n < window:
                prev_rain_cube = np.array(self.prcp.data[:n+1]) # use all available past data, since there have not yet been 20 days of data
            else:
                prev_rain_cube = np.array(self.prcp.data[n+1-window:n+1]) # the last 20 days up to and including today (inclusive slice, hence the n+1)
# now that there is a datastructure holding the previous 20 days of rain data,
# iterate through the prevRainCube, and update the sigEvent data to hold
# the relevant information for the significant rain event in the past 20 days
prev_rain_cube = np.where(prev_rain_cube < 2, 0, prev_rain_cube) # rain events need to have more than 2 mm of precipitation
days_ago = np.zeros(shape=(len(prev_rain_cube), len(prev_rain_cube[0]), len(prev_rain_cube[0][0]), len(prev_rain_cube[0][0][0])))
rain_sum = np.zeros(shape=(len(prev_rain_cube), len(prev_rain_cube[0]), len(prev_rain_cube[0][0]), len(prev_rain_cube[0][0][0])))
running_total = np.zeros(shape=(len(self.prcp.data[0][0]), len(self.prcp.data[0][0][0])))
cur_max = np.zeros(shape=(len(self.prcp.data[0][0]), len(self.prcp.data[0][0][0])))
cur_max_idx = np.zeros(shape=(len(self.prcp.data[0][0]), len(self.prcp.data[0][0][0])))
for layer in range(len(prev_rain_cube)):
running_total = running_total + prev_rain_cube[layer]
rain_sum[layer] = np.where(prev_rain_cube[layer] == 0, 0, rain_sum[layer])
days_ago[layer] = np.where(prev_rain_cube[layer] == 0, len(prev_rain_cube)-layer-1, days_ago[layer])
# first day of 20
if layer == 0 and layer != len(prev_rain_cube)-1:
# a consecutive event, start the tallying
running_total = np.where((prev_rain_cube[layer] != 0) & (prev_rain_cube[layer+1] != 0), prev_rain_cube[layer], running_total)
cur_max_idx = | np.where((prev_rain_cube[layer] != 0) & (prev_rain_cube[layer+1] !=0), layer, cur_max_idx) | numpy.where |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf import scf
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import eom_gccsd
from pyscf.cc import addons
########################################
# EOM-IP-CCSD
########################################
class EOMIP(eom_gccsd.EOMIP):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMIP.__init__(self, gcc)
########################################
# EOM-EA-CCSD
########################################
class EOMEA(eom_gccsd.EOMEA):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMEA.__init__(self, gcc)
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
'''Calculate N-electron neutral excitations via EOM-EE-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
spinvec_size = eom.vector_size()
nroots = min(nroots, spinvec_size)
diag_ee, diag_sf = eom.get_diag(imds)
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
raise NotImplementedError
#TODO: initial guess from GCCSD EOM amplitudes
#orbspin = scf.addons.get_ghf_orbspin(eris.mo_coeff)
#nmo = np.sum(eom.nmo)
#nocc = np.sum(eom.nocc)
#for g in guess:
# r1, r2 = eom_gccsd.vector_to_amplitudes_ee(g, nmo, nocc)
# r1aa = r1[orbspin==0][:,orbspin==0]
# r1ab = r1[orbspin==0][:,orbspin==1]
# if abs(r1aa).max() > 1e-7:
# r1 = addons.spin2spatial(r1, orbspin)
# r2 = addons.spin2spatial(r2, orbspin)
# guess_ee.append(eom.amplitudes_to_vector(r1, r2))
# else:
# r1 = spin2spatial_eomsf(r1, orbspin)
# r2 = spin2spatial_eomsf(r2, orbspin)
# guess_sf.append(amplitudes_to_vector_eomsf(r1, r2))
# r1 = r2 = r1aa = r1ab = g = None
#nroots_ee = len(guess_ee)
#nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
def eomee_sub(cls, nroots, guess, diag):
ee_sub = cls(eom._cc)
ee_sub.__dict__.update(eom.__dict__)
e, v = ee_sub.kernel(nroots, koopmans, guess, eris, imds, diag=diag)
if nroots == 1:
e, v = [e], [v]
ee_sub.converged = [ee_sub.converged]
return list(ee_sub.converged), list(e), list(v)
e0 = e1 = []
v0 = v1 = []
conv0 = conv1 = []
if nroots_ee > 0:
conv0, e0, v0 = eomee_sub(EOMEESpinKeep, nroots_ee, guess_ee, diag_ee)
if nroots_sf > 0:
conv1, e1, v1 = eomee_sub(EOMEESpinFlip, nroots_sf, guess_sf, diag_sf)
e = np.hstack([e0,e1])
idx = e.argsort()
e = e[idx]
conv = conv0 + conv1
conv = [conv[x] for x in idx]
v = v0 + v1
v = [v[x] for x in idx]
if nroots == 1:
conv = conv[0]
e = e[0]
v = v[0]
eom.converged = conv
eom.e = e
eom.v = v
return eom.e, eom.v
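# Hedged usage sketch (commented out; the molecule, basis, and root count are
# illustrative assumptions, not part of this module):
#   from pyscf import gto
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0.25 0', basis='sto-3g')
#   mf = scf.UHF(mol).run()
#   mycc = uccsd.UCCSD(mf).run()
#   eom = EOMEE(mycc)             # EOMEE is defined further below
#   e, v = eom.kernel(nroots=4)   # dispatches to eeccsd() above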
def eomee_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
eom.converged, eom.e, eom.v \
= eom_rccsd.kernel(eom, nroots, koopmans, guess, imds=imds, diag=diag)
return eom.e, eom.v
def eomsf_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
'''Spin flip EOM-EE-CCSD
'''
return eomee_ccsd(eom, nroots, koopmans, guess, eris, imds, diag)
amplitudes_to_vector_ee = uccsd.amplitudes_to_vector
vector_to_amplitudes_ee = uccsd.vector_to_amplitudes
def amplitudes_to_vector_eomsf(t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
baaa = np.take(t2baaa.reshape(noccb*nocca,nvira*nvira),
vtrila[0]*nvira+vtrila[1], axis=1)
abbb = np.take(t2abbb.reshape(nocca*noccb,nvirb*nvirb),
vtrilb[0]*nvirb+vtrilb[1], axis=1)
vector = np.hstack((t1ab.ravel(), t1ba.ravel(),
baaa.ravel(), t2aaba[otrila].ravel(),
abbb.ravel(), t2bbab[otrilb].ravel()))
return vector
def vector_to_amplitudes_eomsf(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
t1ab = vector[:nocca*nvirb].reshape(nocca,nvirb).copy()
t1ba = vector[nocca*nvirb:nocca*nvirb+noccb*nvira].reshape(noccb,nvira).copy()
pvec = vector[t1ab.size+t1ba.size:]
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=np.int32)
vidxab = np.arange(nvira*nvirb, dtype=np.int32)
v = pvec[:nbaaa].reshape(noccb*nocca,-1)
lib.takebak_2d(t2baaa, v, oidxab, vtrila[0]*nvira+vtrila[1])
lib.takebak_2d(t2baaa,-v, oidxab, vtrila[1]*nvira+vtrila[0])
v = pvec[nbaaa:nbaaa+naaba].reshape(-1,nvirb*nvira)
lib.takebak_2d(t2aaba, v, otrila[0]*nocca+otrila[1], vidxab)
lib.takebak_2d(t2aaba,-v, otrila[1]*nocca+otrila[0], vidxab)
v = pvec[nbaaa+naaba:nbaaa+naaba+nabbb].reshape(nocca*noccb,-1)
lib.takebak_2d(t2abbb, v, oidxab, vtrilb[0]*nvirb+vtrilb[1])
lib.takebak_2d(t2abbb,-v, oidxab, vtrilb[1]*nvirb+vtrilb[0])
v = pvec[nbaaa+naaba+nabbb:].reshape(-1,nvira*nvirb)
lib.takebak_2d(t2bbab, v, otrilb[0]*noccb+otrilb[1], vidxab)
lib.takebak_2d(t2bbab,-v, otrilb[1]*noccb+otrilb[0], vidxab)
t2baaa = t2baaa.reshape(noccb,nocca,nvira,nvira)
t2aaba = t2aaba.reshape(nocca,nocca,nvirb,nvira)
t2abbb = t2abbb.reshape(nocca,noccb,nvirb,nvirb)
t2bbab = t2bbab.reshape(noccb,noccb,nvira,nvirb)
return (t1ab,t1ba), (t2baaa, t2aaba, t2abbb, t2bbab)
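# Minimal self-check for the packing scheme above (a sketch with made-up
# dimensions; the antisymmetrization mirrors what the packed blocks assume).
def _check_eomsf_vector_roundtrip(nocca=3, noccb=2, nvira=4, nvirb=5):
    rng = np.random.RandomState(0)
    t1 = (rng.rand(nocca, nvirb), rng.rand(noccb, nvira))
    t2baaa = rng.rand(noccb, nocca, nvira, nvira)
    t2baaa = t2baaa - t2baaa.transpose(0, 1, 3, 2)  # antisymmetric in (a,a)
    t2aaba = rng.rand(nocca, nocca, nvirb, nvira)
    t2aaba = t2aaba - t2aaba.transpose(1, 0, 2, 3)  # antisymmetric in (i,i)
    t2abbb = rng.rand(nocca, noccb, nvirb, nvirb)
    t2abbb = t2abbb - t2abbb.transpose(0, 1, 3, 2)
    t2bbab = rng.rand(noccb, noccb, nvira, nvirb)
    t2bbab = t2bbab - t2bbab.transpose(1, 0, 2, 3)
    vec = amplitudes_to_vector_eomsf(t1, (t2baaa, t2aaba, t2abbb, t2bbab))
    r1, r2 = vector_to_amplitudes_eomsf(vec, (nocca+nvira, noccb+nvirb),
                                        (nocca, noccb))
    assert all(abs(a - b).max() < 1e-12 for a, b in zip(r1, t1))
    assert all(abs(a - b).max() < 1e-12
               for a, b in zip(r2, (t2baaa, t2aaba, t2abbb, t2bbab)))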
def spatial2spin_eomsf(rx, orbspin):
'''Convert EOM spatial R1,R2 to spin-orbital R1,R2'''
if len(rx) == 2: # r1
r1ab, r1ba = rx
nocca, nvirb = r1ab.shape
noccb, nvira = r1ba.shape
else:
r2baaa,r2aaba,r2abbb,r2bbab = rx
noccb, nocca, nvira = r2baaa.shape[:3]
nvirb = r2aaba.shape[2]
nocc = nocca + noccb
nvir = nvira + nvirb
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
if len(rx) == 2: # r1
r1 = np.zeros((nocc,nvir), dtype=r1ab.dtype)
lib.takebak_2d(r1, r1ab, idxoa, idxvb)
lib.takebak_2d(r1, r1ba, idxob, idxva)
return r1
else:
r2 = np.zeros((nocc**2,nvir**2), dtype=r2aaba.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2baaa = r2baaa.reshape(noccb*nocca,nvira*nvira)
r2aaba = r2aaba.reshape(nocca*nocca,nvirb*nvira)
r2abbb = r2abbb.reshape(nocca*noccb,nvirb*nvirb)
r2bbab = r2bbab.reshape(noccb*noccb,nvira*nvirb)
lib.takebak_2d(r2, r2baaa, idxoba.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2abbb, idxoab.ravel(), idxvbb.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2baaa, idxoab.T.ravel(), idxvaa.T.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.T.ravel(), idxvab.T.ravel())
lib.takebak_2d(r2, r2abbb, idxoba.T.ravel(), idxvbb.T.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.T.ravel(), idxvba.T.ravel())
return r2.reshape(nocc,nocc,nvir,nvir)
def spin2spatial_eomsf(rx, orbspin):
'''Convert EOM spin-orbital R1,R2 to spatial R1,R2'''
if rx.ndim == 2: # r1
nocc, nvir = rx.shape
else:
nocc, nvir = rx.shape[1:3]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocca = len(idxoa)
noccb = len(idxob)
nvira = len(idxva)
nvirb = len(idxvb)
if rx.ndim == 2:
r1ab = lib.take_2d(rx, idxoa, idxvb)
r1ba = lib.take_2d(rx, idxob, idxva)
return r1ab, r1ba
else:
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = rx.reshape(nocc**2,nvir**2)
r2baaa = lib.take_2d(r2, idxoba.ravel(), idxvaa.ravel())
r2aaba = lib.take_2d(r2, idxoaa.ravel(), idxvba.ravel())
r2abbb = lib.take_2d(r2, idxoab.ravel(), idxvbb.ravel())
r2bbab = lib.take_2d(r2, idxobb.ravel(), idxvab.ravel())
r2baaa = r2baaa.reshape(noccb,nocca,nvira,nvira)
r2aaba = r2aaba.reshape(nocca,nocca,nvirb,nvira)
r2abbb = r2abbb.reshape(nocca,noccb,nvirb,nvirb)
r2bbab = r2bbab.reshape(noccb,noccb,nvira,nvirb)
return r2baaa,r2aaba,r2abbb,r2bbab
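# Companion sketch for the two converters above, assuming a strictly
# alternating alpha/beta orbspin pattern (an illustrative choice only).
def _check_eomsf_spin_spatial_roundtrip(nocca=2, noccb=2, nvira=3, nvirb=3):
    nocc, nvir = nocca + noccb, nvira + nvirb
    orbspin = np.zeros(nocc + nvir, dtype=int)
    orbspin[1::2] = 1
    r1ab = np.random.rand(nocca, nvirb)
    r1ba = np.random.rand(noccb, nvira)
    r1 = spatial2spin_eomsf((r1ab, r1ba), orbspin)
    back_ab, back_ba = spin2spatial_eomsf(r1, orbspin)
    assert abs(back_ab - r1ab).max() < 1e-12
    assert abs(back_ba - r1ba).max() < 1e-12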
# Ref: <NAME>, and <NAME>, J. Chem. Theory Comput. 10, 5567 (2014), Eqs. (9)-(10)
# Note: the last line in Eq. (10) is superfluous.
# See, e.g., Gwaltney, Nooijen, and Bartlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(eom, vector, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ee(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris_vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris_VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris_vvVV)
tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
Hr2aa, Hr2ab, Hr2bb = eom._cc._add_vvvv(None, (tau2aa,tau2ab,tau2bb), eris)
Hr2aa *= .5
Hr2bb *= .5
tau2aa = tau2ab = tau2bb = None
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa += lib.einsum('mnij,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb += lib.einsum('mnij,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', imds.woOoO, r2ab)
Hr2aa += lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb += lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab += lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab += lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa -= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb -= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab -= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab -= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = uccsd.make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tau2aa = tauaa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
taubb = uccsd.make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tau2bb = taubb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
tauab = uccsd.make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
    eris_ovov = eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mnie,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('mNiE,mNaE->ia', imds.woOoV, r2ab)
Hr1b-= 0.5*lib.einsum('mnie,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MnIe,nMeA->IA', imds.wOoOv, r2ab)
tmpa = lib.einsum('mnie,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('nMiE,ME->ni', imds.woOoV, r1b)
tmpb = lib.einsum('mnie,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NmIe,me->NI', imds.wOoOv, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = amplitudes_to_vector_ee((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(eom, vector, imds=None):
'''Spin flip EOM-CCSD'''
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_eomsf(vector, (nmoa,nmob), (nocca,noccb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('nMjI,Mnab->Ijab', imds.woOoO, r2baaa)
Hr2aaba = .25*lib.einsum('mnij,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('mNiJ,mNAB->iJAB', imds.woOoO, r2abbb)
Hr2bbab = .25*lib.einsum('MNIJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += lib.einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1ba += lib.einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += lib.einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1ab += lib.einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += lib.einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1ab += lib.einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += lib.einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1ba += lib.einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mnie,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('mNiE,mNAE->iA', imds.woOoV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MNIE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MnIe,Mnae->Ia', imds.wOoOv, r2baaa)
tmp1ab = lib.einsum('MnIe,Me->nI', imds.wOoOv, r1ba)
tmp1ba = lib.einsum('mNiE,mE->Ni', imds.woOoV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
tmp1baaa = lib.einsum('nfME,ijEf->Mnij', eris_ovOV, tau2aaba)
tmp1aaba = lib.einsum('menf,Ijef->mnIj', eris_ovov, tau2baaa)
tmp1abbb = lib.einsum('meNF,IJeF->mNIJ', eris_ovOV, tau2bbab)
tmp1bbab = lib.einsum('MENF,iJEF->MNiJ', eris_OVOV, tau2abbb)
Hr2baaa += 0.5*.5*lib.einsum('mnIj,mnab->Ijab', tmp1aaba, tauaa)
Hr2bbab += .5*lib.einsum('nMIJ,nMaB->IJaB', tmp1abbb, tauab)
Hr2aaba += .5*lib.einsum('Nmij,mNbA->ijAb', tmp1baaa, tauab)
Hr2abbb += 0.5*.5*lib.einsum('MNiJ,MNAB->iJAB', tmp1bbab, taubb)
tauaa = tauab = taubb = None
tmpab = lib.einsum('menf,Imef->nI', eris_ovov, r2baaa)
tmpab -= lib.einsum('nfME,IMfE->nI', eris_ovOV, r2bbab)
tmpba = lib.einsum('MENF,iMEF->Ni', eris_OVOV, r2abbb)
tmpba -= lib.einsum('meNF,imFe->Ni', eris_ovOV, r2aaba)
Hr1ab += np.einsum('NA,Ni->iA', t1b, tmpba)
Hr1ba += np.einsum('na,nI->Ia', t1a, tmpab)
Hr2baaa -= lib.einsum('mJ,imab->Jiab', tmpab*.5, t2aa)
Hr2bbab -= lib.einsum('mJ,mIaB->IJaB', tmpab*.5, t2ab) * 2
Hr2aaba -= lib.einsum('Mj,iMbA->ijAb', tmpba*.5, t2ab) * 2
Hr2abbb -= lib.einsum('Mj,IMAB->jIAB', tmpba*.5, t2bb)
tmp1ab = np.einsum('meNF,mF->eN', eris_ovOV, r1ab)
tmp1ba = np.einsum('nfME,Mf->En', eris_ovOV, r1ba)
tmpab = np.einsum('eN,NB->eB', tmp1ab, t1b)
tmpba = np.einsum('En,nb->Eb', tmp1ba, t1a)
tmpab -= lib.einsum('menf,mnBf->eB', eris_ovov, r2aaba)
tmpab += lib.einsum('meNF,mNFB->eB', eris_ovOV, r2abbb)
tmpba -= lib.einsum('MENF,MNbF->Eb', eris_OVOV, r2bbab)
tmpba += lib.einsum('nfME,Mnfb->Eb', eris_ovOV, r2baaa)
Hr2baaa -= lib.einsum('Eb,jIaE->Ijab', tmpba*.5, t2ab) * 2
Hr2bbab -= lib.einsum('Eb,IJAE->IJbA', tmpba*.5, t2bb)
Hr2aaba -= lib.einsum('eB,ijae->ijBa', tmpab*.5, t2aa)
Hr2abbb -= lib.einsum('eB,iJeA->iJAB', tmpab*.5, t2ab) * 2
eris_ovov = eris_OVOV = eris_ovOV = None
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2baaa += .5*lib.einsum('Ijef,aebf->Ijab', tau2baaa, eris_vvvv)
#:Hr2abbb += .5*lib.einsum('iJEF,AEBF->iJAB', tau2abbb, eris_VVVV)
#:Hr2bbab += .5*lib.einsum('IJeF,aeBF->IJaB', tau2bbab, eris_vvVV)
#:Hr2aaba += .5*lib.einsum('ijEf,bfAE->ijAb', tau2aaba, eris_vvVV)
fakeri = uccsd._ChemistsERIs()
fakeri.mol = eris.mol
if eom._cc.direct:
orbva = eris.mo_coeff[0][:,nocca:]
orbvb = eris.mo_coeff[1][:,noccb:]
tau2baaa = lib.einsum('ijab,pa,qb->ijpq', tau2baaa, .5*orbva, orbva)
tmp = eris._contract_vvvv_t2(eom._cc, tau2baaa, True)
Hr2baaa += lib.einsum('ijpq,pa,qb->ijab', tmp, orbva.conj(), orbva.conj())
tmp = None
tau2abbb = lib.einsum('ijab,pa,qb->ijpq', tau2abbb, .5*orbvb, orbvb)
tmp = eris._contract_VVVV_t2(eom._cc, tau2abbb, True)
Hr2abbb += lib.einsum('ijpq,pa,qb->ijab', tmp, orbvb.conj(), orbvb.conj())
tmp = None
else:
tau2baaa *= .5
Hr2baaa += eris._contract_vvvv_t2(eom._cc, tau2baaa, False)
tau2abbb *= .5
Hr2abbb += eris._contract_VVVV_t2(eom._cc, tau2abbb, False)
tau2bbab *= .5
Hr2bbab += eom._cc._add_vvVV(None, tau2bbab, eris)
tau2aaba = tau2aaba.transpose(0,1,3,2)*.5
Hr2aaba += eom._cc._add_vvVV(None, tau2aaba, eris).transpose(0,1,3,2)
Hr2baaa = Hr2baaa - Hr2baaa.transpose(0,1,3,2)
Hr2bbab = Hr2bbab - Hr2bbab.transpose(1,0,2,3)
Hr2abbb = Hr2abbb - Hr2abbb.transpose(0,1,3,2)
Hr2aaba = Hr2aaba - Hr2aaba.transpose(1,0,2,3)
vector = amplitudes_to_vector_eomsf((Hr1ab, Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vector
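# Usage note (an inference, not original commentary): the eomee_ccsd/eomsf_ccsd
# drivers hand these matvec routines to the iterative Davidson solver inside
# eom_rccsd.kernel, so the EOM Hamiltonian is only ever applied to trial
# vectors and never built as an explicit matrix.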
def eeccsd_diag(eom, imds=None):
if imds is None: imds = eom.make_imds()
eris = imds.eris
t1, t2 = imds.t1, imds.t2
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
tauaa, tauab, taubb = uccsd.make_tau(t2, t1, t1)
nocca, noccb, nvira, nvirb = t2ab.shape
Foa = imds.Fooa.diagonal()
Fob = imds.Foob.diagonal()
Fva = imds.Fvva.diagonal()
Fvb = imds.Fvvb.diagonal()
Wovaa = np.einsum('iaai->ia', imds.wovvo)
Wovbb = np.einsum('iaai->ia', imds.wOVVO)
Wovab = np.einsum('iaai->ia', imds.woVVo)
Wovba = np.einsum('iaai->ia', imds.wOvvO)
Hr1aa = lib.direct_sum('-i+a->ia', Foa, Fva)
Hr1bb = lib.direct_sum('-i+a->ia', Fob, Fvb)
Hr1ab = lib.direct_sum('-i+a->ia', Foa, Fvb)
Hr1ba = lib.direct_sum('-i+a->ia', Fob, Fva)
Hr1aa += Wovaa
Hr1bb += Wovbb
Hr1ab += Wovab
Hr1ba += Wovba
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Wvvaa = .5*np.einsum('mnab,manb->ab', tauaa, eris_ovov)
Wvvbb = .5*np.einsum('mnab,manb->ab', taubb, eris_OVOV)
Wvvab = np.einsum('mNaB,maNB->aB', tauab, eris_ovOV)
ijb = np.einsum('iejb,ijbe->ijb', ovov, t2aa)
IJB = np.einsum('iejb,ijbe->ijb', OVOV, t2bb)
iJB =-np.einsum('ieJB,iJeB->iJB', eris_ovOV, t2ab)
Ijb =-np.einsum('jbIE,jIbE->Ijb', eris_ovOV, t2ab)
iJb =-np.einsum('ibJE,iJbE->iJb', eris_ovOV, t2ab)
IjB =-np.einsum('jeIB,jIeB->IjB', eris_ovOV, t2ab)
jab = np.einsum('kajb,jkab->jab', ovov, t2aa)
JAB = np.einsum('kajb,jkab->jab', OVOV, t2bb)
jAb =-np.einsum('jbKA,jKbA->jAb', eris_ovOV, t2ab)
JaB =-np.einsum('kaJB,kJaB->JaB', eris_ovOV, t2ab)
jaB =-np.einsum('jaKB,jKaB->jaB', eris_ovOV, t2ab)
JAb =-np.einsum('kbJA,kJbA->JAb', eris_ovOV, t2ab)
eris_ovov = eris_ovOV = eris_OVOV = ovov = OVOV = None
Hr2aa = lib.direct_sum('ijb+a->ijba', ijb, Fva)
Hr2bb = lib.direct_sum('ijb+a->ijba', IJB, Fvb)
Hr2ab = lib.direct_sum('iJb+A->iJbA', iJb, Fvb)
Hr2ab+= lib.direct_sum('iJB+a->iJaB', iJB, Fva)
Hr2aa+= lib.direct_sum('-i+jab->ijab', Foa, jab)
Hr2bb+= lib.direct_sum('-i+jab->ijab', Fob, JAB)
Hr2ab+= lib.direct_sum('-i+JaB->iJaB', Foa, JaB)
Hr2ab+= lib.direct_sum('-I+jaB->jIaB', Fob, jaB)
Hr2aa = Hr2aa + Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa + Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb + Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb + Hr2bb.transpose(1,0,2,3)
Hr2aa *= .5
Hr2bb *= .5
Hr2baaa = lib.direct_sum('Ijb+a->Ijba', Ijb, Fva)
Hr2aaba = lib.direct_sum('ijb+A->ijAb', ijb, Fvb)
Hr2aaba+= Fva.reshape(1,1,1,-1)
Hr2abbb = lib.direct_sum('iJB+A->iJBA', iJB, Fvb)
Hr2bbab = lib.direct_sum('IJB+a->IJaB', IJB, Fva)
Hr2bbab+= Fvb.reshape(1,1,1,-1)
Hr2baaa = Hr2baaa + Hr2baaa.transpose(0,1,3,2)
Hr2abbb = Hr2abbb + Hr2abbb.transpose(0,1,3,2)
Hr2baaa+= lib.direct_sum('-I+jab->Ijab', Fob, jab)
Hr2baaa-= Foa.reshape(1,-1,1,1)
tmpaaba = lib.direct_sum('-i+jAb->ijAb', Foa, jAb)
Hr2abbb+= lib.direct_sum('-i+JAB->iJAB', Foa, JAB)
Hr2abbb-= Fob.reshape(1,-1,1,1)
tmpbbab = lib.direct_sum('-I+JaB->IJaB', Fob, JaB)
Hr2aaba+= tmpaaba + tmpaaba.transpose(1,0,2,3)
Hr2bbab+= tmpbbab + tmpbbab.transpose(1,0,2,3)
tmpaaba = tmpbbab = None
Hr2aa += Wovaa.reshape(1,nocca,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,1,nvira)
Hr2aa += Wovaa.reshape(nocca,1,nvira,1)
Hr2aa += Wovaa.reshape(1,nocca,nvira,1)
Hr2ab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2ab += Wovab.reshape(nocca,1,1,nvirb)
Hr2ab += Wovaa.reshape(nocca,1,nvira,1)
Hr2ab += Wovba.reshape(1,noccb,nvira,1)
Hr2bb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bb += Wovbb.reshape(noccb,1,nvirb,1)
Hr2bb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2baaa += Wovaa.reshape(1,nocca,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,1,nvira)
Hr2baaa += Wovba.reshape(noccb,1,nvira,1)
Hr2baaa += Wovaa.reshape(1,nocca,nvira,1)
Hr2aaba += Wovaa.reshape(1,nocca,1,nvira)
Hr2aaba += Wovaa.reshape(nocca,1,1,nvira)
Hr2aaba += Wovab.reshape(nocca,1,nvirb,1)
Hr2aaba += Wovab.reshape(1,nocca,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,1,nvirb)
Hr2abbb += Wovab.reshape(nocca,1,nvirb,1)
Hr2abbb += Wovbb.reshape(1,noccb,nvirb,1)
Hr2bbab += Wovbb.reshape(1,noccb,1,nvirb)
Hr2bbab += Wovbb.reshape(noccb,1,1,nvirb)
Hr2bbab += Wovba.reshape(noccb,1,nvira,1)
Hr2bbab += Wovba.reshape(1,noccb,nvira,1)
Wooaa = np.einsum('ijij->ij', imds.woooo).copy()
Wooaa -= np.einsum('ijji->ij', imds.woooo)
Woobb = np.einsum('ijij->ij', imds.wOOOO).copy()
Woobb -= np.einsum('ijji->ij', imds.wOOOO)
Wooab = np.einsum('ijij->ij', imds.woOoO)
Wooba = Wooab.T
Wooaa *= .5
Woobb *= .5
Hr2aa += Wooaa.reshape(nocca,nocca,1,1)
Hr2ab += Wooab.reshape(nocca,noccb,1,1)
Hr2bb += Woobb.reshape(noccb,noccb,1,1)
Hr2baaa += Wooba.reshape(noccb,nocca,1,1)
Hr2aaba += Wooaa.reshape(nocca,nocca,1,1)
Hr2abbb += Wooab.reshape(nocca,noccb,1,1)
Hr2bbab += Woobb.reshape(noccb,noccb,1,1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Wvvaa += np.einsum('mb,maab->ab', t1a, eris_ovvv)
#:Wvvaa -= np.einsum('mb,mbaa->ab', t1a, eris_ovvv)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Wvvaa += np.einsum('mb,maab->ab', t1a[p0:p1], ovvv)
Wvvaa -= np.einsum('mb,mbaa->ab', t1a[p0:p1], ovvv)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Wvvbb += np.einsum('mb,maab->ab', t1b, eris_OVVV)
#:Wvvbb -= np.einsum('mb,mbaa->ab', t1b, eris_OVVV)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Wvvbb += np.einsum('mb,maab->ab', t1b[p0:p1], OVVV)
Wvvbb -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVVV)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Wvvab -= np.einsum('mb,mbaa->ba', t1a, eris_ovVV)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ba', t1a[p0:p1], ovVV)
ovVV = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Wvvab -= np.einsum('mb,mbaa->ab', t1b, eris_OVvv)
#idxa = np.arange(nvira)
#idxa = idxa*(idxa+1)//2+idxa
#for p0, p1 in lib.prange(0, noccb, blksize):
# OVvv = np.asarray(eris.OVvv[p0:p1])
# Wvvab -= np.einsum('mb,mba->ab', t1b[p0:p1], OVvv[:,:,idxa])
# OVvv = None
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Wvvab -= np.einsum('mb,mbaa->ab', t1b[p0:p1], OVvv)
OVvv = None
Wvvaa = Wvvaa + Wvvaa.T
Wvvbb = Wvvbb + Wvvbb.T
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Wvvaa += np.einsum('aabb->ab', eris_vvvv) - np.einsum('abba->ab', eris_vvvv)
#:Wvvbb += np.einsum('aabb->ab', eris_VVVV) - np.einsum('abba->ab', eris_VVVV)
#:Wvvab += np.einsum('aabb->ab', eris_vvVV)
if eris.vvvv is not None:
for i in range(nvira):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.vvvv[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvaa[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvaa[i,:i+1] -= tmp
Wvvaa[:i ,i] -= tmp[:i]
vvv = lib.unpack_tril(np.asarray(eris.vvVV[i0:i0+i+1]))
Wvvab[i] += np.einsum('bb->b', vvv[i])
vvv = None
for i in range(nvirb):
i0 = i*(i+1)//2
vvv = lib.unpack_tril(np.asarray(eris.VVVV[i0:i0+i+1]))
tmp = np.einsum('bb->b', vvv[i])
Wvvbb[i] += tmp
tmp = np.einsum('bb->b', vvv[:,:i+1,i])
Wvvbb[i,:i+1] -= tmp
Wvvbb[:i ,i] -= tmp[:i]
vvv = None
Wvvba = Wvvab.T
Hr2aa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2ab += Wvvab.reshape(1,1,nvira,nvirb)
Hr2bb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2baaa += Wvvaa.reshape(1,1,nvira,nvira)
Hr2aaba += Wvvba.reshape(1,1,nvirb,nvira)
Hr2abbb += Wvvbb.reshape(1,1,nvirb,nvirb)
Hr2bbab += Wvvab.reshape(1,1,nvira,nvirb)
vec_ee = amplitudes_to_vector_ee((Hr1aa,Hr1bb), (Hr2aa,Hr2ab,Hr2bb))
vec_sf = amplitudes_to_vector_eomsf((Hr1ab,Hr1ba), (Hr2baaa,Hr2aaba,Hr2abbb,Hr2bbab))
return vec_ee, vec_sf
class EOMEE(eom_rccsd.EOMEE):
def __init__(self, cc):
eom_rccsd.EOMEE.__init__(self, cc)
self.nocc = cc.get_nocc()
self.nmo = cc.get_nmo()
kernel = eeccsd
eeccsd = eeccsd
get_diag = eeccsd_diag
def vector_size(self):
'''size of the vector based on spin-orbital basis'''
nocc = | np.sum(self.nocc) | numpy.sum |
# -*- coding: utf-8 -*-
# Run this app with `python3 sens_matrix_dashboard.py` and
# view the plots at http://127.0.0.1:8050/ in your web browser.
# (To open a web browser on a larson-group computer,
# login to malan with `ssh -X` and then type `firefox &`.)
def main():
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import plotly.graph_objects as go
import pandas as pd
import numpy as np
import pdb
import sklearn
from plotly.figure_factory import create_quiver
from itertools import chain
from analyze_sensitivity_matrix import \
analyzeSensMatrix, setupObsCol, setupDefaultMetricValsCol, \
findOutliers, findParamsUsingElastic
from test_analyzeSensMatrix import write_test_netcdf_files
# Metrics are observed quantities that we want a tuned simulation to match.
# The order of metricNames determines the order of rows in sensMatrix.
# Column vector of (positive) weights. A small value de-emphasizes
# the corresponding metric in the fit.
metricsNamesAndWeights = [ \
['SWCF_GLB', 4.01], \
['SWCF_DYCOMS', 1.01], \
['SWCF_HAWAII', 1.01], \
['SWCF_VOCAL', 1.01], \
['SWCF_LBA', 1.01], \
['SWCF_WP', 1.01], \
['SWCF_EP', 1.01], \
['SWCF_NP', 1.01], \
['SWCF_SP', 1.01], \
## ['SWCF_PA', 1.01], \
['SWCF_CAF', 1.01], \
['LWCF_GLB', 4.01], \
# ['LWCF_DYCOMS', 1.01], \
# ['LWCF_HAWAII', 1.01], \
# ['LWCF_VOCAL', 1.01], \
['LWCF_LBA', 1.01], \
['LWCF_WP', 1.01], \
# ['LWCF_EP', 1.01], \
# ['LWCF_NP', 1.01], \
# ['LWCF_SP', 1.01], \
## ['LWCF_PA', 1.01], \
# ['LWCF_CAF', 1.01], \
['PRECT_GLB', 4.01], \
['PRECT_LBA', 1.01], \
['PRECT_WP', 1.01], \
# ['PRECT_EP', 1.01], \
# ['PRECT_NP', 1.01], \
# ['PRECT_SP', 1.01], \
## ['PRECT_PA', 1.01], \
['PRECT_CAF', 1.01] \
]
# ['PRECT_DYCOMS', 0.01], \
# ['PRECT_HAWAII', 0.01], \
# ['PRECT_VOCAL', 0.01], \
dfMetricsNamesAndWeights = \
pd.DataFrame( metricsNamesAndWeights, columns = ['metricsNames', 'metricsWeights'] )
metricsNames = dfMetricsNamesAndWeights[['metricsNames']].to_numpy().astype(str)[:,0]
metricsWeights = dfMetricsNamesAndWeights[['metricsWeights']].to_numpy()
# Parameters are tunable model parameters, e.g. clubb_C8.
# The float listed below is a factor that is used below for scaling plots.
# Each parameter is associated with two sensitivity simulations in which that parameter is perturbed
# either up or down.
# The output from each sensitivity simulation is expected to be stored in its own netcdf file.
# Each netcdf file contains metric values and parameter values for a single simulation.
paramsNamesScalesAndFilenames = [ \
## ['clubb_c7', 1.0, \
## '20220516/sens.tau_2_Regional.nc', \
## '20220516/sens.tau_3_Regional.nc'], \
['clubb_c11', 1.0, \
'20220516/sens.tau_4_Regional.nc', \
'20220516/sens.tau_5_Regional.nc'], \
['clubb_gamma_coef', 1.0, \
'20220516/sens.tau_6_Regional.nc', \
'20220516/sens.tau_7_Regional.nc'], \
## ['clubb_c8', 1.0, \
## '20220516/sens.tau_9_Regional.nc', \
## '20220516/sens.tau_8_Regional.nc'], \
['clubb_c_k10', 1.0, \
'20220516/sens.tau_10_Regional.nc', \
'20220516/sens.tau_11_Regional.nc'], \
['clubb_c_invrs_tau_n2', 1.0, \
'20220516/sens.tau_12_Regional.nc',
'20220516/sens.tau_13_Regional.nc'], \
## ['clubb_c_invrs_tau_wpxp_n2_thresh', 1.e3, \
## '20220516/sens.tau_14_Regional.nc', \
## '20220516/sens.tau_15_Regional.nc'], \
## ['micro_vqit', 1.0, \
## '20220516/sens.tau_16_Regional.nc', \
## '20220516/sens.tau_17_Regional.nc'], \
]
dfparamsNamesScalesAndFilenames = \
pd.DataFrame( paramsNamesScalesAndFilenames, \
columns = ['paramsNames', 'paramsScales',
'sensNcFilenamesExt', 'sensNcFilenames'] )
paramsNames = dfparamsNamesScalesAndFilenames[['paramsNames']].to_numpy().astype(str)[:,0]
# Extract scaling factors of parameter values from user-defined list paramsNamesScalesAndFilenames.
# The scaling is not used for any calculations, but it allows us to avoid plotting very large or small values.
paramsScales = dfparamsNamesScalesAndFilenames[['paramsScales']].to_numpy().astype(float)[:,0]
sensNcFilenames = dfparamsNamesScalesAndFilenames[['sensNcFilenames']].to_numpy().astype(str)[:,0]
sensNcFilenamesExt = dfparamsNamesScalesAndFilenames[['sensNcFilenamesExt']].to_numpy().astype(str)[:,0]
    # This is the subset of paramsNames that vary over [0,1] (e.g., C5)
# and hence will be transformed to [0,infinity] in order to make
# the relationship between parameters and metrics more linear:
#transformedParamsNames = np.array(['clubb_c8','clubb_c_invrs_tau_n2', 'clubb_c_invrs_tau_n2_clear_wp3'])
transformedParamsNames = np.array([''])
# Netcdf file containing metric and parameter values from the default simulation
defaultNcFilename = \
'20220516/sens.tau_1_Regional.nc'
# Metrics from simulation that use the SVD-recommended parameter values
    # Here, we use the default simulation just as a placeholder.
linSolnNcFilename = \
'20220516/sens.tau_1_Regional.nc'
# Observed values of our metrics, from, e.g., CERES-EBAF.
# These observed metrics will be matched as closely as possible by analyzeSensMatrix.
    # NOTE: PRECT is in units of m/s
obsMetricValsDict = { \
'LWCF_GLB': 28.008, 'PRECT_GLB': 0.000000031134259, 'SWCF_GLB': -45.81, 'TMQ_GLB': 24.423, \
'LWCF_DYCOMS': 19.36681938, 'PRECT_DYCOMS':0.000000007141516, 'SWCF_DYCOMS': -63.49394226, 'TMQ_DYCOMS':20.33586884,\
'LWCF_LBA': 43.83245087, 'PRECT_LBA':0.000000063727875, 'SWCF_LBA': -55.10041809, 'TMQ_LBA': 44.27890396,\
'LWCF_HAWAII': 24.78801537, 'PRECT_HAWAII':0.000000020676041, 'SWCF_HAWAII': -36.49626541, 'TMQ_HAWAII': 33.17501068,\
'LWCF_WP': 54.73321152, 'PRECT_WP':0.000000078688704, 'SWCF_WP': -62.09819031, 'TMQ_WP':51.13026810,\
'LWCF_EP': 33.42149734, 'PRECT_EP': 0.000000055586694, 'SWCF_EP': -51.79394531, 'TMQ_EP':44.34251404,\
'LWCF_NP': 26.23941231, 'PRECT_NP':0.000000028597503, 'SWCF_NP': -50.92364502, 'TMQ_NP':12.72111988,\
'LWCF_SP': 31.96141052, 'PRECT_SP':0.000000034625369, 'SWCF_SP': -70.26461792, 'TMQ_SP':10.95032024,\
'LWCF_PA': 47.32126999, 'PRECT_PA':0.000000075492694, 'SWCF_PA': -78.27433014, 'TMQ_PA':47.25967789,\
'LWCF_CAF': 43.99757003784179687500, 'PRECT_CAF':0.000000042313699, 'SWCF_CAF': -52.50243378, 'TMQ_CAF':36.79592514,\
'LWCF_VOCAL': 43.99757004, 'PRECT_VOCAL':0.000000001785546, 'SWCF_VOCAL': -77.26232147, 'TMQ_VOCAL':17.59922791 }
    # Estimate the non-linearity of the global model's response to perturbations in parameter values.
# To do so, calculate radius of curvature of the three points from the default simulation
# and the two sensitivity simulations.
#calcNormlzdRadiusCurv(metricsNames, paramsNames, transformedParamsNames,
# metricsWeights,
# sensNcFilenames, sensNcFilenamesExt, defaultNcFilename)
# Set up a column vector of observed metrics
obsMetricValsCol = setupObsCol(obsMetricValsDict, metricsNames)
# Calculate changes in parameter values needed to match metrics.
defaultMetricValsCol, defaultBiasesCol, \
defaultBiasesApprox, defaultBiasesApproxLowVals, defaultBiasesApproxHiVals, \
defaultBiasesApproxPC, defaultBiasesApproxLowValsPC, defaultBiasesApproxHiValsPC, \
normlzdWeightedDefaultBiasesApprox, normlzdWeightedDefaultBiasesApproxPC, \
defaultBiasesOrigApprox, defaultBiasesOrigApproxPC, \
sensMatrixOrig, sensMatrix, normlzdSensMatrix, \
normlzdWeightedSensMatrix, biasNormlzdSensMatrix, svdInvrsNormlzdWeighted, \
vhNormlzd, uNormlzd, sNormlzd, \
vhNormlzdWeighted, uNormlzdWeighted, sNormlzdWeighted, \
magParamValsRow, \
defaultParamValsOrigRow, dparamsSoln, dnormlzdParamsSoln, \
dparamsSolnPC, dnormlzdParamsSolnPC, \
paramsSoln, paramsLowVals, paramsHiVals, \
paramsSolnPC, paramsLowValsPC, paramsHiValsPC = \
analyzeSensMatrix(metricsNames, paramsNames, transformedParamsNames,
metricsWeights,
sensNcFilenames, defaultNcFilename,
obsMetricValsDict)
paramsLowValsPCBound, paramsHiValsPCBound = \
calcParamsBounds(metricsNames, paramsNames, transformedParamsNames,
metricsWeights, obsMetricValsCol,
magParamValsRow,
sensNcFilenames, sensNcFilenamesExt, defaultNcFilename)
# Create scatterplot to look at outliers
#createPcaBiplot(normlzdSensMatrix, defaultBiasesCol, obsMetricValsCol, metricsNames, paramsNames)
# Find outliers by use of the ransac algorithm
outlier_mask, defaultBiasesApproxRansac, normlzdWeightedDefaultBiasesApproxRansac, \
dnormlzdParamsSolnRansac, paramsSolnRansac = \
findOutliers(normlzdSensMatrix, normlzdWeightedSensMatrix, \
defaultBiasesCol, obsMetricValsCol, magParamValsRow, defaultParamValsOrigRow)
print( "ransac_outliers = ", metricsNames[outlier_mask] )
print( "ransac_inliers = ", metricsNames[~outlier_mask] )
#pdb.set_trace()
# Find best-fit params by use of the Elastic Net algorithm
defaultBiasesApproxElastic, normlzdWeightedDefaultBiasesApproxElastic, \
dnormlzdParamsSolnElastic, paramsSolnElastic = \
findParamsUsingElastic(normlzdSensMatrix, normlzdWeightedSensMatrix, \
defaultBiasesCol, obsMetricValsCol, metricsWeights, magParamValsRow, defaultParamValsOrigRow)
defaultBiasesApproxElasticCheck = ( normlzdWeightedSensMatrix @ dnormlzdParamsSolnElastic ) \
* np.reciprocal(metricsWeights) * np.abs(obsMetricValsCol)
print("defaultBiasesApproxElastic = ", defaultBiasesApproxElastic)
print("defaultBiasesApproxElasticCheck = ", defaultBiasesApproxElasticCheck)
#pdb.set_trace()
# Set up a column vector of metric values from the default simulation
defaultMetricValsCol = setupDefaultMetricValsCol(metricsNames, defaultNcFilename)
# Set up a column vector of metric values from the global simulation based on optimized
# parameter values.
linSolnMetricValsCol = setupDefaultMetricValsCol(metricsNames, linSolnNcFilename)
# Store biases in default simulation, ( global_model - default )
linSolnBiasesCol = np.subtract(linSolnMetricValsCol, defaultMetricValsCol)
# Calculate the fraction of the default-sim bias that remains after tuning.
# This is unweighted and hence is not necessarily less than one.
# defaultBiasesApprox = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
Bias = ( defaultBiasesApprox + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasMagRatio = np.linalg.norm(Bias/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/np.abs(obsMetricValsCol))**2
# Calculate the fraction of the default-sim bias that remains after tuning,
# but using a truncated PC observation.
# This is unweighted and hence is not necessarily less than one.
# defaultBiasesApproxPC = J*delta_p = ( fwd - def )
# numerator = ( fwd - def ) + ( def - obs ) = ( fwd - obs )
BiasPC = ( defaultBiasesApproxPC + defaultBiasesCol )
# defaultBiasesCol = delta_b = ( default - obs ) = denominator
BiasPCMagRatio = np.linalg.norm(BiasPC/np.abs(obsMetricValsCol))**2 / \
np.linalg.norm(defaultBiasesCol/ | np.abs(obsMetricValsCol) | numpy.abs |
import numpy as np
X = np.array([[3,4],[5,12],[24,7]])
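# Each row holds the two legs of a Pythagorean triple (3-4-5, 5-12-13, 24-7-25),
# so the squared row sums computed below are 25, 169 and 625.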
print(X)
X = X ** 2
print(X)
X = | np.sum(X, axis=1) | numpy.sum |
'''
Copyright 2018 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# from numba import jit  # import numba and uncomment the @jit in front of the function if the code is too slow -- this can give around a factor-3 speedup for large integers
from math import factorial as fac
import numpy as np
# @jit
def number_coalitions_weighting_x(quota,weights):
    ''' input:  quota, an integer
                weights, a list or tuple of integers (the weight vector)
        output: a vector of length sum(weights)+1 containing quota many
                leading zeros 0,...,0 followed by the number of coalitions
                of weight quota,...,sum(weights), i.e. coalitions whose
                members' weights sum to x = quota,...,sum(weights)
    '''
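    # The classic counting recurrence this function builds on (a hedged sketch
    # of the approach, not original code): start from c = [1, 0, ..., 0]
    # indexed by total weight 0..sum(weights); for each voter weight w, update
    # c[x] += c[x - w] for x running from sum(weights) down to w, so every
    # voter joins each counted coalition at most once.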
W = | np.array(weights, dtype=np.int64) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 11 20:02:37 2017
@author: tzave
"""
import numpy as np
def swap(r, p, E, cols=None):
if cols is None:
b = np.copy(E[r, :])
E[r, :] = E[p, :]
E[p, :] = np.copy(b)
else:
b = np.copy(E[:, r])
E[:, r] = E[:, p]
E[:, p] = np.copy(b)
def ForwardSubstitution(A, b):
rows = A.shape[0]
y = np.zeros(rows)
for i in range(rows):
s = 0.
for j in range(i):
s = s + A[i, j] * y[j]
y[i] = (b[i] - s) / A[i, i]
return y
def BackSubstitution(A, b):
rows = A.shape[0]
x = np.zeros(rows)
for i in reversed(range(rows)):
s = 0
for j in range(i + 1, rows):
s = s + A[i, j] * x[j]
x[i] = (b[i] - s) / A[i, i]
return x
def foundNonZeroPivot(r, E):
rows = E.shape[0]
PivotFound = False
for p in range(r, rows - 1):
        if np.isclose(E[p, r], 0):
            continue  # zero pivot in this row; keep searching subsequent rows
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Transform the gamestate data to onehot vectors
"""
from sklearn.preprocessing import OneHotEncoder,LabelEncoder
import pandas as pd
import numpy as np
import re
import os
from pathlib import Path
# settlements and cities are built on node coordinates
# roads are built on edge coordinates
# nodes are named after an adjacent road
# hence the set of node coords is a subset of the edge coords
# Additionally, no nodes close to the sea are needed...
# these are inaccessible for building
# edge_coordinates contains the edge coordinates on which a player
# can actually build
# len(edge_coordinates) = 72
edge_coordinates = ['0x27','0x38','0x49','0x5a','0x6b','0x7c',
'0x26','0x48','0x6a','0x8c',
'0x25','0x36','0x47','0x58','0x69','0x7a','0x8b','0x9c',
'0x24','0x46','0x68','0x8a','0xac',
'0x23','0x34','0x45','0x56','0x67','0x78','0x89','0x9a','0xab','0xbc',
'0x22','0x44','0x66','0x88','0xaa','0xcc',
'0x32','0x43','0x54','0x65','0x76','0x87','0x98','0xa9','0xba','0xcb',
'0x42','0x64','0x86','0xa8','0xca',
'0x52','0x63','0x74','0x85','0x96','0xa7','0xb8', '0xc9',
'0x62','0x84','0xa6','0xc8',
'0x72','0x83','0x94','0xa5','0xb6','0xc7']
# additional node coordinates
# (that are not in the accessible edge_coordinates list)
# the ones on the right side of the land that are named after
# sea edge nodes
node_coordinates = ['0x8d', '0xad','0xcd','0xdc','0xda','0xd8']
# all the coordinates of the table that a player can build on
# plus the none value for when the player has not built
# len(build_coords) = 79
build_coords = edge_coordinates + node_coordinates + ['None']
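# quick sanity checks of the counts quoted in the comments above
assert len(edge_coordinates) == 72
assert len(build_coords) == 79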
################################
# encoding the build coordinates
################################
np_build_coords = np.array(build_coords)
label_encoder = LabelEncoder()
integer_encoded_build_coords = label_encoder.fit_transform(np_build_coords)
#print(label_encoder.transform(np.array(['0x69'])))
######################
# for debugging use:
######################
#print('building coordinates label encoding')
#for x in build_coords:
# print('coordinate ' + str(x) + ' : '+str(label_encoder.transform(np.ravel(x))))
#print('-----------------------------------')
#building coordinates label encoding
#coordinate 0x27 : [5]
#coordinate 0x38 : [9]
#coordinate 0x49 : [17]
#coordinate 0x5a : [22]
#coordinate 0x6b : [32]
#coordinate 0x7c : [38]
#coordinate 0x26 : [4]
#coordinate 0x48 : [16]
#coordinate 0x6a : [31]
#coordinate 0x8c : [48]
#coordinate 0x25 : [3]
#coordinate 0x36 : [8]
#coordinate 0x47 : [15]
#coordinate 0x58 : [21]
#coordinate 0x69 : [30]
#coordinate 0x7a : [37]
#coordinate 0x8b : [47]
#coordinate 0x9c : [54]
#coordinate 0x24 : [2]
#coordinate 0x46 : [14]
#coordinate 0x68 : [29]
#coordinate 0x8a : [46]
#coordinate 0xac : [62]
#coordinate 0x23 : [1]
#coordinate 0x34 : [7]
#coordinate 0x45 : [13]
#coordinate 0x56 : [20]
#coordinate 0x67 : [28]
#coordinate 0x78 : [36]
#coordinate 0x89 : [45]
#coordinate 0x9a : [53]
#coordinate 0xab : [61]
#coordinate 0xbc : [67]
#coordinate 0x22 : [0]
#coordinate 0x44 : [12]
#coordinate 0x66 : [27]
#coordinate 0x88 : [44]
#coordinate 0xaa : [60]
#coordinate 0xcc : [73]
#coordinate 0x32 : [6]
#coordinate 0x43 : [11]
#coordinate 0x54 : [19]
#coordinate 0x65 : [26]
#coordinate 0x76 : [35]
#coordinate 0x87 : [43]
#coordinate 0x98 : [52]
#coordinate 0xa9 : [59]
#coordinate 0xba : [66]
#coordinate 0xcb : [72]
#coordinate 0x42 : [10]
#coordinate 0x64 : [25]
#coordinate 0x86 : [42]
#coordinate 0xa8 : [58]
#coordinate 0xca : [71]
#coordinate 0x52 : [18]
#coordinate 0x63 : [24]
#coordinate 0x74 : [34]
#coordinate 0x85 : [41]
#coordinate 0x96 : [51]
#coordinate 0xa7 : [57]
#coordinate 0xb8 : [65]
#coordinate 0xc9 : [70]
#coordinate 0x62 : [23]
#coordinate 0x84 : [40]
#coordinate 0xa6 : [56]
#coordinate 0xc8 : [69]
#coordinate 0x72 : [33]
#coordinate 0x83 : [39]
#coordinate 0x94 : [50]
#coordinate 0xa5 : [55]
#coordinate 0xb6 : [64]
#coordinate 0xc7 : [68]
#coordinate 0x8d : [49]
#coordinate 0xad : [63]
#coordinate 0xcd : [74]
#coordinate 0xdc : [77]
#coordinate 0xda : [76]
#coordinate 0xd8 : [75]
#coordinate None : [78]
# binary encode
onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded_build_coords = integer_encoded_build_coords.reshape(len(integer_encoded_build_coords), 1)
onehot_encoded_build_coords = onehot_encoder.fit_transform(integer_encoded_build_coords)
#print(onehot_encoded_build_coords)
##############################################
# Testing
##############################################
# test label transform ['0x69' '0x89' 'None']
#print('Testing the build coordinates')
#y = gamestates.iloc[2,6:9]
#values = np.array(y)
#print(values)
#integer_encoded = label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting build coordinates')
# robber can be placed on land hexes (19)
land_coords = ['0x37','0x59','0x7b',
'0x35','0x57','0x79','0x9b',
'0x33','0x55','0x77','0x99','0xbb',
'0x53','0x75','0x97','0xb9',
'0x73','0x95','0xb7'
]
################################
# encoding the land coordinates
# aka robber coordinates
################################
np_rob_coords = np.array(land_coords)
rob_label_encoder = LabelEncoder()
integer_encoded_rob_coords = rob_label_encoder.fit_transform(np_rob_coords)
# print(integer_encoded_rob_coords)
# [ 2 6 11 1 5 10 15 0 4 9 14 18 3 8 13 17 7 12 16]
# binary encode
rob_onehot_encoder = OneHotEncoder(sparse=False)
integer_encoded_rob_coords = integer_encoded_rob_coords.reshape(len(integer_encoded_rob_coords), 1)
onehot_encoded_rob_coords = rob_onehot_encoder.fit_transform(integer_encoded_rob_coords)
#print(onehot_encoded_rob_coords)
##############################################
# Testing
##############################################
## test robber coordinates of pilot01
#print('Testing the robber ')
#y = gamestates.iloc[:,3]
#values = np.array(y)
#print(values)
#integer_encoded = rob_label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = rob_onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting robber')
################################
# encoding the hex typed
################################
# this needs to have custom categories because of the ports
# in the game version of the data
# 6: water
# 0: desert
# 1: clay
# 2: ore
# 3: sheep
# 4: wheat
# 5: wood
# 7 - 12 : miscellaneous ports(3:1) facing in the different directions
# 16+ : non miscellaneous ports(2:1)
#
# 9 categories
def hexLabelEncoder(hextypes):
'''
converts the hextypes to labeled (9 labels for the 9 categories)
Parameters: hex board layout array
Returns: array that contains the labels
'''
y = []
# pilot1 hexlayout is
#[9, 6, 67, 6, 6, 2, 5, 1, 66, 8, 2, 3, 1, 2, 6, 6, 5, 3, 4, 1, 4, 11, 36, 5, 4, 0, 5, 6, 6, 4, 3, 3, 97, 21, 6, 12, 6]
for x in hextypes:
if x < 7 :
y.append(x)
elif 7<= x <= 12:
y.append(7)
else :
y.append(8)
return y
###### checking the general fit
###### generalized ohe encoder for list of all possible land types
hex_type_OHencoder = OneHotEncoder(sparse=False)
hex_type_labels = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
#integer_encoded_types = integer_encoded_types.reshape(len(integer_encoded_types),1)
OHE_land_types = hex_type_OHencoder.fit_transform(hex_type_labels.reshape(len(hex_type_labels),1))
#print(OHE_land_types)
################################################
# Testing
##############################################
## test land types of pilot01
#hextypes = gamestates.iloc[0,1]
#integer_encoded_types = np.array(hexLabelEncoder(hextypes))
#print(integer_encoded_types)
# outputs:
# pilot1 hexlayout is
#[9, 6, 67, 6, 6, 2, 5, 1, 66, 8, 2, 3, 1, 2, 6, 6, 5, 3, 4, 1, 4, 11, 36, 5, 4, 0, 5, 6, 6, 4, 3, 3, 97, 21, 6, 12, 6]
# converted to:
# [7 6 8 6 6 2 5 1 8 7 2 3 1 2 6 6 5 3 4 1 4 7 8 5 4 0 5 6 6 4 3 3 8 8 6 7 6]
#ohe_hex_layout = hex_type_OHencoder.transform(integer_encoded_types.reshape(len(integer_encoded_types),1))
######################################################
# create the numpy array that contains the ohe vectors
######################################################
#
# store the data to an np array so that they can be used
# in keras
#
# a massive np array will be created with all the games at the end, when we
# will be ready to train
# to convert to ohe you first transform to label encoded
# and then to one-hot encode
# np array size :
# rows : 4150
# i.e. for all 57 games we have 4150 gameturns
# columns :
# hex layout : 37 hexes x 9 categories
# -> 333
# robber positions : 19 possible positions (land hexes)
# -> 19
# player state :
# builds : 24 building blocks x 79 categories(coords)
# -> 1896
# dev cards : 25 dev cards (true-false)
# -> 25
##
# total : 333 + 19 + 4x(1896+25) = 8017 + 19 = 8036
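# sanity check of the column arithmetic above (illustrative only; the name
# n_ohe_cols is an assumption of this check and is not used elsewhere)
n_ohe_cols = 37*9 + 19 + 4*(24*79 + 25)
assert n_ohe_cols == 8036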
######### IMPORTANT ##########
## Instead of a big, chaotic table, save to various small np arrays
##
#ohedata = np.zeros((4150,8036))
## saving pilot1 to np data array
## land hex types
#temp = np.array(hexLabelEncoder(gamestates.iloc[0,1]))
#print('-------')
#print(temp)
#print(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
#
##oned_temp = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
## this goes from 0 to 332
#ohedata[0,0:333] = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
#ohedata[0,0:3]=1 # -> writes 1 to columns 0,1,2
######## IMPORTANT ##########
# OHE conversion steps:
# 1. convert hex layout
# 2. convert robber position and append it
# 3. convert player1 build and append them
# 4. convert player1 devcard and append them
# 5. convert player2 3 4
# 6. check size of all this
def convert_hex_layout(hexlayout):
''' converts the gamestates hexlayout to one hot encoding
PARAMETERS
----------
hexlayout : the gamestates hexlayout
Returns
-------
an np array of size (1,333)
'''
# convert the layout to label encoding
labeled = np.array(hexLabelEncoder(hexlayout))
# convert the layout to one hot encoding
ohe = hex_type_OHencoder.transform(labeled.reshape(len(labeled),1))
return np.ravel(ohe)
####Testing OK
#print('Testing hex layout conversion')
#methodlayout = convert_hex_layout(gamestates.iloc[0,1])
#scriptlayout = np.ravel(hex_type_OHencoder.transform(temp.reshape(len(temp),1)))
def convert_robber_position(robber):
''' converts the robber position coordinates to one hot encoding
Parameters
----------
robber: the robber coordinates from the gamestates dataframe
Returns
-------
encoded np array of size 19
'''
# convert the robber position to labeled encoding
robber = np.array(robber)
labeled = rob_label_encoder.transform(np.ravel(robber))
# convert the robber position to one hot encoding
labeled = labeled.reshape(len(labeled),1)
ohe = rob_onehot_encoder.transform(labeled)
# return with ravel to avoid the double list [[]]
return np.ravel(ohe)
####Testing OK
#print('Testing the robber ')
#y = gamestates.iloc[1,3]
#values = np.array(y)
#print(values)
#integer_encoded = rob_label_encoder.transform(np.ravel(values))
#print(integer_encoded)
#integer_encoded = integer_encoded.reshape(len(integer_encoded), 1)
#onehot_encoded = rob_onehot_encoder.transform(integer_encoded)
#print(onehot_encoded)
#print('eotesting robber')
#print('Testing the robber method')
#methodrobber = convert_robber_position(gamestates.iloc[1,3])
#print(methodrobber)
def convert_player_buildings(playerbuildings):
'''
Converts the player buildings coordinates to one hot encoding
Parameters
----------
from the gamestate the players columns of settlements, cities and roads
a list of 24 coordinates
Returns
-------
np array of one hot encoding for all 24 building blocks of the player
size should be (24,79) (in one line vector 24*79 = 1896)
'''
# list of the buildings
buildings = []
for coord in playerbuildings:
ohe_coord = convert_building_coord(coord)
buildings.append(ohe_coord)
#print(buildings)
npbuildings = np.array(buildings)
return np.ravel(npbuildings)
def convert_building_coord(hexcoord):
'''
Convert a hex building coordinate to one hot encoding
Parameters
----------
a hex coordinate
Returns
-------
one hot encoding of the coordinate, an np array or size 79
'''
value = np.array(hexcoord)
# convert the coordinate to labeled encoding
labeled = label_encoder.transform(np.ravel(value))
# convert the coordinate to one hot encoding
labeled = labeled.reshape(len(labeled), 1)
ohe = onehot_encoder.transform(labeled)
return ohe
#######
## Testing the coordinate conversion OK
#print('Testing the coordinate conversion to ohe')
## testing only one coordinate
#coord = gamestates.iloc[2,6]
#print(coord)
#methodcoord = convert_building_coord(coord)
## testing group of coordinates OK
#coords = gamestates.iloc[2,6:9]
#print(coords)
#methodcoords = convert_player_buildings(coords)
#print(methodcoords)
#print(methodcoords.reshape(3,79))
def convert_player_devcards(dev_cards):
'''
    Converts the gamestate fields of the player's dev cards
from true/false to binary 1/0
Parameters
----------
    dev_cards : the 25 dev cards potentially available to the player
Returns
-------
np array of size 25 where true is 1 and false is 0
'''
binary_dev_cards =[]
for card in dev_cards:
# type is np.bool, don't use quotes
if card == True :
binary_dev_cards.append(1)
else:
binary_dev_cards.append(0)
return np.array(binary_dev_cards)
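# An equivalent vectorized form, shown only as an illustrative alternative;
# this helper is an assumption of this sketch and is not used elsewhere.
def convert_player_devcards_vectorized(dev_cards):
    '''Vectorized version of convert_player_devcards.'''
    return np.asarray(list(dev_cards), dtype=bool).astype(np.int64)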
#### Testing player dev cards OK
#dev_cards = gamestates.loc[58, 'pl0knight1' : 'pl0vp5']
#dclist = convert_player_devcards(dev_cards)
#print(dclist)
##############################################################################
# OHE conversion
##############################################################################
# convert each dataframe to np arrays
# each game has 10 np arrays of the board, robber and player states in ohe data
datafiles = ["../soclogsData_NoResources/DataTables/pilot/pilot03_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot15_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot17_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot04_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot21_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot02_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot08_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot09_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot14_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot11_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot05_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot16_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot01_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot20_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot13_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot10_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot12_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot07_gamestates.pkl",
"../soclogsData_NoResources/DataTables/pilot/pilot06_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/league4_attempt2-2012-11-14-19-46-22-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/practice-2012-10-30-18-41-07-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League4-2012-11-24-09-17-47-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Test-2012-10-16-14-53-15-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/L5 Real game-2012-11-11-19-58-55-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master League final-2012-12-05-16-59-57-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League 8 Game 2-2012-11-26-18-55-31-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/SOCL League 5 Game 2-2012-11-25-17-25-09-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/league 5 last game-2012-12-09-21-08-39-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/SOCL League 5 Game 4-2012-12-03-02-11-10-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League8-2012-11-24-12-04-51-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game5-2012-11-30-19-59-18-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/master league 4-2012-12-04-17-37-56-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master league game 2-2012-11-13-18-07-14-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Game 3-2012-11-25-20-09-16-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League 5 game 3-2012-11-26-00-51-20-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League4-2012-11-09-19-08-53-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/3version2-2012-11-21-20-23-31-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game1-2012-11-18-20-34-38-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/League3Game4-2012-11-28-20-01-30-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/L5 practicegame-2012-11-11-19-26-36-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Master League Game 3-2012-11-17-17-01-18-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season2/Settles league 1-2012-11-08-18-05-34-+0000_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3practice-2012-05-31-19-23-46-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League2.4-2012-06-26-22-47-04-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3-2012-05-27-19-53-48-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league2.2-2012-06-18-20-50-12-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3 michael-2012-06-17-20-54-03-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/3-2012-06-06-19-58-56-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1-2012-06-17-19-53-24-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1.2-2012-06-21-20-27-05-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league 3 (-k)-2012-06-25-18-22-53-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league3minus1-2012-05-25-22-22-21-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 2-2012-06-26-20-23-20-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1 game-2012-06-19-18-49-00-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 1.1-2012-06-21-18-58-22-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/league1 31may-2012-05-31-19-59-37-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League 3 Finale-2012-06-25-21-57-53-+0100_gamestates.pkl",
"../soclogsData_NoResources/DataTables/season1/League2-2012-06-17-19-58-07-+0100_gamestates.pkl"
]
# make directories to save the results
ohedata_dir = Path.cwd() / "OHEdata/season2"
ohedata_dir.mkdir(parents=True, exist_ok=True)
ohedata_dir = Path.cwd() / "OHEdata/season1"
ohedata_dir.mkdir(parents=True, exist_ok=True)
ohedata_dir = Path.cwd() / "OHEdata/pilot"
ohedata_dir.mkdir(parents=True, exist_ok=True)
print('Converting gamestates data to one-hot encoded vectors')
print('This might take a while. Please be patient...')
for file in datafiles:
# create a dir with the game name
# to save the 11 np arrays of the game
# with the data in ohe
filename_parts = re.split(r'[/]', file)
season = filename_parts[3]
dest = "./OHEdata/"+season
gamename = filename_parts[4][:-15] #exclude the _gamestates.pkl part :-)
path = dest+"/"+gamename
try:
os.mkdir(path)
except OSError :
print("Creation of the directory %s failed" %path)
gamestates = pd.read_pickle(file)
# replace None values with 'None' to work with np
gamestates.replace(to_replace=[None], value='None', inplace=True)
# initialize nptables
    nplayout = np.zeros((1,333))
import argparse
import numpy as np
if __name__ == '__main__':
try:
parser = argparse.ArgumentParser()
parser.add_argument("--file_name", help="input: set a path to the accuuracy file")
args = parser.parse_args()
epochs = 350
test_accuracy = np.zeros((epochs,10))
with open(args.file_name, 'r') as filehandle:
filecontents = filehandle.readlines()
index = 0
col = 0
for line in filecontents:
t_acc = line[:-1]
test_accuracy[index][col] = float(t_acc)
index += 1
if index == epochs:
index = 0
col += 1
if col == 10:
break
ave_accuracy = np.mean(test_accuracy, axis=1)
        std_accuracy = np.std(test_accuracy, axis=1)
import matplotlib.pyplot as plt
import sys
import numpy as np
from numpy import exp, abs
DEL = 1 # 0 = not deleted
class GrahamsScan:
def __init__(self, points):
self.points = points
        self.delete = np.zeros(shape=(points.shape[0], 1))
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import os
import poppy
from .main import GeminiPrimary
# Classes for dealing with AO Telemetry sets
class GPI_Globals(object):
""" Container for same constants as gpilib's gpi_globals,
with same variable names to ease porting of code. Plus some
other variables as needed."""
gpi_tweet_n = 48
gpi_woof_n = 9
gpi_numacross=43.2
gpi_tweet_spacing = GeminiPrimary.primary_diameter/gpi_numacross
gpi_woof_spacing = GeminiPrimary.primary_diameter/gpi_numacross*5.5
# below ones are not in gpi_globals
pupil_center_subap = 23
pupil_center_tweeter=23.5
pupil_center_woofer=4
class DeformableMirror(poppy.AnalyticOpticalElement):
""" Generic deformable mirror, of the continuous face sheet variety"""
def __init__(self, shape=(10,10)):
poppy.OpticalElement.__init__(self, planetype=poppy.poppy_core._PUPIL)
self._shape = shape # number of actuators
self._surface = np.zeros(shape) # array for the DM surface WFE
self.numacross = shape[0] # number of actuators across diameter of
# the optic's cleared aperture (may be
# less than full diameter of array)
self.actuator_spacing = 1.0/self.numacross # distance between actuators,
# projected onto the primary
self.pupil_center = (shape[0]-1.)/2 # center of clear aperture in actuator units
# (may be offset from center of DM)
@property
def shape(self):
return self._shape
@property
def surface(self):
""" The surface shape of the deformable mirror, in
**meters** """
return self._surface
def set_surface(self, new_surface, units='nm'):
""" Set the entire surface shape of the DM.
Parameters
-------------
new_surface : 2d ndarray
Desired DM surface shape
(note that wavefront error will be 2x this)
units : string
Right now this *must* be 'nm' for nanometers,
which is the default. Other units may be added later
if needed.
"""
assert new_surface.shape == self.shape
if units!='nm':
raise NotImplementedError("Units other than nanometers not yet implemented.")
        self._surface[:] = np.asarray(new_surface, dtype=float)
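# Minimal usage sketch (illustrative; the shape and surface values are
# assumptions, not taken from GPI telemetry; requires poppy at import time):
if __name__ == '__main__':
    _dm = DeformableMirror(shape=(4, 4))
    _dm.set_surface(np.ones((4, 4)))  # a uniform 1 nm surface
    print(_dm.surface)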
# Standard lib
import unittest
# 3rd party
import numpy as np
# Our own imports
from deep_hipsc_tracking import tracking
from .. import helpers
# Data
T1 = np.array([
[1.0, 2.0, 3.0, 4.0, 5.0],
[1.0, 2.0, 3.0, 4.0, 5.0],
]).T
T2 = np.array([
[1.1, 2.1, 3.1, 4.1],
[1.1, 2.1, 3.1, 4.1],
]).T
T3 = np.array([
[1.2, 2.2, 3.2, 4.2, 5.2],
[1.2, 2.2, 3.2, 4.2, 5.2],
]).T
T4 = np.array([
[1.3, 3.3, 4.3, 5.3],
[1.3, 3.3, 4.3, 5.3],
]).T
T5 = np.array([
[1.4, 2.4, 3.4, 5.4],
[1.4, 2.4, 3.4, 5.4],
]).T
TRACKS = [
(1, None, T1),
(2, None, T2),
(3, None, T3),
(4, None, T4),
(5, None, T5),
]
# Tests
class TestFindFlatRegions(unittest.TestCase):
def test_finds_flat_region_all_flat(self):
tt = np.linspace(0, 100, 100)
yy = tt * 2
res = tracking.find_flat_regions(tt, yy, interp_points=10, cutoff=10, noise_points=5)
        exp = [np.ones((100, ), dtype=bool)]
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
def test_finds_flat_region_all_spikey(self):
tt = np.linspace(0, 100, 100)
yy = np.array([-100, 0, 100] * 50)
res = tracking.find_flat_regions(tt, yy, interp_points=5, cutoff=1, noise_points=1)
exp = []
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
def test_finds_flat_region_square_waves(self):
tt = np.linspace(0, 100, 100)
yy = np.array(([-100] * 10 + [100] * 10)*5)
res = tracking.find_flat_regions(tt, yy, interp_points=5, cutoff=1, noise_points=1)
exp = []
for i in range(0, 100, 10):
            mask = np.zeros((100, ), dtype=bool)
if i == 0:
mask[i:i+8] = 1
elif i == 90:
mask[i+2:i+10] = 1
else:
mask[i+2:i+8] = 1
exp.append(mask)
msg = 'Got {} rois expected {}'.format(len(res), len(exp))
self.assertEqual(len(res), len(exp), msg)
for r, e in zip(res, exp):
np.testing.assert_almost_equal(r, e)
class TestRollingFuncs(unittest.TestCase):
def test_rolling_rolling_window(self):
xp = np.array([1, 2, 3, 4, 5])
exp = np.array([2, 3, 4])
res = np.mean(tracking.rolling_window(xp, window=3), axis=-1)
np.testing.assert_almost_equal(res, exp)
exp = np.array([1.5, 2.5, 3.5, 4.5])
res = np.mean(tracking.rolling_window(xp, window=2), axis=-1)
np.testing.assert_almost_equal(res, exp)
exp = np.array([1.3333, 2, 3, 4, 4.6666])
res = np.mean(tracking.rolling_window(xp, window=3, pad='same'), axis=-1)
np.testing.assert_almost_equal(res, exp, decimal=3)
def test_interpolate_window(self):
xp = np.array([1, 2, 3, 4, 5])
yp = np.array([5, 4, 3, 2, 1])
x = np.array([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 5.5])
y = np.array([5.5, 5, 4.5, 4, 3.5, 3, 2.5, 2, 1, 0.5])
res = tracking.rolling_interp(x, xp, yp, 3)
np.testing.assert_almost_equal(y, res)
def test_slope_window(self):
xp = np.array([1, 2, 3, 4, 5])
yp = np.array([5, 4, 3, 2, 1])
x = np.array([0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 5, 5.5])
a = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1])
res = tracking.rolling_slope(x, xp, yp, 3)
np.testing.assert_almost_equal(a, res)
class TestMergePointsCluster(unittest.TestCase):
def test_merges_points_with_nans(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, np.nan],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_same_set(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points = tracking.tracking.merge_points_cluster(points1, points1, max_dist=0.1)
np.testing.assert_almost_equal(points, points1)
def test_merges_both_different(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_left(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_right(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_slight_motion(self):
points1 = np.array([
[0.0, 0.2],
[1.0, 1.2],
[2.0, 2.2],
[3.0, 3.2],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[4.0, 4.1],
])
points = tracking.tracking.merge_points_cluster(points1, points2, max_dist=0.2)
exp_points = np.array([
[0.0, 0.15],
[1.0, 1.15],
[2.0, 2.2],
[3.0, 3.15],
[4.0, 4.1],
])
np.testing.assert_almost_equal(points, exp_points)
class TestMergePointsPairwise(unittest.TestCase):
def test_merges_same_set(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points1, max_dist=0.1)
np.testing.assert_almost_equal(points, points1)
def test_merges_both_different(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_left(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_right(self):
points1 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[2.0, 2.1],
[3.0, 3.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.1)
exp_points = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[2.0, 2.1],
])
np.testing.assert_almost_equal(points, exp_points)
def test_merges_superset_slight_motion(self):
points1 = np.array([
[0.0, 0.2],
[1.0, 1.2],
[2.0, 2.2],
[3.0, 3.2],
])
points2 = np.array([
[0.0, 0.1],
[1.0, 1.1],
[3.0, 3.1],
[4.0, 4.1],
])
points = tracking.tracking.merge_points_pairwise(points1, points2, max_dist=0.2)
exp_points = np.array([
[0.0, 0.15],
[1.0, 1.15],
[3.0, 3.15],
[2.0, 2.2],
[4.0, 4.1],
])
np.testing.assert_almost_equal(points, exp_points)
class TestFindLinkFunctions(unittest.TestCase):
def test_finds_all_the_links(self):
res = tracking.find_link_functions()
exp = {'softassign', 'balltree', 'bipartite_match'}
self.assertEqual(set(res.keys()), exp)
class TestLinks(unittest.TestCase):
def test_to_padded_arrays(self):
tt = np.array([3, 5, 7, 9, 11])
xx = np.array([0, 1, 2, 3, 4])
yy = np.array([1, 2, 3, 4, 5])
chain = tracking.Link.from_arrays(tt, xx, yy)
nan = np.nan
in_tt = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
in_xx = np.array([nan, nan, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, nan])
in_yy = np.array([nan, nan, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, nan])
res_tt, res_xx, res_yy = chain.to_padded_arrays(min_t=1, max_t=13)
np.testing.assert_almost_equal(res_tt, in_tt)
np.testing.assert_almost_equal(res_xx, in_xx)
np.testing.assert_almost_equal(res_yy, in_yy)
in_tt = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
in_xx = np.array([0, 0, 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4])
in_yy = np.array([1, 1, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5])
res_tt, res_xx, res_yy = chain.to_padded_arrays(min_t=1, max_t=13, extrapolate=True)
np.testing.assert_almost_equal(res_tt, in_tt)
np.testing.assert_almost_equal(res_xx, in_xx)
np.testing.assert_almost_equal(res_yy, in_yy)
def test_interpolate_chain_regular(self):
        tt = np.array([3, 5, 7, 9, 11])
import numpy as np
from numpy.polynomial import Chebyshev as Ch
from scipy.linalg import cho_factor, cho_solve
from scipy.optimize import minimize
import psoap
from psoap import constants as C
from psoap import matrix_functions
from psoap.data import lredshift
try:
import celerite
from celerite import terms
except ImportError:
print("If you want to use the fast 1D (SB1 or ST1 models), please install celerite")
try:
import george
from george import kernels
except ImportError:
print("If you want to use the fast GP solver (SB2, ST2, or ST3 models) please install george")
def predict_f(lwl_known, fl_known, sigma_known, lwl_predict, amp_f, l_f, mu_GP=1.0):
    '''lwl_known are the known wavelengths.
    lwl_predict are the prediction wavelengths.
    Assumes all inputs are 1D arrays.'''
# determine V11, V12, V21, and V22
M = len(lwl_known)
V11 = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11, lwl_known, amp_f, l_f)
# V11[np.diag_indices_from(V11)] += sigma_known**2
V11 = V11 + sigma_known**2 * np.eye(M)
    N = len(lwl_predict)
V12 = np.empty((M, N), dtype=np.float64)
matrix_functions.fill_V12_f(V12, lwl_known, lwl_predict, amp_f, l_f)
V22 = np.empty((N, N), dtype=np.float64)
# V22 is the covariance between the prediction wavelengths
# The routine to fill V11 is the same as V22
matrix_functions.fill_V11_f(V22, lwl_predict, amp_f, l_f)
# Find V11^{-1}
factor, flag = cho_factor(V11)
mu = mu_GP + np.dot(V12.T, cho_solve((factor, flag), (fl_known - mu_GP)))
Sigma = V22 - np.dot(V12.T, cho_solve((factor, flag), V12))
return (mu, Sigma)
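# Illustrative pure-numpy analogue of the conditioning algebra in predict_f,
# with a squared-exponential kernel standing in for the compiled
# matrix_functions routines. The function name and all values are made up:
def _gp_predict_demo():
    '''Toy check of the GP conditional mean and covariance formulas.'''
    rng = np.random.default_rng(0)
    lwl_known = np.linspace(0.0, 1.0, 8)
    lwl_predict = np.linspace(0.0, 1.0, 5)
    def k(a, b):
        # squared-exponential kernel with a fixed length scale of 0.1
        return np.exp(-0.5 * (a[:, None] - b[None, :])**2 / 0.1**2)
    V11 = k(lwl_known, lwl_known) + 1e-4 * np.eye(8)  # small noise term
    fl_known = 1.0 + 0.01 * rng.standard_normal(8)
    factor, flag = cho_factor(V11)
    mu = 1.0 + k(lwl_predict, lwl_known) @ cho_solve((factor, flag), fl_known - 1.0)
    Sigma = k(lwl_predict, lwl_predict) - \
        k(lwl_predict, lwl_known) @ cho_solve((factor, flag), k(lwl_known, lwl_predict))
    return mu, Sigma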
def predict_python(wl_known, fl_known, sigma_known, wl_predict, amp_f, l_f, mu_GP=1.0):
'''wl_known are known wavelengths.
wl_predict are the prediction wavelengths.'''
# determine V11, V12, V21, and V22
V11 = get_V11(wl_known, sigma_known, amp_f, l_f)
# V12 is covariance between data wavelengths and prediction wavelengths
V12 = get_V12(wl_known, wl_predict, amp_f, l_f)
# V22 is the covariance between the prediction wavelengths
V22 = get_V22(wl_predict, amp_f, l_f)
# Find V11^{-1}
factor, flag = cho_factor(V11)
mu = mu_GP + np.dot(V12.T, cho_solve((factor, flag), (fl_known - mu_GP)))
Sigma = V22 - np.dot(V12.T, cho_solve((factor, flag), V12))
return (mu, Sigma)
def predict_f_g(lwl_f, lwl_g, fl_fg, sigma_fg, lwl_f_predict, lwl_g_predict, mu_f, amp_f, l_f, mu_g, amp_g, l_g, get_Sigma=True):
'''
Given that f + g is the flux that we're modeling, jointly predict the components.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
n_pix = len(lwl_f)
assert len(lwl_f_predict) == len(lwl_g_predict), "Prediction wavelengths must be the same length."
n_pix_predict = len(lwl_f_predict)
# Convert mu constants into vectors
mu_f = mu_f * np.ones(n_pix_predict)
mu_g = mu_g * np.ones(n_pix_predict)
# Cat these into a single vector
mu_cat = np.hstack((mu_f, mu_g))
# Create the matrices for the input data
# print("allocating V11_f, V11_g", n_pix, n_pix)
    V11_f = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_g = np.empty((n_pix, n_pix), dtype=np.float64)
# print("filling V11_f, V11_g", n_pix, n_pix)
matrix_functions.fill_V11_f(V11_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g, amp_g, l_g)
B = V11_f + V11_g
B[np.diag_indices_from(B)] += sigma_fg**2
# print("factoring sum")
factor, flag = cho_factor(B)
# print("Allocating prediction matrices")
# Now create separate matrices for the prediction
    V11_f_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_g_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
# print("Filling prediction matrices")
matrix_functions.fill_V11_f(V11_f_predict, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g_predict, lwl_g_predict, amp_g, l_g)
zeros = np.zeros((n_pix_predict, n_pix_predict))
A = np.vstack((np.hstack([V11_f_predict, zeros]), np.hstack([zeros, V11_g_predict])))
# A[np.diag_indices_from(A)] += 1e-4 # Add a small nugget term
# C is now the cross-matrices between the predicted wavelengths and the data wavelengths
    V12_f = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_g = np.empty((n_pix_predict, n_pix), dtype=np.float64)
# print("Filling cross-matrices")
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
C = np.vstack((V12_f, V12_g))
# print("Sloving for mu, sigma")
# the 1.0 signifies that mu_f + mu_g = mu_fg = 1
mu = mu_cat + np.dot(C, cho_solve((factor, flag), fl_fg - 1.0))
if get_Sigma:
Sigma = A - np.dot(C, cho_solve((factor, flag), C.T))
return mu, Sigma
else:
return mu
def predict_f_g_sum(lwl_f, lwl_g, fl_fg, sigma_fg, lwl_f_predict, lwl_g_predict, mu_fg, amp_f, l_f, amp_g, l_g):
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
M = len(lwl_f_predict)
N = len(lwl_f)
    V11_f = np.empty((M, M), dtype=np.float64)
    V11_g = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g_predict, amp_g, l_g)
V11 = V11_f + V11_g
V11[np.diag_indices_from(V11)] += 1e-8
V12_f = np.empty((M, N), dtype=np.float64)
V12_g = np.empty((M, N), dtype=np.float64)
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
V12 = V12_f + V12_g
    V22_f = np.empty((N,N), dtype=np.float64)
    V22_g = np.empty((N,N), dtype=np.float64)
# It's a square matrix, so we can just reuse fill_V11_f
matrix_functions.fill_V11_f(V22_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V22_g, lwl_g, amp_g, l_g)
V22 = V22_f + V22_g
V22[np.diag_indices_from(V22)] += sigma_fg**2
factor, flag = cho_factor(V22)
mu = mu_fg + np.dot(V12, cho_solve((factor, flag), (fl_fg - 1.0)))
Sigma = V11 - np.dot(V12, cho_solve((factor, flag), V12.T))
return mu, Sigma
def predict_f_g_h(lwl_f, lwl_g, lwl_h, fl_fgh, sigma_fgh, lwl_f_predict, lwl_g_predict, lwl_h_predict, mu_f, mu_g, mu_h, amp_f, l_f, amp_g, l_g, amp_h, l_h):
'''
Given that f + g + h is the flux that we're modeling, jointly predict the components.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
assert len(lwl_f) == len(lwl_h), "Input wavelengths must be the same length."
n_pix = len(lwl_f)
assert len(lwl_f_predict) == len(lwl_g_predict), "Prediction wavelengths must be the same length."
assert len(lwl_f_predict) == len(lwl_h_predict), "Prediction wavelengths must be the same length."
n_pix_predict = len(lwl_f_predict)
# Convert mu constants into vectors
mu_f = mu_f * np.ones(n_pix_predict)
mu_g = mu_g * np.ones(n_pix_predict)
mu_h = mu_h * np.ones(n_pix_predict)
# Cat these into a single vector
mu_cat = np.hstack((mu_f, mu_g, mu_h))
    V11_f = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_g = np.empty((n_pix, n_pix), dtype=np.float64)
    V11_h = np.empty((n_pix, n_pix), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h, lwl_h, amp_h, l_h)
B = V11_f + V11_g + V11_h
B[np.diag_indices_from(B)] += sigma_fgh**2
factor, flag = cho_factor(B)
# Now create separate matrices for the prediction
    V11_f_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_g_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
    V11_h_predict = np.empty((n_pix_predict, n_pix_predict), dtype=np.float64)
# Fill the prediction matrices
matrix_functions.fill_V11_f(V11_f_predict, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g_predict, lwl_g_predict, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h_predict, lwl_h_predict, amp_h, l_h)
zeros = np.zeros((n_pix_predict, n_pix_predict))
A = np.vstack((np.hstack([V11_f_predict, zeros, zeros]), np.hstack([zeros, V11_g_predict, zeros]), np.hstack([zeros, zeros, V11_h_predict])))
    V12_f = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_g = np.empty((n_pix_predict, n_pix), dtype=np.float64)
    V12_h = np.empty((n_pix_predict, n_pix), dtype=np.float64)
matrix_functions.fill_V12_f(V12_f, lwl_f_predict, lwl_f, amp_f, l_f)
matrix_functions.fill_V12_f(V12_g, lwl_g_predict, lwl_g, amp_g, l_g)
matrix_functions.fill_V12_f(V12_h, lwl_h_predict, lwl_h, amp_h, l_h)
C = np.vstack((V12_f, V12_g, V12_h))
mu = mu_cat + np.dot(C, cho_solve((factor, flag), fl_fgh - 1.0))
Sigma = A - np.dot(C, cho_solve((factor, flag), C.T))
return mu, Sigma
def predict_f_g_h_sum(lwl_f, lwl_g, lwl_h, fl_fgh, sigma_fgh, lwl_f_predict, lwl_g_predict, lwl_h_predict, mu_fgh, amp_f, l_f, amp_g, l_g, amp_h, l_h):
'''
Given that f + g + h is the flux that we're modeling, predict the joint sum.
'''
# Assert that wl_f and wl_g are the same length
assert len(lwl_f) == len(lwl_g), "Input wavelengths must be the same length."
M = len(lwl_f_predict)
N = len(lwl_f)
    V11_f = np.empty((M, M), dtype=np.float64)
    V11_g = np.empty((M, M), dtype=np.float64)
    V11_h = np.empty((M, M), dtype=np.float64)
matrix_functions.fill_V11_f(V11_f, lwl_f_predict, amp_f, l_f)
matrix_functions.fill_V11_f(V11_g, lwl_g_predict, amp_g, l_g)
matrix_functions.fill_V11_f(V11_h, lwl_h_predict, amp_h, l_h)
V11 = V11_f + V11_g + V11_h
# V11[np.diag_indices_from(V11)] += 1e-5 # small nugget term
V12_f = np.empty((M, N), dtype=np.float64)
V12_g = np.empty((M, N), dtype=np.float64)
    V12_h = np.empty((M, N), dtype=np.float64)
''' Unit tests for utils
'''
import collections
import numpy as np
import nose.tools
import mir_eval
from mir_eval import util
def test_interpolate_intervals():
"""Check that an interval set is interpolated properly, with boundaries
conditions and out-of-range values.
"""
labels = list('abc')
intervals = np.array([(n, n + 1.0) for n in range(len(labels))])
time_points = [-1.0, 0.1, 0.9, 1.0, 2.3, 4.0]
expected_ans = ['N', 'a', 'a', 'b', 'c', 'N']
assert (util.interpolate_intervals(intervals, labels, time_points, 'N') ==
expected_ans)
def test_interpolate_intervals_gap():
"""Check that an interval set is interpolated properly, with gaps."""
labels = list('abc')
intervals = np.array([[0.5, 1.0], [1.5, 2.0], [2.5, 3.0]])
time_points = [0.0, 0.75, 1.25, 1.75, 2.25, 2.75, 3.5]
expected_ans = ['N', 'a', 'N', 'b', 'N', 'c', 'N']
assert (util.interpolate_intervals(intervals, labels, time_points, 'N') ==
expected_ans)
@nose.tools.raises(ValueError)
def test_interpolate_intervals_badtime():
"""Check that interpolate_intervals throws an exception if
input is unordered.
"""
labels = list('abc')
intervals = np.array([(n, n + 1.0) for n in range(len(labels))])
time_points = [-1.0, 0.1, 0.9, 0.8, 2.3, 4.0]
mir_eval.util.interpolate_intervals(intervals, labels, time_points)
def test_intervals_to_samples():
"""Check that an interval set is sampled properly, with boundaries
conditions and out-of-range values.
"""
labels = list('abc')
intervals = np.array([(n, n + 1.0) for n in range(len(labels))])
expected_times = [0.0, 0.5, 1.0, 1.5, 2.0, 2.5]
expected_labels = ['a', 'a', 'b', 'b', 'c', 'c']
result = util.intervals_to_samples(
intervals, labels, offset=0, sample_size=0.5, fill_value='N')
assert result[0] == expected_times
assert result[1] == expected_labels
expected_times = [0.25, 0.75, 1.25, 1.75, 2.25, 2.75]
expected_labels = ['a', 'a', 'b', 'b', 'c', 'c']
result = util.intervals_to_samples(
intervals, labels, offset=0.25, sample_size=0.5, fill_value='N')
assert result[0] == expected_times
assert result[1] == expected_labels
def test_intersect_files():
"""Check that two non-identical yield correct results.
"""
flist1 = ['/a/b/abc.lab', '/c/d/123.lab', '/e/f/xyz.lab']
flist2 = ['/g/h/xyz.npy', '/i/j/123.txt', '/k/l/456.lab']
sublist1, sublist2 = util.intersect_files(flist1, flist2)
assert sublist1 == ['/e/f/xyz.lab', '/c/d/123.lab']
assert sublist2 == ['/g/h/xyz.npy', '/i/j/123.txt']
sublist1, sublist2 = util.intersect_files(flist1[:1], flist2[:1])
assert sublist1 == []
assert sublist2 == []
def test_merge_labeled_intervals():
"""Check that two labeled interval sequences merge correctly.
"""
x_intvs = np.array([
[0.0, 0.44],
[0.44, 2.537],
[2.537, 4.511],
[4.511, 6.409]])
x_labels = ['A', 'B', 'C', 'D']
y_intvs = np.array([
[0.0, 0.464],
[0.464, 2.415],
[2.415, 4.737],
[4.737, 6.409]])
y_labels = [0, 1, 2, 3]
expected_intvs = [
[0.0, 0.44],
[0.44, 0.464],
[0.464, 2.415],
[2.415, 2.537],
[2.537, 4.511],
[4.511, 4.737],
[4.737, 6.409]]
expected_x_labels = ['A', 'B', 'B', 'B', 'C', 'D', 'D']
expected_y_labels = [0, 0, 1, 2, 2, 2, 3]
new_intvs, new_x_labels, new_y_labels = util.merge_labeled_intervals(
x_intvs, x_labels, y_intvs, y_labels)
assert new_x_labels == expected_x_labels
assert new_y_labels == expected_y_labels
assert new_intvs.tolist() == expected_intvs
# Check that invalid inputs raise a ValueError
y_intvs[-1, -1] = 10.0
nose.tools.assert_raises(ValueError, util.merge_labeled_intervals, x_intvs,
x_labels, y_intvs, y_labels)
def test_boundaries_to_intervals():
# Basic tests
boundaries = np.arange(10)
correct_intervals = np.array([np.arange(10 - 1), np.arange(1, 10)]).T
intervals = mir_eval.util.boundaries_to_intervals(boundaries)
assert np.all(intervals == correct_intervals)
def test_adjust_events():
# Test appending at the end
events = np.arange(1, 11)
labels = [str(n) for n in range(10)]
new_e, new_l = mir_eval.util.adjust_events(events, labels, 0.0, 11.)
assert new_e[0] == 0.
assert new_l[0] == '__T_MIN'
assert new_e[-1] == 11.
assert new_l[-1] == '__T_MAX'
assert np.all(new_e[1:-1] == events)
assert new_l[1:-1] == labels
# Test trimming
new_e, new_l = mir_eval.util.adjust_events(events, labels, 0.0, 9.)
assert new_e[0] == 0.
assert new_l[0] == '__T_MIN'
assert new_e[-1] == 9.
assert np.all(new_e[1:] == events[:-1])
assert new_l[1:] == labels[:-1]
def test_bipartite_match():
# This test constructs a graph as follows:
# v9 -- (u0)
# v8 -- (u0, u1)
# v7 -- (u0, u1, u2)
# ...
# v0 -- (u0, u1, ..., u9)
#
# This structure and ordering of this graph should force Hopcroft-Karp to
# hit each algorithm/layering phase
#
G = collections.defaultdict(list)
u_set = ['u{:d}'.format(_) for _ in range(10)]
v_set = ['v{:d}'.format(_) for _ in range(len(u_set)+1)]
for i, u in enumerate(u_set):
for v in v_set[:-i-1]:
G[v].append(u)
matching = util._bipartite_match(G)
# Make sure that each u vertex is matched
nose.tools.eq_(len(matching), len(u_set))
# Make sure that there are no duplicate keys
lhs = set([k for k in matching])
rhs = set([matching[k] for k in matching])
nose.tools.eq_(len(matching), len(lhs))
nose.tools.eq_(len(matching), len(rhs))
# Finally, make sure that all detected edges are present in G
for k in matching:
v = matching[k]
assert v in G[k] or k in G[v]
def test_outer_distance_mod_n():
ref = [1., 2., 3.]
est = [1.1, 6., 1.9, 5., 10.]
expected = np.array([
[0.1, 5., 0.9, 4., 3.],
[0.9, 4., 0.1, 3., 4.],
[1.9, 3., 1.1, 2., 5.]])
actual = mir_eval.util._outer_distance_mod_n(ref, est)
assert np.allclose(actual, expected)
ref = [13., 14., 15.]
est = [1.1, 6., 1.9, 5., 10.]
expected = np.array([
[0.1, 5., 0.9, 4., 3.],
[0.9, 4., 0.1, 3., 4.],
[1.9, 3., 1.1, 2., 5.]])
actual = mir_eval.util._outer_distance_mod_n(ref, est)
assert np.allclose(actual, expected)
def test_match_events():
ref = [1., 2., 3.]
est = [1.1, 6., 1.9, 5., 10.]
expected = [(0, 0), (1, 2)]
actual = mir_eval.util.match_events(ref, est, 0.5)
assert actual == expected
ref = [1., 2., 3., 11.9]
est = [1.1, 6., 1.9, 5., 10., 0.]
expected = [(0, 0), (1, 2), (3, 5)]
actual = mir_eval.util.match_events(
ref, est, 0.5, distance=mir_eval.util._outer_distance_mod_n)
assert actual == expected
def test_fast_hit_windows():
ref = [1., 2., 3.]
est = [1.1, 6., 1.9, 5., 10.]
ref_fast, est_fast = mir_eval.util._fast_hit_windows(ref, est, 0.5)
ref_slow, est_slow = np.where(np.abs(np.subtract.outer(ref, est)) <= 0.5)
assert np.all(ref_fast == ref_slow)
assert np.all(est_fast == est_slow)
def test_validate_intervals():
# Test for ValueError when interval shape is invalid
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_intervals,
np.array([[1.], [2.5], [5.]]))
# Test for ValueError when times are negative
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_intervals,
np.array([[1., -2.], [2.5, 3.], [5., 6.]]))
# Test for ValueError when duration is zero
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_intervals,
np.array([[1., 2.], [2.5, 2.5], [5., 6.]]))
# Test for ValueError when duration is negative
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_intervals,
np.array([[1., 2.], [2.5, 1.5], [5., 6.]]))
def test_validate_events():
# Test for ValueError when max_time is violated
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_events, np.array([100., 100000.]))
# Test for ValueError when events aren't 1-d arrays
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_events,
np.array([[1., 2.], [3., 4.]]))
# Test for ValueError when event times are not increasing
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_events,
np.array([1., 2., 5., 3.]))
def test_validate_frequencies():
# Test for ValueError when max_freq is violated
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([100., 100000.]), 5000., 20.)
# Test for ValueError when min_freq is violated
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([2., 200.]), 5000., 20.)
# Test for ValueError when events aren't 1-d arrays
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([[100., 200.], [300., 400.]]), 5000., 20.)
# Test for ValueError when allow_negatives is false and negative values
# are passed
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([[-100., 200.], [300., 400.]]), 5000., 20.,
allow_negatives=False)
# Test for ValueError when max_freq is violated and allow_negatives=True
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([100., -100000.]), 5000., 20., allow_negatives=True)
# Test for ValueError when min_freq is violated and allow_negatives=True
nose.tools.assert_raises(
ValueError, mir_eval.util.validate_frequencies,
np.array([-2., 200.]), 5000., 20., allow_negatives=True)
def test_has_kwargs():
def __test(target, f):
assert target == mir_eval.util.has_kwargs(f)
def f1(_):
return None
def f2(_=5):
return None
def f3(*_):
return None
def f4(_, **kw):
return None
def f5(_=5, **kw):
return None
yield __test, False, f1
yield __test, False, f2
yield __test, False, f3
yield __test, True, f4
yield __test, True, f5
def test_sort_labeled_intervals():
def __test_labeled(x, labels, x_true, lab_true):
xs, ls = mir_eval.util.sort_labeled_intervals(x, labels)
assert np.allclose(xs, x_true)
nose.tools.eq_(ls, lab_true)
def __test(x, x_true):
xs = mir_eval.util.sort_labeled_intervals(x)
        assert np.allclose(xs, x_true)
# Common Python library imports
# Pip package imports
from loguru import logger
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Internal package imports
from soccer_fw.utils import listify, fulltime_result_tags
DEFAULT_ENGINE_NAME = "default"
DEFAULT_ENGINE_VERSION = "v0_1"
class StatisticModel():
def __init__(self, bankroll, *args, **kwargs):
self._initial_bankroll = bankroll
self._bankroll = bankroll
self._v_bankroll = bankroll
self._pending_stakes = pd.DataFrame()
self._win_streaks = np.array([0])
self._loose_streaks = np.array([0])
self._profit = 0
self._matches_won = 0
self._total_matches = 0
self._matches_played = 0
self._idx = None
self._stat_df = pd.DataFrame()
@property
def hitrate(self):
return self._matches_won / self._matches_played
@property
def roi(self):
return self._profit / self._stat_df['Stake'].sum()
@property
def dataframe(self):
if self._idx is not None:
return self._stat_df.set_index(self._idx)
return self._stat_df
@property
def bankroll(self):
return self._bankroll
@property
def profit(self):
return self._profit
@property
def loose_streak(self):
return self._loose_streaks
@property
def win_streak(self):
return self._win_streaks
def plot(self):
# gca stands for 'get current axis'
ax = plt.gca()
df = pd.concat([self.dataframe, pd.DataFrame({ 'Win Streak': self.win_streak, 'Lose Streak': self.loose_streak})], axis=1)
ls_max = df['Lose Streak'].max()
b_maximum = df['Bankroll'].max() * 0.7 / ls_max
df['Win Streak'] = df['Win Streak'] * b_maximum
df['Lose Streak'] = df['Lose Streak'] * b_maximum
df.plot(kind='line', y='Bankroll', color='blue', ax=ax)
df.plot(kind='bar', y='Stake', color='green', ax=ax)
df.plot(kind='bar', y='Profit', color='yellow', ax=ax)
df.plot(kind='bar', y='Lose Streak', color='red', ax=ax)
df.plot(kind='bar', y='Win Streak', color='grey', ax=ax)
plt.show()
def place_bet(self, stake, odd, win):
"""
Place a bet. The stake will be substracted from the bankroll and added to the pending bets.
Pending bet's has to be evaluated by the eval_bet call. The function return with the bet index
:param stake: Amount money to bet
:return: Index of the bet
"""
assert (self._bankroll - stake > 0), "The bankroll cannot go to negative"
self._pending_stakes = self._pending_stakes.append({ 'Stake': stake, 'Odd': odd, 'Win': win}, ignore_index=True)
self._bankroll -= stake
return len(self._pending_stakes.index) - 1
def eval_bet(self, index, key=None):
assert key is None or isinstance(key, tuple), "Key has to a tuple or None"
index_row = self._pending_stakes[self._pending_stakes.index == index]
pending_stake = self._pending_stakes[self._pending_stakes.index > index]['Stake'].sum()
stake = float(index_row['Stake'])
odd = float(index_row['Odd'])
win = float(index_row['Win'])
won_amount = 0
# Modify the statistic only when the match was played
if stake > 0:
self._matches_played += 1
if win > 0.0:
won_amount = float(stake) * float(odd)
self._bankroll += won_amount
# Store statistics
self._profit += (won_amount - stake)
self._win_streaks = np.append(self._win_streaks, self._win_streaks[-1] + 1)
self._loose_streaks = np.append(self._loose_streaks, 0)
self._matches_won += 1
else:
self._profit -= stake
# Store statistics
                self._loose_streaks = np.append(self._loose_streaks, self._loose_streaks[-1] + 1)
                self._win_streaks = np.append(self._win_streaks, 0)  # reset the win streak so the two streak arrays stay aligned
"""Topology optimization problem to solve."""
import abc
import numpy
import scipy.sparse
import scipy.sparse.linalg
import cvxopt
import cvxopt.cholmod
from .boundary_conditions import BoundaryConditions
from .utils import deleterowcol
class Problem(abc.ABC):
"""
Abstract topology optimization problem.
Attributes
----------
bc: BoundaryConditions
The boundary conditions for the problem.
penalty: float
The SIMP penalty value.
f: numpy.ndarray
The right-hand side of the FEM equation (forces).
u: numpy.ndarray
The variables of the FEM equation.
obje: numpy.ndarray
The per element objective values.
"""
def __init__(self, bc: BoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
bc:
The boundary conditions of the problem.
penalty:
The penalty value used to penalize fractional densities in SIMP.
"""
# Problem size
self.nelx = bc.nelx
self.nely = bc.nely
self.nel = self.nelx * self.nely
        # Count degrees of freedom
self.ndof = 2 * (self.nelx + 1) * (self.nely + 1)
# SIMP penalty
self.penalty = penalty
# BC's and support (half MBB-beam)
self.bc = bc
dofs = numpy.arange(self.ndof)
self.fixed = bc.fixed_nodes
self.free = numpy.setdiff1d(dofs, self.fixed)
# RHS and Solution vectors
self.f = bc.forces
self.u = numpy.zeros(self.f.shape)
# Per element objective
self.obje = numpy.zeros(self.nely * self.nelx)
def __str__(self) -> str:
"""Create a string representation of the problem."""
return self.__class__.__name__
def __format__(self, format_spec) -> str:
"""Create a formated representation of the problem."""
return str(self)
def __repr__(self) -> str:
"""Create a representation of the problem."""
return "{}(bc={!r}, penalty={:g})".format(
self.__class__.__name__, self.penalty, self.bc)
def penalize_densities(self, x: numpy.ndarray, drho: numpy.ndarray = None
) -> numpy.ndarray:
"""
Compute the penalized densties (and optionally its derivative).
Parameters
----------
x:
The density variables to penalize.
drho:
The derivative of the penealized densities to compute. Only set if
drho is not None.
Returns
-------
numpy.ndarray
The penalized densities used for SIMP.
"""
rho = x**self.penalty
if drho is not None:
assert(drho.shape == x.shape)
drho[:] = rho
valid = x != 0 # valid values for division
drho[valid] *= self.penalty / x[valid]
return rho
@abc.abstractmethod
def compute_objective(
self, xPhys: numpy.ndarray, dobj: numpy.ndarray) -> float:
"""
Compute objective and its gradient.
Parameters
----------
xPhys:
The design variables.
dobj:
The gradient of the objective to compute.
Returns
-------
float
The objective value.
"""
pass
class ElasticityProblem(Problem):
"""
Abstract elasticity topology optimization problem.
Attributes
----------
Emin: float
        The Young's modulus used for the void regions.
Emax: float
        The Young's modulus used for the solid regions.
nu: float
Poisson's ratio of the material.
f: numpy.ndarray
The right-hand side of the FEM equation (forces).
u: numpy.ndarray
        The variables of the FEM equation (displacements).
nloads: int
The number of loads applied to the material.
"""
@staticmethod
def lk(E: float = 1.0, nu: float = 0.3) -> numpy.ndarray:
"""
Build the element stiffness matrix.
Parameters
----------
E:
The Young's modulus of the material.
nu:
The Poisson's ratio of the material.
Returns
-------
numpy.ndarray
The element stiffness matrix for the material.
"""
k = numpy.array([
0.5 - nu / 6., 0.125 + nu / 8., -0.25 - nu / 12.,
-0.125 + 0.375 * nu, -0.25 + nu / 12., -0.125 - nu / 8., nu / 6.,
0.125 - 0.375 * nu])
KE = E / (1 - nu**2) * numpy.array([
[k[0], k[1], k[2], k[3], k[4], k[5], k[6], k[7]],
[k[1], k[0], k[7], k[6], k[5], k[4], k[3], k[2]],
[k[2], k[7], k[0], k[5], k[6], k[3], k[4], k[1]],
[k[3], k[6], k[5], k[0], k[7], k[2], k[1], k[4]],
[k[4], k[5], k[6], k[7], k[0], k[1], k[2], k[3]],
[k[5], k[4], k[3], k[2], k[1], k[0], k[7], k[6]],
[k[6], k[3], k[4], k[1], k[2], k[7], k[0], k[5]],
[k[7], k[2], k[1], k[4], k[3], k[6], k[5], k[0]]])
return KE
def __init__(self, bc: BoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
bc:
The boundary conditions of the problem.
penalty:
The penalty value used to penalize fractional densities in SIMP.
"""
super().__init__(bc, penalty)
# Max and min stiffness
self.Emin = 1e-9
self.Emax = 1.0
# FE: Build the index vectors for the for coo matrix format.
self.nu = 0.3
self.build_indices()
# BC's and support (half MBB-beam)
self.bc = bc
dofs = numpy.arange(self.ndof)
self.fixed = bc.fixed_nodes
self.free = numpy.setdiff1d(dofs, self.fixed)
# Number of loads
self.nloads = self.f.shape[1]
def build_indices(self) -> None:
"""Build the index vectors for the finite element coo matrix format."""
self.KE = self.lk(E=self.Emax, nu=self.nu)
self.edofMat = numpy.zeros((self.nelx * self.nely, 8), dtype=int)
for elx in range(self.nelx):
for ely in range(self.nely):
el = ely + elx * self.nely
n1 = (self.nely + 1) * elx + ely
n2 = (self.nely + 1) * (elx + 1) + ely
self.edofMat[el, :] = numpy.array([
2 * n1 + 2, 2 * n1 + 3, 2 * n2 + 2, 2 * n2 + 3, 2 * n2,
2 * n2 + 1, 2 * n1, 2 * n1 + 1])
# Construct the index pointers for the coo format
self.iK = numpy.kron(self.edofMat, numpy.ones((8, 1))).flatten()
self.jK = numpy.kron(self.edofMat, numpy.ones((1, 8))).flatten()
def compute_young_moduli(self, x: numpy.ndarray, dE: numpy.ndarray = None
) -> numpy.ndarray:
"""
        Compute the Young's modulus of each element from the densities.
Optionally compute the derivative of the Young's modulus.
Parameters
----------
x:
The density variable of each element.
dE:
The derivative of Young's moduli to compute. Only set if dE is not
None.
Returns
-------
numpy.ndarray
The elements' Young's modulus.
"""
drho = None if dE is None else numpy.empty(x.shape)
rho = self.penalize_densities(x, drho)
if drho is not None and dE is not None:
assert(dE.shape == x.shape)
dE[:] = (self.Emax - self.Emin) * drho
return (self.Emax - self.Emin) * rho + self.Emin
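    # Numeric sketch (illustrative): with Emin = 1e-9, Emax = 1.0 and
    # penalty p = 3, an element at x = 0.5 gets
    # E = Emin + (Emax - Emin) * 0.5**3 ~= 0.125. The modified SIMP
    # interpolation keeps E strictly positive, so the assembled stiffness
    # matrix stays non-singular even when elements are fully void.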
def build_K(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo_matrix:
"""
Build the stiffness matrix for the problem.
Parameters
----------
xPhys:
            The element densities used to build the stiffness matrix.
remove_constrained:
Should the constrained nodes be removed?
Returns
-------
scipy.sparse.coo_matrix
The stiffness matrix for the mesh.
"""
sK = ((self.KE.flatten()[numpy.newaxis]).T *
self.compute_young_moduli(xPhys)).flatten(order='F')
K = scipy.sparse.coo_matrix(
(sK, (self.iK, self.jK)), shape=(self.ndof, self.ndof))
if remove_constrained:
# Remove constrained dofs from matrix and convert to coo
K = deleterowcol(K.tocsc(), self.fixed, self.fixed).tocoo()
return K
def compute_displacements(self, xPhys: numpy.ndarray) -> numpy.ndarray:
"""
Compute the displacements given the densities.
Compute the displacment, :math:`u`, using linear elastic finite
element analysis (solving :math:`Ku = f` where :math:`K` is the
stiffness matrix and :math:`f` is the force vector).
Parameters
----------
xPhys:
            The element densities used to build the stiffness matrix.
Returns
-------
numpy.ndarray
            The displacements solved using linear elastic finite element
analysis.
"""
# Setup and solve FE problem
K = self.build_K(xPhys)
K = cvxopt.spmatrix(
            K.data, K.row.astype(int), K.col.astype(int))
# Solve system
F = cvxopt.matrix(self.f[self.free, :])
cvxopt.cholmod.linsolve(K, F) # F stores solution after solve
new_u = self.u.copy()
new_u[self.free, :] = numpy.array(F)[:, :]
return new_u
def update_displacements(self, xPhys: numpy.ndarray) -> None:
"""
Update the displacements of the problem.
Parameters
----------
xPhys:
            The element densities used to compute the displacements.
"""
self.u[:, :] = self.compute_displacements(xPhys)
class ComplianceProblem(ElasticityProblem):
r"""
Topology optimization problem to minimize compliance.
:math:`\begin{aligned}
\min_{\boldsymbol{\rho}} \quad & \mathbf{f}^T\mathbf{u}\\
\textrm{subject to}: \quad & \mathbf{K}\mathbf{u} = \mathbf{f}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1\\
\end{aligned}`
where :math:`\mathbf{f}` are the forces, :math:`\mathbf{u}` are the \
    displacements, :math:`\mathbf{K}` is the stiffness matrix, and :math:`V`
is the volume.
"""
def compute_objective(
self, xPhys: numpy.ndarray, dobj: numpy.ndarray) -> float:
r"""
Compute compliance and its gradient.
The objective is :math:`\mathbf{f}^{T} \mathbf{u}`. The gradient of
the objective is
:math:`\begin{align}
\mathbf{f}^T\mathbf{u} &= \mathbf{f}^T\mathbf{u} -
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial}{\partial \rho_e}(\mathbf{f}^T\mathbf{u}) &=
(\mathbf{K}\boldsymbol{\lambda} - \mathbf{f})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \mathbf{u}^T\frac{\partial \mathbf K}{\partial \rho_e}\mathbf{u}
\end{align}`
where :math:`\boldsymbol{\lambda} = \mathbf{u}`.
Parameters
----------
xPhys:
The element densities.
dobj:
The gradient of compliance.
Returns
-------
float
The compliance value.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
obj = 0.0
dobj[:] = 0.0
dE = numpy.empty(xPhys.shape)
E = self.compute_young_moduli(xPhys, dE)
for i in range(self.nloads):
ui = self.u[:, i][self.edofMat].reshape(-1, 8)
self.obje[:] = (ui @ self.KE * ui).sum(1)
obj += (E * self.obje).sum()
dobj[:] += -dE * self.obje
dobj /= float(self.nloads)
return obj / float(self.nloads)
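# Minimal usage sketch (assumptions: a concrete BoundaryConditions subclass is
# available from this package; `MBBBeamBoundaryConditions` below is a
# hypothetical name used only for illustration):
#
#     bc = MBBBeamBoundaryConditions(nelx=60, nely=20)
#     problem = ComplianceProblem(bc, penalty=3.0)
#     xPhys = numpy.full(problem.nel, 0.4)      # uniform 40% density
#     dobj = numpy.zeros(problem.nel)
#     c = problem.compute_objective(xPhys, dobj)  # compliance; dobj filled in place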
class HarmonicLoadsProblem(ElasticityProblem):
r"""
Topology optimization problem to minimize dynamic compliance.
Replaces standard forces with undamped forced vibrations.
:math:`\begin{aligned}
\min_{\boldsymbol{\rho}} \quad & \mathbf{f}^T\mathbf{u}\\
\textrm{subject to}: \quad & \mathbf{S}\mathbf{u} = \mathbf{f}\\
& \sum_{e=1}^N v_e\rho_e \leq V_\text{frac},
\quad 0 < \rho_\min \leq \rho_e \leq 1\\
\end{aligned}`
where :math:`\mathbf{f}` is the amplitude of the load, :math:`\mathbf{u}`
is the amplitude of vibration, and :math:`\mathbf{S}` is the system matrix
(or "dynamic striffness" matrix) defined as
:math:`\begin{aligned}
\mathbf{S} = \mathbf{K} - \omega^2\mathbf{M}
\end{aligned}`
where :math:`\omega` is the angular frequency of the load, and
:math:`\mathbf{M}` is the global mass matrix.
"""
@staticmethod
def lm(nel: int) -> numpy.ndarray:
r"""
Build the element mass matrix.
:math:`M = \frac{1}{9 \times 4n}\begin{bmatrix}
4 & 0 & 2 & 0 & 1 & 0 & 2 & 0 \\
0 & 4 & 0 & 2 & 0 & 1 & 0 & 2 \\
2 & 0 & 4 & 0 & 2 & 0 & 1 & 0 \\
0 & 2 & 0 & 4 & 0 & 2 & 0 & 1 \\
1 & 0 & 2 & 0 & 4 & 0 & 2 & 0 \\
0 & 1 & 0 & 2 & 0 & 4 & 0 & 2 \\
2 & 0 & 1 & 0 & 2 & 0 & 4 & 0 \\
0 & 2 & 0 & 1 & 0 & 2 & 0 & 4
\end{bmatrix}`
Where :math:`n` is the total number of elements. The total mass is
equal to unity.
Parameters
----------
nel:
The total number of elements.
Returns
-------
numpy.ndarray
The element mass matrix for the material.
"""
return numpy.array([
[4, 0, 2, 0, 1, 0, 2, 0],
[0, 4, 0, 2, 0, 1, 0, 2],
[2, 0, 4, 0, 2, 0, 1, 0],
[0, 2, 0, 4, 0, 2, 0, 1],
[1, 0, 2, 0, 4, 0, 2, 0],
[0, 1, 0, 2, 0, 4, 0, 2],
[2, 0, 1, 0, 2, 0, 4, 0],
[0, 2, 0, 1, 0, 2, 0, 4]], dtype=float) / (36 * nel)
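    # Sanity check (derived from the matrix above, not stated in the original
    # source): each row sums to 9 / (36 * nel) = 0.25 / nel, so the mass per
    # element and per direction is 4 * 0.25 / nel = 1 / nel, and assembling
    # all nel elements recovers a unit total mass, as the docstring claims.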
def __init__(self, bc: BoundaryConditions, penalty: float):
"""
Create the topology optimization problem.
Parameters
----------
bc:
The boundary conditions of the problem.
penalty:
The penalty value used to penalize fractional densities in SIMP.
"""
super().__init__(bc, penalty)
self.angular_frequency = 0e-2
def build_indices(self) -> None:
"""Build the index vectors for the finite element coo matrix format."""
super().build_indices()
self.ME = self.lm(self.nel)
def build_M(self, xPhys: numpy.ndarray, remove_constrained: bool = True
) -> scipy.sparse.coo_matrix:
"""
        Build the mass matrix for the problem.
        Parameters
        ----------
        xPhys:
            The element densities used to build the mass matrix.
        remove_constrained:
            Should the constrained nodes be removed?
        Returns
        -------
        scipy.sparse.coo_matrix
            The mass matrix for the mesh.
"""
# vals = numpy.tile(self.ME.flatten(), xPhys.size)
vals = (self.ME.reshape(-1, 1) *
self.penalize_densities(xPhys)).flatten(order='F')
M = scipy.sparse.coo_matrix((vals, (self.iK, self.jK)),
shape=(self.ndof, self.ndof))
if remove_constrained:
# Remove constrained dofs from matrix and convert to coo
M = deleterowcol(M.tocsc(), self.fixed, self.fixed).tocoo()
return M
def compute_displacements(self, xPhys: numpy.ndarray) -> numpy.ndarray:
r"""
Compute the amplitude of vibration given the densities.
Compute the amplitude of vibration, :math:`\mathbf{u}`, using linear
elastic finite element analysis (solving
:math:`\mathbf{S}\mathbf{u} = \mathbf{f}` where :math:`\mathbf{S} =
\mathbf{K} - \omega^2\mathbf{M}` is the system matrix and
:math:`\mathbf{f}` is the force vector).
Parameters
----------
xPhys:
            The element densities used to build the stiffness matrix.
Returns
-------
numpy.ndarray
            The displacements solved using linear elastic finite element
analysis.
"""
# Setup and solve FE problem
K = self.build_K(xPhys)
M = self.build_M(xPhys)
S = (K - self.angular_frequency**2 * M).tocoo()
cvxopt_S = cvxopt.spmatrix(
            S.data, S.row.astype(int), S.col.astype(int))
# Solve system
F = cvxopt.matrix(self.f[self.free, :])
try:
# F stores solution after solve
cvxopt.cholmod.linsolve(cvxopt_S, F)
except Exception:
F = scipy.sparse.linalg.spsolve(S.tocsc(), self.f[self.free, :])
F = F.reshape(-1, self.nloads)
new_u = self.u.copy()
new_u[self.free, :] = numpy.array(F)[:, :]
return new_u
def compute_objective(
self, xPhys: numpy.ndarray, dobj: numpy.ndarray) -> float:
r"""
Compute compliance and its gradient.
The objective is :math:`\mathbf{f}^{T} \mathbf{u}`. The gradient of
the objective is
:math:`\begin{align}
\mathbf{f}^T\mathbf{u} &= \mathbf{f}^T\mathbf{u} -
\boldsymbol{\lambda}^T(\mathbf{K}\mathbf{u} - \mathbf{f})\\
\frac{\partial}{\partial \rho_e}(\mathbf{f}^T\mathbf{u}) &=
(\mathbf{K}\boldsymbol{\lambda} - \mathbf{f})^T
\frac{\partial \mathbf u}{\partial \rho_e} +
\boldsymbol{\lambda}^T\frac{\partial \mathbf K}{\partial \rho_e}
\mathbf{u}
= \mathbf{u}^T\frac{\partial \mathbf K}{\partial \rho_e}\mathbf{u}
\end{align}`
where :math:`\boldsymbol{\lambda} = \mathbf{u}`.
Parameters
----------
xPhys:
The element densities.
dobj:
The gradient of compliance.
Returns
-------
float
The compliance value.
"""
# Setup and solve FE problem
self.update_displacements(xPhys)
obj = 0.0
dobj[:] = 0.0
dE = numpy.empty(xPhys.shape)
E = self.compute_young_moduli(xPhys, dE)
drho = numpy.empty(xPhys.shape)
penalty = self.penalty
self.penalty = 2
rho = self.penalize_densities(xPhys, drho)
self.penalty = penalty
for i in range(self.nloads):
ui = self.u[:, i][self.edofMat].reshape(-1, 8)
obje1 = (ui @ self.KE * ui).sum(1)
obje2 = (ui @ (-self.angular_frequency**2 * self.ME) * ui).sum(1)
self.obje[:] = obje1 + obje2
obj += (E * obje1 + rho * obje2).sum()
dobj[:] += -(dE * obje1 + drho * obje2)
dobj /= float(self.nloads)
return obj / float(self.nloads)
class VonMisesStressProblem(ElasticityProblem):
"""
Topology optimization problem to minimize stress.
Todo:
* Currently this problem minimizes compliance and computes stress on
the side. This needs to be replaced to match the promise of
minimizing stress.
"""
@staticmethod
def B(side: float) -> numpy.ndarray:
r"""
Construct a strain-displacement matrix for a 2D regular grid.
:math:`B = \frac{1}{2s}\begin{bmatrix}
1 & 0 & -1 & 0 & -1 & 0 & 1 & 0 \\
0 & 1 & 0 & 1 & 0 & -1 & 0 & -1 \\
1 & 1 & 1 & -1 & -1 & -1 & -1 & 1
\end{bmatrix}`
where :math:`s` is the side length of the square elements.
Todo:
* Check that this is not -B
Parameters
----------
side:
The side length of the square elements.
Returns
-------
numpy.ndarray
The strain-displacement matrix for a 2D regular grid.
"""
n = -0.5 / side
p = 0.5 / side
return numpy.array([[p, 0, n, 0, n, 0, p, 0],
[0, p, 0, p, 0, n, 0, n],
[p, p, p, n, n, n, n, p]])
@staticmethod
def E(nu):
r"""
Construct a constitutive matrix for a 2D regular grid.
:math:`E = \frac{1}{1 - \nu^2}\begin{bmatrix}
1 & \nu & 0 \\
\nu & 1 & 0 \\
0 & 0 & \frac{1 - \nu}{2}
\end{bmatrix}`
Parameters
----------
nu:
The Poisson's ratio of the material.
Returns
-------
numpy.ndarray
The constitutive matrix for a 2D regular grid.
"""
return numpy.array([[1, nu, 0],
[nu, 1, 0],
[0, 0, (1 - nu) / 2.]]) / (1 - nu**2)
def __init__(self, nelx, nely, penalty, bc, side=1):
super().__init__(bc, penalty)
self.EB = self.E(self.nu) @ self.B(side)
self.du = numpy.zeros((self.ndof, self.nel * self.nloads))
self.stress = numpy.zeros(self.nel)
self.dstress = numpy.zeros(self.nel)
def build_dK0(self, drho_xi, i, remove_constrained=True):
sK = ((self.KE.flatten()[numpy.newaxis]).T * drho_xi).flatten(
order='F')
iK = self.iK[64 * i: 64 * i + 64]
jK = self.jK[64 * i: 64 * i + 64]
dK = scipy.sparse.coo_matrix(
(sK, (iK, jK)), shape=(self.ndof, self.ndof))
# Remove constrained dofs from matrix and convert to coo
if remove_constrained:
dK = deleterowcol(dK.tocsc(), self.fixed, self.fixed).tocoo()
return dK
def build_dK(self, xPhys, remove_constrained=True):
drho = numpy.empty(xPhys.shape)
self.compute_young_moduli(xPhys, drho)
blocks = [self.build_dK0(drho[i], i, remove_constrained)
for i in range(drho.shape[0])]
dK = scipy.sparse.block_diag(blocks, format="coo")
return dK
@staticmethod
def sigma_pow(s11: numpy.ndarray, s22: numpy.ndarray, s12: numpy.ndarray,
p: float) -> numpy.ndarray:
r"""
Compute the von Mises stress raised to the :math:`p^{\text{th}}` power.
:math:`\sigma^p = \left(\sqrt{\sigma_{11}^2 - \sigma_{11}\sigma_{22} +
\sigma_{22}^2 + 3\sigma_{12}^2}\right)^p`
Todo:
* Properly document what the sigma variables represent.
* Rename the sigma variables to something more readable.
Parameters
----------
s11:
:math:`\sigma_{11}`
s22:
:math:`\sigma_{22}`
s12:
:math:`\sigma_{12}`
p:
The power (:math:`p`) to raise the von Mises stress.
Returns
-------
numpy.ndarray
The von Mises stress to the :math:`p^{\text{th}}` power.
"""
return | numpy.sqrt(s11**2 - s11 * s22 + s22**2 + 3 * s12**2) | numpy.sqrt |
# -*- coding: utf-8 -*-
"""
Orbital functions
-----------------
Functions used within multiple orbital classes in Stone Soup
"""
import numpy as np
from . import dotproduct
from ..types.array import StateVector
def stumpff_s(z):
r"""The Stumpff S function
.. math::
        S(z) = \begin{cases}\frac{\sqrt{z} - \sin{\sqrt{z}}}{(\sqrt{z})^{3}}, & (z > 0)\\
        \frac{\sinh{\sqrt{-z}} - \sqrt{-z}}{(\sqrt{-z})^{3}}, & (z < 0) \\
        \frac{1}{6}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`S(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (sqz - np.sin(sqz)) / sqz ** 3
elif z < 0:
sqz = np.sqrt(-z)
return (np.sinh(sqz) - sqz) / sqz ** 3
    else:  # which means z == 0:
return 1 / 6
def stumpff_c(z):
r"""The Stumpff C function
.. math::
        C(z) = \begin{cases}\frac{1 - \cos{\sqrt{z}}}{z}, & (z > 0)\\
        \frac{\cosh{\sqrt{-z}} - 1}{-z}, & (z < 0) \\
        \frac{1}{2}, & (z = 0)\end{cases}
Parameters
----------
z : float
input parameter, :math:`z`
Returns
-------
: float
Output value, :math:`C(z)`
"""
if z > 0:
sqz = np.sqrt(z)
return (1 - np.cos(sqz)) / sqz ** 2
elif z < 0:
sqz = np.sqrt(-z)
return (np.cosh(sqz) - 1) / sqz ** 2
else: # which means z == 0:
return 1 / 2
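# Both Stumpff functions are continuous at z = 0: expanding the series gives
# C(z) = 1/2 - z/24 + O(z**2) and S(z) = 1/6 - z/120 + O(z**2), which is why
# the z == 0 branches return exactly 1/2 and 1/6.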
def universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=3.986004418e14, precision=1e-8, max_iterations=1e5):
r"""Calculate the universal anomaly via Newton's method. Algorithm 3.3 in [1]_.
Parameters
----------
o_state_vector : :class:`~StateVector`
The orbital state vector formed as
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to estimate the universal anomaly
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`
precision : float, optional
For Newton's method, the difference between new and old estimates of the universal anomaly
below which the iteration stops and the answer is returned, (default = 1e-8)
max_iterations : float, optional
Maximum number of iterations allowed in while loop (default = 1e5)
Returns
-------
: float
The universal anomaly, :math:`\chi`
References
----------
    .. [1] Curtis, H. D. 2010, Orbital Mechanics for Engineering Students, 3rd Ed., Elsevier
"""
# For convenience
mag_r_0 = np.sqrt(dotproduct(o_state_vector[0:3], o_state_vector[0:3]))
mag_v_0 = np.sqrt(dotproduct(o_state_vector[3:6], o_state_vector[3:6]))
v_rad_0 = dotproduct(o_state_vector[3:6], o_state_vector[0:3])/mag_r_0
root_mu = np.sqrt(grav_parameter)
inv_sma = 2/mag_r_0 - (mag_v_0**2)/grav_parameter
# Initial estimate of Chi
chi_i = root_mu * np.abs(inv_sma) * delta_t.total_seconds()
ratio = 1
count = 0
# Do Newton's method
while np.abs(ratio) > precision and count <= max_iterations:
z_i = inv_sma * chi_i ** 2
f_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i ** 2 * \
stumpff_c(z_i) + (1 - inv_sma * mag_r_0) * chi_i ** 3 * \
stumpff_s(z_i) + mag_r_0 * chi_i - root_mu * \
delta_t.total_seconds()
fp_chi_i = mag_r_0 * v_rad_0 / root_mu * chi_i * \
(1 - inv_sma * chi_i ** 2 * stumpff_s(z_i)) + \
(1 - inv_sma * mag_r_0) * chi_i ** 2 * stumpff_c(z_i) + \
mag_r_0
ratio = f_chi_i / fp_chi_i
chi_i = chi_i - ratio
count += 1
return chi_i
def lagrange_coefficients_from_universal_anomaly(o_state_vector, delta_t,
grav_parameter=3.986004418e14,
precision=1e-8, max_iterations=1e5):
r""" Calculate the Lagrangian coefficients, f and g, and their time derivatives, by way of the
universal anomaly and the Stumpff functions [2]_.
Parameters
----------
o_state_vector : StateVector
The (Cartesian) orbital state vector,
:math:`[r_x, r_y, r_z, \dot{r}_x, \dot{r}_y, \dot{r}_z]^T`
delta_t : timedelta
The time interval over which to calculate
grav_parameter : float, optional
The universal gravitational parameter. Defaults to that of the
Earth, :math:`3.986004418 \times 10^{14} \ \mathrm{m}^{3} \
\mathrm{s}^{-2}`. Note that the units of time must be seconds.
precision : float, optional
        Precision to which to calculate the :meth:`universal_anomaly_newton` (default = 1e-8). See the doc
section for that function
max_iterations : float, optional
Maximum number of iterations in determining universal anomaly (default = 1e5)
Returns
-------
: float, float, float, float
The Lagrange coefficients, :math:`f, g, \dot{f}, \dot{g}`, in that order.
References
----------
    .. [2] Bond, V. R., Allman, M. C. 1996, Modern Astrodynamics: Fundamentals and Perturbation
       Methods, Princeton University Press
"""
# First get the universal anomaly using Newton's method
chii = universal_anomaly_newton(o_state_vector, delta_t,
grav_parameter=grav_parameter,
precision=precision, max_iterations=max_iterations)
# Get the position and velocity vectors
bold_r_0 = o_state_vector[0:3]
bold_v_0 = o_state_vector[3:6]
# Calculate the magnitude of the position and velocity vectors
r_0 = np.sqrt(dotproduct(bold_r_0, bold_r_0))
v_0 = np.sqrt(dotproduct(bold_v_0, bold_v_0))
# For convenience
root_mu = np.sqrt(grav_parameter)
inv_sma = 2 / r_0 - (v_0 ** 2) / grav_parameter
z = inv_sma * chii ** 2
    # Get the Lagrange coefficients using the Stumpff functions
f = 1 - chii ** 2 / r_0 * stumpff_c(z)
g = delta_t.total_seconds() - 1 / root_mu * chii ** 3 * \
stumpff_s(z)
# Get the position vector and magnitude of that vector
bold_r = f * bold_r_0 + g * bold_v_0
r = np.sqrt(dotproduct(bold_r, bold_r))
    # and the Lagrange (time) derivatives, also using the Stumpff functions
fdot = root_mu / (r * r_0) * (inv_sma * chii ** 3 * stumpff_s(z) - chii)
gdot = 1 - (chii ** 2 / r) * stumpff_c(z)
return f, g, fdot, gdot
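# Usage sketch (the standard f-and-g propagation relations; `o_state_vector`
# and `dt` are assumed to exist in the caller's scope):
#
#     f, g, fdot, gdot = lagrange_coefficients_from_universal_anomaly(o_state_vector, dt)
#     bold_r_new = f * o_state_vector[0:3] + g * o_state_vector[3:6]
#     bold_v_new = fdot * o_state_vector[0:3] + gdot * o_state_vector[3:6]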
def eccentric_anomaly_from_mean_anomaly(mean_anomaly, eccentricity,
precision=1e-8, max_iterations=1e5):
r"""Approximately solve the transcendental equation :math:`E - e sin E = M_e` for E. This is
an iterative process using Newton's method.
Parameters
----------
mean_anomaly : float
Current mean anomaly
eccentricity : float
Orbital eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations for the while loop, (default = 1e5)
Returns
-------
: float
Eccentric anomaly of the orbit
"""
if mean_anomaly < np.pi:
ecc_anomaly = mean_anomaly + eccentricity / 2
else:
ecc_anomaly = mean_anomaly - eccentricity / 2
ratio = 1
count = 0
while np.abs(ratio) > precision and count <= max_iterations:
f = ecc_anomaly - eccentricity * np.sin(ecc_anomaly) - mean_anomaly
fp = 1 - eccentricity * np.cos(ecc_anomaly)
ratio = f / fp # Need to check conditioning
ecc_anomaly = ecc_anomaly - ratio
count += 1
return ecc_anomaly # Check whether this ever goes outside 0 < 2pi
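# Example (values computed for illustration): for M_e = pi/4 and e = 0.1,
# Newton's iteration on E - 0.1*sin(E) = pi/4 converges to E ~= 0.8613 rad.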
def tru_anom_from_mean_anom(mean_anomaly, eccentricity, precision=1e-8, max_iterations=1e5):
r"""Get the true anomaly from the mean anomaly via the eccentric anomaly
Parameters
----------
mean_anomaly : float
The mean anomaly
eccentricity : float
Eccentricity
precision : float, optional
Precision used for the stopping point in determining eccentric anomaly from mean anomaly,
(default = 1e-8)
max_iterations : float, optional
Maximum number of iterations in determining eccentric anomaly, (default = 1e5)
Returns
-------
: float
True anomaly
"""
cos_ecc_anom = np.cos(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
sin_ecc_anom = np.sin(eccentric_anomaly_from_mean_anomaly(
mean_anomaly, eccentricity, precision=precision, max_iterations=max_iterations))
# This only works for M_e < \pi
# return np.arccos(np.clip((eccentricity - cos_ecc_anom) /
# (eccentricity*cos_ecc_anom - 1), -1, 1))
return np.remainder(np.arctan2(np.sqrt(1 - eccentricity**2) *
sin_ecc_anom,
cos_ecc_anom - eccentricity), 2*np.pi)
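# Sanity check (illustrative): for a circular orbit (eccentricity 0) the
# eccentric anomaly equals the mean anomaly and the true anomaly equals both,
# so tru_anom_from_mean_anom(m, 0) returns m (mod 2*pi) for any m.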
def perifocal_position(eccentricity, semimajor_axis, true_anomaly):
r"""The position vector in perifocal coordinates calculated from the Keplerian elements
Parameters
----------
eccentricity : float
Orbit eccentricity
semimajor_axis : float
Orbit semi-major axis
true_anomaly
Orbit true anomaly
Returns
-------
: numpy.array
:math:`[r_x, r_y, r_z]` position in perifocal coordinates
"""
# Cache some trigonometric functions
c_tran = np.cos(true_anomaly)
    s_tran = np.sin(true_anomaly)
    return semimajor_axis * (1 - eccentricity**2) / (1 + eccentricity * c_tran) * \
        np.array([[c_tran], [s_tran], [0]])
from typing import Union
import numpy as np
from probnum import randvars
try:
# functools.cached_property is only available in Python >=3.8
from functools import cached_property
except ImportError:
from cached_property import cached_property
class _RandomVariableList(list):
"""List of RandomVariables with convenient access to means, covariances, etc.
Parameters
----------
rv_list :
:obj:`list` of :obj:`RandomVariable`
"""
def __init__(self, rv_list: list):
if not isinstance(rv_list, list):
raise TypeError("RandomVariableList expects a list.")
# If not empty:
if len(rv_list) > 0:
# First element as a proxy for checking all elements
if not isinstance(rv_list[0], randvars.RandomVariable):
raise TypeError(
"RandomVariableList expects RandomVariable elements, but "
+ f"first element has type {type(rv_list[0])}."
)
super().__init__(rv_list)
def __getitem__(self, idx) -> Union[randvars.RandomVariable, "_RandomVariableList"]:
result = super().__getitem__(idx)
# Make sure to wrap the result into a _RandomVariableList if necessary
if isinstance(result, list):
result = _RandomVariableList(result)
return result
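    # Example (illustrative): slicing preserves the wrapper type --
    # rv_list[2:5] is again a _RandomVariableList, while rv_list[2] is the
    # underlying RandomVariable itself, so chained accesses like
    # rv_list[2:5].mean keep working.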
@cached_property
def mean(self) -> np.ndarray:
if len(self) == 0:
            return np.array([])
        return np.stack([rv.mean for rv in self])
import numpy as np
import pandas as pd
from data.data_utils import standardize, split_train_test, sample_mask, ENSEMBL_to_gene_symbols
from data.pathways import select_genes_pathway
def TCGA_FILE(cancer_type):
return '/local/scratch/rv340/tcga/TCGA-{}.htseq_fpkm.tsv'.format(cancer_type)
def TCGA_METADATA_FILE(cancer_type):
return '/local/scratch/rv340/tcga/{}_clinicalMatrix'.format(cancer_type)
def get_GTEx_tissue(cancer_type):
if cancer_type == 'LAML':
return 'Whole_Blood', 48
elif cancer_type == 'BRCA':
return 'Breast_Mammary_Tissue', 19
elif cancer_type == 'LUAD':
return 'Lung', 31
else:
raise ValueError('Cancer type {} not supported'.format(cancer_type))
def TCGA(file, clinical_file, tissue_idx=None, gtex_gene_symbols=None):
df = pd.read_csv(file, delimiter='\t')
df = df.set_index('Ensembl_ID')
# Transform gene symbols
gene_symbols, ENSEMBL_found = ENSEMBL_to_gene_symbols(df.index)
df = df.loc[ENSEMBL_found]
df = df.rename(index=dict(zip(df.index, gene_symbols)))
if gtex_gene_symbols is not None:
df = df.loc[gtex_gene_symbols]
gene_symbols = gtex_gene_symbols
df = df.groupby('Ensembl_ID', group_keys=False).apply(
lambda x: x[x.sum(axis=1) == np.max(x.sum(axis=1))]) # Remove duplicates, keep max
# Get data
x_TCGA = df.values.T
# Process covariates
sample_ids = df.columns
clinical_df = pd.read_csv(clinical_file, delimiter='\t')
idxs = [np.argwhere(s[:-1] == clinical_df['sampleID']).ravel()[0] for s in df.columns]
    gender = np.array([0 if g == 'MALE' else 1 for g in clinical_df.iloc[idxs]['gender']])
# run_grtrans_3D.py
# runs grtrans on koral simulation images at very high resolution
# these will take a long time!
import grtrans_batch as gr
import numpy as np
import sys
import os
import subprocess
import time
import astropy.io.fits as fits
import scipy.ndimage.interpolation as interpolation
####################
#Constants
####################
pcG = 6.67259e-8
pcc2 = 8.98755179e20
msun = 1.99e33
cmperkpc=3.086e21
EP = 1.0e-10
C = 299792458.0
DEGREE = 3.141592653589/180.0
HOUR = 15.0*DEGREE
RADPERAS = DEGREE/3600.0
RADPERUAS = RADPERAS*1.e-6
####################
# Problem setup
####################
SOURCE = 'M87'
RA = 12.51373
DEC = 12.39112
MJD = 58211
hfile = './sim1000_simcgs.dat0001' #mks2
dfile = './sim1000_simcgs.dat'
SPIN=0.9375 #0.25
RESCALE = 1.e-18 #1.e-14 # don't rescale anything!
MBH=6.5e9 # this was fixed in the simulation, but EHT measured 6.5
DTOBH = 16000*cmperkpc # this has been adjusted to get EHT result for M/D = 3.8 uas
TGPERFILE = 10 # gravitational times per file
NPIX_IM = 128
NGEO = 500
RAYTRACESIZE=50. # raytracing volume. For M87 at 17 degrees, 1000 rg = 1 milliarcsec
RERUN = True # rerun even if the output file exists
ROTATE = False # rotate the image before saving
ANGLE = 108 # rotation angle
# parameters to loop over (in serial, in this script)
ang = 60. # inclination angle
sigma_cut = 1.
freq_ghz = 230. # frequency
pixel_size_uas = 2 # microarcseconds per pixel
# derived quantities
lbh = pcG*msun*MBH / pcc2
fac= (4*np.pi*lbh**2)
LumtoJy = 1.e23/(4*np.pi*DTOBH**2)
MuasperRg = lbh / DTOBH / RADPERUAS
secperg = MBH*4.927e-6 #seconds per tg
hourperg = secperg / 3600.
weekperg = secperg / (604800.)
yrperg = secperg / (3.154e7)
size = 20#0.5*NPIX_IM*pixel_size_uas/MuasperRg
####################
# Functions
####################
def main():
# REMEMBER TO MODIFY VERSION FLAGS IN fluid_model_koral3d.f90 -- make better!
name = './test_koral3d'
# skip over if output already exists, or delete and rerun
# write input radiative transfer parameters
mu = np.cos(ang*np.pi/180.)
freq = freq_ghz*1.e9
uout = 1./RAYTRACESIZE
x=gr.grtrans()
npol=4
x.write_grtrans_inputs(name + '.in', oname=name+'.out',
fscalefac=RESCALE, sigcut=sigma_cut,
fname='KORAL3D',phi0=0.,
nfreq=1,fmin=freq,fmax=freq,
ename='SYNCHTHAV',
nvals=npol,
gmin=0, # confusingly, this is rhigh.
spin=SPIN,standard=1,
uout=uout,
mbh=MBH,
mdotmin=1.57e15,mdotmax=1.57e15,nmdot=1,
nmu=1,mumin=mu,mumax=mu,
gridvals=[-size,size,-size,size],
nn=[NPIX_IM,NPIX_IM,NGEO],
hhfile=hfile, hdfile=dfile,
hindf=1,hnt=1,
muval=1.)
run=True
if os.path.exists(name+'.out'):
run = False
if RERUN:
run=True
os.remove(name+'.out')
if run:
x.run_grtrans()
# run grtrans
# Read grtrans output
try:
x.read_grtrans_output()
    except Exception:  # IOError
return None
# pixel sizes
da = x.ab[x.nx,0]-x.ab[0,0]
db = x.ab[1,1]-x.ab[0,1]
if (da!=db): raise Exception("pixel da!=db")
psize = da*(lbh/DTOBH)
#image values
if npol==4:
ivals = x.ivals[:,0,0]*fac*da*db*LumtoJy
qvals = x.ivals[:,1,0]*fac*da*db*LumtoJy
uvals = x.ivals[:,2,0]*fac*da*db*LumtoJy
vvals = x.ivals[:,3,0]*fac*da*db*LumtoJy
# mask nan failure points with zeros
ivals = np.array(ivals)
qvals = np.array(qvals)
uvals = np.array(uvals)
vvals = np.array(vvals)
imask = np.isnan(ivals)
qumask = ~(~imask * ~np.isnan(qvals) * ~np.isnan(uvals))
vmask = ~(~imask * ~np.isnan(vvals))
ivals[imask] = 0.
qvals[qumask] = 0.
uvals[qumask] = 0.
vvals[vmask] = 0.
ivals = (np.flipud(np.transpose(ivals.reshape((NPIX_IM,NPIX_IM))))).flatten()
qvals = -(np.flipud(np.transpose(qvals.reshape((NPIX_IM,NPIX_IM))))).flatten()
uvals = -(np.flipud(np.transpose(uvals.reshape((NPIX_IM,NPIX_IM))))).flatten()
vvals = (np.flipud(np.transpose(vvals.reshape((NPIX_IM,NPIX_IM))))).flatten()
else:
ivals = x.ivals[:,0,0]*fac*da*db*LumtoJy
        ivals = np.array(ivals)
import argparse
import numpy as np
import os
# use GPU or not
# if network is small and shallow, CPU may be faster than GPU
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
import tensorflow as tf
import time
import pickle
import maddpg.common.tf_util as U
from maddpg.trainer.maddpg import MADDPGAgentTrainer
import tensorflow.contrib.layers as layers
from tensorflow.contrib import rnn
from reward_shaping.embedding_model import EmbeddingModel
from reward_shaping.config import Config
from multiagent.multi_discrete import MultiDiscrete
from pyinstrument import Profiler
def parse_args():
parser = argparse.ArgumentParser("Reinforcement Learning experiments for multiagent environments")
# Environment
parser.add_argument("--scenario", type=str, default="simple_reference", help="name of the scenario script")
parser.add_argument("--max-episode-len", type=int, default=2000, help="maximum episode length")
parser.add_argument("--num-episodes", type=int, default=500, help="number of episodes")
parser.add_argument("--num-adversaries", type=int, default=0, help="number of adversaries")
parser.add_argument("--good-policy", type=str, default="matd3", help="policy for good agents")
parser.add_argument("--adv-policy", type=str, default="matd3", help="policy of adversaries")
# Core training parameters
parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for Adam optimizer")
parser.add_argument("--gamma", type=float, default=0.95, help="discount factor")
parser.add_argument("--batch-size", type=int, default=1024, help="number of episodes to optimize at the same time")
parser.add_argument("--num-units", type=int, default=256, help="number of units in the mlp")
# Checkpointing
parser.add_argument("--exp-name", type=str, default="test", help="name of the experiment")
parser.add_argument("--save-dir", type=str, default="./policy/", help="directory in which training state and model should be saved")
parser.add_argument("--save-rate", type=int, default=1, help="save model once every time this many episodes are completed")
parser.add_argument("--load-dir", type=str, default="./policy/", help="directory in which training state and model are loaded")
# Evaluation
parser.add_argument("--restore", action="store_true", default=False)
parser.add_argument("--display", action="store_true", default=False)
parser.add_argument("--benchmark", action="store_true", default=False)
parser.add_argument("--benchmark-iters", type=int, default=100000, help="number of iterations run for benchmarking")
parser.add_argument("--benchmark-dir", type=str, default="./benchmark_files/", help="directory where benchmark data is saved")
parser.add_argument("--plots-dir", type=str, default="./complex_game/", help="directory where plot data is saved")
parser.add_argument("--reward-shaping-ag", action="store_true", default=False, help="whether enable reward shaping of agents")
parser.add_argument("--reward-shaping-adv", action="store_true", default=False, help="whether enable reward shaping of adversaries")
parser.add_argument("--policy_noise", default=0.2,type=float)
parser.add_argument("--noise_clip", default=0.2,type=float)
parser.add_argument("--policy_freq", default=2, type=int)
parser.add_argument("--pettingzoo", action="store_true", default=False)
parser.add_argument("--start_timesteps", default=10, type=int)
return parser.parse_args()
def mlp_model(input, num_outputs, scope, reuse=False, num_units=64, rnn_cell=None):
# This model takes as input an observation and returns values of all actions
with tf.variable_scope(scope, reuse=reuse):
out = input
out = layers.fully_connected(out, num_outputs=num_units, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_units//2, activation_fn=tf.nn.relu)
out = layers.fully_connected(out, num_outputs=num_outputs, activation_fn=None)
return out
def make_env(scenario_name, arglist, benchmark=False):
from multiagent.environment import MultiAgentEnv
import multiagent.scenarios as scenarios
from experiments.pz import create_env
print("env is ",arglist.scenario)
if arglist.pettingzoo:
env = create_env(arglist.scenario)
# arglist.num_adversaries = 0
print("adversary agents number is {}".format(arglist.num_adversaries))
return env
# load scenario from script
scenario = scenarios.load(scenario_name + ".py").Scenario()
# create world
world = scenario.make_world()
try:
arglist.num_adversaries = len(scenario.adversaries(world))
except:
if arglist.scenario == 'simple_push':
arglist.num_adversaries = 1
else:
arglist.num_adversaries = 0
arglist.reward_shaping_adv = False
print("adversary agents number is {}".format(arglist.num_adversaries))
# create multiagent environment
if benchmark:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation, scenario.benchmark_data)
else:
env = MultiAgentEnv(world, scenario.reset_world, scenario.reward, scenario.observation)
return env
def get_trainers(env, num_adversaries, obs_shape_n, arglist, agents):
trainers = []
model = mlp_model
trainer = MADDPGAgentTrainer
if not arglist.pettingzoo:
for i in range(num_adversaries):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.adv_policy=='ddpg'),agent=None))
for i in range(num_adversaries, env.n):
trainers.append(trainer(
"agent_%d" % i, model, obs_shape_n, env.action_space, i, arglist,
local_q_func=(arglist.good_policy=='ddpg'),agent=None))
else:
trainers.append(trainer(
"agent_%d" % 0, model, obs_shape_n, env.action_spaces.values(), 0, arglist,
local_q_func=(arglist.adv_policy=='ddpg'),agent=agents[0]))
trainers.append(trainer(
"agent_%d" % 1, model, obs_shape_n, env.action_spaces.values(), 1, arglist,
local_q_func=(arglist.good_policy=='ddpg'),agent=agents[1]))
return trainers
def create_dirs(arglist):
import os
os.makedirs(os.path.dirname(arglist.benchmark_dir), exist_ok=True)
os.makedirs(os.path.dirname(arglist.plots_dir), exist_ok=True)
def transform_obs_n(obs_n):
import torch
input = obs_n[0]
for i in range(1, len(obs_n)):
input = np.append(input, obs_n[i])
return torch.from_numpy(input).float()
def train(arglist):
with U.single_threaded_session():
# Create environment
env = make_env(arglist.scenario, arglist, arglist.benchmark)
# Create agent trainers
if not arglist.pettingzoo:
obs_shape_n = [env.observation_space[i].shape for i in range(env.n)]
action_shape_n = []
for i in range(env.n):
if hasattr(env.action_space[i],"n"):
action_shape_n.append(env.action_space[i].n)
elif not isinstance(env.action_space[i],MultiDiscrete):
action_shape_n.append(env.action_space[i].shape)
else:
num = 0
for j in range(len(env.action_space[i].high)):
num+=(env.action_space[i].high[j]-env.action_space[i].low[j]+1)
action_shape_n.append(num)
else:
agents = [agent for agent in (env.possible_agents)]
obs_shape_n = [env.observation_spaces[agent].shape for agent in agents]
action_shape_n = []
for agent in agents:
if hasattr(env.action_spaces[agent],"n"):
action_shape_n.append(env.action_spaces[agent].n)
else:
action_shape_n.append(env.action_spaces[agent].shape)
num_adversaries = min(99, arglist.num_adversaries)
trainers = get_trainers(env, num_adversaries, obs_shape_n, arglist, agents)
print('Using good policy {} and adv policy {}'.format(arglist.good_policy, arglist.adv_policy))
# Initialize
U.initialize()
# Load previous results, if necessary
if arglist.load_dir == "":
arglist.load_dir = arglist.save_dir
if arglist.restore or arglist.benchmark:
print('Loading previous state...')
U.load_state(arglist.load_dir)
# create dirs for saving benchmark data and reward data
create_dirs(arglist)
episode_rewards = [0.0] # sum of rewards for all agents
episode_original_rewards = [0.0] # sum of original rewards for all agents
if not arglist.pettingzoo:
agent_rewards = [[0.0] for _ in range(env.n)] # individual agent reward
agent_original_rewards = [[0.0] for _ in range(env.n)] # individual original agent reward
else:
agent_rewards = [[0.0] for _ in env.possible_agents]
agent_original_rewards = [[0.0] for _ in env.possible_agents] # individual original agent reward
final_ep_rewards = [] # sum of rewards for training curve
final_ep_ag_rewards = [] # agent rewards for training curve
agent_info = [[[]]] # placeholder for benchmarking info
saver = tf.train.Saver()
if not arglist.pettingzoo:
obs_n = env.reset()
else:
from experiments.pz import reset
obs_n = reset(env,agents)
# obs_n=[]
# for agent in agents:
# obs_n.append(t[agent])
episode_step = 0
train_step = 0
# two teams embedding network
embedding_model_adv = EmbeddingModel(obs_size=obs_shape_n[0:num_adversaries], num_outputs=action_shape_n[0:num_adversaries])
embedding_model_ag = EmbeddingModel(obs_size=obs_shape_n[num_adversaries:], num_outputs=action_shape_n[num_adversaries:])
episodic_memory_adv = []
episodic_memory_ag = []
if arglist.reward_shaping_adv:
episodic_memory_adv.append(embedding_model_adv.embedding(transform_obs_n(obs_n[0:num_adversaries])))
if arglist.reward_shaping_ag:
episodic_memory_ag.append(embedding_model_ag.embedding(transform_obs_n(obs_n[num_adversaries:])))
t_start = time.time()
print('Starting iterations...')
# profiler = Profiler()
# profiler.start()
while True:
# get action: possibility distribution
# env.render()
action_n=[]
if len(episode_rewards) < arglist.start_timesteps and not arglist.restore:
for agent,shape in zip(trainers,action_shape_n):
action = np.random.rand(shape)
action = action / np.sum(action)
action_n.append((action,agent.agent))
else:
action_n = [(agent.action(obs),agent.agent) for agent, obs in zip(trainers,obs_n)]
# environment step
if not arglist.pettingzoo:
new_obs_n, rew_n, done_n, info_n = env.step(action_n)
else:
from experiments.pz import step
                # Guard against occasional environment errors
try:
new_obs_n, rew_n, done_n, info_n = step(action_n,env)
except Exception as e:
print(repr(e))
from experiments.pz import reset
obs_n = reset(env,agents)
continue
original_rew_n = rew_n.copy()
action_n = [action for action,agent in action_n]
# add reward shaping
if arglist.reward_shaping_adv == True:
new_obs_tensor = transform_obs_n(new_obs_n[0:num_adversaries])
next_state_emb_adv = embedding_model_adv.embedding(new_obs_tensor)
intrinsic_reward_adv = embedding_model_adv.compute_intrinsic_reward(episodic_memory_adv, next_state_emb_adv,new_obs_tensor)
episodic_memory_adv.append(next_state_emb_adv)
for i in range(0,num_adversaries):
# can add life long curiosity
rew_n[i] += Config.beta *intrinsic_reward_adv
if arglist.reward_shaping_ag == True:
new_obs_tensor = transform_obs_n(new_obs_n[num_adversaries:])
next_state_emb_ag = embedding_model_ag.embedding(new_obs_tensor)
intrinsic_reward_ag = embedding_model_ag.compute_intrinsic_reward(episodic_memory_ag, next_state_emb_ag,new_obs_tensor)
episodic_memory_ag.append(next_state_emb_ag)
if not arglist.pettingzoo:
for i in range(num_adversaries,env.n):
rew_n[i] += Config.beta * intrinsic_reward_ag
else:
for i in range(num_adversaries,len(env.possible_agents)):
rew_n[i] += Config.beta * intrinsic_reward_ag
episode_step += 1
done = all(done_n)
terminal = (episode_step >= arglist.max_episode_len)
# collect experience
for i, agent in enumerate(trainers):
agent.experience(obs_n[i], action_n[i], rew_n[i], new_obs_n[i], done_n[i], terminal)
obs_n = new_obs_n
for i, rew in enumerate(rew_n):
episode_rewards[-1] += rew
episode_original_rewards[-1] += original_rew_n[i]
agent_rewards[i][-1] += rew
agent_original_rewards[i][-1] += original_rew_n[i]
if done or terminal:
terminal = True
# obs_n = env.reset()
if not arglist.pettingzoo:
obs_n = env.reset()
else:
from experiments.pz import reset
obs_n = reset(env,agents)
episode_step = 0
episode_rewards.append(0)
episode_original_rewards.append(0)
for a in agent_rewards:
a.append(0)
for a in agent_original_rewards:
a.append(0)
agent_info.append([[]])
# reset episode embedding network
episodic_memory_adv.clear()
# embedding_model_adv.lastReward=0
episodic_memory_ag.clear()
# embedding_model_ag.lastReward=0
if arglist.reward_shaping_adv:
episodic_memory_adv.append(embedding_model_adv.embedding(transform_obs_n(obs_n[0:num_adversaries])))
if arglist.reward_shaping_ag:
episodic_memory_ag.append(embedding_model_ag.embedding(transform_obs_n(obs_n[num_adversaries:])))
# increment global step counter
train_step += 1
# for benchmarking learned policies
if arglist.benchmark:
for i, info in enumerate(info_n):
agent_info[-1][i].append(info_n['n'])
if train_step > arglist.benchmark_iters and (done or terminal):
file_name = arglist.benchmark_dir + arglist.exp_name + '.pkl'
print('Finished benchmarking, now saving...')
with open(file_name, 'wb') as fp:
pickle.dump(agent_info[:-1], fp)
break
continue
# for displaying learned policies
if arglist.display:
# time.sleep(0.1)
env.render()
if arglist.restore:
continue
# update all trainers, if not in display or benchmark mode
loss = None
for agent in trainers:
agent.preupdate()
for agent in trainers:
loss = agent.update(trainers, train_step)
# train embedding network
obs_n_train = []
obs_next_n_train = []
act_n_train = []
if (arglist.reward_shaping_adv or arglist.reward_shaping_ag):
if arglist.reward_shaping_adv == True and train_step > Config.train_episode_num * 10:
for i in range(0,num_adversaries):
obs, act, rew, obs_next, done = trainers[i].sample(Config.train_episode_num)
obs_n_train.append(obs)
obs_next_n_train.append(obs_next)
act_n_train.append(act)
embedding_loss_adv = embedding_model_adv.train_model(obs_n_train,obs_next_n_train,act_n_train)
if arglist.reward_shaping_ag == True and train_step > Config.train_episode_num * 10:
obs_n_train = []
obs_next_n_train = []
act_n_train = []
n = 0
if not arglist.pettingzoo:
n= env.n
else:
n= len(env.possible_agents)
for i in range(num_adversaries,n):
obs, act, rew, obs_next, done = trainers[i].sample(Config.train_episode_num)
obs_n_train.append(obs)
obs_next_n_train.append(obs_next)
act_n_train.append(act)
embedding_loss_ag = embedding_model_ag.train_model(obs_n_train,obs_next_n_train,act_n_train)
# save model, display training output
if (terminal) and (len(episode_rewards) % arglist.save_rate == 0):
U.save_state(arglist.save_dir, saver=saver)
# print statement depends on whether or not there are adversaries
if num_adversaries == 0:
print("steps: {}, episodes: {}, mean episode reward: {}, {}, time: {}".format(
train_step, len(episode_rewards)-1, np.mean(episode_original_rewards[-arglist.save_rate-1:-1]),
np.mean(episode_rewards[-arglist.save_rate-1:-1]), round(time.time()-t_start, 3)))
else:
print("steps: {}, episodes: {}, mean episode reward: {}, agent episode reward: {}, {}, time: {}".format(
                        train_step, len(episode_rewards)-1, np.mean(episode_original_rewards[-arglist.save_rate-1:-1]),
                        [np.mean(rew[-arglist.save_rate-1:-1]) for rew in agent_original_rewards],
                        [np.mean(rew[-arglist.save_rate-1:-1]) for rew in agent_rewards], round(time.time()-t_start, 3)))
# -*- coding:utf-8 -*-
import numpy as np
import datetime
import torch
import torch.nn as nn
from torchnet import meter
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from nets.speaker_net_cnn import SpeakerNetEM
# from nets.speaker_net_lstm import SpeakerNetLSTM
from nets.intra_class_loss import IntraClassLoss
from datasets.librispeech import LibriSpeech
from datasets.st_cmds_20170001_1 import ST_CMDS_20170001_1
from datasets.voxceleb2 import VoxCeleb2
from datasets.voxceleb1 import VoxCeleb1
from datasets.merged_dataset import MergedDataset
from config import opt
from utils import audio_util, metric_util
# from train import compute_equal_error_rate
from utils import pil_util
test_audio_path = 'audios/1_src2.wav'
model_path = 'checkpoints/cnn/27_1770000__2019-05-28_07_25_26.pth'
speaker_net = SpeakerNetEM(opt.n_mels*3, opt.dropout_keep_prop)
device = torch.device('cuda') if opt.gpu else torch.device('cpu')
map_location = (lambda storage, loc: storage.cuda(0)) if opt.gpu else (lambda storage, loc: storage)
speaker_net.to(device)
status_dict = torch.load(model_path, map_location)
speaker_net.load_state_dict(status_dict['net'])
speaker_net.eval()
data = audio_util.norm_magnitude_spectrum(test_audio_path, opt.sr, opt.n_fft,
opt.n_overlap, opt.win_length)
# 14s
testdata = np.expand_dims(data[:3 * 300, :], 0)
testdata = testdata.reshape(-1, 300, 513)
tensor_teset_data = torch.tensor(testdata).to(device)
ret = speaker_net(tensor_teset_data)
ret = ret.cpu().detach().numpy()
sim_mat = np.zeros((ret.shape[0], ret.shape[0]))
import os
import librosa # for audio processing
import IPython.display as ipd
import matplotlib.pyplot as plt
import numpy as np
from scipy.io import wavfile # for audio processing
import warnings
import soundfile as sf
from sklearn.preprocessing import LabelEncoder
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
warnings.filterwarnings("ignore")
train_audio_path = 'D:/Phd/MAHSA_PROJECT/Dataset_speechRecog/train/audio'
labels = os.listdir(train_audio_path)
# no_of_recordings = []
# for label in labels:
# waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
# no_of_recordings.append(len(waves))
# # plot
# plt.figure(figsize=(10, 5))
# index = np.arange(len(labels))
# plt.bar(index, no_of_recordings)
# plt.xlabel('Commands', fontsize=12)
# plt.ylabel('No of recordings', fontsize=12)
# plt.xticks(index, labels, fontsize=10, rotation=0)
# plt.title('No. of recordings for each command')
# plt.show()
#
# duration_of_recordings = []
# for label in labels:
# waves = [f for f in os.listdir(train_audio_path + '/' + label) if f.endswith('.wav')]
# print(label)
# for wav in waves:
# samples,sample_rate = librosa.load(train_audio_path + '/' + label + '/' + wav)
# duration_of_recordings.append(float(len(samples) / sample_rate))
# print(wav)
#
# plt.hist(np.array(duration_of_recordings))
# plt.show()
def padfunc(offset, samples, sample_rate, fsnew):
pad_len = int(np.ceil((offset - (len(samples) / sample_rate)) * fsnew))
padding = np.zeros(pad_len)
    samples_ = np.concatenate((samples, padding))
    return samples_
"""Tests for imfprefict.timeSeriesDataset module."""
import pytest
import numpy as np
from imfprefict.timeSeriesDataset import TimeSeriesDataset
class TestTimeSeriesDataset:
@pytest.mark.parametrize("x_dims_updated_data", [
{"data_x": np.array([[1, 2], [2, 3], [3, 4], [4, 5]]), "expected_x_shape": (4, 2, 1)},
{"data_x": np.array([[1, 2, 3], [2, 3, 4], [3, 4, 5]]), "expected_x_shape": (3, 3, 1)},
{"data_x": np.array([[1, 2, 3, 4], [2, 3, 4, 5]]), "expected_x_shape": (2, 4, 1)},
{"data_x": np.array([[1, 2, 3, 4, 5]]), "expected_x_shape": (1, 5, 1)}
])
def test_x_dims_updated(self, x_dims_updated_data):
data_x = x_dims_updated_data["data_x"]
data_y = np.array([])
ts = TimeSeriesDataset(data_x, data_y)
assert ts.x.shape == x_dims_updated_data["expected_x_shape"]
def test_x_values_unchanged(self):
data_x = np.array([[1, 2], [2, 3], [3, 4], [4, 5]])
data_y = np.array([1, 2, 3, 4, 5])
ts = TimeSeriesDataset(data_x, data_y)
for expected_x, x in zip(data_x, ts.x):
for expected_x_val, x_val in zip(expected_x, x):
assert expected_x_val == x_val
def test_getitem(self):
        data_x = np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]])
import numpy as np
from tqdm import tqdm
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.decomposition import PCA
from modAL.models import ActiveLearner, Committee
from utils import get_initial_indexes
from dataset import Dataset
class AL_Process:
def __init__(self, queries=10, instances=10, experiments=3, n_initial=100, classes='all', dataset=Dataset['CIFAR10']):
self.queries=queries
self.instances=instances
self.experiments=experiments
self.classes=classes
self.dataset=dataset
self.n_initial=n_initial
self.load_data()
def train_adaBoost(self, strategy):
performance_history = []
for i in tqdm(range(self.experiments)):
h=[]
X = self.X_pool.copy()
y = self.y_pool.copy()
model = ActiveLearner(
estimator = AdaBoostClassifier(),
X_training = self.X_initial.copy(), y_training = self.y_initial.copy(),
query_strategy = strategy,
)
for idx in tqdm(range(self.queries)):
query_idx, _ = model.query(X, n_instances=self.instances)
model.teach(X=X[query_idx], y=y[query_idx])
acc = model.score(self.X_test, self.y_test)
h.append(acc)
# remove queried instance from pool
X = np.delete(X, query_idx, axis=0)
y = np.delete(y, query_idx, axis=0)
performance_history.append(h)
return model, np.mean(performance_history, axis=0)
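    # Usage sketch (assumes modAL's built-in uncertainty_sampling strategy;
    # any modAL-compatible query strategy works here):
    #
    #     from modAL.uncertainty import uncertainty_sampling
    #     process = AL_Process(queries=10, instances=10, experiments=3)
    #     model, history = process.train_adaBoost(uncertainty_sampling)
    #     # history holds the mean test accuracy after each query round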
def train_committee(self, strategy):
performance_history = []
for i in tqdm(range(self.experiments)):
learner_1 = ActiveLearner(
estimator=RandomForestClassifier(),
query_strategy=strategy,
X_training=self.X_initial.copy(), y_training=self.y_initial.copy()
)
learner_2 = ActiveLearner(
estimator=AdaBoostClassifier(),
query_strategy=strategy,
X_training=self.X_initial.copy(), y_training=self.y_initial.copy()
)
model = Committee(learner_list=[learner_1, learner_2])
h=[]
X = self.X_pool.copy()
y = self.y_pool.copy()
for idx in tqdm(range(self.queries)):
query_idx, _ = model.query(X, n_instances=self.instances)
model.teach(X=X[query_idx], y=y[query_idx])
acc = model.score(self.X_test, self.y_test)
h.append(acc)
# remove queried instance from pool
X = np.delete(X, query_idx, axis=0)
y = np.delete(y, query_idx, axis=0)
performance_history.append(h)
return model, np.mean(performance_history, axis=0)
def load_data(self):
(X_train, y_train), (X_test, y_test) = self.dataset.load_data()
X_train = X_train
y_train = y_train
self.IMG_WIDTH = X_train.shape[1]
self.IMG_HEIGHT = X_train.shape[2]
self.CHANNELS = 1 if len(X_train.shape) == 3 else X_train.shape[3]
        selected_classes = np.unique(y_train)
import numpy as np
import csv
import scipy.io
from tensorflow.keras.datasets import (
mnist as mnist_keras,
fashion_mnist as fashion_mnist_keras,
cifar10 as cifar10_keras,
)
from groot.datasets import load_mnist
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
data_dir = "/home/maksym/boost/data/"
def split_train_test(X_all, y_all, frac_train):
"""
The first X% of X_all, y_all become the training set, the rest (1-X)% become the test set.
Note that this assumes that the samples are already shuffled or if not (e.g. if we were to split MNIST) that
this behavior is intended.
"""
num_total = X_all.shape[0]
num_train = int(frac_train * num_total)
X_train, y_train = X_all[:num_train], y_all[:num_train]
X_test, y_test = X_all[num_train:], y_all[num_train:]
return X_train, y_train, X_test, y_test
def normalize_per_feature_0_1(X_train, X_test):
"""
We are not allowed to touch the test data, thus we do the normalization just based on the training data.
"""
X_train_max = X_train.max(axis=0, keepdims=True)
X_train_min = X_train.min(axis=0, keepdims=True)
X_train = (X_train - X_train_min) / (X_train_max - X_train_min)
X_test = (X_test - X_train_min) / (X_train_max - X_train_min)
return X_train, X_test
def split_train_validation(X_train_orig, y_train_orig, frac_valid, shuffle=True):
num_total = X_train_orig.shape[0]
n_valid = int(frac_valid * num_total)
idx = np.random.permutation(num_total) if shuffle else np.arange(num_total)
if shuffle:
X_valid, y_valid = X_train_orig[idx][:n_valid], y_train_orig[idx][:n_valid]
X_train, y_train = X_train_orig[idx][n_valid:], y_train_orig[idx][n_valid:]
else:
# If no shuffle, then one has to ensure that the classes are balanced
idx_valid, idx_train = [], []
for cls in np.unique(y_train_orig):
indices_cls = np.where(y_train_orig == cls)[0]
proportion_cls = len(indices_cls) / num_total
n_class_balanced_valid = int(proportion_cls * n_valid)
idx_valid.extend(list(indices_cls[:n_class_balanced_valid]))
idx_train.extend(list(indices_cls[n_class_balanced_valid:]))
idx_valid, idx_train = np.array(idx_valid), np.array(idx_train)
X_valid, y_valid = X_train_orig[idx_valid], y_train_orig[idx_valid]
X_train, y_train = X_train_orig[idx_train], y_train_orig[idx_train]
return X_train, y_train, X_valid, y_valid
def binary_from_multiclass(X_train, y_train, X_test, y_test, classes):
classes = np.array(classes) # for indexing only arrays work, not lists
idx_train1, idx_train2 = y_train == classes[0], y_train == classes[1]
idx_test1, idx_test2 = y_test == classes[0], y_test == classes[1]
X_train, X_test = X_train[idx_train1 + idx_train2], X_test[idx_test1 + idx_test2]
y_train = idx_train1 * 1 + idx_train2 * -1
y_test = idx_test1 * 1 + idx_test2 * -1
y_train, y_test = y_train[idx_train1 + idx_train2], y_test[idx_test1 + idx_test2]
return X_train, y_train, X_test, y_test
def transform_labels_one_vs_all(y_train_orig, y_valid_orig, y_test_orig):
n_cls = int(y_train_orig.max()) + 1
if n_cls == 2:
return y_train_orig[None, :], y_valid_orig[None, :], y_test_orig[None, :]
labels = np.unique(y_train_orig)
n_cls = len(labels)
n_train, n_valid, n_test = (
y_train_orig.shape[0],
y_valid_orig.shape[0],
y_test_orig.shape[0],
)
y_train, y_valid, y_test = (
np.zeros([n_cls, n_train]),
np.zeros([n_cls, n_valid]),
np.zeros([n_cls, n_test]),
)
for i_cls in range(n_cls):
# convert from False/True to -1/1 compatible with One-vs-All formulation
y_train[i_cls] = 2 * (y_train_orig == i_cls) - 1
y_valid[i_cls] = 2 * (y_valid_orig == i_cls) - 1
y_test[i_cls] = 2 * (y_test_orig == i_cls) - 1
return y_train, y_valid, y_test
def toy_2d_stumps():
X = np.array(
[
[0.38, 0.75],
[0.50, 0.93],
[0.05, 0.70],
[0.30, 0.90],
[0.15, 0.80],
# [0.15, 1.0], [0.125, 0.75], [0.1, 0.85], [0.045, 0.22], [0.725, 0.955], # small margin
# [0.15, 1.0], [0.125, 0.75], [0.1, 0.85], [0.075, 0.2], [0.775, 0.925], # small margin
[0.15, 1.0],
[0.125, 0.5],
[0.1, 0.85],
[0.02, 0.25],
[0.775, 0.975],
[0.05, 0.05],
[0.2, 0.1],
[0.4, 0.075],
[0.6, 0.22],
[0.8, 0.1],
[0.95, 0.05],
[0.9, 0.2],
[0.925, 0.4],
[0.79, 0.6],
[0.81, 0.8],
]
)
y = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
eps_dataset = 0.075
return X, y, eps_dataset
def toy_2d_trees():
X = np.array(
[
[0.38, 0.75],
[0.50, 0.93],
[0.05, 0.70],
[0.30, 0.90],
[0.15, 0.80],
[0.75, 0.38],
[0.95, 0.48],
[0.70, 0.05],
[0.65, 0.30],
[0.80, 0.30],
[0.05, 0.1],
[0.35, 0.1],
[0.45, 0.075],
[0.3, 0.2],
[0.25, 0.1],
[0.95, 0.65],
[0.7, 0.9],
[0.925, 0.7],
[0.79, 0.55],
[0.81, 0.8],
]
)
y = np.array([-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
eps_dataset = 0.075
return X, y, eps_dataset
def toy_2d_xor():
X = np.array([[0.05, 0.05], [0.95, 0.95], [0.05, 0.95], [0.95, 0.05]])
y = np.array([-1, -1, 1, 1])
eps_dataset = 0.15
return X, y, eps_dataset
def toy_2d_wong():
# random points at least 2r apart
m = 12
# seed=10 illustrates that by default the margin can be easily close to 0
# both plain and robust model have 0 train error, but the robust model additionally enforces a large margin
np.random.seed(10)
x = [np.random.uniform(size=2)]
r = 0.16
while len(x) < m:
p = np.random.uniform(size=2)
if min(np.abs(p - a).sum() for a in x) > 2 * r:
x.append(p)
eps_dataset = r / 2
X = np.array(x)
y = np.sign(np.random.uniform(-0.5, 0.5, size=m))
return X, y, eps_dataset
def breast_cancer():
"""
Taken from the UCI repository:
http://archive.ics.uci.edu/ml/datasets/breast+cancer+wisconsin+%28diagnostic%29 file: breast-cancer-wisconsin.data
After filtering out the points with missing data, we have exactly the same data as Chen et al, 2019
train: 546x10, test: 137x10
"""
eps_dataset = 0.3 # same as in Chen et al, 2019, worked well for them
path = data_dir + "breast_cancer/breast-cancer-wisconsin.data"
lst = []
for line in csv.reader(open(path, "r").readlines()):
if "?" not in line:
lst.append(line)
data_arr = np.array(lst, dtype=int)
X_all, y_all = data_arr[:, :10], data_arr[:, 10]
y_all[y_all == 2], y_all[y_all == 4] = -1, 1 # from 2, 4 to -1, 1
X_train, y_train, X_test, y_test = split_train_test(X_all, y_all, frac_train=0.8)
X_train, X_test = normalize_per_feature_0_1(X_train, X_test)
return X_train, y_train, X_test, y_test, eps_dataset
def diabetes():
"""
Taken from Kaggle:
https://www.kaggle.com/uciml/pima-indians-diabetes-database file: diabetes.csv
train: 614x8, test: 154x8
"""
eps_dataset = 0.05 # Chen et al, 2019 used 0.2, but it was too high
path = data_dir + "diabetes/diabetes.csv"
data_arr = np.loadtxt(path, delimiter=",", skiprows=1) # loaded as float64
X_all, y_all = data_arr[:, :8], data_arr[:, 8]
y_all[y_all == 0], y_all[y_all == 1] = -1, 1 # from 0, 1 to -1, 1
X_train, y_train, X_test, y_test = split_train_test(X_all, y_all, frac_train=0.8)
X_train, X_test = normalize_per_feature_0_1(X_train, X_test)
return X_train, y_train, X_test, y_test, eps_dataset
def ijcnn1():
"""
Taken from LIBSVM data repository:
https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html
train: 49990x22, test: 91701x22
note: imbalanced classes (-1: 90.3% vs 1: 9.7%)
"""
eps_dataset = 0.01 # Chen et al, 2019 used 0.1, but it was too high
folder = data_dir + "ijcnn1/"
path_train, path_val, path_test = (
folder + "ijcnn1.tr",
folder + "ijcnn1.val",
folder + "ijcnn1.t",
)
num_train, num_test, dim = 49990, 91701, 22
X_train = np.zeros((num_train, dim))
y_train = np.zeros(num_train)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains classes and functions used for implementing
the Bayesian Online Changepoint Detection algorithm.
"""
import logging
import math
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type, Union
from abc import ABC, abstractmethod
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from kats.consts import (
TimeSeriesChangePoint,
TimeSeriesData,
SearchMethodEnum
)
import kats.utils.time_series_parameter_tuning as tpt
from kats.detectors.detector import Detector
# pyre-fixme[21]: Could not find name `invgamma` in `scipy.stats`.
# pyre-fixme[21]: Could not find name `nbinom` in `scipy.stats`.
from scipy.stats import invgamma, linregress, norm, nbinom # @manual
from scipy.special import logsumexp # @manual
_MIN_POINTS = 10
_LOG_SQRT2PI = 0.5 * np.log(2 * np.pi)
class BOCPDModelType(Enum):
"""Bayesian Online Change Point Detection model type.
Describes the type of predictive model used by the
BOCPD algorithm.
"""
NORMAL_KNOWN_MODEL = 1
TREND_CHANGE_MODEL = 2
POISSON_PROCESS_MODEL = 3
class BOCPDMetadata:
"""Metadata for the BOCPD model.
This gives information about
the type of detector, the name of the time series and
the model used for detection.
Attributes:
model: The kind of predictive model used.
ts_name: string, name of the time series for which the detector
is being run.
"""
def __init__(self, model: BOCPDModelType, ts_name: Optional[str] = None):
self._detector_type = BOCPDetector
self._model = model
self._ts_name = ts_name
@property
def detector_type(self):
return self._detector_type
@property
def model(self):
return self._model
@property
def ts_name(self):
return self._ts_name
@dataclass
class BOCPDModelParameters(ABC):
"""Data class containing data for predictive models used in BOCPD.
Particular predictive models derive from this class.
Attributes:
prior_choice: list of changepoint probability priors
over which we will search hyperparameters
cp_prior: default prior for probability of changepoint.
search_method: string, representing the search method
for the hyperparameter tuning library. Allowed values
are 'random' and 'gridsearch'.
"""
data: Optional[TimeSeriesData] = None
prior_choice: Dict[str, List[float]] = field(
default_factory=lambda: {'cp_prior': [0.001, 0.002, 0.005, 0.01, 0.02]}
)
cp_prior: float = 0.1
search_method: str = 'random'
def set_prior(self, param_dict: Dict[str, float]):
"""Setter method, which sets the value of the parameters.
Currently, this sets the value of the prior probability of changepoint.
Args:
param_dict: dictionary of the form {param_name: param_value}.
Returns:
None.
"""
if 'cp_prior' in param_dict:
self.cp_prior = param_dict['cp_prior']
@dataclass
class NormalKnownParameters(BOCPDModelParameters):
"""Data class containing the parameters for Normal predictive model.
This assumes that the data comes from a normal distribution with known
precision.
Attributes:
empirical: Boolean, should we derive the prior empirically. When
this is true, the mean_prior, mean_prec_prior and known_prec
are derived from the data, and don't need to be specified.
mean_prior: float, mean of the prior normal distribution.
mean_prec_prior: float, precision of the prior normal distribution.
known_prec: float, known precision of the data.
known_prec_multiplier: float, a multiplier of the known precision.
This is a variable, that is used in the hyperparameter search,
to multiply with the known_prec value.
prior_choice: List of parameters to search, for hyperparameter tuning.
"""
empirical: bool = True
mean_prior: Optional[float] = None
mean_prec_prior: Optional[float] = None
known_prec: Optional[float] = None
known_prec_multiplier: float = 1.
prior_choice: Dict[str, List[float]] = field(
default_factory=lambda : {
'known_prec_multiplier': [1., 2., 3., 4., 5.],
'cp_prior': [0.001, 0.002, 0.005, 0.01, 0.02]
}
)
def set_prior(self, param_dict: Dict[str, float]):
"""Sets priors
Sets the value of the prior based on the
parameter dictionary passed.
Args:
param_dict: Dictionary of parameters required for
setting the prior value.
Returns:
None.
"""
if 'known_prec_multiplier' in param_dict:
self.known_prec_multiplier = param_dict['known_prec_multiplier']
if 'cp_prior' in param_dict:
self.cp_prior = param_dict['cp_prior']
@dataclass
class TrendChangeParameters(BOCPDModelParameters):
"""Parameters for the trend change predictive model.
This model assumes that the data is generated from a Bayesian
linear model.
Attributes:
mu_prior: array, mean of the normal priors on the slope and intercept
num_likelihood_samples: int, number of samples generated, to calculate
the posterior.
num_points_prior: int,
readjust_sigma_prior: Boolean, whether we should readjust the Inv. Gamma
prior for the variance, based on the data.
plot_regression_prior: Boolean, plot prior. set as False, unless trying to
debug.
"""
mu_prior: Optional[np.ndarray] = None
num_likelihood_samples: int = 100
num_points_prior: int = _MIN_POINTS
readjust_sigma_prior: bool = False
plot_regression_prior: bool = False
@dataclass
class PoissonModelParameters(BOCPDModelParameters):
"""Parameters for the Poisson predictive model.
Here, the data is generated from a Poisson distribution.
Attributes:
alpha_prior: prior value of the alpha value of the Gamma prior.
beta_prior: prior value of the beta value of the Gamma prior.
"""
alpha_prior: float = 1.0
beta_prior: float = 0.05
class BOCPDetector(Detector):
"""Bayesian Online Changepoint Detection.
Given a univariate time series, this class
performs changepoint detection, i.e. it tells
us when the time series shows a change. This is online,
which means it gives the best estimate based on a
lookahead number of time steps (which is the lag).
This faithfully implements the algorithm in
Adams & MacKay, 2007. "Bayesian Online Changepoint Detection"
https://arxiv.org/abs/0710.3742
The basic idea is to see whether the new values are
improbable, when compared to a bayesian predictive model,
built from the previous observations.
Attributes:
data: TimeSeriesData, data on which we will run the BOCPD algorithm.
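Minimal usage sketch (`tsd` stands for an assumed TimeSeriesData instance):
>>> detector = BOCPDetector(tsd)
>>> changepoints = detector.detector(model=BOCPDModelType.NORMAL_KNOWN_MODEL)
>>> detector.plot(changepoints)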
"""
def __init__(self, data: TimeSeriesData) -> None:
self.data = data
self.models: Dict[BOCPDModelType, Type[_PredictiveModel]] = {
BOCPDModelType.NORMAL_KNOWN_MODEL: _NormalKnownPrec,
BOCPDModelType.TREND_CHANGE_MODEL: _BayesianLinReg,
BOCPDModelType.POISSON_PROCESS_MODEL: _PoissonProcessModel,
}
self.parameter_type: Dict[BOCPDModelType, Type[BOCPDModelParameters]] = {
BOCPDModelType.NORMAL_KNOWN_MODEL: NormalKnownParameters,
BOCPDModelType.TREND_CHANGE_MODEL: TrendChangeParameters,
BOCPDModelType.POISSON_PROCESS_MODEL: PoissonModelParameters,
}
self.available_models = self.models.keys()
self.change_prob = {}
self._run_length_prob = {}
self.detected_flag = False
assert (
self.models.keys() == self.parameter_type.keys()
), f"Expected equivalent models in .models and .parameter_types, but got {self.models.keys()} and {self.parameter_type.keys()}"
# pyre-fixme[14]: `detector` overrides method defined in `Detector` inconsistently.
def detector(
self,
model: BOCPDModelType = BOCPDModelType.NORMAL_KNOWN_MODEL,
model_parameters: Union[
None, BOCPDModelParameters
] = None,
lag: int = 10,
choose_priors: bool = True,
changepoint_prior: float = 0.01,
threshold: float = 0.5,
debug: bool = False,
agg_cp: bool = True,
) -> List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]:
"""The main detector method.
This function runs the BOCPD detector
and returns the list of changepoints, along with some metadata
Args:
model: This specifies the probabilistic model, that generates
the data within each segment. The user can input several
model types depending on the behavior of the time series.
Currently allowed models are:
NORMAL_KNOWN_MODEL: Normal model with variance known. Use
this to find level shifts in normally distributed data.
TREND_CHANGE_MODEL : This model assumes each segment is
generated from ordinary linear regression. Use this model
to understand changes in slope, or trend in time series.
POISSON_PROCESS_MODEL: This assumes a poisson generative model.
Use this for count data, where most of the values are close
to zero.
model_parameters: Model Parameters correspond to specific parameters
for a specific model. They are defined in the
NormalKnownParameters, TrendChangeParameters,
PoissonModelParameters classes.
lag: integer referring to the lag in reporting the changepoint. We
report the changepoint after seeing "lag" number of data points.
Higher lag gives greater certainty that this is indeed a changepoint.
Lower lag will detect the changepoint faster. This is the tradeoff.
choose_priors: If True, then the hyperparameter tuning library (HPT) is
used to choose the best priors, which maximize the posterior predictive.
changepoint_prior: This is a Bayesian algorithm. Hence, this parameter
specifies the prior belief on the probability
that a given point is a changepoint. For example,
if you believe 10% of your data will be a changepoint,
you can set this to 0.1.
threshold: We report the probability of observing the changepoint
at each instant. The actual changepoints are obtained by
denoting the points above this threshold to be a changepoint.
debug: This surfaces additional information, such as the plots of
predicted means and variances, which allows the user to
debug why changepoints were not properly detected.
agg_cp: Aggregating the run-length posterior has empirically been
found to give a stronger signal for changepoint
detection. When this parameter is True, the posterior
is the aggregation of the run-length posterior, obtained
by fetching maximum values diagonally.
Returns:
List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]: Each element in this
list is a changepoint, an object of the TimeSeriesChangePoint class. The start_time
gives the time that the change was detected. The metadata contains data about
the name of the time series (useful when multiple time series are run simultaneously),
and the predictive model used.
"""
assert (
model in self.available_models
), f"Requested model {model} not currently supported. Please choose one from: {self.available_models}"
if model_parameters is None:
model_parameters = self.parameter_type[model]()
assert isinstance(
model_parameters, self.parameter_type[model]
), f"Expected parameter type {self.parameter_type[model]}, but got {model_parameters}"
if choose_priors:
changepoint_prior, model_parameters = self._choose_priors(model, model_parameters)
if getattr(model_parameters, "data", 0) is None:
model_parameters.data = self.data
logging.debug(f"Newest model parameters: {model_parameters}")
if not self.data.is_univariate() and not self.models[model].is_multivariate():
msg = "Model {model.name} support univariate time series, but get {type}.".format(
model=model,
type=type(self.data.value)
)
logging.error(msg)
raise ValueError(msg)
# parameters_dict = dataclasses.asdict(model_parameters)
# pyre-fixme[45]: Cannot instantiate abstract class `_PredictiveModel` with `__init__`, `is_multivariate`, `pred_mean` and 4 additional abstract methods.Pyre
underlying_model = self.models[model](data=self.data, parameters=model_parameters)
underlying_model.setup()
logging.debug(f"Creating detector with lag {lag} and debug option {debug}.")
bocpd = _BayesOnlineChangePoint(data=self.data, lag=lag, debug=debug, agg_cp=agg_cp)
logging.debug(
f"Running .detector() with model {underlying_model}, threshold {threshold}, changepoint prior {changepoint_prior}."
)
detector_results_all = bocpd.detector(
model=underlying_model,
threshold=threshold,
changepoint_prior=changepoint_prior,
)
self.detected_flag = True
change_points = []
for ts_name, detector_results in detector_results_all.items():
change_indices = detector_results["change_points"]
change_probs = detector_results["change_prob"]
self.change_prob[ts_name] = change_probs
self._run_length_prob[ts_name] = detector_results["run_length_prob"]
logging.debug(
f"Obtained {len(change_indices)} change points from underlying model in ts={ts_name}."
)
for cp_index in change_indices:
cp_time = self.data.time.values[cp_index]
cp = TimeSeriesChangePoint(
start_time=cp_time,
end_time=cp_time,
confidence=change_probs[cp_index],
)
bocpd_metadata = BOCPDMetadata(model=model, ts_name=ts_name)
change_points.append((cp, bocpd_metadata))
logging.debug(f"Returning {len(change_points)} change points to client in ts={ts_name}.")
return change_points
def plot(
self,
change_points: List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]],
ts_names: Optional[List[str]] = None
) -> None:
"""Plots the change points, along with the time series.
Use this function to visualize the results of the changepoint detection.
Args:
change_points: List of changepoints, which are the return value of the detector() function.
ts_names: List of names of the time series, useful in case multiple time series are used.
Returns:
None.
"""
# TODO note: Once D23226664 lands, replace this with self.data.time_col_name
time_col_name = 'time'
# Group changepoints together
change_points_per_ts = self.group_changepoints_by_timeseries(change_points)
ts_names = ts_names or list(change_points_per_ts.keys())
data_df = self.data.to_dataframe()
for ts_name in ts_names:
ts_changepoints = change_points_per_ts[ts_name]
plt.plot(data_df[time_col_name].values, data_df[ts_name].values)
logging.info(f"Plotting {len(ts_changepoints)} change points for {ts_name}.")
if len(ts_changepoints) == 0:
logging.warning("No change points detected!")
for change in ts_changepoints:
plt.axvline(x=change[0].start_time, color="red")
plt.show()
def _choose_priors(self, model: BOCPDModelType,
params: BOCPDModelParameters) -> Tuple[Any, BOCPDModelParameters]:
"""Chooses priors which are defined by the model parameters.
Chooses priors which are defined by the model parameters.
All BOCPDModelParameters classes have a changepoint prior to iterate on.
Other parameters can be added to specific models.
This function runs a parameter search using the hyperparameter tuning library
to get the best hyperparameters.
Args:
model: Type of predictive model.
params: Parameters class, containing list of values of the parameters
on which to run hyperparameter tuning.
Returns:
best_cp_prior: best value of the prior on the changepoint probabilities.
params: parameter dictionary, where the selected values are set.
"""
# test these changepoint_priors
param_dict = params.prior_choice
# which parameter searching method are we using
search_method = params.search_method
# pick search iterations and method based on definition
if search_method == 'random':
search_N, SearchMethod = 3, SearchMethodEnum.RANDOM_SEARCH_UNIFORM
elif search_method == 'gridsearch':
search_N, SearchMethod = 1, SearchMethodEnum.GRID_SEARCH
else:
raise Exception(f"Search method has to be 'random' or 'gridsearch', but it is {search_method}!")
# construct the custom parameters for the HPT library
custom_parameters = [
{"name": k,
"type": "choice",
"values": v,
"value_type": "float",
"is_ordered": False
} for k, v in param_dict.items()
]
eval_fn = self._get_eval_function(model, params)
# Use the HPT library
seed_value = 100
ts_tuner = tpt.SearchMethodFactory.create_search_method(
parameters=custom_parameters,
selected_search_method=SearchMethod,
seed=seed_value
)
for _ in range(search_N):
ts_tuner.generate_evaluate_new_parameter_values(
evaluation_function=eval_fn, arm_count=4
)
scores_df = (
ts_tuner.list_parameter_value_scores()
)
scores_df = scores_df.sort_values(by='mean', ascending=False)
best_params = scores_df.parameters.values[0]
params.set_prior(best_params)
best_cp_prior = best_params['cp_prior']
return best_cp_prior, params
def _get_eval_function(self, model: BOCPDModelType,
model_parameters: BOCPDModelParameters):
"""
generates the objective function evaluated by hyperparameter
tuning library for choosing the priors
"""
def eval_fn(params_to_eval: Dict[str, float]) -> float:
changepoint_prior = params_to_eval['cp_prior']
model_parameters.set_prior(params_to_eval)
logging.debug(model_parameters)
logging.debug(params_to_eval)
# pyre-fixme[45]: Cannot instantiate abstract class `_PredictiveModel` with `__init__`, `is_multivariate`, `pred_mean` and 4 additional abstract methods.Pyre
underlying_model = self.models[model](data=self.data, parameters=model_parameters)
change_point = _BayesOnlineChangePoint(data=self.data, lag=3, debug=False)
change_point.detector(model=underlying_model,
changepoint_prior=changepoint_prior,
threshold=0.4)
post_pred = np.mean(change_point.get_posterior_predictive())
return post_pred
return eval_fn
def group_changepoints_by_timeseries(
self,
change_points: List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]
) -> Dict[str, List[Tuple[TimeSeriesChangePoint, BOCPDMetadata]]]:
"""Helper function to group changepoints by time series.
For multivariate inputs, all changepoints are output in
a list and the time series they correspond to is referenced
in the metadata. This function is a helper function to
group these changepoints by time series.
Args:
change_points: List of changepoints, with metadata containing the time
series names. This is the return value of the detector() method.
Returns:
Dictionary, with time series names, and their corresponding changepoints.
"""
if self.data.is_univariate():
data_df = self.data.to_dataframe()
ts_names = [x for x in data_df.columns if x != 'time']
else:
# Multivariate
ts_names = self.data.value.columns
change_points_per_ts = {}
for ts_name in ts_names:
change_points_per_ts[ts_name] = []
for cp in change_points:
change_points_per_ts[cp[1].ts_name].append(cp)
return dict(change_points_per_ts)
def get_change_prob(self) -> Dict[str, np.ndarray]:
"""Returns the probability of being a changepoint.
Args:
None.
Returns:
The changepoint probability for every point in the time series. The return
type is a dict, with the name of the timeseries
as the key, and the value is an array of probabilities
of the same length as the timeseries data.
"""
if not self.detected_flag:
raise ValueError('detector needs to be run before getting prob')
return self.change_prob
def get_run_length_matrix(self) -> Dict[str, np.ndarray]:
"""Returns the entire run-time posterior.
Args:
None.
Returns:
The return type is a dict, with the name of the timeseries
as the key, and the value is an array of probabilities
of the same length as the timeseries data.
"""
if not self.detected_flag:
raise ValueError('detector needs to be run before getting prob')
return self._run_length_prob
class _BayesOnlineChangePoint(Detector):
"""The underlying implementation of the BOCPD algorithm.
This is called by the class BayesianOnlineChangepoint. The user should
call the top level class, and not this one.
Given a univariate time series, this class
performs changepoint detection, i.e. it tells
us when the time series shows a change. This is online,
which means it gives the best estimate based on a
lookahead number of time steps (which is the lag).
This faithfully implements the algorithm in
Adams & MacKay, 2007. "Bayesian Online Changepoint Detection"
https://arxiv.org/abs/0710.3742
The basic idea is to see whether the new values are
improbable, when compared to a bayesian predictive model,
built from the previous observations.
Attributes:
data: This is univariate time series data. We require more
than 10 points, otherwise it is not very meaningful to define
changepoints.
T: number of values in the time series data.
lag: This specifies, how many time steps we will look ahead to
determine the change. There is a tradeoff in setting this parameter.
A small lag means we can detect a change really fast, which is important
in many applications. However, this also means we will make more
mistakes/have lower confidence since we might mistake a spike for change.
threshold: Threshold between 0 and 1. Probability values above this threshold
will be denoted as changepoint.
debug: This is a boolean. If set to true, this shows additional plots.
Currently, it shows a plot of the predicted mean and variance, after
lag steps, and the predictive probability of the next point. If the
results are unusual, the user should set it to true in order to
debug.
agg_cp: Aggregating the run-length posterior has empirically been
found to give a stronger signal for changepoint
detection. When this parameter is True, the posterior
is the aggregation of the run-length posterior, obtained
by fetching maximum values diagonally.
"""
rt_posterior: Optional[np.ndarray] = None
pred_mean_arr: Optional[np.ndarray] = None
pred_std_arr: Optional[np.ndarray] = None
next_pred_prob: Optional[np.ndarray] = None
def __init__(self, data: TimeSeriesData, lag: int = 10, debug: bool = False, agg_cp: bool = False):
self.data = data
self.T = data.value.shape[0]
self.lag = lag
self.threshold = None
self.debug = debug
self.agg_cp = agg_cp
# We use tensors for all data throughout; if the data is univariate
# then the last dimension is trivial. In this way, we standardise
# the same calculation throughout with fewer additional checks
# for univariate and multivariate data.
if not data.is_univariate():
self._ts_slice = slice(None)
self.P = data.value.shape[1] # Number of time series
self._ts_names = self.data.value.columns
self.data_values = data.value.values
else:
self.P = 1
self._ts_slice = 0
data_df = self.data.to_dataframe()
self._ts_names = [x for x in data_df.columns if x != 'time']
self.data_values = np.expand_dims(data.value.values, axis=1)
self.posterior_predictive = 0.
self._posterior_shape = (self.T, self.T, self.P)
self._message_shape = (self.T, self.P)
# pyre-fixme[14]: `detector` overrides method defined in `Detector` inconsistently.
def detector(
self,
model: Any,
threshold: Union[float, np.ndarray] = 0.5,
changepoint_prior: Union[float, np.ndarray] = 0.01
) -> Dict[str, Any]:
"""Runs the actual BOCPD detection algorithm.
Args:
model: Predictive Model for BOCPD
threshold: values between 0 and 1, array since this can be specified
separately for each time series.
changepoint_prior: array, each element between 0 and 1. Each element
specifies the prior probability of observing a changepoint
in each time series.
Returns:
Dictionary, with key as the name of the time series, and value containing
list of change points and their probabilities.
"""
self.threshold = threshold
if isinstance(self.threshold, float):
self.threshold = np.repeat(threshold, self.P)
if isinstance(changepoint_prior, float):
changepoint_prior = np.repeat(changepoint_prior, self.P)
self.rt_posterior = self._find_posterior(model, changepoint_prior)
return self._construct_output(self.threshold, lag=self.lag)
def get_posterior_predictive(self):
"""Returns the posterior predictive.
This is sum_{t=1}^T P(x_{t+1}|x_{1:t})
Args:
None.
Returns:
Array of predicted log probabilities for the next point.
"""
return self.posterior_predictive
def _find_posterior(self, model: Any, changepoint_prior: np.ndarray) -> np.ndarray:
"""
This calculates the posterior distribution over changepoints.
The steps here are the same as the algorithm described in
Adams & MacKay, 2007. https://arxiv.org/abs/0710.3742
"""
# P(r_t|x_t)
rt_posterior = np.zeros(self._posterior_shape)
# initialize first step
# P(r_0=1) = 1
rt_posterior[0, 0] = 1.0
model.update_sufficient_stats(x=self.data_values[0, self._ts_slice])
# To avoid growing a large dynamic list, we construct a large
# array and grow it backwards from the end.
# This is conceptually equivalent to an array that we insert/append
# to at the beginning - but avoids reallocating memory.
message = np.zeros(self._message_shape)
m_ptr = -1
# set up arrays for debugging
self.pred_mean_arr = np.zeros(self._posterior_shape)
self.pred_std_arr = np.zeros(self._posterior_shape)
self.next_pred_prob = np.zeros(self._posterior_shape)
# Calculate the log priors once outside the for-loop.
log_cp_prior = np.log(changepoint_prior)
log_om_cp_prior = np.log(1. - changepoint_prior)
self.posterior_predictive = 0.
log_posterior = 0.
# from the second step onwards
for i in range(1, self.T):
this_pt = self.data_values[i, self._ts_slice]
# P(x_t | r_t-1, x_t^r)
# this arr has a size of t; each element gives the predictive prob.
# of the point, if the current streak began at that time
# Step 3 of paper
pred_arr = model.pred_prob(t=i, x=this_pt)
# Step 9 posterior predictive
if i > 1:
self.posterior_predictive += logsumexp(pred_arr + log_posterior)
# record the mean/variance/prob for debugging
if self.debug:
pred_mean = model.pred_mean(t=i, x=this_pt)
pred_std = model.pred_std(t=i, x=this_pt)
# pyre-fixme[16]: `Optional` has no attribute `__setitem__`.
self.pred_mean_arr[i, 0:i, self._ts_slice] = pred_mean
self.pred_std_arr[i, 0:i, self._ts_slice] = pred_std
self.next_pred_prob[i, 0:i, self._ts_slice] = pred_arr
# calculate prob that this is a changepoint, i.e. r_t = 0
# step 5 of paper
# this is elementwise multiplication of pred and message
log_change_point_prob = np.logaddexp.reduce(
pred_arr + message[self.T + m_ptr: self.T, self._ts_slice] + log_cp_prior,
axis=0
)
# step 4
# log_growth_prob = pred_arr + message + np.log(1.0 - changepoint_prior)
message[self.T + m_ptr: self.T, self._ts_slice] = (
pred_arr + message[self.T + m_ptr: self.T, self._ts_slice] + log_om_cp_prior
)
# P(r_t, x_1:t)
# log_joint_prob = np.append(log_change_point_prob, log_growth_prob)
m_ptr -= 1
message[self.T + m_ptr, self._ts_slice] = log_change_point_prob
# calculate evidence, step 6
# (P(x_1:t))
# log_evidence = logsumexp(log_joint_prob)
#
# We use two facts here to make this more efficient:
#
# (1) log(e^(x_1+c) + ... + e^(x_n+c))
# = log(e^c . (e^(x_1) + ... + e^(x_n)))
# = c + log(e^(x_1) + ... + e^(x_n))
#
# (2) log(e^x_1 + e^x_2 + ... + e^x_n) [Associativity of logsumexp]
# = log(e^x_1 + e^(log(e^x_2 + ... + e^x_n)))
#
# In particular, we rewrite:
#
# (5) logaddexp_vec(pred_arr + message + log_cp_prior)
# (4+6) logaddexp_vec(append(log_change_point_prob, pred_arr + message + log_om_cp_prior))
#
# to
#
# M = logaddexp_vector(pred_arr + message) + log_cp_prior (using (1))
# logaddexp_binary( (using (2))
# log_change_point_prob,
# M - log_cp_prior + log_om_cp_prior (using (1))
# )
#
# In this way, we avoid up to T expensive log and exp calls by avoiding
# the repeated calculation of logaddexp_vector(pred_arr + message)
# while adding in only a single binary (not T length) logsumexp
# call in return and some fast addition and multiplications.
log_evidence = np.logaddexp(
log_change_point_prob,
log_change_point_prob - log_cp_prior + log_om_cp_prior
)
# step 7
# log_posterior = log_joint_prob - log_evidence
log_posterior = message[self.T + m_ptr: self.T, self._ts_slice] - log_evidence
rt_posterior[i, 0 : (i + 1), self._ts_slice] = np.exp(log_posterior)
# step 8
model.update_sufficient_stats(x=this_pt)
# pass the joint as a message to next step
# message = log_joint_prob
# Message is now passed implicitly - as we set it directly above.
return rt_posterior
def plot(self, threshold: Optional[Union[float, np.ndarray]] = None, lag: Optional[int] = None, ts_names: Optional[List[str]] = None):
"""Plots the changepoints along with the timeseries.
Args:
threshold: between 0 and 1. probability values above the threshold will be
determined to be changepoints.
lag: lags to use. If None, use the lags this was initialized with.
ts_names: list of names of the time series. Useful when there are multiple
time series.
Returns:
None.
"""
if threshold is None:
threshold = self.threshold
if lag is None:
lag = self.lag
# do some work to define the changepoints
cp_outputs = self._construct_output(threshold=threshold, lag=lag)
if ts_names is None:
ts_names = self._ts_names
for ts_ix, ts_name in enumerate(ts_names):
cp_output = cp_outputs[ts_name]
change_points = cp_output["change_points"]
ts_values = self.data.value[ts_name].values
y_min_cpplot = np.min(ts_values)
y_max_cpplot = np.max(ts_values)
sns.set()
# Plot the time series
plt.figure(figsize=(10, 8))
ax1 = plt.subplot(211)
ax1.plot(list(range(self.T)), ts_values, "r-")
ax1.set_xlabel("Time")
ax1.set_ylabel("Values")
# plot change points on the time series
ax1.vlines(
x=change_points,
ymin=y_min_cpplot,
ymax=y_max_cpplot,
colors="b",
linestyles="dashed",
)
# if in debugging mode, plot the mean and variance as well
if self.debug:
x_debug = list(range(lag + 1, self.T))
# pyre-fixme[16]: `Optional` has no attribute `__getitem__`.
y_debug_mean = self.pred_mean_arr[lag + 1 : self.T, lag, ts_ix]
y_debug_uv = (
self.pred_mean_arr[lag + 1 : self.T, lag, ts_ix]
+ self.pred_std_arr[lag + 1 : self.T, lag, ts_ix]
)
y_debug_lv = (
self.pred_mean_arr[lag + 1 : self.T, lag, ts_ix]
- self.pred_std_arr[lag + 1 : self.T, lag, ts_ix]
)
ax1.plot(x_debug, y_debug_mean, "k-")
ax1.plot(x_debug, y_debug_uv, "k--")
ax1.plot(x_debug, y_debug_lv, "k--")
ax2 = plt.subplot(212, sharex=ax1)
cp_plot_x = list(range(0, self.T - lag))
cp_plot_y = np.copy(self.rt_posterior[lag : self.T, lag, ts_ix])
# handle the fact that first point is not a changepoint
cp_plot_y[0] = 0.0
ax2.plot(cp_plot_x, cp_plot_y)
ax2.set_xlabel("Time")
ax2.set_ylabel("Changepoint Probability")
# if debugging, we also want to show the predictive probabilities
if self.debug:
plt.figure(figsize=(10, 4))
plt.plot(
list(range(lag + 1, self.T)),
self.next_pred_prob[lag + 1 : self.T, lag, ts_ix],
"k-",
)
plt.xlabel("Time")
plt.ylabel("Log Prob. Density Function")
plt.title("Debugging: Predicted Probabilities")
def _calc_agg_cppprob(self, t: int) -> np.ndarray:
rt_posterior = self.rt_posterior
assert rt_posterior is not None
run_length_pos = rt_posterior[:, :, t].copy()  # copy so fill_diagonal below does not mutate rt_posterior
np.fill_diagonal(run_length_pos, 0.0)
change_prob = np.zeros(self.T)
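# Entry run_length_pos[i + k, k] is the posterior probability, at time
# i + k, of a run of length k, i.e. of a run that started at time i.
# Taking the max along that diagonal aggregates the evidence for a
# changepoint at time i across all later observations.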
for i in range(self.T):
change_prob[i] = np.max(run_length_pos[i:,:(self.T-i)].diagonal())
return change_prob
def _construct_output(self, threshold: np.ndarray, lag: int) -> Dict[str, Any]:
output = {}
rt_posterior = self.rt_posterior
assert rt_posterior is not None
for t, t_name in enumerate(self._ts_names):
if not self.agg_cp:
# till lag, prob = 0, so prepend array with zeros
change_prob = np.hstack((rt_posterior[lag : self.T, lag, t], np.zeros(lag)))
# handle the fact that the first point is not a changepoint
change_prob[0] = 0.
elif self.agg_cp:
change_prob = self._calc_agg_cppprob(t)
change_points = np.where(change_prob > threshold[t])[0]
output[t_name] = {
"change_prob": change_prob,
"change_points": change_points,
"run_length_prob": rt_posterior[:,:,t]
}
return output
def adjust_parameters(self, threshold: np.ndarray, lag: int) -> Dict[str, Any]:
"""Adjust the parameters.
If the preset parameters are not giving the desired result,
the user can adjust the parameters. Since the algorithm
calculates changepoints for all lags, we can see how
changepoints look like for other lag/threshold.
Args:
threshold: between 0 and 1. Probabilities above threshold are
considered to be changepoints.
lag: lag at which changepoints are calculated.
Returns:
cp_output: Dictionary with changepoint list and probabilities.
"""
cp_output = self._construct_output(threshold=threshold, lag=lag)
self.plot(threshold=threshold, lag=lag)
return cp_output
def check_data(data: TimeSeriesData):
"""Small helper function to check if the data is in the appropriate format.
Currently, this only checks if we have enough data points to run the
algorithm meaningfully.
Args:
data: TimeSeriesData object, on which to run the algorithm.
Returns:
None.
"""
if data.value.shape[0] < _MIN_POINTS:
raise ValueError(
f"""
Data must have {_MIN_POINTS} points,
it only has {data.value.shape[0]} points
"""
)
class _PredictiveModel(ABC):
"""Abstract class for BOCPD Predictive models.
This is an abstract class. All Predictive models
for BOCPD derive from this class.
Attributes:
data: TimeSeriesData object we are modeling.
parameters: Parameter class, which contains BOCPD model parameters.
"""
@abstractmethod
def __init__(self, data: TimeSeriesData, parameters: BOCPDModelParameters) -> None:
pass
@abstractmethod
def setup(self):
pass
@abstractmethod
def pred_prob(self, t: int, x: float) -> np.ndarray:
pass
@abstractmethod
def pred_mean(self, t: int, x: float) -> np.ndarray:
pass
@abstractmethod
def pred_std(self, t: int, x: float) -> np.ndarray:
pass
@abstractmethod
def update_sufficient_stats(self, x: float) -> None:
pass
@staticmethod
@abstractmethod
def is_multivariate() -> bool:
pass
class _NormalKnownPrec(_PredictiveModel):
"""Predictive model where data comes from a Normal distribution.
This model is the Normal-Normal model, with known precision.
It is specified in terms of precision for convenience.
It assumes that the data is generated from a normal distribution with
known precision.
The prior on the mean of the normal, is a normal distribution.
Attributes:
data: The Timeseriesdata object, for which the algorithm is run.
parameters: Parameters specifying the prior.
"""
def __init__(
self,
data: TimeSeriesData,
parameters: NormalKnownParameters
):
# \mu \sim N(\mu0, \frac{1}{\lambda0})
# x \sim N(\mu,\frac{1}{\lambda})
empirical = parameters.empirical
mean_prior = parameters.mean_prior
mean_prec_prior = parameters.mean_prec_prior
known_prec = parameters.known_prec
self.parameters = parameters
self._maxT = len(data)
# hyper parameters for mean and precision
self.mu_0 = mean_prior
self.lambda_0 = mean_prec_prior
self.lambda_val = known_prec
if data.is_univariate():
self._data_shape = self._maxT
else:
# Multivariate
self.P = data.value.values.shape[1]
# If the user didn't specify the priors as multivariate
# then we assume the same prior(s) over all time series.
if self.mu_0 is not None and isinstance(self.mu_0, float):
self.mu_0 = np.repeat(self.mu_0, self.P)
if self.lambda_0 is not None and isinstance(self.lambda_0, float):
self.lambda_0 = np.repeat(self.lambda_0, self.P)
if self.lambda_val is not None and isinstance(self.lambda_val, float):
self.lambda_val = np.repeat(self.lambda_val, self.P)
self._data_shape = (self._maxT, self.P)
# For efficiency, we simulate a dynamically growing list with
# insertions at the start, by a fixed size array with a pointer
# where we grow the array from the end of the array. This
# makes insertions constant time and means we can use
# vectorized computation throughout.
self._mean_arr_num = np.zeros(self._data_shape)
self._std_arr = np.zeros(self._data_shape)
self._ptr = 0
# if priors are going to be decided empirically,
# we ignore these settings above
# Also, we need to pass on the data in this case
if empirical:
check_data(data)
self._find_empirical_prior(data)
if self.lambda_0 is not None and self.lambda_val is not None and self.mu_0 is not None:
# We set these here to avoid recomputing the linear expression
# throughout + avoid unnecessarily zeroing the memory etc.
self._mean_arr = np.repeat(
np.expand_dims(self.mu_0 * self.lambda_0, axis=0),
self._maxT,
axis=0
)
self._prec_arr = np.repeat(
np.expand_dims(self.lambda_0, axis=0),
self._maxT,
axis=0
)
else:
raise ValueError("Priors for NormalKnownPrec should not be None.")
def setup(self):
# everything is already set up in __init__!
pass
def _find_empirical_prior(self, data: TimeSeriesData):
"""
if priors are not defined, we take an empirical Bayes
approach and define the priors from the data
"""
data_arr = data.value
# best guess of mu0 is data mean
if data.is_univariate():
self.mu_0 = data_arr.mean(axis=0)
else:
self.mu_0 = data_arr.mean(axis=0).values
# prior precision of the mean: \lambda_0 = \frac{1}{\sigma^2}
if data.is_univariate():
self.lambda_0 = 1.0 / data_arr.var(axis=0)
else:
self.lambda_0 = 1.0 / data_arr.var(axis=0).values
# to find the variance of the data we just look at windows small
# enough that the mean won't change much within a window
window_size = 10
var_arr = data_arr.rolling(window_size).var()[window_size - 1 :]
if data.is_univariate():
self.lambda_val = self.parameters.known_prec_multiplier / var_arr.mean()
else:
self.lambda_val = self.parameters.known_prec_multiplier / var_arr.mean().values
logging.debug("Empirical Prior: mu_0:", self.mu_0)
logging.debug("Empirical Prior: lambda_0:", self.lambda_0)
logging.debug("Empirical Prior: lambda_val:", self.lambda_val)
@staticmethod
def _norm_logpdf(x, mean, std):
"""
Hardcoded version of scipy.norm.logpdf.
This is hardcoded because scipy version is slow due to checks +
uses log(pdf(...)) - which wastefully computes exp(..) and log(...).
"""
return -np.log(std) - _LOG_SQRT2PI - 0.5 * ((x - mean) / std)**2
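# Sanity check (a sketch, not used by the class): the expression above
# should agree with scipy's norm.logpdf up to floating-point error, e.g.
# _NormalKnownPrec._norm_logpdf(0.5, 0.0, 1.0) == norm.logpdf(0.5, loc=0.0, scale=1.0)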
def pred_prob(self, t: int, x: float) -> np.ndarray:
"""Returns log predictive probabilities.
We will give log predictive probabilities for
changepoints that started at times from 0 to t.
This posterior predictive is from
https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
equation 36.
Args:
t is the time,
x is the new data point
Returns:
pred_arr: Array with predicted log probabilities for each starting point.
"""
pred_arr = self._norm_logpdf(
x,
self._mean_arr[self._maxT + self._ptr : self._maxT + self._ptr + t],
self._std_arr[self._maxT + self._ptr : self._maxT + self._ptr + t]
)
return pred_arr
def pred_mean(self, t: int, x: float) -> np.ndarray:
return self._mean_arr[self._maxT + self._ptr : self._maxT + self._ptr + t]
def pred_std(self, t: int, x: float) -> np.ndarray:
return self._std_arr[self._maxT + self._ptr : self._maxT + self._ptr + t]
def update_sufficient_stats(self, x: float) -> None:
"""Updates sufficient statistics with new data.
We will store the sufficient stats for
a streak starting at times 0, 1, ....t.
This is eqns 29 and 30 in Murphy's note:
https://www.cs.ubc.ca/~murphyk/Papers/bayesGauss.pdf
Args:
x: The new data point.
Returns:
None.
"""
# \lambda = \lambda_0 + n * \lambda
# hence, online, at each step: lambda[i] = lambda[i-1] + 1* lambda
# for numerator of the mean.
# n*\bar{x}*\lambda + \mu_0 * \lambda_0
# So, online we add x*\lambda to the numerator from the previous step
# I think we can do it online, but I will need to think more
# for now we'll just keep track of the sum
# Grow list (backwards from the end of the array for efficiency)
self._ptr -= 1
# update the precision array
self._prec_arr[self._maxT + self._ptr : self._maxT] += self.lambda_val
# update the numerator of the mean array
self._mean_arr_num[self._maxT + self._ptr : self._maxT] += x * self.lambda_val
# This is now handled by initializing the array with this value.
# self._prec_arr[self._ptr] = self.lambda_0 + 1. * self.lambda_val
self._std_arr[self._maxT + self._ptr : self._maxT] = np.sqrt(
1. / self._prec_arr[self._maxT + self._ptr : self._maxT] + 1. / self.lambda_val
)
# This is now handled by initializing the array with self.mu_0 * self.lambda_0
# self._mean_arr_num[self._ptr] = (x * self.lambda_val + self.mu_0 * self.lambda_0)
# update the mean array itself
self._mean_arr[self._maxT + self._ptr : self._maxT] = (
self._mean_arr_num[self._maxT + self._ptr : self._maxT]
/ self._prec_arr[self._maxT + self._ptr : self._maxT]
)
@staticmethod
def is_multivariate():
return True
class _BayesianLinReg(_PredictiveModel):
"""Predictive model for BOCPD where data comes from linear model.
Defines the predictive model, where we assume that the data points
come from a Bayesian Linear model, where the values are regressed
against time.
We use a conjugate prior, where we impose an Inverse gamma prior on
sigma^2 and normal prior on the conditional distribution of beta
p(beta|sigma^2)
See https://en.wikipedia.org/wiki/Bayesian_linear_regression
for the calculations.
Attributes:
data: TimeSeriesData object, on which algorithm is run
parameters: Specifying all the priors.
"""
mu_prior: Optional[np.ndarray] = None
prior_regression_numpoints: Optional[int] = None
def __init__(
self,
data: TimeSeriesData,
parameters: TrendChangeParameters,
):
mu_prior = parameters.mu_prior
num_likelihood_samples = parameters.num_likelihood_samples
num_points_prior = parameters.num_points_prior
readjust_sigma_prior = parameters.readjust_sigma_prior
plot_regression_prior = parameters.plot_regression_prior
self.parameters = parameters
self.data = data
logging.info(
f"Initializing bayesian linear regression with data {data}, "
f"mu_prior {mu_prior}, {num_likelihood_samples} likelihood samples, "
f"{num_points_prior} points to run basic linear regression with, "
f"sigma prior adjustment {readjust_sigma_prior}, "
f"and plot prior regression {plot_regression_prior}"
)
self._x = None
self._y = None
self.t = 0
# Random numbers I tried out to make the sigma_squared values really large
self.a_0 = 0.1 # TODO find better priors?
self.b_0 = 200 # TODO
self.all_time = np.array(range(data.time.shape[0]))
self.all_vals = data.value
self.lambda_prior = 2e-7 * np.identity(2)
self.num_likelihood_samples = num_likelihood_samples
self.min_sum_samples = (
math.sqrt(self.num_likelihood_samples) / 10000
) # TODO: Hack for getting around probabilities of 0 -- cap it at some minimum
self._mean_arr = {}
self._std_arr = {}
def setup(self) -> None:
"""Sets up the regression, by calculating the priors.
Args:
None.
Returns:
None.
"""
data = self.data
mu_prior = self.parameters.mu_prior
num_points_prior = self.parameters.num_points_prior
readjust_sigma_prior = self.parameters.readjust_sigma_prior
plot_regression_prior = self.parameters.plot_regression_prior
# Set up linear regression prior
if mu_prior is None:
if data is not None:
self.prior_regression_numpoints = num_points_prior
time = self.all_time[: self.prior_regression_numpoints]
vals = self.all_vals[: self.prior_regression_numpoints]
logging.info("Running basic linear regression.")
# Compute basic linear regression
slope, intercept, r_value, p_value, std_err = linregress(time, vals)
self.mu_prior = mu_prior = np.array([intercept, slope]) # Set up mu_prior
if readjust_sigma_prior:
logging.info("Readjusting the prior for Inv-Gamma for sigma^2.")
# these values are the mean/variance of sigma^2: Inv-Gamma(*,*)
sigma_squared_distribution_mean = _BayesianLinReg._residual_variance(
time, vals, intercept, slope
)
sigma_squared_distribution_variance = 1000 # TODO: we don't really know what the variance of sigma^2: Inv-Gamma(a, b) should be
# The following values are computed from https://reference.wolfram.com/language/ref/InverseGammaDistribution.html
# We want to match the mean of Inv-Gamma(a, b) to the sigma^2 mean (called mu), and variances together too (called var).
# We obtain mu = b / (a-1) and var = b^2 / ((a-2) * (a-1)^2) and then we simply solve for a and b.
self.a_0 = 2.0 + (
sigma_squared_distribution_mean
/ sigma_squared_distribution_variance
)
self.b_0 = sigma_squared_distribution_mean * (self.a_0 - 1)
else:
self.mu_prior = mu_prior = np.zeros(2)
logging.warning("No data provided -- reverting to default mu_prior.")
else:
self.mu_prior = mu_prior
logging.info(f"Obtained mu_prior: {self.mu_prior}")
logging.info(f"Obtained a_0, b_0 values of {self.a_0}, {self.b_0}")
if plot_regression_prior:
intercept, slope = tuple(mu_prior)
_BayesianLinReg._plot_regression(self.all_time, self.all_vals, intercept, slope)
@staticmethod
def _plot_regression(x, y, intercept, slope):
plt.plot(x, y, ".")
plt.plot(x, intercept + slope * x, "-")
plt.show()
@staticmethod
def _residual_variance(x, y, intercept, slope):
n = len(x)
assert n == len(y)
x = np.array(x)
y = np.array(y)
predictions = intercept + slope * x
residuals = predictions - y
return np.sum(np.square(residuals)) / (n - 2)
@staticmethod
def _sample_bayesian_linreg(mu_n, lambda_n, a_n, b_n, num_samples):
#this is to make sure the results are consistent
# and tests don't break randomly
seed_value = 100
np.random.seed(seed_value)
sample_sigma_squared = invgamma.rvs(a_n, scale=b_n, size=1)
# Sample a beta value from Normal(mu_n, sigma^2 * inv(lambda_n))
assert (
len(mu_n.shape) == 1
), f"Expected 1 dimensional mu_n, but got {mu_n.shape}"
all_beta_samples = np.random.multivariate_normal(
mu_n, sample_sigma_squared * np.linalg.inv(lambda_n), size=num_samples
)
return all_beta_samples, sample_sigma_squared
@staticmethod
def _compute_bayesian_likelihood(beta, sigma_squared, x, val):
prediction = np.matmul(beta, x)
bayesian_likelihoods = norm.pdf(
val, loc=prediction, scale=np.sqrt(sigma_squared)
)
return bayesian_likelihoods, prediction
@staticmethod
def _sample_likelihood(mu_n, lambda_n, a_n, b_n, x, val, num_samples):
all_sample_betas, sample_sigma_squared = _BayesianLinReg._sample_bayesian_linreg(
mu_n, lambda_n, a_n, b_n, num_samples
)
bayesian_likelihoods, prediction = _BayesianLinReg._compute_bayesian_likelihood(
all_sample_betas, sample_sigma_squared, x, val
)
return bayesian_likelihoods, prediction, sample_sigma_squared
def pred_prob(self, t, x) -> np.ndarray:
"""Predictive probability of a new data point
Args:
t: time
x: the new data point
Returns:
pred_arr: Array with log predictive probabilities for each starting point.
"""
# TODO: use better priors
def log_post_pred(y, t, rl):
N = self._x.shape[0]
x_arr = self._x[N - rl - 1 : N, :]
y_arr = self._y[N - rl - 1 : N].reshape(-1, 1)
xtx = np.matmul(x_arr.transpose(), x_arr) # computes X^T X
xty = np.squeeze(np.matmul(x_arr.transpose(), y_arr)) # computes X^T Y
yty = np.matmul(y_arr.transpose(), y_arr) # computes Y^T Y
# Bayesian learning update
lambda_n = xtx + self.lambda_prior
# posterior mean (see the Wikipedia reference above):
# mu_n = inv(lambda_n) @ (lambda_0 @ mu_0 + X^T y)
mu_n = np.matmul(
np.linalg.inv(lambda_n),
np.matmul(self.lambda_prior, self.mu_prior) + xty,
)
import os
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
import argparse
import math
import my_config
from prohmr.datasets.pw3d_eval_dataset import PW3DEvalDataset
from prohmr.configs import get_config, prohmr_config
from prohmr.models import ProHMR
from prohmr.models.smpl_mine import SMPL
from prohmr.utils.pose_utils import compute_similarity_transform_batch_numpy, scale_and_translation_transform_batch
from prohmr.utils.geometry import undo_keypoint_normalisation, orthographic_project_torch, convert_weak_perspective_to_camera_translation
from prohmr.utils.renderer import Renderer
from prohmr.utils.sampling_utils import compute_vertex_uncertainties_from_samples
import subsets
def evaluate_3dpw(model,
model_cfg,
eval_dataset,
metrics_to_track,
device,
save_path,
num_pred_samples,
num_workers=4,
pin_memory=True,
vis_every_n_batches=1000,
num_samples_to_visualise=10,
save_per_frame_uncertainty=True):
eval_dataloader = DataLoader(eval_dataset,
batch_size=1,
shuffle=False,
drop_last=True,
num_workers=num_workers,
pin_memory=pin_memory)
smpl_neutral = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1).to(device)
smpl_male = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='male').to(device)
smpl_female = SMPL(my_config.SMPL_MODEL_DIR, batch_size=1, gender='female').to(device)
metric_sums = {'num_datapoints': 0}
per_frame_metrics = {}
for metric in metrics_to_track:
metric_sums[metric] = 0.
per_frame_metrics[metric] = []
if metric == 'joints3D_coco_invis_samples_dist_from_mean':
metric_sums['num_invis_joints3Dsamples'] = 0
elif metric == 'hrnet_joints2D_l2es':
metric_sums['num_vis_hrnet_joints2D'] = 0
elif metric == 'hrnet_joints2Dsamples_l2es':
metric_sums['num_vis_hrnet_joints2Dsamples'] = 0
fname_per_frame = []
pose_per_frame = []
shape_per_frame = []
cam_per_frame = []
if save_per_frame_uncertainty:
vertices_uncertainty_per_frame = []
renderer = Renderer(model_cfg, faces=model.smpl.faces)
reposed_cam_wp = np.array([0.85, 0., -0.2])
reposed_cam_t = convert_weak_perspective_to_camera_translation(cam_wp=reposed_cam_wp,
focal_length=model_cfg.EXTRA.FOCAL_LENGTH,
resolution=model_cfg.MODEL.IMAGE_SIZE)
model.eval()
for batch_num, samples_batch in enumerate(tqdm(eval_dataloader)):
# if batch_num == 2:
# break
# ------------------------------- TARGETS and INPUTS -------------------------------
input = samples_batch['input'].to(device)
target_pose = samples_batch['pose'].to(device)
target_shape = samples_batch['shape'].to(device)
target_gender = samples_batch['gender'][0]
hrnet_joints2D_coco = samples_batch['hrnet_kps'].cpu().detach().numpy()
hrnet_joints2D_coco_vis = samples_batch['hrnet_kps_vis'].cpu().detach().numpy()
fname = samples_batch['fname']
if target_gender == 'm':
target_smpl_output = smpl_male(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_male(betas=target_shape)
elif target_gender == 'f':
target_smpl_output = smpl_female(body_pose=target_pose[:, 3:],
global_orient=target_pose[:, :3],
betas=target_shape)
target_reposed_smpl_output = smpl_female(betas=target_shape)
target_vertices = target_smpl_output.vertices
target_joints_h36mlsp = target_smpl_output.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :]
target_reposed_vertices = target_reposed_smpl_output.vertices
# ------------------------------- PREDICTIONS -------------------------------
out = model({'img': input})
"""
out is a dict with keys:
- pred_cam: (1, num_samples, 3) tensor, camera is same for all samples
- pred_cam_t: (1, num_samples, 3) tensor, camera is same for all samples
- This is just pred_cam converted from weak-perspective (i.e. [s, tx, ty]) to
full-perspective (i.e. [tx, ty, tz] and focal_length = 5000 --> this is basically just weak-perspective anyway)
- pred_smpl_params: dict with keys:
- global_orient: (1, num_samples, 1, 3, 3) tensor
- body_pose: (1, num_samples, 23, 3, 3) tensor
- betas: (1, num_samples, 10) tensor, betas are same for all samples
- pred_pose_6d: (1, num_samples, 144) tensor
- pred_vertices: (1, num_samples, 6890, 3) tensor
- pred_keypoints_3d: (1, num_samples, 44, 3) tensor
- pred_keypoints_2d: (1, num_samples, 44, 2) tensor
- log_prob: (1, num_samples) tensor
- conditioning_feats: (1, 2047) tensor
"""
pred_cam_wp = out['pred_cam'][:, 0, :]
pred_pose_rotmats_mode = out['pred_smpl_params']['body_pose'][:, 0, :, :, :]
pred_glob_rotmat_mode = out['pred_smpl_params']['global_orient'][:, 0, :, :, :]
pred_shape_mode = out['pred_smpl_params']['betas'][:, 0, :]
pred_pose_rotmats_samples = out['pred_smpl_params']['body_pose'][0, 1:, :, :, :]
pred_glob_rotmat_samples = out['pred_smpl_params']['global_orient'][0, 1:, :, :, :]
pred_shape_samples = out['pred_smpl_params']['betas'][0, 1:, :]
assert pred_pose_rotmats_samples.shape[0] == num_pred_samples
pred_smpl_output_mode = smpl_neutral(body_pose=pred_pose_rotmats_mode,
global_orient=pred_glob_rotmat_mode,
betas=pred_shape_mode,
pose2rot=False)
pred_vertices_mode = pred_smpl_output_mode.vertices # (1, 6890, 3)
pred_joints_h36mlsp_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (1, 14, 3)
pred_joints_coco_mode = pred_smpl_output_mode.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (1, 17, 3)
pred_vertices2D_mode = orthographic_project_torch(pred_vertices_mode, pred_cam_wp, scale_first=False)
pred_vertices2D_mode = undo_keypoint_normalisation(pred_vertices2D_mode, input.shape[-1])
pred_joints2D_coco_mode = orthographic_project_torch(pred_joints_coco_mode, pred_cam_wp) # (1, 17, 2)
pred_joints2D_coco_mode = undo_keypoint_normalisation(pred_joints2D_coco_mode, input.shape[-1])
pred_reposed_vertices_mean = smpl_neutral(betas=pred_shape_mode).vertices # (1, 6890, 3)
pred_smpl_output_samples = smpl_neutral(body_pose=pred_pose_rotmats_samples,
global_orient=pred_glob_rotmat_samples,
betas=pred_shape_samples,
pose2rot=False)
pred_vertices_samples = pred_smpl_output_samples.vertices # (num_pred_samples, 6890, 3)
pred_joints_h36mlsp_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_H36M_MAP, :][:, my_config.H36M_TO_J14, :] # (num_samples, 14, 3)
pred_joints_coco_samples = pred_smpl_output_samples.joints[:, my_config.ALL_JOINTS_TO_COCO_MAP, :] # (num_pred_samples, 17, 3)
pred_joints2D_coco_samples = orthographic_project_torch(pred_joints_coco_samples, pred_cam_wp) # (num_pred_samples, 17, 2)
pred_joints2D_coco_samples = undo_keypoint_normalisation(pred_joints2D_coco_samples, input.shape[-1])
pred_reposed_vertices_samples = smpl_neutral(body_pose=torch.zeros(num_pred_samples, 69, device=device, dtype=torch.float32),
global_orient=torch.zeros(num_pred_samples, 3, device=device, dtype=torch.float32),
betas=pred_shape_samples).vertices # (num_pred_samples, 6890, 3)
# ------------------------------------------------ METRICS ------------------------------------------------
# Numpy-fying targets
target_vertices = target_vertices.cpu().detach().numpy()
target_joints_h36mlsp = target_joints_h36mlsp.cpu().detach().numpy()
target_reposed_vertices = target_reposed_vertices.cpu().detach().numpy()
# Numpy-fying preds
pred_vertices_mode = pred_vertices_mode.cpu().detach().numpy()
pred_joints_h36mlsp_mode = pred_joints_h36mlsp_mode.cpu().detach().numpy()
pred_joints_coco_mode = pred_joints_coco_mode.cpu().detach().numpy()
pred_vertices2D_mode = pred_vertices2D_mode.cpu().detach().numpy()
pred_joints2D_coco_mode = pred_joints2D_coco_mode.cpu().detach().numpy()
pred_reposed_vertices_mean = pred_reposed_vertices_mean.cpu().detach().numpy()
pred_vertices_samples = pred_vertices_samples.cpu().detach().numpy()
pred_joints_h36mlsp_samples = pred_joints_h36mlsp_samples.cpu().detach().numpy()
pred_joints_coco_samples = pred_joints_coco_samples.cpu().detach().numpy()
pred_joints2D_coco_samples = pred_joints2D_coco_samples.cpu().detach().numpy()
pred_reposed_vertices_samples = pred_reposed_vertices_samples.cpu().detach().numpy()
# -------------- 3D Metrics with Mode and Minimum Error Samples --------------
if 'pves' in metrics_to_track:
pve_batch = np.linalg.norm(pred_vertices_mode - target_vertices,
axis=-1) # (bs, 6890)
metric_sums['pves'] += np.sum(pve_batch) # scalar
per_frame_metrics['pves'].append(np.mean(pve_batch, axis=-1))
if 'pves_samples_min' in metrics_to_track:
pve_per_sample = np.linalg.norm(pred_vertices_samples - target_vertices, axis=-1) # (num samples, 6890)
min_pve_sample = np.argmin(np.mean(pve_per_sample, axis=-1))
pve_samples_min_batch = pve_per_sample[min_pve_sample] # (6890,)
metric_sums['pves_samples_min'] += np.sum(pve_samples_min_batch)
per_frame_metrics['pves_samples_min'].append(np.mean(pve_samples_min_batch, axis=-1, keepdims=True))
import h5py
import numpy as np
from pathlib import Path
from collections import OrderedDict
NCLV = 5 # number of microphysics variables
def load_input_fields(path, transpose=False):
"""
"""
fields = OrderedDict()
argnames = [
'PT', 'PQ', 'PAP', 'PAPH', 'PLU', 'PLUDE', 'PMFU', 'PMFD',
'PA', 'PCLV', 'PSUPSAT', 'TENDENCY_CML_T', 'TENDENCY_CML_Q',
'TENDENCY_CML_CLD'
]
with h5py.File(path, 'r') as f:
fields['KLON'] = f['KLON'][0]
fields['KLEV'] = f['KLEV'][0]
fields['PTSPHY'] = f['PTSPHY'][0]
klon = fields['KLON']
klev = fields['KLEV']
for argname in argnames:
fields[argname] = np.ascontiguousarray(f[argname])
fields['PQSAT'] = np.ndarray(order="C", shape=(klev, klon))
fields['TENDENCY_LOC_A'] = np.ndarray(order="C", shape=(klev, klon))
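# (Truncated in the source; returning the assembled dict is the natural completion.)
return fields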
import numpy as np
from scipy.optimize import curve_fit, minimize_scalar
h_planck = 4.135667662e-3 # eV/ps
h_planck_bar = 6.58211951e-4 # eV/ps
kb_boltzmann = 8.6173324e-5 # eV/K
def get_standard_errors_from_covariance(covariance):
# return np.linalg.eigvals(covariance)
return np.sqrt(np.diag(covariance))
#return np.sqrt(np.trace(covariance))
class Lorentzian:
def __init__(self,
test_frequencies_range,
power_spectrum,
guess_position=None,
guess_height=None):
self.test_frequencies_range = test_frequencies_range
self.power_spectrum = power_spectrum
self.guess_pos = guess_position
self.guess_height = guess_height
self._fit_params = None
self._fit_covariances = None
self.curve_name = 'Lorentzian'
def _function(self, x, a, b, c, d):
"""Lorentzian function
x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
"""
return c/(np.pi*b*(1.0+((x - a)/b)**2))+d
def get_fitting_parameters(self):
if self._fit_params is None:
if self.guess_pos is None or self.guess_height is None:
fit_params, fit_covariances = curve_fit(self._function,
self.test_frequencies_range,
self.power_spectrum)
else:
fit_params, fit_covariances = curve_fit(self._function,
self.test_frequencies_range,
self.power_spectrum,
p0=[self.guess_pos, 0.1, self.guess_height, 0.0])
self._fit_covariances = fit_covariances
self._fit_params = fit_params
return self._fit_params, self._fit_covariances
def get_fitting(self):
from scipy.integrate import quad
try:
fit_params, fit_covariances = self.get_fitting_parameters()
maximum = fit_params[2]/(fit_params[1]*np.pi)
width = 2.0*fit_params[1]
frequency = fit_params[0]
area = fit_params[2]
standard_errors = get_standard_errors_from_covariance(fit_covariances)
global_error = np.average(standard_errors[:2])/np.sqrt(area)
if np.isnan(global_error):
raise RuntimeError
#error = get_error_from_covariance(fit_covariances)
base_line = fit_params[3]
return {'maximum': maximum,
'width': width,
'peak_position': frequency,
'standard_errors': standard_errors,
'global_error': global_error,
'area': area,
'base_line': base_line,
'all_good': True}
except RuntimeError:
return {'all_good': False}
def get_curve(self, frequency_range):
return self._function(frequency_range, *self.get_fitting_parameters()[0])
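# Usage sketch (not from the original file; the spectrum below is synthetic):
# freqs = np.linspace(0.0, 10.0, 500)
# spectrum = 1.0 / (np.pi * 0.2 * (1.0 + ((freqs - 5.0) / 0.2) ** 2)) + 0.01
# peak = Lorentzian(freqs, spectrum, guess_position=5.0, guess_height=1.5)
# print(peak.get_fitting())  # dict with 'peak_position', 'width', 'area', ...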
class Lorentzian_asymmetric:
def __init__(self,
test_frequencies_range,
power_spectrum,
guess_position=None,
guess_height=None):
self.test_frequencies_range = test_frequencies_range
self.power_spectrum = power_spectrum
self.guess_pos = guess_position
self.guess_height = guess_height
self._fit_params = None
self._fit_covariances = None
self.curve_name = 'Assym. Lorentzian'
def _g_a (self, x, a, b, s):
"""Asymmetric width term
x: frequency coordinate
a: peak position
b: half width
s: asymmetry parameter
"""
return 2*b/(1.0+np.exp(s*(x-a)))
def _function(self, x, a, b, c, d, s):
"""Lorentzian asymmetric function
x: frequency coordinate
a: peak position
b: half width
c: area proportional parameter
d: base line
s: asymmetry parameter
"""
return c/(np.pi*self._g_a(x, a, b, s)*(1.0+((x-a)/(self._g_a(x, a, b, s)))**2))+d
def get_fitting_parameters(self):
if self._fit_params is None:
if self.guess_pos is None or self.guess_height is None:
fit_params, fit_covariances = curve_fit(self._function,
self.test_frequencies_range,
self.power_spectrum)
else:
fit_params, fit_covariances = curve_fit(self._function,
self.test_frequencies_range,
self.power_spectrum,
p0=[self.guess_pos, 0.1, self.guess_height, 0.0, 0.0])
self._fit_covariances = fit_covariances
self._fit_params = fit_params
return self._fit_params, self._fit_covariances
def get_fitting(self):
from scipy.integrate import quad
try:
fit_params, fit_covariances = self.get_fitting_parameters()
peak_pos = minimize_scalar(lambda x: -self._function(x, *fit_params), fit_params[0],
bounds=[self.test_frequencies_range[0], self.test_frequencies_range[-1]],
method='bounded')
frequency = peak_pos["x"]
maximum = -peak_pos["fun"]
width = 2.0 * self._g_a(frequency, fit_params[0], fit_params[1], fit_params[4])
asymmetry = fit_params[4]
area, error_integration = quad(self._function, 0, self.test_frequencies_range[-1],
args=tuple(fit_params),
epsabs=1e-8)
# area = fit_params[2]
standard_errors = get_standard_errors_from_covariance(fit_covariances)
global_error = np.average(standard_errors[:2])/np.sqrt(area)
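# (The remainder of this method was truncated in the source; the completion
# below mirrors Lorentzian.get_fitting and is an assumption.)
if np.isnan(global_error):
raise RuntimeError
base_line = fit_params[3]
return {'maximum': maximum,
'width': width,
'peak_position': frequency,
'asymmetry': asymmetry,
'standard_errors': standard_errors,
'global_error': global_error,
'area': area,
'base_line': base_line,
'all_good': True}
except RuntimeError:
return {'all_good': False}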
from src.attack_dataset_config import AttackDatasetConfig
from src.backdoor.edge_case_attack import EdgeCaseAttack
from src.client_attacks import Attack
from src.data.tf_data import Dataset
from src.data.tf_data_global import GlobalDataset, IIDGlobalDataset, NonIIDGlobalDataset, DirichletDistributionDivider
from src.config.definitions import Config
from src.data.leaf_loader import load_leaf_dataset, process_text_input_indices, process_char_output_indices
import numpy as np
def load_global_dataset(config, malicious_clients, attack_dataset) -> GlobalDataset:
"""Loads dataset according to config parameter, returns GlobalData
:type config: Config
:type malicious_clients: np.array boolean list of clients malicious state
"""
attack_type = Attack(config.client.malicious.attack_type) \
if config.client.malicious is not None else None
dataset: GlobalDataset
if attack_type == Attack.BACKDOOR and attack_dataset.type == 'edge':
pass # We are reloading in edge
else:
(dataset, (x_train, y_train)) = get_dataset(config, attack_dataset)
if attack_type == Attack.BACKDOOR:
attack_ds_config: AttackDatasetConfig = attack_dataset
if attack_ds_config.type == 'semantic':
assert attack_ds_config.train != [] and attack_ds_config.test, \
"Must set train and test for a semantic backdoor!"
# Based on pre-chosen images
build_attack_selected_aux(dataset, x_train, y_train,
attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.target_label,
[], #config['backdoor_feature_benign_regular'],
attack_ds_config.remove_from_benign_dataset)
elif attack_ds_config.type == 'tasks':
# Construct 'backdoor tasks'
build_attack_backdoor_tasks(dataset, malicious_clients,
attack_ds_config.tasks,
[attack_ds_config.source_label, attack_ds_config.target_label],
attack_ds_config.aux_samples,
attack_ds_config.augment_times)
elif attack_ds_config.type == 'edge':
assert attack_ds_config.edge_case_type is not None, "Please specify an edge case type"
# We have to reload the dataset adding the benign samples.
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (benign_x, benign_y) =\
build_edge_case_attack(attack_ds_config.edge_case_type, attack_ds_config.edge_case_p,
config.dataset.normalize_mnist_data)
(dataset, (x_t_tst, _)) = get_dataset(config, attack_dataset, benign_x, benign_y)
(dataset.x_aux_train, dataset.mal_aux_labels_train), (dataset.x_aux_test, dataset.mal_aux_labels_test) = \
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test)
elif attack_ds_config.type == 'pixel_pattern':
# Pixel-pattern backdoor: only the test-set labels are rewritten (see build_pixel_pattern).
build_pixel_pattern(dataset, attack_ds_config.target_label)
else:
raise NotImplementedError(f"Backdoor type {attack_ds_config.type} not supported!")
return dataset
def build_attack_backdoor_tasks(dataset, malicious_clients,
backdoor_tasks, malicious_objective, aux_samples, augment_times):
dataset.build_global_aux(malicious_clients,
backdoor_tasks,
malicious_objective,
aux_samples,
augment_times)
def build_attack_selected_aux(ds, x_train, y_train,
backdoor_train_set, backdoor_test_set, backdoor_target,
benign_train_set_extra, remove_malicious_samples):
"""Builds attack based on selected backdoor images"""
(ds.x_aux_train, ds.y_aux_train), (ds.x_aux_test, ds.y_aux_test) = \
(x_train[np.array(backdoor_train_set)],
y_train[np.array(backdoor_train_set)]), \
(x_train[np.array(backdoor_test_set)],
y_train[np.array(backdoor_test_set)])
ds.mal_aux_labels_train = np.repeat(backdoor_target,
ds.y_aux_train.shape).astype(np.uint8)
ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
if benign_train_set_extra:
extra_train_x, extra_train_y = x_train[np.array(benign_train_set_extra)], \
y_train[np.array(benign_train_set_extra)]
ds.x_aux_train = np.concatenate([ds.x_aux_train, extra_train_x])
ds.y_aux_train = np.concatenate([ds.y_aux_train, extra_train_y])
ds.mal_aux_labels_train = np.concatenate([ds.mal_aux_labels_train, extra_train_y])
if remove_malicious_samples:
# np.delete is not in-place; reassign the results, and remove both index sets in
# one call so the second removal is not shifted by the first. (To propagate to the
# caller, these arrays would also have to be returned.)
removed = np.concatenate([np.array(backdoor_train_set), np.array(backdoor_test_set)])
x_train = np.delete(x_train, removed, axis=0)
y_train = np.delete(y_train, removed, axis=0)
def shuffle(x, y):
perms = np.random.permutation(x.shape[0])
return x[perms, :], y[perms]
def build_edge_case_attack(edge_case, adv_edge_case_p, normalize_mnist_data):
attack: EdgeCaseAttack = factory(edge_case)
(x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (orig_y_train, _) =\
attack.load()
if normalize_mnist_data:
emnist_mean, emnist_std = 0.036910772, 0.16115953
x_aux_train = (x_aux_train - emnist_mean) / emnist_std
x_aux_test = (x_aux_test - emnist_mean) / emnist_std
# If necessary, distribute edge_case samples by p
# TODO: Fix shuffle for orig_y_train, only not working when labels differ!!!
x_aux_train, mal_aux_labels_train = shuffle(x_aux_train, mal_aux_labels_train)
x_aux_test, mal_aux_labels_test = shuffle(x_aux_test, mal_aux_labels_test)
x_benign, y_benign = None, None
if adv_edge_case_p < 1.0:
# Some edge case values must be incorporated into the benign training set.
index = int(adv_edge_case_p * x_aux_train.shape[0])
x_benign, y_benign = x_aux_train[index:, :], orig_y_train[index:]
x_aux_train, mal_aux_labels_train = x_aux_train[:index, :], mal_aux_labels_train[:index]
return (x_aux_train, mal_aux_labels_train), (x_aux_test, mal_aux_labels_test), (x_benign, y_benign)
# Note: ds.y_aux_train, ds.y_aux_test not set
def build_pixel_pattern(ds, backdoor_target):
# (ds.x_aux_train, ds.y_aux_train), (ds.x_aux_test, ds.y_aux_test) = \
# (ds.x_train, ds.y_train), \
# (ds.x_test, ds. y_test)
# ds.mal_aux_labels_train = np.repeat(backdoor_target,
# ds.y_aux_train.shape).astype(np.uint8)
# ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
# Assign test set
(ds.x_aux_test, ds.y_aux_test) = \
(ds.x_test, ds. y_test)
ds.mal_aux_labels_test = np.repeat(backdoor_target, ds.y_aux_test.shape).astype(np.uint8)
def factory(classname):
from src.backdoor import edge_case_attack
cls = getattr(edge_case_attack, classname)
return cls()
def get_dataset(config, attack_ds_config, add_x_train=None, add_y_train=None):
"""
@param config:
@param attack_ds_config:
@param add_x_train: x_train samples to add to training set
@param add_y_train: y_train samples to add to training set
@return:
"""
dataset = config.dataset.dataset
number_of_samples = config.dataset.number_of_samples
data_distribution = config.dataset.data_distribution
normalize_mnist_data = config.dataset.normalize_mnist_data # Legacy
num_clients = config.environment.num_clients
if dataset == 'mnist':
(x_train, y_train), (x_test, y_test) = Dataset.get_mnist_dataset(number_of_samples)
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
if data_distribution == 'IID':
ds = IIDGlobalDataset(x_train, y_train, num_clients=num_clients, x_test=x_test, y_test=y_test)
else:
(x_train_dist, y_train_dist) = \
DirichletDistributionDivider(x_train, y_train, attack_ds_config.train,
attack_ds_config.test,
attack_ds_config.remove_from_benign_dataset,
num_clients).build()
ds = NonIIDGlobalDataset(x_train_dist, y_train_dist, x_test, y_test, num_clients=num_clients)
elif dataset == 'fmnist':
if data_distribution == 'IID':
(x_train, y_train), (x_test, y_test) = Dataset.get_fmnist_dataset(number_of_samples)
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
ds = IIDGlobalDataset(x_train, y_train, num_clients=num_clients, x_test=x_test, y_test=y_test)
else:
raise Exception('Distribution not supported')
elif dataset == 'femnist':
if data_distribution == 'IID':
(x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(number_of_samples,
num_clients,
normalize_mnist_data)
(x_train, y_train), (x_test, y_test) = (
Dataset.keep_samples(np.concatenate(x_train), np.concatenate(y_train), number_of_samples),
Dataset.keep_samples(np.concatenate(x_test), np.concatenate(y_test), number_of_samples))
if add_x_train is not None:
x_train = np.concatenate([x_train, add_x_train])
y_train = np.concatenate([y_train, add_y_train])
ds = IIDGlobalDataset(x_train, y_train, num_clients, x_test, y_test)
else:
(x_train, y_train), (x_test, y_test) = Dataset.get_emnist_dataset(number_of_samples,
num_clients,
normalize_mnist_data)
if add_x_train is not None:
# Here, x_train and y_train are already separated by handwriter. Add each extra
# sample to a randomly chosen handwriter. (enumerate so `i` indexes the added
# sample and `index` the handwriter; the original sliced add_x_train by handwriter
# index, silently dropping samples whenever index >= add_x_train.shape[0].)
handwriter_indices = np.random.choice(len(x_train), add_x_train.shape[0], replace=True)
for i, index in enumerate(handwriter_indices):
x_train[index] = np.concatenate([x_train[index], add_x_train[i:(i + 1), :]])
y_train[index] = np.concatenate([y_train[index], add_y_train[i:(i + 1)]])
for index in range(len(x_train)):
x_train[index], y_train[index] = shuffle(x_train[index], y_train[index])
ds = NonIIDGlobalDataset(x_train, y_train, np.concatenate(x_test), np.concatenate(y_test),
num_clients)
x_train, y_train = np.concatenate(x_train), np.concatenate(y_train)
from __future__ import print_function, division
import os
from os import listdir
from os.path import isfile, join
import sys
import pandas as pd
import random
import numpy as np
import copy
from operator import add
# root = os.path.join(os.getcwd().split('src')[0], 'src/defects')
# if root not in sys.path:
# sys.path.append(root)
import warnings
from mklaren.kernel.kinterface import Kinterface
from mklaren.kernel.kernel import *
from mklaren.projection.icd import ICD
from pdb import set_trace
from scipy.spatial.distance import pdist, squareform
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from multiprocessing import Pool, cpu_count
from threading import Thread
from multiprocessing import Queue
# import tl_algs
# from tl_algs import tca_plus
import SMOTE
from utils import *
import metrics
class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
#print(type(self._target))
if self._target is not None:
self._return = self._target(*self._args,
**self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
def apply_smote(df):
cols = df.columns
smt = SMOTE.smote(df)
df = smt.run()
df.columns = cols
return df
def _replaceitem(x):
if x >= 0.5:
return 0.5
else:
return 0.0
def _replaceitem_logistic(x):
if x >= 0.5:
return 1
else:
return 0
def prepare_data(project, metric):
data_path = '../data/merged_data_original/' + project + '.csv'
data_df = pd.read_csv(data_path)
data_df.rename(columns = {'Unnamed: 0':'id'},inplace = True)
for col in ['id', 'commit_hash', 'release']:
if col in data_df.columns:
data_df = data_df.drop([col], axis = 1)
data_df = data_df.dropna()
y = data_df.Bugs
X = data_df.drop(['Bugs'],axis = 1)
if metric == 'process':
X = X[['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr']]
elif metric == 'product':
X = X.drop(['file_la', 'file_ld', 'file_lt', 'file_age', 'file_ddev',
'file_nuc', 'own', 'minor', 'file_ndev', 'file_ncomm', 'file_adev',
'file_nadev', 'file_avg_nddev', 'file_avg_nadev', 'file_avg_ncomm',
'file_ns', 'file_exp', 'file_sexp', 'file_rexp', 'file_nd', 'file_sctr'],axis = 1)
else:
X = X
df = X
df['Bugs'] = y
return df
def get_kernel_matrix(dframe, n_dim=15):
"""
This returns a Kernel Transformation Matrix $\Theta$
It uses kernel approximation offered by the MKlaren package
For the sake of completeness (and for my peace of mind), I use the best possible approximation.
:param dframe: input data as a pandas dataframe.
:param n_dim: Number of dimensions for the kernel matrix (default=15)
:return: $\Theta$ matrix
"""
ker = Kinterface(data=dframe.values, kernel=linear_kernel)
model = ICD(rank=n_dim)
model.fit(ker)
g_nystrom = model.G
return g_nystrom
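# Usage sketch (not from the original file; the frame below is synthetic):
# df = pd.DataFrame(np.random.rand(20, 8))
# theta = get_kernel_matrix(df, n_dim=5)  # -> (20, 5) Nystrom factor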
def map_transform(src, tgt, n_components=5):
"""
Run a map and transform x and y onto a new space using TCA
:param src: IID samples
:param tgt: IID samples
:return: Mapped x and y
"""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
col_name = ["Col_" + str(i) for i in range(n_components)]
x0 = pd.DataFrame(get_kernel_matrix(S, n_components), columns=col_name)
y0 = pd.DataFrame(get_kernel_matrix(T, n_components), columns=col_name)
# set_trace()
x0.loc[:, src.columns[-1]] = pd.Series(src[src.columns[-1]], index=x0.index)
y0.loc[:, tgt.columns[-1]] = pd.Series(tgt[tgt.columns[-1]], index=y0.index)
return x0, y0
def create_model(train):
"""
:param train:
:type train:
:param test:
:type test:
:param weka:
:type weka:
:return:
"""
train = apply_smote(train)
train_y = train.Bugs
train_X = train.drop(labels = ['Bugs'],axis = 1)
clf = LogisticRegression()
clf.fit(train_X, train_y)
return clf
def predict_defects(clf, test):
test_y = test.Bugs
test_X = test.drop(labels = ['Bugs'],axis = 1)
predicted = clf.predict(test_X)
predicted_proba = clf.predict_proba(test_X)
return test_y, predicted, predicted_proba
def get_dcv(src, tgt):
"""Get dataset characteristic vector."""
s_col = [col for col in src.columns[:-1] if '?' not in col]
t_col = [col for col in tgt.columns[:-1] if '?' not in col]
S = src[s_col]
T = tgt[t_col]
def self_dist_mtx(arr):
dist_arr = pdist(arr)
return squareform(dist_arr)
dist_src = self_dist_mtx(S.values)
dist_tgt = self_dist_mtx(T.values)
dcv_src = [np.mean(dist_src), np.median(dist_src), np.min(dist_src), np.max(dist_src), np.std(dist_src),
len(S.values)]
dcv_tgt = [np.mean(dist_tgt), np.median(dist_tgt), np.min(dist_tgt), np.max(dist_tgt), np.std(dist_tgt),
len(T.values)]
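# (Truncated in the source; returning both characteristic vectors is assumed.)
return dcv_src, dcv_tgt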
"""
Tests for file IO.
They check the results of optimal control problems of the torque_driven_with_contact type, verifying the proper functioning of:
- the maximize/minimize_predicted_height_CoM objective
- the contact_forces_inequality constraint
- the non_slipping constraint
"""
import importlib.util
from pathlib import Path
import pytest
import numpy as np
from biorbd_optim import Data, OdeSolver
from .utils import TestUtils
PROJECT_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location(
"maximize_predicted_height_CoM",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/maximize_predicted_height_CoM.py",
)
maximize_predicted_height_CoM = importlib.util.module_from_spec(spec)
spec.loader.exec_module(maximize_predicted_height_CoM)
spec = importlib.util.spec_from_file_location(
"contact_forces_inequality_constraint",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
)
contact_forces_inequality_GREATER_THAN_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(contact_forces_inequality_GREATER_THAN_constraint)
spec = importlib.util.spec_from_file_location(
"contact_forces_inequality_constraint",
str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/contact_forces_inequality_constraint.py",
)
contact_forces_inequality_LESSER_THAN_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(contact_forces_inequality_LESSER_THAN_constraint)
spec = importlib.util.spec_from_file_location(
"non_slipping_constraint", str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/non_slipping_constraint.py",
)
non_slipping_constraint = importlib.util.module_from_spec(spec)
spec.loader.exec_module(non_slipping_constraint)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK])
def test_maximize_predicted_height_CoM(ode_solver):
ocp = maximize_predicted_height_CoM.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.5,
number_shooting_points=20,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.7592028279017864)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (160, 1))
np.testing.assert_almost_equal(g, np.zeros((160, 1)))
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.5, 0.5)))
np.testing.assert_almost_equal(q[:, -1], np.array((0.1189651, -0.0904378, -0.7999996, 0.7999996)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((1.2636414, -1.3010929, -3.6274687, 3.6274687)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-22.1218282)))
np.testing.assert_almost_equal(tau[:, -1], np.array(0.2653957))
# save and load
TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK])
def test_contact_forces_inequality_GREATER_THAN_constraint(ode_solver):
boundary = 50
ocp = contact_forces_inequality_GREATER_THAN_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.3,
number_shooting_points=10,
direction="GREATER_THAN",
boundary=boundary,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525621569048172)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(-g[80:], -boundary)
expected_pos_g = np.array(
[
[50.76491919],
[51.42493119],
[57.79007374],
[64.29551934],
[67.01905769],
[68.3225625],
[67.91793917],
[65.26700138],
[59.57311867],
[50.18463134],
[160.14834799],
[141.15361769],
[85.13345729],
[56.33535022],
[53.32684286],
[52.21679255],
[51.62923106],
[51.25728666],
[50.9871531],
[50.21972377],
]
)
np.testing.assert_almost_equal(g[80:], expected_pos_g)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
np.testing.assert_almost_equal(q[:, -1], np.array((-0.34054748, 0.1341555, -0.0005438, 0.0005438)))
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.01097559, 1.09352001e-03, 4.02195175, -4.02195175)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-54.1684018)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-15.69338332)))
# save and load
TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK])
def test_contact_forces_inequality_LESSER_THAN_constraint(ode_solver):
boundary = 100
ocp = contact_forces_inequality_LESSER_THAN_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.3,
number_shooting_points=10,
direction="LESSER_THAN",
boundary=boundary,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.14525619649247054)
# Check constraints
g = np.array(sol["g"])
np.testing.assert_equal(g.shape, (100, 1))
np.testing.assert_almost_equal(g[:80], np.zeros((80, 1)))
np.testing.assert_array_less(g[80:], boundary)
expected_non_zero_g = np.array(
[
[63.27237842],
[63.02339946],
[62.13898369],
[60.38380769],
[57.31193141],
[52.19952395],
[43.9638679],
[31.14938032],
[12.45022537],
[-6.35179034],
[99.06328211],
[98.87711942],
[98.64440005],
[98.34550037],
[97.94667107],
[97.38505013],
[96.52820867],
[95.03979128],
[91.73734926],
[77.48803304],
]
)
np.testing.assert_almost_equal(g[80:], expected_non_zero_g)
# Check some of the results
states, controls = Data.get_data(ocp, sol["x"])
q, qdot, tau = states["q"], states["q_dot"], controls["tau"]
# initial and final position
np.testing.assert_almost_equal(q[:, 0], np.array((0.0, 0.0, -0.75, 0.75)))
np.testing.assert_almost_equal(
q[:, -1], np.array((-3.40655617e-01, 1.34155544e-01, -3.27530886e-04, 3.27530886e-04))
)
# initial and final velocities
np.testing.assert_almost_equal(qdot[:, 0], np.array((0, 0, 0, 0)))
np.testing.assert_almost_equal(qdot[:, -1], np.array((-2.86650427, 9.38827988e-04, 5.73300901, -5.73300901)))
# initial and final controls
np.testing.assert_almost_equal(tau[:, 0], np.array((-32.78862874)))
np.testing.assert_almost_equal(tau[:, -1], np.array((-25.23729156)))
# save and load
TestUtils.save_and_load(sol, ocp, False)
@pytest.mark.parametrize("ode_solver", [OdeSolver.RK])
def test_non_slipping_constraint(ode_solver):
ocp = non_slipping_constraint.prepare_ocp(
model_path=str(PROJECT_FOLDER) + "/examples/torque_driven_with_contact/2segments_4dof_2contacts.bioMod",
phase_time=0.6,
number_shooting_points=10,
mu=0.005,
)
sol = ocp.solve()
# Check objective function value
f = np.array(sol["f"])
np.testing.assert_equal(f.shape, (1, 1))
np.testing.assert_almost_equal(f[0, 0], 0.23984490846250128)
# Check constraints
g = np.array(sol["g"])
#!/usr/bin/env python3
import numpy as np
import random
import sys
from time import sleep
try:
import sounddevice as sd
except ImportError:
sd = None
P = {"r": "rock", "p": "paper", "s": "scissors"}
FS = 44100 # needs to be int, non-44100 may not work on HDMI
def main():
print_choices()
if len(sys.argv) == 2:
N = int(sys.argv[1])
else:
N = 10
stat = 0
wins = 0
losses = 0
for _ in range(N):
computer = random.choice(list(P.keys()))
try:
human = input()
if human not in P.keys():
print_choices()
continue
except EOFError:
print("goodbye")
break
if human == computer:
print("draw!")
stat = 0
elif human == "r":
if computer == "s":
print("Human win: ⭖ smashes ✄")
stat = 1
else:
print("computer wins: 📰 covers ⭖")
stat = -1
elif human == "s":
if computer == "r":
print("computer wins: ⭖ smashes ✄")
stat = -1
else:
print("human wins: ✄ cuts 📰")
stat = 1
elif human == "p":
if computer == "s":
print("computer wins: ✄ cuts 📰")
stat = -1
else:
print("human wins: 📰 covers ⭖")
stat = 1
if stat == 1:
wins += 1
elif stat == -1:
losses += 1
feedback(stat)
print("Wins:", wins, "losses:", losses)
def print_choices():
for k, v in P.items():
print(k, v, end=" ")
def feedback(stat: int):
global sd
if sd is None:
return
if stat == -1:
f1 = 700.0
f2 = 400.0
elif stat == 1:
f1 = 400.0
f2 = 700.0
else:
return
T = 0.3
tp = 0.2
t = np.arange(0, T + tp, 1 / FS)
Nt = t.size
Np = int(tp * FS)  # leading-gap length in samples (the original `tp // FS` always gave 0)
ih = Np + (Nt - Np) // 2  # switch from f1 to f2 halfway through the tone portion
sound = np.zeros_like(t)  # zeros so the leading gap is silence, not uninitialized memory
sound[Np:ih] = np.sin(2 * np.pi * f1 * t[Np:ih])
sound[ih:] = np.sin(2 * np.pi * f2 * t[ih:])
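# (Truncated in the source; actually emitting the tone is the natural completion.)
sd.play(sound, FS)
sd.wait()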
from argparse import ArgumentParser
import os
import numpy as np
from joblib import dump
from mldftdat.workflow_utils import SAVE_ROOT
from mldftdat.models.gp import *
from mldftdat.data import load_descriptors, filter_descriptors
import yaml
def parse_settings(args):
fname = args.datasets_list[0]
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
with open(os.path.join(fname, 'settings.yaml'), 'r') as f:
d = yaml.load(f, Loader=yaml.Loader)
args.gg_a0 = d.get('a0')
args.gg_amin = d.get('amin')
args.gg_facmul = d.get('fac_mul')
def parse_dataset(args, i, val=False):
if val:
fname = args.validation_set[2*i]
n = int(args.validation_set[2*i+1])
else:
fname = args.datasets_list[2*i]
n = int(args.datasets_list[2*i+1])
if args.suffix is not None:
fname = fname + '_' + args.suffix
fname = os.path.join(SAVE_ROOT, 'DATASETS', args.functional,
args.basis, args.version, fname)
print(fname)
X, y, rho_data = load_descriptors(fname)
if val:
# offset in case repeat datasets are used
X, y, rho_data = X[n//2+1:,:], y[n//2+1:], rho_data[:,n//2+1:]
X, y, rho, rho_data = filter_descriptors(X, y, rho_data,
tol=args.density_cutoff)
print(X.shape, n)
if args.randomize:
inds = np.arange(X.shape[0])
np.random.shuffle(inds)
X = X[inds,:]
y = y[inds]
rho = rho[inds]
rho_data = rho_data[:,inds]
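# Keep every n-th sample: n is the inverse sampling density paired with each dataset name.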
return X[::n,:], y[::n], rho[::n], rho_data[:,::n]
def parse_list(lststr, T=int):
return [T(substr) for substr in lststr.split(',')]
def main():
parser = ArgumentParser(description='Trains a GP exchange model')
parser.add_argument('save_file', type=str)
parser.add_argument('feature_file', type=str,
help='serialized FeatureList object in yaml format')
parser.add_argument('datasets_list', nargs='+',
help='pairs of dataset names and inverse sampling densities')
parser.add_argument('basis', metavar='basis', type=str,
help='basis set code')
parser.add_argument('--functional', metavar='functional', type=str, default=None,
help='exchange-correlation functional, HF for Hartree-Fock')
parser.add_argument('-r', '--randomize', action='store_true')
parser.add_argument('-c', '--density-cutoff', type=float, default=1e-4)
#parser.add_argument('-m', '--model-class', type=str, default=None)
#parser.add_argument('-k', '--kernel', help='kernel initialization strategy', type=str, default=None)
parser.add_argument('-s', '--seed', help='random seed', default=0, type=int)
parser.add_argument('-vs', '--validation-set', nargs='+')
parser.add_argument('-d', '--delete-k', action='store_true',
help='Delete L (LL^T=K the kernel matrix) to save disk space. Need to refit when reloading to calculate covariance.')
parser.add_argument('--heg', action='store_true', help='HEG exact constraint')
parser.add_argument('--tail', action='store_true', help='atomic tail exact constraint')
parser.add_argument('-o', '--desc-order', default=None,
help='comma-separated list of descriptor order with no spaces. must start with 0,1.')
parser.add_argument('-l', '--length-scale', default=None,
help='comma-separated list initial length-scale guesses')
parser.add_argument('--length-scale-mul', type=float, default=1.0,
help='Used for automatic length-scale initial guess')
parser.add_argument('-a', '--agpr', action='store_true',
help='Whether to use Additive RBF. If False, use RBF')
parser.add_argument('-as', '--agpr-scale', default=None)
parser.add_argument('-ao', '--agpr-order', default=2, type=int)
parser.add_argument('-an', '--agpr-nsingle', default=1, type=int)
parser.add_argument('-x', '--xed-y-code', default='CHACHIYO', type=str)
parser.add_argument('-on', '--optimize-noise', action='store_true',
help='Whether to optimzie exponent of density noise.')
parser.add_argument('-v', '--version', default='c', type=str,
help='version of descriptor set. Default c')
parser.add_argument('--suffix', default=None, type=str,
help='customize data directories with this suffix')
args = parser.parse_args()
parse_settings(args)
np.random.seed(args.seed)
feature_list = FeatureList.load(args.feature_file)
if args.length_scale is not None:
args.length_scale = parse_list(args.length_scale, T=float)
if args.agpr_scale is not None:
args.agpr_scale = parse_list(args.agpr_scale, T=float)
if args.desc_order is not None:
args.desc_order = parse_list(args.desc_order)
assert len(args.datasets_list) % 2 == 0, 'Need pairs of entries for datasets list.'
assert len(args.datasets_list) != 0, 'Need training data'
nd = len(args.datasets_list) // 2
if args.validation_set is None:
nv = 0
else:
assert len(args.validation_set) % 2 == 0, 'Need pairs of entries for datasets list.'
nv = len(args.validation_set) // 2
X, y, rho, rho_data = parse_dataset(args, 0)
for i in range(1, nd):
Xn, yn, rhon, rho_datan, = parse_dataset(args, i)
X = np.append(X, Xn, axis=0)
y = np.append(y, yn, axis=0)
rho = np.append(rho, rhon, axis=0)
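# (Truncated in the source; stacking rho_data along its sample axis is assumed.)
rho_data = np.append(rho_data, rho_datan, axis=1)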
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
from ..config import ConfigValidator, StringField, NumberField
from ..representation import (ClassificationPrediction, DetectionPrediction, ReIdentificationPrediction,
SegmentationPrediction, CharacterRecognitionPrediction, ContainerPrediction,
RegressionPrediction, FacialLandmarksPrediction, MultilabelRecognitionPrediction,
SuperResolutionPrediction)
from ..utils import get_or_parse_value
from .adapter import Adapter
class ClassificationAdapter(Adapter):
"""
Class for converting output of classification model to ClassificationPrediction representation
"""
__provider__ = 'classification'
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of ClassificationPrediction objects
"""
prediction = raw[self.output_blob]
prediction = np.reshape(prediction, (prediction.shape[0], -1))
result = []
for identifier, output in zip(identifiers, prediction):
result.append(ClassificationPrediction(identifier, output))
return result
class SegmentationAdapter(Adapter):
__provider__ = 'segmentation'
def process(self, raw, identifiers=None, frame_meta=None):
prediction = raw[self.output_blob]
result = []
for identifier, output in zip(identifiers, prediction):
result.append(SegmentationPrediction(identifier, output))
return result
class TinyYOLOv1Adapter(Adapter):
"""
Class for converting output of Tiny YOLO v1 model to DetectionPrediction representation
"""
__provider__ = 'tiny_yolo_v1'
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of DetectionPrediction objects
"""
prediction = raw[self.output_blob]
PROBABILITY_SIZE = 980
CONFIDENCE_SIZE = 98
BOXES_SIZE = 392
CELLS_X, CELLS_Y = 7, 7
CLASSES = 20
OBJECTS_PER_CELL = 2
result = []
for identifier, output in zip(identifiers, prediction):
assert PROBABILITY_SIZE + CONFIDENCE_SIZE + BOXES_SIZE == output.shape[0]
probability, scale, boxes = np.split(output, [PROBABILITY_SIZE, PROBABILITY_SIZE + CONFIDENCE_SIZE])
probability = np.reshape(probability, (CELLS_Y, CELLS_X, CLASSES))
scale = np.reshape(scale, (CELLS_Y, CELLS_X, OBJECTS_PER_CELL))
boxes = np.reshape(boxes, (CELLS_Y, CELLS_X, OBJECTS_PER_CELL, 4))
confidence = np.zeros((CELLS_Y, CELLS_X, OBJECTS_PER_CELL, CLASSES + 4))
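# confidence[y, x, k, cls] = class probability scaled by box k's objectness for cell (x, y).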
for cls in range(CLASSES):
confidence[:, :, 0, cls] = np.multiply(probability[:, :, cls], scale[:, :, 0])
confidence[:, :, 1, cls] = np.multiply(probability[:, :, cls], scale[:, :, 1])
labels, scores, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], [], []
for i, j, k in np.ndindex((CELLS_X, CELLS_Y, OBJECTS_PER_CELL)):
box = boxes[j, i, k]
box = [(box[0] + i) / float(CELLS_X), (box[1] + j) / float(CELLS_Y), box[2] ** 2, box[3] ** 2]
label = np.argmax(confidence[j, i, k, :CLASSES])
score = confidence[j, i, k, label]
labels.append(label + 1)
scores.append(score)
x_mins.append(box[0] - box[2] / 2.0)
y_mins.append(box[1] - box[3] / 2.0)
x_maxs.append(box[0] + box[2] / 2.0)
y_maxs.append(box[1] + box[3] / 2.0)
result.append(DetectionPrediction(identifier, labels, scores, x_mins, y_mins, x_maxs, y_maxs))
return result
class ReidAdapter(Adapter):
"""
Class for converting output of Reid model to ReIdentificationPrediction representation
"""
__provider__ = 'reid'
def configure(self):
"""
Specifies parameters of config entry
"""
self.grn_workaround = self.launcher_config.get("grn_workaround", True)
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of ReIdentificationPrediction objects
"""
prediction = raw[self.output_blob]
if self.grn_workaround:
# workaround: GRN layer
prediction = self._grn_layer(prediction)
return [ReIdentificationPrediction(identifier, embedding.reshape(-1))
for identifier, embedding in zip(identifiers, prediction)]
@staticmethod
def _grn_layer(prediction):
GRN_BIAS = 0.000001
sum_ = np.sum(prediction ** 2, axis=1)
prediction = prediction / np.sqrt(sum_[:, np.newaxis] + GRN_BIAS)
return prediction
class YoloV2AdapterConfig(ConfigValidator):
classes = NumberField(floats=False, optional=True, min_value=1)
coords = NumberField(floats=False, optional=True, min_value=1)
num = NumberField(floats=False, optional=True, min_value=1)
anchors = StringField(optional=True)
PRECOMPUTED_ANCHORS = {
'yolo_v2': [1.3221, 1.73145, 3.19275, 4.00944, 5.05587, 8.09892, 9.47112, 4.84053, 11.2364, 10.0071],
'tiny_yolo_v2': [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52]
}
class YoloV2Adapter(Adapter):
"""
Class for converting output of YOLO v2 family models to DetectionPrediction representation
"""
__provider__ = 'yolo_v2'
def validate_config(self):
yolo_v2_adapter_config = YoloV2AdapterConfig('YoloV2Adapter_Config')
yolo_v2_adapter_config.validate(self.launcher_config)
def configure(self):
self.classes = self.launcher_config.get('classes', 20)
self.coords = self.launcher_config.get('coords', 4)
self.num = self.launcher_config.get('num', 5)
self.anchors = get_or_parse_value(self.launcher_config.get('anchors', 'yolo_v2'), PRECOMPUTED_ANCHORS)
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of DetectionPrediction objects
"""
predictions = raw[self.output_blob]
def entry_index(w, h, n_coords, n_classes, pos, entry):
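# Index into the flattened YOLO v2 output, laid out as (num, coords + classes + 1, h * w).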
row = pos // (w * h)
col = pos % (w * h)
return row * w * h * (n_classes + n_coords + 1) + entry * w * h + col
cells_x, cells_y = 13, 13
result = []
for identifier, prediction in zip(identifiers, predictions):
labels, scores, x_mins, y_mins, x_maxs, y_maxs = [], [], [], [], [], []
for y, x, n in np.ndindex((cells_y, cells_x, self.num)):
index = n * cells_y * cells_x + y * cells_x + x
box_index = entry_index(cells_x, cells_y, self.coords, self.classes, index, 0)
obj_index = entry_index(cells_x, cells_y, self.coords, self.classes, index, self.coords)
scale = prediction[obj_index]
box = [
(x + prediction[box_index + 0 * (cells_y * cells_x)]) / cells_x,
(y + prediction[box_index + 1 * (cells_y * cells_x)]) / cells_y,
np.exp(prediction[box_index + 2 * (cells_y * cells_x)]) * self.anchors[2 * n + 0] / cells_x,
np.exp(prediction[box_index + 3 * (cells_y * cells_x)]) * self.anchors[2 * n + 1] / cells_y
]
classes_prob = np.empty(self.classes)
for cls in range(self.classes):
cls_index = entry_index(cells_x, cells_y, self.coords, self.classes, index, self.coords + 1 + cls)
classes_prob[cls] = prediction[cls_index]
classes_prob = classes_prob * scale
label = np.argmax(classes_prob)
labels.append(label + 1)
scores.append(classes_prob[label])
x_mins.append(box[0] - box[2] / 2.0)
y_mins.append(box[1] - box[3] / 2.0)
x_maxs.append(box[0] + box[2] / 2.0)
y_maxs.append(box[1] + box[3] / 2.0)
result.append(DetectionPrediction(identifier, labels, scores, x_mins, y_mins, x_maxs, y_maxs))
return result
class LPRAdapter(Adapter):
__provider__ = 'lpr'
def process(self, raw, identifiers=None, frame_meta=None):
predictions = raw[self.output_blob]
result = []
for identifier, output in zip(identifiers, predictions):
decoded_out = self.decode(output.reshape(-1))
result.append(CharacterRecognitionPrediction(identifier, decoded_out))
return result
def decode(self, outputs):
decode_out = str()
for output in outputs:
if output == -1:
break
decode_out += str(self.label_map[output])
return decode_out
class SSDAdapter(Adapter):
"""
Class for converting output of SSD model to DetectionPrediction representation
"""
__provider__ = 'ssd'
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of DetectionPrediction objects
"""
prediction_batch = raw[self.output_blob]
prediction_count = prediction_batch.shape[2]
prediction_batch = prediction_batch.reshape(prediction_count, -1)
prediction_batch = self.remove_empty_detections(prediction_batch)
result = []
for batch_index, identifier in enumerate(identifiers):
prediction_mask = np.where(prediction_batch[:, 0] == batch_index)
detections = prediction_batch[prediction_mask]
detections = detections[:, 1::]
result.append(DetectionPrediction(identifier, *zip(*detections)))
return result
@staticmethod
def remove_empty_detections(prediction_blob):
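# Detections are padded with image_id == -1; keep only the rows before the first padding row.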
ind = prediction_blob[:, 0]
ind_ = np.where(ind == -1)[0]
m = ind_[0] if ind_.size else prediction_blob.shape[0]
return prediction_blob[:m, :]
class FacePersonDetectionAdapterConfig(ConfigValidator):
type = StringField()
face_out = StringField()
person_out = StringField()
class FacePersonAdapter(Adapter):
__provider__ = 'face_person_detection'
def validate_config(self):
face_person_detection_adapter_config = FacePersonDetectionAdapterConfig(
'FacePersonDetection_Config', on_extra_argument=FacePersonDetectionAdapterConfig.ERROR_ON_EXTRA_ARGUMENT)
face_person_detection_adapter_config.validate(self.launcher_config)
def configure(self):
self.face_detection_out = self.launcher_config['face_out']
self.person_detection_out = self.launcher_config['person_out']
self.face_adapter = SSDAdapter(self.launcher_config, self.label_map, self.face_detection_out)
self.person_adapter = SSDAdapter(self.launcher_config, self.label_map, self.person_detection_out)
def process(self, raw, identifiers=None, frame_meta=None):
face_batch_result = self.face_adapter(raw, identifiers)
person_batch_result = self.person_adapter(raw, identifiers)
result = [ContainerPrediction({self.face_detection_out: face_result, self.person_detection_out: person_result})
for face_result, person_result in zip(face_batch_result, person_batch_result)]
return result
class HeadPoseEstimatorAdapterConfig(ConfigValidator):
type = StringField()
angle_yaw = StringField()
angle_pitch = StringField()
angle_roll = StringField()
class HeadPoseEstimatorAdapter(Adapter):
"""
Class for converting output of HeadPoseEstimator to HeadPosePrediction representation
"""
__provider__ = 'head_pose'
def validate_config(self):
head_pose_estimator_adapter_config = HeadPoseEstimatorAdapterConfig(
'HeadPoseEstimator_Config', on_extra_argument=HeadPoseEstimatorAdapterConfig.ERROR_ON_EXTRA_ARGUMENT)
head_pose_estimator_adapter_config.validate(self.launcher_config)
def configure(self):
"""
Specifies parameters of config entry
"""
self.angle_yaw = self.launcher_config['angle_yaw']
self.angle_pitch = self.launcher_config['angle_pitch']
self.angle_roll = self.launcher_config['angle_roll']
def process(self, raw, identifiers=None, frame_meta=None):
"""
Args:
identifiers: list of input data identifiers
raw: output of model
Returns:
list of ContainerPrediction objects
"""
result = []
for identifier, yaw, pitch, roll in zip(
identifiers, raw[self.angle_yaw], raw[self.angle_pitch], raw[self.angle_roll]):
prediction = ContainerPrediction({'angle_yaw': RegressionPrediction(identifier, yaw[0]),
'angle_pitch': RegressionPrediction(identifier, pitch[0]),
'angle_roll': RegressionPrediction(identifier, roll[0])})
result.append(prediction)
return result
class VehicleAttributesRecognitionAdapterConfig(ConfigValidator):
type = StringField()
color_out = StringField()
type_out = StringField()
class VehicleAttributesRecognitionAdapter(Adapter):
__provider__ = 'vehicle_attributes'
def validate_config(self):
attributes_recognition_adapter_config = VehicleAttributesRecognitionAdapterConfig(
'VehicleAttributesRecognition_Config',
on_extra_argument=VehicleAttributesRecognitionAdapterConfig.ERROR_ON_EXTRA_ARGUMENT)
attributes_recognition_adapter_config.validate(self.launcher_config)
def configure(self):
"""
Specifies parameters of config entry
"""
self.color_out = self.launcher_config['color_out']
self.type_out = self.launcher_config['type_out']
def process(self, raw, identifiers=None, frame_meta=None):
res = []
for identifier, colors, types in zip(identifiers,
raw[self.color_out], raw[self.type_out]):
res.append(ContainerPrediction({'color': ClassificationPrediction(identifier, colors.reshape(-1)),
'type': ClassificationPrediction(identifier, types.reshape(-1))}))
return res
class AgeGenderAdapterConfig(ConfigValidator):
type = StringField()
age_out = StringField()
gender_out = StringField()
class AgeGenderAdapter(Adapter):
__provider__ = 'age_gender'
def configure(self):
self.age_out = self.launcher_config['age_out']
self.gender_out = self.launcher_config['gender_out']
def validate_config(self):
age_gender_adapter_config = AgeGenderAdapterConfig(
'AgeGender_Config', on_extra_argument=AgeGenderAdapterConfig.ERROR_ON_EXTRA_ARGUMENT)
age_gender_adapter_config.validate(self.launcher_config)
@staticmethod
def get_age_scores(age):
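# One-hot age buckets: <19, 19-35, 36-65, 66+.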
age_scores = np.zeros(4)
if age < 19:
age_scores[0] = 1
return age_scores
if age < 36:
age_scores[1] = 1
return age_scores
if age < 66:
age_scores[2] = 1
return age_scores
age_scores[3] = 1
return age_scores
def process(self, raw, identifiers=None, frame_meta=None):
result = []
for identifier, age, gender in zip(identifiers, raw[self.age_out], raw[self.gender_out]):
gender = gender.reshape(-1)
age = age.reshape(-1)[0]*100
gender_rep = ClassificationPrediction(identifier, gender)
age_claas_rep = ClassificationPrediction(identifier, self.get_age_scores(age))
age_error_rep = RegressionPrediction(identifier, age)
result.append(ContainerPrediction({'gender': gender_rep, 'age_classification': age_claas_rep,
'age_error': age_error_rep}))
return result
class LandmarksRegressionAdapter(Adapter):
__provider__ = 'landmarks_regression'
def process(self, raw, identifiers=None, frame_meta=None):
res = []
for identifier, values in zip(identifiers, raw[self.output_blob]):
x_values, y_values = values[::2], values[1::2]
res.append(FacialLandmarksPrediction(identifier, x_values.reshape(-1), y_values.reshape(-1)))
return res
class PersonAttributesAdapter(Adapter):
__provider__ = 'person_attributes'
def process(self, raw, identifiers=None, frame_meta=None):
result = []
for identifier, multi_label in zip(identifiers, raw[self.output_blob]):
multi_label[np.where(multi_label >= 0.5)] = 1.
multi_label[np.where(multi_label < 0.5)] = 0.
result.append(MultilabelRecognitionPrediction(identifier, multi_label.reshape(-1)))
return result
class ActionDetectorConfig(ConfigValidator):
type = StringField()
priorbox_out = StringField()
loc_out = StringField()
main_conf_out = StringField()
add_conf_out_prefix = StringField()
add_conf_out_count = NumberField(optional=True, min_value=1)
num_action_classes = NumberField()
detection_threshold = NumberField(optional=True, floats=True, min_value=0, max_value=1)
class ActionDetection(Adapter):
__provider__ = 'action_detection'
def validate_config(self):
action_detector_adapter_config = ActionDetectorConfig('ActionDetector_Config')
action_detector_adapter_config.validate(self.launcher_config)
def configure(self):
self.priorbox_out = self.launcher_config['priorbox_out']
self.loc_out = self.launcher_config['loc_out']
self.main_conf_out = self.launcher_config['main_conf_out']
self.num_action_classes = self.launcher_config['num_action_classes']
self.detection_threshold = self.launcher_config.get('detection_threshold', 0)
add_conf_out_count = self.launcher_config.get('add_conf_out_count')
add_conf_out_prefix = self.launcher_config['add_conf_out_prefix']
if add_conf_out_count is None:
self.add_conf_outs = [add_conf_out_prefix]
else:
self.add_conf_outs = []
for num in np.arange(start=1, stop=add_conf_out_count + 1):
self.add_conf_outs.append('{}{}'.format(add_conf_out_prefix, num))
def process(self, raw, identifiers=None, frame_meta=None):
result = []
prior_boxes = raw[self.priorbox_out][0][0].reshape(-1, 4)
prior_variances = raw[self.priorbox_out][0][1].reshape(-1, 4)
for batch_id, identifier in enumerate(identifiers):
labels, class_scores, x_mins, y_mins, x_maxs, y_maxs, main_scores = self.prepare_detection_for_id(
batch_id, raw, prior_boxes, prior_variances
)
action_prediction = DetectionPrediction(identifier, labels, class_scores, x_mins, y_mins, x_maxs, y_maxs)
person_prediction = DetectionPrediction(
identifier, [1] * len(labels), main_scores, x_mins, y_mins, x_maxs, y_maxs
)
result.append(ContainerPrediction({
'action_prediction': action_prediction, 'class_agnostic_prediction': person_prediction
}))
return result
def prepare_detection_for_id(self, batch_id, raw_outputs, prior_boxes, prior_variances):
num_detections = raw_outputs[self.loc_out][batch_id].size // 4
locs = raw_outputs[self.loc_out][batch_id].reshape(-1, 4)
main_conf = raw_outputs[self.main_conf_out][batch_id].reshape(num_detections, -1)
add_confs = list(map(
lambda layer: raw_outputs[layer][batch_id].reshape(-1, self.num_action_classes), self.add_conf_outs
))
anchors_num = len(add_confs)
labels, class_scores, x_mins, y_mins, x_maxs, y_maxs, main_scores = [], [], [], [], [], [], []
for index in range(num_detections):
if main_conf[index, 1] < self.detection_threshold:
continue
x_min, y_min, x_max, y_max = self.decode_box(prior_boxes[index], prior_variances[index], locs[index])
action_confs = add_confs[index % anchors_num][index // anchors_num]
action_label = np.argmax(action_confs)
import numpy as np
import numpy.linalg as LA
import scipy.sparse as sp
from scipy.stats.mstats import gmean
from time import time
from multiprocessing import Process, Pipe
import sys, os, warnings
from a2dr.precondition import precondition
from a2dr.acceleration import aa_weights
from a2dr.utilities import get_version
sys_stdout_origin = sys.stdout
def map_g(p_list,A,b,v,t,dk,n_list_cumsum):
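# One evaluation of the DRS fixed-point map: block proximal steps, reflection (2x - v),
# projection onto the affine constraint Ax = b via warm-started LSQR, then v + f.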
N=len(p_list)
#
v_list = [v[n_list_cumsum[i]:n_list_cumsum[i+1]] for i in range(N) ]
x_list = [p_list[i](v_list[i],t) for i in range(N)]
x_half = np.concatenate(x_list, axis=0)
v_half = 2*x_half-v
dk = sp.linalg.lsqr(A, A.dot(v_half) - b, atol=1e-10, btol=1e-10, x0=dk)[0]
x_new = v_half - dk
f = x_new - x_half
v_new = v + f
return v_new, f, dk, x_half, x_half - v
def lmaa(p_list, A_list=[], b=np.array([]), v_init=None, n_list=None, *args, **kwargs):
start = time()
# Problem parameters.
max_iter = kwargs.pop("max_iter", 1000)
t_init = kwargs.pop("t_init", 1 / 10) # Step size.
eps_abs = kwargs.pop("eps_abs", 1e-6) # Absolute stopping tolerance.
eps_rel = kwargs.pop("eps_rel", 1e-8) # Relative stopping tolerance.
precond = kwargs.pop("precond", True) # Precondition A and b?
ada_reg = kwargs.pop("ada_reg", True) # Adaptive regularization?
# AA-II parameters.
anderson = kwargs.pop("anderson", True)
m_accel = int(kwargs.pop("m_accel", 10)) # Maximum past iterations to keep (>= 0).
lam_accel = kwargs.pop("lam_accel", 1e-8) # AA-II regularization weight.
aa_method = kwargs.pop("aa_method", "lstsq") # Algorithm for solving AA LS problem.
# Safeguarding parameters.
D_safe = kwargs.pop("D_safe", 1e6)
eps_safe = kwargs.pop("eps_safe", 1e-6)
M_safe = kwargs.pop("M_safe", int(max_iter / 100))
c = kwargs.pop("c", 1-1e-6)
# Printout parameters
verbose = kwargs.pop("verbose", True)
# Validate parameters.
if max_iter <= 0:
raise ValueError("max_iter must be a positive integer.")
if t_init <= 0:
raise ValueError("t_init must be a positive scalar.")
if eps_abs < 0:
raise ValueError("eps_abs must be a non-negative scalar.")
if eps_rel < 0:
raise ValueError("eps_rel must be a non-negative scalar.")
if m_accel <= 0:
raise ValueError("m_accel must be a positive integer.")
if lam_accel < 0:
raise ValueError("lam_accel must be a non-negative scalar.")
if not aa_method in ["lstsq", "lsqr"]:
raise ValueError("aa_method must be either 'lstsq' or 'lsqr'.")
# if D_safe < 0:
# raise ValueError("D_safe must be a non-negative scalar.")
# if eps_safe < 0:
# raise ValueError("eps_safe must be a non-negative scalar.")
# if M_safe <= 0:
# raise ValueError("M_safe must be a positive integer.")
# DRS parameters.
N = len(p_list) # Number of subproblems.
has_constr = len(A_list) != 0
if len(A_list) == 0:
if b.size != 0:
raise ValueError("Dimension mismatch: nrow(A_i) != nrow(b)")
if n_list is not None:
if len(n_list) != N:
raise ValueError("n_list must have exactly {} entries".format(N))
A_list = [sp.csr_matrix((0, ni)) for ni in n_list]
elif v_init is not None:
if len(v_init) != N:
raise ValueError("v_init must be None or contain exactly {} entries".format(N))
A_list = [sp.csr_matrix((0, vi.shape[0])) for vi in v_init]
else:
raise ValueError("n_list or v_init must be defined if A_list and b are empty")
if len(A_list) != N:
raise ValueError("A_list must be empty or contain exactly {} entries".format(N))
if v_init is None:
# v_init = [np.random.randn(A.shape[1]) for A in A_list]
v_init = [np.zeros(A.shape[1]) for A in A_list]
# v_init = [sp.csc_matrix((A.shape[1],1)) for A in A_list]
if len(v_init) != N:
raise ValueError("v_init must be None or contain exactly {} entries".format(N))
# Variable size list.
if n_list is None:
n_list = [A_list[i].shape[1] for i in range(N)]
if len(n_list) != N:
raise ValueError("n_list must be None or contain exactly {} entries".format(N))
n_list_cumsum = np.insert(np.cumsum(n_list), 0, 0)
for i in range(N):
if A_list[i].shape[0] != b.shape[0]:
raise ValueError("Dimension mismatch: nrow(A_i) != nrow(b)")
elif A_list[i].shape[1] != v_init[i].shape[0]:
raise ValueError("Dimension mismatch: ncol(A_i) != nrow(v_i)")
elif A_list[i].shape[1] != n_list[i]:
raise ValueError("Dimension mismatch: ncol(A_i) != n_i")
if not sp.issparse(A_list[i]):
A_list[i] = sp.csr_matrix(A_list[i])
if verbose:
version = get_version("__init__.py")
line_solver = "a2dr v" + version + " - Prox-Affine Distributed Convex Optimization Solver"
dashes = "-" * len(line_solver)
ddashes = "=" * len(line_solver)
line_authors = "(c) <NAME>, <NAME>"
num_spaces_authors = (len(line_solver) - len(line_authors)) // 2
line_affil = "Stanford University 2019"
num_spaces_affil = (len(line_solver) - len(line_affil)) // 2
print(dashes)
print(line_solver)
print(" " * num_spaces_authors + line_authors)
print(" " * num_spaces_affil + line_affil)
print(dashes)
# Precondition data.
if precond and has_constr:
if verbose:
print('### Preconditioning starts ... ###')
p_list, A_list, b, e_pre = precondition(p_list, A_list, b)
t_init = 1 / gmean(e_pre) ** 2 / 10
if verbose:
print('### Preconditioning finished. ###')
sigma0 = kwargs.pop("sigma0", 1e-10)
sigma1 = kwargs.pop("sigma1", 1)
c=max((t_init*sigma1-1)/(t_init*sigma1+1),(1-t_init*sigma0)/(1+t_init*sigma0))
c=np.sqrt((3+c**2))/2
if verbose:
print("max_iter = {}, t_init (after preconditioning) = {:.2f}".format(
max_iter, t_init))
print("eps_abs = {:.2e}, eps_rel = {:.2e}, precond = {!r}".format(
eps_abs, eps_rel, precond))
print("ada_reg = {!r}, anderson = {!r}, m_accel = {}".format(
ada_reg, anderson, m_accel))
print("lam_accel = {:.2e}, aa_method = {}, D_safe = {:.2e}".format(
lam_accel, aa_method, D_safe))
print("eps_safe = {:.2e}, M_safe = {:d}".format(
eps_safe, M_safe))
# Store constraint matrix for projection step.
A = sp.csr_matrix(sp.hstack(A_list))
if verbose:
print("variables n = {}, constraints m = {}".format(A.shape[1], A.shape[0]))
print("nnz(A) = {}".format(A.nnz))
print("Setup time: {:.2e}".format(time() - start))
# Check linear feasibility
sys.stdout = open(os.devnull, 'w')
r1norm = sp.linalg.lsqr(A, b)[3]
sys.stdout.close()
sys.stdout = sys_stdout_origin
if r1norm >= np.sqrt(eps_abs): # infeasible
if verbose:
print('Infeasible linear equality constraint: minimum constraint violation = {:.2e}'.format(r1norm))
print('Status: Terminated due to linear infeasibility')
print("Solve time: {:.2e}".format(time() - start))
return {"x_vals": None, "primal": None, "dual": None, "num_iters": None, "solve_time": None}
if verbose:
print("----------------------------------------------------")
print(" iter | total res | primal res | dual res | time (s)")
print("----------------------------------------------------")
# Set up the workers.
# Initialize AA-II variables.
if anderson: # TODO: Store and update these efficiently as arrays.
n_sum = np.sum([np.prod(v.shape) for v in v_init])
g_vec = np.zeros(n_sum) # g^(k) = v^(k) - F(v^(k)).
s_hist = [] # History of s^(j) = v^(j+1) - v^(j), kept in S^(k) = [s^(k-m_k) ... s^(k-1)].
y_hist = [] # History of y^(j) = g^(j+1) - g^(j), kept in Y^(k) = [y^(k-m_k) ... y^(k-1)].
n_AA = M_AA = 0 # Safeguarding counters.
# A2DR loop.
k = 0
finished = False
safeguard = True
r_primal = np.zeros(max_iter)
r_dual = np.zeros(max_iter)
r_dr = np.zeros(max_iter)
time_iter = np.zeros(max_iter)
r_best = np.inf
# Warm start terms.
dk = np.zeros(A.shape[1])
sol = np.zeros(A.shape[0])
    v = np.concatenate(v_init)
    f_list = np.zeros((A.shape[1], m_accel + 1))
    g_list = np.zeros((A.shape[1], m_accel + 1))
    x_list = np.zeros((A.shape[1], m_accel + 1))
    F_norm = np.zeros(m_accel + 1)
    M = np.zeros((m_accel + 1, m_accel + 1))
idx = 0
eta0 = 2
eta1 = 0.25
mu = 1e-8
delta = 2
p1 = 0.01
p2 = 0.25
curr_dk = dk.copy()
while not finished:
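        # map_g (defined elsewhere) presumably performs one Douglas-Rachford pass:
        # proximal evaluations of p_list followed by projection onto {x : Ax = b},
        # returning the DRS update g = F(v), the fixed-point residual f, the
        # warm-start term dk, the half-step iterate x^(k+1/2), and x^(k+1/2) - v.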
        if k == 0:
            x_list[:, 0] = v
            curr_g, curr_f, curr_dk, curr_x_half, curr_xvdiff = map_g(p_list, A, b, v, t_init, curr_dk, n_list_cumsum)
            g_list[:, 0] = curr_g
            F_norm[0] = np.sum(curr_f ** 2)
            f_list[:, 0] = curr_f / np.sqrt(F_norm[0])
            M[0, 0] = 1
Ax_half = A.dot(curr_x_half)
r_primal_vec = (Ax_half) - b
r_primal[k] = LA.norm(r_primal_vec, ord=2)
subgrad = curr_xvdiff / t_init
# sol = LA.lstsq(A.T, subgrad, rcond=None)[0]
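            # Dual residual: solve min ||A^T sol - subgrad||_2 with LSQR, warm-
            # started at the previous solution, to get an approximate dual variable.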
sys.stdout = open(os.devnull, 'w')
sol = sp.linalg.lsqr(A.T, subgrad, atol=1e-10, btol=1e-10, x0=sol)[0]
sys.stdout.close()
sys.stdout = sys_stdout_origin
r_dual_vec = A.T.dot(sol) - subgrad
r_dual[k] = LA.norm(r_dual_vec, ord=2)
# Save x_i^(k+1/2) if residual norm is smallest so far.
r_all = LA.norm(np.concatenate([r_primal_vec, r_dual_vec]), ord=2)
# Store ||r^0||_2 for stopping criterion.
r_all_0 = r_all
x_final = curr_x_half
r_best = r_all
k_best = k
r_dr[k] = np.sqrt(F_norm[0])
time_iter[k] = time() - start
v = curr_g.copy()
            curr_g, curr_f, curr_dk, curr_x_half, curr_xvdiff = map_g(p_list, A, b, v, t_init, curr_dk, n_list_cumsum)
            idx += 1
            m_hat = min(idx, m_accel)
            id = np.mod(idx, m_accel + 1)
import sys
import pytest
from pytest import raises, warns, deprecated_call
import numpy as np
import torch
# atomic-ish generator classes
from neurodiffeq.generators import Generator1D
from neurodiffeq.generators import Generator2D
from neurodiffeq.generators import Generator3D
from neurodiffeq.generators import GeneratorND
from neurodiffeq.generators import GeneratorSpherical
# complex generator classes
from neurodiffeq.generators import ConcatGenerator
from neurodiffeq.generators import StaticGenerator
from neurodiffeq.generators import PredefinedGenerator
from neurodiffeq.generators import TransformGenerator
from neurodiffeq.generators import EnsembleGenerator
from neurodiffeq.generators import FilterGenerator
from neurodiffeq.generators import ResampleGenerator
from neurodiffeq.generators import BatchGenerator
from neurodiffeq.generators import SamplerGenerator
from neurodiffeq.generators import MeshGenerator
from neurodiffeq.generators import _chebyshev_first, _chebyshev_second
@pytest.fixture(autouse=True)
def magic():
MAGIC = 42
torch.manual_seed(MAGIC)
np.random.seed(MAGIC)
return MAGIC
def _check_shape_and_grad(generator, target_size, *xs):
if target_size is not None:
if target_size != generator.size:
print(f"size mismatch {target_size} != {generator.size}", file=sys.stderr)
return False
for x in xs:
if x.shape != (generator.size,):
print(f"Bad shape: {x.shape} != {generator.size}", file=sys.stderr)
return False
if not x.requires_grad:
print(f"Doesn't require grad: {x}", file=sys.stderr)
return False
return True
def _check_boundary(xs, xs_min, xs_max):
for x, x_min, x_max in zip(xs, xs_min, xs_max):
if x_min is not None and (x < x_min).any():
print(f"Lower than minimum: {x} <= {x_min}", file=sys.stderr)
return False
if x_max is not None and (x > x_max).any():
print(f"Higher than maximum: {x} >= {x_max}", file=sys.stderr)
return False
return True
def _check_iterable_equal(x, y, eps=1e-5):
for a, b in zip(x, y):
if abs(float(a) - float(b)) > eps:
print(f"Different values: {a} != {b}", file=sys.stderr)
return False
return True
def test_chebyshev_first():
x = _chebyshev_first(-1, 1, 1000).detach().cpu().numpy()
assert -1 < x.min() < -0.99 and 0.99 < x.max() < 1
delta = x[:-1] - x[1:]
assert (delta > 0).all()
def test_chebyshev_second():
x = _chebyshev_second(-1, 1, 1000).detach().cpu().numpy()
assert x.min() == x[-1] and x.max() == x[0]
assert np.isclose(x[-1], -1) and np.isclose(x[0], 1)
delta = x[:-1] - x[1:]
assert (delta > 0).all()
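# For reference: first-kind Chebyshev nodes on [a, b] are
#   x_k = (a + b) / 2 + (b - a) / 2 * cos((2k - 1) * pi / (2n)), k = 1..n,
# which lie strictly inside (a, b); second-kind nodes use cos(k * pi / (n - 1)),
# k = 0..n-1, and include both endpoints, matching the boundary asserts above.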
@pytest.mark.parametrize(argnames='method', argvalues=['log-spaced', 'log-spaced-noisy'])
def test_generator1d_log_spaced_input(method):
size = 32
with pytest.raises(ValueError):
Generator1D(size=size, t_min=0.0, t_max=1.0, method=method)
with pytest.raises(ValueError):
Generator1D(size=size, t_min=1.0, t_max=0.0, method=method)
with pytest.raises(ValueError):
Generator1D(size=size, t_min=-1.0, t_max=1.0, method=method)
with pytest.raises(ValueError):
Generator1D(size=size, t_min=1.0, t_max=-1.0, method=method)
def test_generator1d():
size = 32
generator = Generator1D(size=size, t_min=0.0, t_max=2.0, method='uniform')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.0, t_max=2.0, method='equally-spaced')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.0, t_max=2.0, method='equally-spaced-noisy')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='log-spaced')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='log-spaced-noisy')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='log-spaced-noisy',
noise_std=0.01)
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='chebyshev')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='chebyshev1')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
generator = Generator1D(size=size, t_min=0.1, t_max=2.0, method='chebyshev2')
x = generator.getter()
assert _check_shape_and_grad(generator, size, x)
with raises(ValueError):
generator = Generator1D(size=size, t_min=0.0, t_max=2.0, method='magic')
str(generator)
repr(generator)
def test_generator2d():
grid = (5, 6)
size = grid[0] * grid[1]
x_min, x_max = 0.0, 1.0
y_min, y_max = -1.0, 0.0
x_std, y_std = 0.05, 0.06
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='equally-spaced-noisy')
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='equally-spaced-noisy',
xy_noise_std=(x_std, y_std))
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='equally-spaced')
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
assert _check_boundary((x, y), (x_min, y_min), (x_max, y_max))
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='chebyshev')
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
assert _check_boundary((x, y), (x_min, y_min), (x_max, y_max))
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='chebyshev1')
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
assert _check_boundary((x, y), (x_min, y_min), (x_max, y_max))
generator = Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='chebyshev2')
x, y = generator.getter()
assert _check_shape_and_grad(generator, size, x, y)
assert _check_boundary((x, y), (x_min, y_min), (x_max, y_max))
with raises(ValueError):
Generator2D(grid=grid, xy_min=(x_min, y_min), xy_max=(x_max, y_max), method='magic')
str(generator)
repr(generator)
def test_generator3d():
grid = (5, 6, 7)
size = grid[0] * grid[1] * grid[2]
x_min, x_max = 0.0, 1.0
y_min, y_max = 1.0, 2.0
z_min, z_max = 2.0, 3.0
generator = Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max),
method='equally-spaced-noisy')
x, y, z = generator.getter()
assert _check_shape_and_grad(generator, size, x, y, z)
generator = Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max),
method='equally-spaced')
x, y, z = generator.getter()
assert _check_shape_and_grad(generator, size, x, y, z)
assert _check_boundary((x, y, z), (x_min, y_min, z_min), (x_max, y_max, z_max))
generator = Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max),
method='chebyshev')
x, y, z = generator.getter()
assert _check_shape_and_grad(generator, size, x, y, z)
assert _check_boundary((x, y, z), (x_min, y_min, z_min), (x_max, y_max, z_max))
generator = Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max),
method='chebyshev1')
x, y, z = generator.getter()
assert _check_shape_and_grad(generator, size, x, y, z)
assert _check_boundary((x, y, z), (x_min, y_min, z_min), (x_max, y_max, z_max))
generator = Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max),
method='chebyshev2')
x, y, z = generator.getter()
assert _check_shape_and_grad(generator, size, x, y, z)
assert _check_boundary((x, y, z), (x_min, y_min, z_min), (x_max, y_max, z_max))
with raises(ValueError):
Generator3D(grid=grid, xyz_min=(x_min, y_min, z_min), xyz_max=(x_max, y_max, z_max), method='magic')
str(generator)
repr(generator)
def test_generator_nd():
grid = (5,)
r_min = (0.1,)
r_max = (1.0,)
r_noise_std = (0.05,)
max_dim = 4
while len(grid) < (max_dim + 1):
size = np.prod(grid)
methods = ['uniform' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
methods = ['equally-spaced' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True,
r_noise_std=r_noise_std)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
methods = ['log-spaced' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
methods = ['exp-spaced' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
# No check_boundary because doing 10 ** x and then log10() does not always return exactly x
methods = ['chebyshev' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
methods = ['chebyshev1' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
methods = ['chebyshev2' for m in range(len(grid))]
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
if len(grid) == 3:
methods = ['uniform', 'equally-spaced', 'log-spaced']
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=True)
assert _check_shape_and_grad(generator, size, *generator.getter())
generator = GeneratorND(grid=grid, r_min=r_min, r_max=r_max, methods=methods, noisy=False)
assert _check_shape_and_grad(generator, size, *generator.getter())
assert _check_boundary(generator.getter(), r_min, r_max)
grid += (grid[-1] + 1,)
r_min += (r_min[-1] + 1,)
r_max += (r_max[-1] + 1,)
r_noise_std += (r_noise_std[-1] + 0.01,)
str(generator)
repr(generator)
def test_generator_spherical():
size = 64
r_min, r_max = 0.0, 1.0
generator = GeneratorSpherical(size, r_min=r_min, r_max=r_max, method='equally-spaced-noisy')
r, theta, phi = generator.get_examples()
assert _check_shape_and_grad(generator, size, r, theta, phi)
assert _check_boundary((r, theta, phi), (r_min, 0.0, 0.0), (r_max, np.pi, np.pi * 2))
generator = GeneratorSpherical(size, r_min=r_min, r_max=r_max, method='equally-radius-noisy')
r, theta, phi = generator.get_examples()
assert _check_shape_and_grad(generator, size, r, theta, phi)
assert _check_boundary((r, theta, phi), (r_min, 0.0, 0.0), (r_max, np.pi, np.pi * 2))
with pytest.raises(ValueError):
_ = GeneratorSpherical(64, method='bad_generator')
with pytest.raises(ValueError):
_ = GeneratorSpherical(64, r_min=-1.0)
with pytest.raises(ValueError):
_ = GeneratorSpherical(64, r_min=1.0, r_max=0.0)
str(generator)
repr(generator)
def test_concat_generator():
size1, size2 = 10, 20
t_min, t_max = 0.5, 1.5
generator1 = Generator1D(size1, t_min=t_min, t_max=t_max)
generator2 = Generator1D(size2, t_min=t_min, t_max=t_max)
concat_generator = ConcatGenerator(generator1, generator2)
x = concat_generator.get_examples()
assert _check_shape_and_grad(concat_generator, size1 + size2, x)
grid1 = (4, 4, 4)
size1, size2, size3 = grid1[0] * grid1[1] * grid1[2], 100, 200
generator1 = Generator3D(grid=grid1)
generator2 = GeneratorSpherical(size2)
generator3 = GeneratorSpherical(size3)
concat_generator = ConcatGenerator(generator1, generator2, generator3)
r, theta, phi = concat_generator.get_examples()
assert _check_shape_and_grad(concat_generator, size1 + size2 + size3, r, theta, phi)
added_generator = generator1 + generator2 + generator3
r, theta, phi = added_generator.get_examples()
assert _check_shape_and_grad(added_generator, size1 + size2 + size3, r, theta, phi)
str(concat_generator)
repr(concat_generator)
def test_static_generator():
size = 100
generator = Generator1D(size)
static_generator = StaticGenerator(generator)
x1 = static_generator.get_examples()
x2 = static_generator.get_examples()
assert _check_shape_and_grad(generator, size)
assert _check_shape_and_grad(static_generator, size, x1, x2)
assert (x1 == x2).all()
size = 100
generator = GeneratorSpherical(size)
static_generator = StaticGenerator(generator)
r1, theta1, phi1 = static_generator.get_examples()
r2, theta2, phi2 = static_generator.get_examples()
assert _check_shape_and_grad(generator, size)
assert _check_shape_and_grad(static_generator, size, r1, theta1, phi1, r2, theta2, phi2)
assert (r1 == r2).all() and (theta1 == theta2).all() and (phi1 == phi2).all()
str(static_generator)
repr(static_generator)
def test_predefined_generator():
size = 100
old_x = torch.arange(size, dtype=torch.float, requires_grad=False)
predefined_generator = PredefinedGenerator(old_x)
x = predefined_generator.get_examples()
assert _check_shape_and_grad(predefined_generator, size, x)
assert _check_iterable_equal(old_x, x)
old_x = torch.arange(size, dtype=torch.float, requires_grad=False)
old_y = torch.arange(size, dtype=torch.float, requires_grad=True)
old_z = torch.arange(size, dtype=torch.float, requires_grad=False)
predefined_generator = PredefinedGenerator(old_x, old_y, old_z)
x, y, z = predefined_generator.get_examples()
assert _check_shape_and_grad(predefined_generator, size, x, y, z)
assert _check_iterable_equal(old_x, x)
assert _check_iterable_equal(old_y, y)
assert _check_iterable_equal(old_z, z)
x_list = [i * 2.0 for i in range(size)]
y_tuple = tuple([i * 3.0 for i in range(size)])
z_array = np.array([i * 4.0 for i in range(size)]).reshape(-1, 1)
w_tensor = torch.arange(size, dtype=torch.float)
predefined_generator = PredefinedGenerator(x_list, y_tuple, z_array, w_tensor)
x, y, z, w = predefined_generator.get_examples()
assert _check_shape_and_grad(predefined_generator, size, x, y, z, w)
assert _check_iterable_equal(x_list, x)
assert _check_iterable_equal(y_tuple, y)
assert _check_iterable_equal(z_array, z)
assert _check_iterable_equal(w_tensor, w)
str(predefined_generator)
repr(predefined_generator)
def test_transform_generator():
size = 100
x = np.arange(0, size, dtype=np.float32)
x_expected = np.sin(x)
generator = PredefinedGenerator(x)
transform_generator = TransformGenerator(generator, [torch.sin])
x = transform_generator.get_examples()
assert _check_shape_and_grad(transform_generator, size, x)
assert _check_iterable_equal(x, x_expected)
x = np.arange(0, size, dtype=np.float32)
y = np.arange(0, size, dtype=np.float32)
z = np.arange(0, size, dtype=np.float32)
x_expected = np.sin(x)
y_expected = y
z_expected = -z
generator = PredefinedGenerator(x, y, z)
transform_generator = TransformGenerator(generator, [torch.sin, None, lambda a: -a])
x, y, z = transform_generator.get_examples()
assert _check_shape_and_grad(transform_generator, size, x, y, z)
assert _check_iterable_equal(x, x_expected)
assert _check_iterable_equal(y, y_expected)
assert _check_iterable_equal(z, z_expected)
transform_generator = TransformGenerator(generator, transform=lambda x, y, z: (torch.sin(x), y, -z))
x, y, z = transform_generator.get_examples()
assert _check_shape_and_grad(transform_generator, size, x, y, z)
assert _check_iterable_equal(x, x_expected)
assert _check_iterable_equal(y, y_expected)
assert _check_iterable_equal(z, z_expected)
str(transform_generator)
repr(transform_generator)
def test_ensemble_generator():
size = 100
generator1 = Generator1D(size)
ensemble_generator = EnsembleGenerator(generator1)
x = ensemble_generator.get_examples()
assert _check_shape_and_grad(ensemble_generator, size, x)
old_x = torch.rand(size)
old_y = torch.rand(size)
old_z = torch.rand(size)
generator1 = PredefinedGenerator(old_x)
generator2 = PredefinedGenerator(old_y)
generator3 = PredefinedGenerator(old_z)
ensemble_generator = EnsembleGenerator(generator1, generator2, generator3)
x, y, z = ensemble_generator.get_examples()
assert _check_shape_and_grad(ensemble_generator, size, x, y, z)
assert _check_iterable_equal(old_x, x)
assert _check_iterable_equal(old_y, y)
assert _check_iterable_equal(old_z, z)
old_x = torch.rand(size)
old_y = torch.rand(size)
generator1 = PredefinedGenerator(old_x)
generator2 = PredefinedGenerator(old_y)
product_generator = generator1 * generator2
x, y = product_generator.get_examples()
assert _check_shape_and_grad(product_generator, size, x, y)
assert _check_iterable_equal(old_x, x)
assert _check_iterable_equal(old_y, y)
str(ensemble_generator)
repr(ensemble_generator)
def test_mesh_generator():
size = 10
x_min, x_max = 0.0, 1.0
y_min, y_max = 1.0, 2.0
z_min, z_max = 2.0, 3.0
generator1 = Generator1D(size)
mesh_generator = MeshGenerator(generator1)
x = mesh_generator.get_examples()
assert _check_shape_and_grad(mesh_generator, size, x)
mesh_generator = MeshGenerator(generator1, generator1, generator1)
m1, m2, m3 = mesh_generator.get_examples()
assert _check_shape_and_grad(mesh_generator, (size ** 3), m1, m2, m3)
generator1x = Generator1D(size, x_min, x_max, method="equally-spaced")
generator1y = Generator1D(size, y_min, y_max, method="equally-spaced")
generator1z = Generator1D(size, z_min, z_max, method="equally-spaced")
generator3 = Generator3D((size, size, size), (x_min, y_min, z_min), (x_max, y_max, z_max), method="equally-spaced")
mesh_generator = MeshGenerator(generator1x, generator1y, generator1z)
m1, m2, m3 = mesh_generator.get_examples()
g1, g2, g3 = generator3.get_examples()
assert _check_shape_and_grad(mesh_generator, (size ** 3), m1, m2, m3)
assert _check_iterable_equal(g1, m1)
assert _check_iterable_equal(g2, m2)
assert _check_iterable_equal(g3, m3)
generator1x = Generator1D(size, x_min, x_max, method="equally-spaced")
generator1y = Generator1D(size, y_min, y_max, method="equally-spaced")
generator1z = Generator1D(size, z_min, z_max, method="equally-spaced")
generator3 = Generator3D((size, size, size), (x_min, y_min, z_min), (x_max, y_max, z_max), method="equally-spaced")
xor_generator = generator1x ^ generator1y ^ generator1z
m1, m2, m3 = xor_generator.get_examples()
g1, g2, g3 = generator3.get_examples()
assert _check_shape_and_grad(xor_generator, (size ** 3), m1, m2, m3)
assert _check_iterable_equal(g1, m1)
assert _check_iterable_equal(g2, m2)
assert _check_iterable_equal(g3, m3)
str(mesh_generator)
repr(mesh_generator)
def test_filter_generator():
grid = (10, 10)
size = 100
x = [i * 1.0 for i in range(size)]
filter_fn = lambda a: (a[0] % 2 == 0)
filter_fn_2 = lambda a: (a % 2 == 0)
x_expected = filter(filter_fn_2, x)
generator = PredefinedGenerator(x)
filter_generator = FilterGenerator(generator, filter_fn=filter_fn, update_size=True)
x = filter_generator.get_examples()
assert _check_shape_and_grad(filter_generator, size // 2, x)
assert _check_iterable_equal(x_expected, x)
x = [i * 1.0 for i in range(size)]
y = [-i * 1.0 for i in range(size)]
filter_fn = lambda ab: (ab[0] % 2 == 0) & (ab[1] > -size / 2)
x_expected, y_expected = list(zip(*filter(filter_fn, zip(x, y))))
generator = PredefinedGenerator(x, y)
filter_generator = FilterGenerator(generator, filter_fn)
x, y = filter_generator.get_examples()
assert _check_shape_and_grad(filter_generator, size // 4, x, y)
assert _check_iterable_equal(x_expected, x)
assert _check_iterable_equal(y_expected, y)
generator = Generator2D(grid)
filter_fn = lambda ab: (ab[0] > 0.5) & (ab[1] < 0.5)
filter_generator = FilterGenerator(generator, filter_fn)
for _ in range(5):
x, y = filter_generator.get_examples()
assert _check_shape_and_grad(filter_generator, None, x, y)
fixed_size = 42
filter_generator = FilterGenerator(generator, filter_fn, size=fixed_size, update_size=False)
for _ in range(5):
assert _check_shape_and_grad(filter_generator, fixed_size)
filter_generator.get_examples()
str(filter_generator)
repr(filter_generator)
def test_resample_generator():
size = 100
sample_size = size
x_expected = np.arange(size, dtype=np.float32)
generator = PredefinedGenerator(x_expected)
resample_generator = ResampleGenerator(generator, size=sample_size, replacement=False)
x = resample_generator.get_examples()
assert _check_shape_and_grad(resample_generator, sample_size, x)
# noinspection PyTypeChecker
assert _check_iterable_equal(torch.sort(x)[0], x_expected)
sample_size = size // 2
x = np.arange(size, dtype=np.float32)
y = np.arange(size, size * 2, dtype=np.float32)
generator = PredefinedGenerator(x, y)
resample_generator = ResampleGenerator(generator, size=sample_size, replacement=False)
x, y = resample_generator.get_examples()
assert _check_shape_and_grad(resample_generator, sample_size, x, y)
assert _check_iterable_equal(x + 100, y)
assert len(torch.unique(x.detach())) == len(x)
sample_size = size * 3 // 4
x = np.arange(size, dtype=np.float32)
y = np.arange(size, size * 2, dtype=np.float32)
generator = PredefinedGenerator(x, y)
resample_generator = ResampleGenerator(generator, size=sample_size, replacement=True)
x, y = resample_generator.get_examples()
assert _check_shape_and_grad(resample_generator, sample_size, x, y)
assert _check_iterable_equal(x + 100, y)
assert len(torch.unique(x.detach())) < len(x)
sample_size = size * 2
x = np.arange(size, dtype=np.float32)
y = np.arange(size, size * 2, dtype=np.float32)
z = np.arange(size * 2, size * 3, dtype=np.float32)
generator = PredefinedGenerator(x, y, z)
resample_generator = ResampleGenerator(generator, size=sample_size, replacement=True)
x, y, z = resample_generator.get_examples()
assert _check_shape_and_grad(resample_generator, sample_size, x, y, z)
assert _check_iterable_equal(x + 100, y)
assert _check_iterable_equal(y + 100, z)
assert len(torch.unique(x.detach())) < len(x)
str(resample_generator)
repr(resample_generator)
def test_batch_generator():
size = 10
batch_size = 3
    x = np.arange(size, dtype=np.float32)
from typing import Optional, Collection
import numpy as np
class VisdomLinePlotter(object):
"""Plots to Visdom"""
def __init__(self, visdom_obj, env_name="main"):
self.viz = visdom_obj
self.env = env_name
self.plots = {}
def plot(self, var_name, split_name, title_name, x, y):
if var_name not in self.plots:
self.plots[var_name] = self.viz.line(
X=np.array([x, x]),
Y=np.array([y, y]),
env=self.env,
opts=dict(
legend=[split_name],
title=title_name,
xlabel="Epochs",
ylabel=var_name,
),
)
else:
            self.viz.line(
                X=np.array([x]),
                Y=np.array([y]),
                env=self.env,
                win=self.plots[var_name],
                name=split_name,
                update="append",  # append the new point to the existing trace
            )
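# Example (hedged sketch; assumes a running Visdom server):
#   import visdom
#   plotter = VisdomLinePlotter(visdom.Visdom(), env_name="main")
#   plotter.plot("loss", "train", "Training Loss", x=epoch, y=loss_value)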
import numpy as np
from xml.etree.ElementTree import Element
from vispy.color import Colormap
from napari.layers import Image
def test_random_image():
"""Test instantiating Image layer with random 2D data."""
shape = (10, 15)
np.random.seed(0)
data = np.random.random(shape)
layer = Image(data)
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer.range == tuple((0, m, 1) for m in shape)
assert layer.multichannel == False
assert layer._data_view.shape == shape[-2:]
def test_all_zeros_image():
"""Test instantiating Image layer with all zeros data."""
shape = (10, 15)
data = np.zeros(shape, dtype=float)
layer = Image(data)
assert np.all(layer.data == data)
assert layer.ndim == len(shape)
assert layer.shape == shape
assert layer.multichannel == False
assert layer._data_view.shape == shape[-2:]
def test_integer_image():
"""Test instantiating Image layer with integer data."""
shape = (10, 15)
np.random.seed(0)
data = np.round(10 * np.random.random(shape)).astype(int)
layer = Image(data)
    assert np.all(layer.data == data)
#!/usr/bin/env python
"""A class for handling 5C analysis."""
import os
import sys
from math import log
import numpy
from scipy.stats import linregress
import h5py
from scipy.optimize import fmin_l_bfgs_b as bfgs
import libraries._fivec_binning as _binning
import libraries._fivec_optimize as _optimize
import fivec_binning
import plotting
class FiveC(object):
"""
This is the class for handling 5C analysis.
This class relies on :class:`Fragment <hifive.fragment.Fragment>` and :class:`FiveCData <hifive.fivec_data.FiveCData>` for genomic position and interaction count data. Use this class to perform filtering of fragments based on coverage, model fragment bias and distance dependence, and downstream analysis and manipulation. This includes binning of data, plotting of data, and statistical analysis.
.. note::
This class is also available as hifive.FiveC
When initialized, this class creates an h5dict in which to store all data associated with this object.
:param filename: The file name of the h5dict. This should end with the suffix '.hdf5'
:type filename: str.
:param mode: The mode to open the h5dict with. This should be 'w' for creating or overwriting an h5dict with name given in filename.
:type mode: str.
:param silent: Indicates whether to print information about function execution for this object.
:type silent: bool.
:returns: :class:`FiveC <hifive.fivec.FiveC>` class object.
:attributes: * **file** (*str.*) - A string containing the name of the file passed during object creation for saving the object to.
* **silent** (*bool.*) - A boolean indicating whether to suppress all of the output messages.
* **history** (*str.*) - A string containing all of the commands executed on this object and their outcome.
* **normalization** (*str.*) - A string stating which type of normalization has been performed on this object. This starts with the value 'none'.
In addition, many other attributes are initialized to the 'None' state.
"""
def __init__(self, filename, mode='r', silent=False):
"""Create a FiveC object."""
self.file = os.path.abspath(filename)
self.filetype = 'fivec_project'
self.silent = silent
self.binning_corrections = None
self.binning_correction_indices = None
self.binning_frag_indices = None
self.binning_num_bins = None
self.model_parameters = None
self.corrections = None
self.region_means = None
self.gamma = None
self.sigma = None
self.trans_mean = None
self.normalization = 'none'
self.history = ''
if mode != 'w':
self.load()
return None
def __getitem__(self, key):
"""Dictionary-like lookup."""
if key in self.__dict__:
return self.__dict__[key]
else:
return None
def __setitem__(self, key, value):
"""Dictionary-like value setting."""
self.__dict__[key] = value
return None
def load_data(self, filename):
"""
Load fragment-pair counts and fragment object from :class:`FiveCData <hifive.fivec_data.FiveCData>` object.
:param filename: Specifies the file name of the :class:`FiveCData <hifive.fivec_data.FiveCData>` object to associate with this analysis.
:type filename: str.
:returns: None
:Attributes: * **datafilename** (*str.*) - A string containing the relative path of the FiveCData file.
* **fragfilename** (*str.*) - A string containing the relative path of the Fragment file associated with the FiveCData file.
* **frags** (*filestream*) - A filestream to the hdf5 Fragment file such that all saved Fragment attributes can be accessed through this class attribute.
* **data** (*filestream*) - A filestream to the hdf5 FiveCData file such that all saved FiveCData attributes can be accessed through this class attribute.
* **chr2int** (*dict.*) - A dictionary that converts chromosome names to chromosome indices.
* **filter** (*ndarray*) - A numpy array of type int32 and size N where N is the number of fragments. This contains the inclusion status of each fragment with a one indicating included and zero indicating excluded and is initialized with all fragments included.
When a FiveCData object is associated with the project file, the 'history' attribute is updated with the history of the FiveCData object.
"""
self.history += "FiveC.load_data(filename='%s') - " % filename
# ensure data h5dict exists
if not os.path.exists(filename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No data loaded.\n") % (filename.split('/')[-1]),
self.history += "Error: '%s' not found\n" % filename
return None
self.datafilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(filename)),
os.path.dirname(self.file)), os.path.basename(filename))
self.data = h5py.File(filename, 'r')
self.history = self.data['/'].attrs['history'] + self.history
fragfilename = self.data['/'].attrs['fragfilename']
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(os.path.abspath(filename).split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(fragfilename),
os.path.dirname(self.file)), os.path.basename(fragfilename))
# ensure fend h5dict exists
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s.\n") % (fragfilename),
self.history += "Error: '%s' not found\n" % fragfilename
return None
self.frags = h5py.File(fragfilename, 'r')
# create dictionary for converting chromosome names to indices
self.chr2int = {}
for i, chrom in enumerate(self.frags['chromosomes']):
self.chr2int[chrom] = i
# create arrays
self.filter = numpy.ones(self.frags['fragments'].shape[0], dtype=numpy.int32)
self.history += 'Success\n'
return None
def save(self, out_fname=None):
"""
Save analysis parameters to h5dict.
:param filename: Specifies the file name of the :class:`FiveC <hifive.fivec.FiveC>` object to save this analysis to.
:type filename: str.
:returns: None
"""
        self.history = self.history.replace("'None'", "None")
if not out_fname is None:
original_file = os.path.abspath(self.file)
if 'datafilename' in self.__dict__:
datafilename = self.datafilename
if datafilename[:2] == './':
datafilename = datafilename[2:]
parent_count = datafilename.count('../')
datafilename = '/'.join(original_file.split('/')[:-(1 + parent_count)] +
datafilename.lstrip('/').split('/')[parent_count:])
self.datafilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(datafilename)),
os.path.dirname(self.file)), os.path.basename(datafilename))
if 'fragfilename' in self.__dict__:
fragfilename = self.fragfilename
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(original_file.split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
self.fragfilename = "%s/%s" % (os.path.relpath(os.path.dirname(os.path.abspath(fragfilename)),
os.path.dirname(self.file)), os.path.basename(fragfilename))
else:
out_fname = self.file
datafile = h5py.File(out_fname, 'w')
for key in self.__dict__.keys():
if key in ['data', 'frags', 'file', 'chr2int', 'silent']:
continue
elif self[key] is None:
continue
elif isinstance(self[key], numpy.ndarray):
datafile.create_dataset(key, data=self[key])
elif not isinstance(self[key], dict):
datafile.attrs[key] = self[key]
datafile.close()
return None
def load(self):
"""
Load analysis parameters from h5dict specified at object creation and open h5dicts for associated :class:`FiveCData <hifive.fivec_data.FiveCData>` and :class:`Fragment <hifive.fragment.Fragment>` objects.
Any call of this function will overwrite current object data with values from the last :func:`save` call.
:returns: None
"""
# return attributes to init state
self.binning_corrections = None
self.binning_correction_indices = None
self.binning_frag_indices = None
self.binning_num_bins = None
self.model_parameters = None
self.corrections = None
self.region_means = None
self.gamma = None
self.sigma = None
self.trans_mean = None
self.normalization = 'none'
self.history = ''
# load data hdf5 dict
datafile = h5py.File(self.file, 'r')
for key in datafile.keys():
self[key] = numpy.copy(datafile[key])
for key in datafile['/'].attrs.keys():
self[key] = datafile['/'].attrs[key]
# ensure data h5dict exists
if 'datafilename' in self.__dict__:
datafilename = self.datafilename
if datafilename[:2] == './':
datafilename = datafilename[2:]
parent_count = datafilename.count('../')
datafilename = '/'.join(self.file.split('/')[:-(1 + parent_count)] +
datafilename.lstrip('/').split('/')[parent_count:])
if not os.path.exists(datafilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No data loaded.\n") % (datafilename),
else:
self.data = h5py.File(datafilename, 'r')
# ensure fragment h5dict exists
if 'fragfilename' in self.__dict__:
fragfilename = self.fragfilename
if fragfilename[:2] == './':
fragfilename = fragfilename[2:]
parent_count = fragfilename.count('../')
fragfilename = '/'.join(self.file.split('/')[:-(1 + parent_count)] +
fragfilename.lstrip('/').split('/')[parent_count:])
if not os.path.exists(fragfilename):
if not self.silent:
print >> sys.stderr, ("Could not find %s. No fragments loaded.\n") % (fragfilename),
else:
self.frags = h5py.File(fragfilename, 'r')
# create dictionary for converting chromosome names to indices
self.chr2int = {}
for i, chrom in enumerate(self.frags['chromosomes']):
self.chr2int[chrom] = i
datafile.close()
return None
def filter_fragments(self, mininteractions=20, mindistance=0, maxdistance=0):
"""
Iterate over the dataset and remove fragments that do not have 'minobservations' using only unfiltered fragments and interactions falling with the distance limits specified.
In order to create a set of fragments that all have the necessary number of interactions, after each round of filtering, fragment interactions are retallied using only interactions that have unfiltered fragments at both ends.
:param mininteractions: The required number of interactions for keeping a fragment in analysis.
:type mininteractions: int.
:param mindistance: The minimum inter-fragment distance to be included in filtering.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in filtering. A value of zero indicates no maximum cutoff.
:type maxdistance: int.
:returns: None
"""
self.history += "FiveC.filter_fragments(mininteractions=%i, mindistance=%s, maxdistance=%s) - " % (mininteractions, str(mindistance), str(maxdistance))
if not self.silent:
print >> sys.stderr, ("Filtering fragments..."),
original_count = numpy.sum(self.filter)
previous_valid = original_count + 1
current_valid = original_count
coverage = numpy.zeros(self.filter.shape[0], dtype=numpy.int32)
# copy needed arrays
data = self.data['cis_data'][...]
distances = self.frags['fragments']['mid'][data[:, 1]] - self.frags['fragments']['mid'][data[:, 0]]
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((self.filter[data[:, 0]] * self.filter[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
# repeat until all remaining fragments have minobservation valid observations
while current_valid < previous_valid:
previous_valid = current_valid
coverage = numpy.bincount(data[:, 0], minlength=self.filter.shape[0])
coverage += numpy.bincount(data[:, 1], minlength=self.filter.shape[0])
invalid = numpy.where(coverage < mininteractions)[0]
self.filter[invalid] = 0
valid = numpy.where(self.filter[data[:, 0]] * self.filter[data[:, 1]])[0]
data = data[valid, :]
current_valid = numpy.sum(self.filter)
if not self.silent:
print >> sys.stderr, ("Removed %i of %i fragments\n") % (original_count - current_valid, original_count),
self.history += "Success\n"
return None
def find_distance_parameters(self):
"""
Regress log counts versus inter-fragment distances to find slope and intercept values and then find the standard deviation of corrected counts.
:returns: None
:Attributes: * **gamma** (*float*) - A float denoting the negative slope of the distance-dependence regression line.
* **sigma** (*float*) - A float denoting the standard deviation of nonzero data about the distance-dependence regression line.
* **region_means** (*ndarray*) - A numpy array of type float32 and length equal to the number of regions. This is initialized to zeros until fragment correction values are found.
"""
self.history += "FiveC.find_distance_parameters() - "
if not self.silent:
print >> sys.stderr, ("Finding distance parameters..."),
# copy needed arrays
data = self.data['cis_data'][...]
mids = self.frags['fragments']['mid'][...]
# find which pairs are both unfiltered
valid = numpy.where(self.filter[data[:, 0]] * self.filter[data[:, 1]])[0]
# find distances between fragment pairs
log_distances = numpy.log(mids[data[valid, 1]] - mids[data[valid, 0]])
# find regression line
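        # Model: log(count) ~ slope * log(distance) + intercept; gamma stores the
        # negated slope, so the expected signal decays as distance ** -gamma.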
counts = numpy.log(data[valid, 2])
if not self.corrections is None:
            counts -= self.corrections[data[valid, 0]] + self.corrections[data[valid, 1]]
temp = linregress(log_distances, counts)[:2]
self.gamma = -float(temp[0])
if self.region_means is None:
self.region_means = numpy.zeros(self.frags['regions'].shape[0], dtype=numpy.float32) + temp[1]
self.sigma = float(numpy.std(counts - temp[1] + self.gamma * log_distances))
if not self.silent:
print >> sys.stderr, ("Done\n"),
self.history += "Success\n"
return None
def find_probability_fragment_corrections(self, mindistance=0, maxdistance=0, max_iterations=1000,
minchange=0.0005, learningstep=0.1, precalculate=True, regions=[],
precorrect=False):
"""
Using gradient descent, learn correction values for each valid fragment based on a Log-Normal distribution of observations.
:param mindistance: The minimum inter-fragment distance to be included in modeling.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in modeling.
:type maxdistance: int.
:param max_iterations: The maximum number of iterations to carry on gradient descent for.
:type max_iterations: int.
:param minchange: The cutoff threshold for early learning termination for the maximum absolute gradient value.
:type minchange: float
:param learningstep: The scaling factor for decreasing learning rate by if step doesn't meet armijo criterion.
:type learningstep: float
:param precalculate: Specifies whether the correction values should be initialized at the fragment means.
:type precalculate: bool.
:param regions: A list of regions to calculate corrections for. If set as None, all region corrections are found.
:type regions: list
:param precorrect: Use binning-based corrections in expected value calculations, resulting in a chained normalization approach.
:type precorrect: bool.
:returns: None
:Attributes: * **corrections** (*ndarray*) - A numpy array of type float32 and length equal to the number of fragments. All invalid fragments have an associated correction value of zero.
The 'normalization' attribute is updated to 'probability' or 'binning-probability', depending on if the 'precorrect' option is selected. In addition, the 'region_means' attribute is updated such that the mean correction (sum of all valid regional correction value pairs) is adjusted to zero and the corresponding region mean is adjusted the same amount but the opposite sign.
"""
self.history += "FiveC.find_probability_fragment_corrections(mindistance=%s, maxdistance=%s, max_iterations=%i, minchange=%f, learningstep=%f, precalculate=%s, regions=%s, precorrect=%s) - " % (str(mindistance), str(maxdistance), max_iterations, minchange, learningstep, precalculate, str(regions), precorrect)
if precorrect and self.binning_corrections is None:
if not self.silent:
print >> sys.stderr, ("Precorrection can only be used in project has previously run 'find_binning_fragment_corrections'.\n"),
self.history += "Error: 'find_binning_fragment_corrections()' not run yet\n"
return None
if self.corrections is None:
self.corrections = numpy.zeros(self.frags['fragments'].shape[0], dtype=numpy.float32)
# if regions not given, set to all regions
        if regions is None or len(regions) == 0:
regions = numpy.arange(self.frags['regions'].shape[0])
# determine if distance parameters have been calculated
if self.gamma is None:
self.find_distance_parameters()
# limit corrections to only requested regions
filt = numpy.copy(self.filter)
for i in range(self.frags['regions'].shape[0]):
if i not in regions:
filt[self.frags['regions']['start_frag'][i]:self.frags['regions']['stop_frag'][i]] = 0
# copy and calculate needed arrays
if not self.silent:
print >> sys.stderr, ("\r%s\rCopying needed data...") % (' ' * 80),
data = self.data['cis_data'][...]
distances = self.frags['fragments']['mid'][data[:, 1]] - self.frags['fragments']['mid'][data[:, 0]]
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((filt[data[:, 0]] * filt[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
distances = numpy.log(distances[valid])
counts_n = numpy.log(data[:, 2] - 0.5).astype(numpy.float32)
counts = numpy.log(data[:, 2]).astype(numpy.float32)
counts_p = numpy.log(data[:, 2] + 0.5).astype(numpy.float32)
distance_signal = (-self.gamma * distances).astype(numpy.float32)
distance_signal += self.region_means[self.frags['fragments']['region'][data[:, 0]]]
# create empty arrays
gradients = numpy.zeros(self.filter.shape[0], dtype=numpy.float32)
valid = numpy.where(filt)[0]
# find number of interactions for each fragment
interactions = numpy.bincount(data[:, 0], minlength=self.filter.shape[0]).astype(numpy.int32)
interactions += numpy.bincount(data[:, 1], minlength=self.filter.shape[0]).astype(numpy.int32)
interactions = numpy.maximum(1, interactions)
# if precalculation requested, find fragment means
if precalculate:
enrichments = counts - distance_signal
count_sums = numpy.bincount(data[:, 0], weights=enrichments, minlength=gradients.shape[0])
count_sums += numpy.bincount(data[:, 1], weights=enrichments, minlength=gradients.shape[0])
self.corrections = ((count_sums / numpy.maximum(1, interactions)) * 0.5).astype(numpy.float32)
if precorrect:
if not self.silent:
print >> sys.stderr, ("\r%s\rFinding binning corrections...") % (' ' * 80),
_optimize.find_binning_correction_adjustment(distance_signal,
data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
# cycle through learning phases
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections...") % (' ' * 80),
iteration = 0
cont = True
change = numpy.inf
new_corrections = numpy.copy(self.corrections)
start_cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
self.corrections,
self.sigma)
previous_cost = start_cost
while cont:
iteration += 1
# find gradients
gradients.fill(0.0)
_optimize.calculate_gradients(data,
counts_n,
counts,
counts_p,
distance_signal,
self.corrections,
gradients,
self.sigma)
# find best step size
armijo = numpy.inf
t = 0.1
gradients /= interactions
gradient_norm = numpy.sum(gradients[valid] ** 2.0)
j = 0
best_score = numpy.inf
best_t = 0.1
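            # Backtracking line search: shrink t by 'learningstep' until the Armijo
            # condition cost(new) <= cost(old) - t * ||gradient||^2 holds, keeping
            # the best step seen in case no trial step satisfies it.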
while armijo > 0.0:
# update gradients
_optimize.update_corrections(filt,
self.corrections,
new_corrections,
gradients,
t)
cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
new_corrections,
self.sigma)
if numpy.isnan(cost):
cost = numpy.inf
armijo = numpy.inf
else:
armijo = cost - previous_cost + t * gradient_norm
if cost < best_score:
best_score = cost
best_t = t
if not self.silent:
print >> sys.stderr, ("\r%s iteration:%i cost:%f change:%f armijo: %f %s") %\
('Learning corrections...', iteration, previous_cost,
change, armijo, ' ' * 20),
t *= learningstep
j += 1
if j == 20:
armijo = -numpy.inf
t = best_t
_optimize.update_corrections(filt,
self.corrections,
new_corrections,
gradients,
t)
cost = _optimize.calculate_prob_cost(data,
counts_n,
counts,
counts_p,
distance_signal,
new_corrections,
self.sigma)
previous_cost = cost
self.corrections = new_corrections
change = numpy.amax(numpy.abs(gradients[valid] / new_corrections[valid]))
if not self.silent:
print >> sys.stderr, ("\r%s iteration:%i cost:%f change:%f %s") %\
('Learning corrections...', iteration, cost, change, ' ' * 40),
if iteration >= max_iterations or change <= minchange:
cont = False
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections... Initial Cost: %f Final Cost: %f Done\n") %\
(' ' * 80, start_cost, cost),
# Calculate region means
if self.region_means is None:
self.region_means = numpy.zeros(self.frags['regions'].shape[0], dtype=numpy.float32)
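        # Center corrections per region: subtract half the mean pairwise
        # (forward + reverse) correction from each strand so the average valid
        # forward x reverse correction sum is zero, folding the offset into the
        # region mean.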
for i in regions:
start = self.frags['regions']['start_frag'][i]
stop = self.frags['regions']['stop_frag'][i]
forward = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 0))[0] + start)
reverse = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 1))[0] + start)
if forward.shape[0] == 0 or reverse.shape[0] == 0:
continue
region_mean = (numpy.sum(self.corrections[forward]) * reverse.shape[0] +
numpy.sum(self.corrections[reverse]) * forward.shape[0])
region_mean /= forward.shape[0] * reverse.shape[0]
self.corrections[forward] -= region_mean / 2.0
self.corrections[reverse] -= region_mean / 2.0
self.region_means[i] += region_mean
if precorrect:
self.normalization = 'binning-probability'
else:
self.normalization = 'probability'
        self.history += 'Success\n'
return None
def find_express_fragment_corrections(self, mindistance=0, maxdistance=0, iterations=1000, remove_distance=False,
usereads='cis', regions=[], precorrect=False, logged=True, kr=False):
"""
Using iterative approximation, learn correction values for each valid fragment.
:param mindistance: The minimum inter-fragment distance to be included in modeling.
:type mindistance: int.
:param maxdistance: The maximum inter-fragment distance to be included in modeling.
:type maxdistance: int.
:param iterations: The number of iterations to use for learning fragment corrections.
:type iterations: int.
:param remove_distance: Specifies whether the estimated distance-dependent portion of the signal is removed prior to learning fragment corrections.
:type remove_distance: bool.
:param usereads: Specifies which set of interactions to use, 'cis', 'trans', or 'all'.
:type usereads: str.
:param regions: A list of regions to calculate corrections for. If set as None, all region corrections are found.
:type regions: list
:param precorrect: Use binning-based corrections in expected value calculations, resulting in a chained normalization approach.
:type precorrect: bool.
:param logged: Use log-counts instead of counts for learning.
:type logged: bool.
:param kr: Use the Knight Ruiz matrix balancing algorithm instead of weighted matrix balancing. This option ignores 'iterations' and 'logged'.
:type kr: bool.
:returns: None
Calling this function creates the following attributes:
:Attributes: * **corrections** (*ndarray*) - A numpy array of type float32 and length equal to the number of fragments. All invalid fragments have an associated correction value of zero.
The 'normalization' attribute is updated to 'express' or 'binning-express', depending on if the 'precorrect' option is selected. In addition, if the 'remove_distance' option is selected, the 'region_means' attribute is updated such that the mean correction (sum of all valid regional correction value pairs) is adjusted to zero and the corresponding region mean is adjusted the same amount but the opposite sign.
"""
self.history += "FiveC.find_express_fragment_corrections(mindistance=%s, maxdistance=%s, iterations=%i, remove_distance=%s, usereads='%s', regions=%s, precorrect=%s, logged=%s, kr=%s) - " % (str(mindistance), str(maxdistance), iterations, remove_distance, usereads, str(regions), precorrect, logged, kr)
if precorrect and self.binning_corrections is None:
if not self.silent:
print >> sys.stderr, ("Precorrection can only be used in project has previously run 'find_binning_fragment_corrections'.\n"),
self.history += "Error: 'find_binning_fragment_corrections()' not run yet\n"
return None
# make sure usereads has a valid value
if usereads not in ['cis', 'trans', 'all']:
if not self.silent:
print >> sys.stderr, ("'usereads' does not have a valid value.\n"),
self.history += "Error: '%s' not a valid value for 'usereads'\n" % usereads
return None
# if regions not given, set to all regions
        if regions is None or len(regions) == 0:
regions = numpy.arange(self.frags['regions'].shape[0])
if self.corrections is None:
self.corrections = numpy.zeros(self.frags['fragments'].shape[0], dtype=numpy.float32)
if kr:
self._find_kr_corrections(mindistance, maxdistance, remove_distance,
usereads, regions, precorrect, logged)
return None
# limit corrections to only requested regions
filt = numpy.copy(self.filter)
for i in range(self.frags['regions'].shape[0]):
if i not in regions:
filt[self.frags['regions']['start_frag'][i]:self.frags['regions']['stop_frag'][i]] = 0
if not self.silent:
print >> sys.stderr, ("\r%s\rCopying needed data...") % (' ' * 80),
# copy and calculate needed arrays
data = None
trans_data = None
counts = None
trans_counts = None
distance_signal = None
trans_signal = None
corrections = numpy.copy(self.corrections)
if usereads in ['cis', 'all']:
data = self.data['cis_data'][...]
distances = (self.frags['fragments']['mid'][data[:, 1]] -
self.frags['fragments']['mid'][data[:, 0]]).astype(numpy.float32)
if maxdistance == 0 or maxdistance is None:
maxdistance = numpy.amax(distances) + 1
valid = numpy.where((filt[data[:, 0]] * filt[data[:, 1]]) *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
counts = numpy.log(data[:, 2]).astype(numpy.float64)
distances = distances[valid]
if remove_distance:
if self.gamma is None:
self.find_distance_parameters()
distance_signal = (-self.gamma * numpy.log(distances)).astype(numpy.float32)
distance_signal += self.region_means[self.frags['fragments']['region'][data[:, 0]]]
if usereads in ['trans', 'all']:
trans_data = self.data['trans_data'][...]
valid = numpy.where(filt[trans_data[:, 0]] * filt[trans_data[:, 1]])[0]
trans_data = trans_data[valid, :]
trans_counts = numpy.log(trans_data[:, 2]).astype(numpy.float64)
if remove_distance:
if self.trans_mean is None:
self.find_trans_mean()
trans_signal = numpy.zeros(trans_data.shape[0], dtype=numpy.float32) + self.trans_mean
if precorrect:
if not self.silent:
print >> sys.stderr, ("\r%s\rFinding binning corrections...") % (' ' * 80),
if usereads in ['cis', 'all']:
if distance_signal is None:
distance_signal = numpy.zeros(data.shape[0], dtype=numpy.float32)
_optimize.find_binning_correction_adjustment(distance_signal,
data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
if usereads in ['trans', 'all']:
if trans_signal is None:
trans_signal = numpy.zeros(trans_data.shape[0], dtype=numpy.float32)
_optimize.find_binning_correction_adjustment(trans_signal,
trans_data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
# create empty arrays
fragment_means = numpy.zeros(self.filter.shape[0], dtype=numpy.float64)
interactions = numpy.zeros(self.filter.shape[0], dtype=numpy.int32)
# find number of interactions for each fragment
for i in range(self.frags['regions'].shape[0]):
if not data is None:
interactions += (numpy.bincount(data[:, 0], minlength=interactions.shape[0]) +
numpy.bincount(data[:, 1], minlength=interactions.shape[0])).astype(numpy.int32)
if not trans_data is None:
interactions += (numpy.bincount(trans_data[:, 0], minlength=interactions.shape[0]) +
numpy.bincount(trans_data[:, 1], minlength=interactions.shape[0])).astype(numpy.int32)
# learn corrections
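        # Each pass presumably recomputes per-fragment means of the (log) residual
        # signal via the compiled _optimize helpers and updates 'corrections' to
        # balance them; this is the weighted matrix-balancing alternative to the
        # KR branch above.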
for iteration in range(iterations):
# update corrections
if logged:
cost = _optimize.find_log_fragment_means(distance_signal,
trans_signal,
interactions,
fragment_means,
data,
trans_data,
counts,
trans_counts,
corrections)
else:
cost = _optimize.find_fragment_means(distance_signal,
trans_signal,
interactions,
fragment_means,
data,
trans_data,
counts,
trans_counts,
corrections)
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections... iteration:%i cost:%f ") % (' ' * 80, iteration,
cost),
where = numpy.where(filt)[0]
self.corrections[where] = corrections[where]
# Calculate region means
if self.region_means is None:
self.region_means = numpy.zeros(self.frags['regions'].shape[0], dtype=numpy.float32)
for i in regions:
start = self.frags['regions']['start_frag'][i]
stop = self.frags['regions']['stop_frag'][i]
forward = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 0))[0] + start)
reverse = (numpy.where(self.filter[start:stop] *
(self.frags['fragments']['strand'][start:stop] == 1))[0] + start)
if forward.shape[0] == 0 or reverse.shape[0] == 0:
continue
region_mean = (numpy.sum(self.corrections[forward]) * reverse.shape[0] +
numpy.sum(self.corrections[reverse]) * forward.shape[0])
region_mean /= forward.shape[0] * reverse.shape[0]
self.corrections[forward] -= region_mean / 2.0
self.corrections[reverse] -= region_mean / 2.0
if remove_distance:
self.region_means[i] += region_mean
if not self.silent:
print >> sys.stderr, ("\r%s\rLearning corrections... Final Cost: %f Done\n") % (' ' * 80, cost),
if precorrect:
self.normalization = 'binning-express'
else:
self.normalization = 'express'
self.history += 'Success\n'
return None
def _find_kr_corrections(self, mindistance=0, maxdistance=0, remove_distance=True,
usereads='cis', regions=[], precorrect=False, logged=False):
if self.gamma is None:
self.find_distance_parameters()
all_regions = numpy.copy(regions)
filt = numpy.copy(self.filter)
if maxdistance == 0 or maxdistance is None:
maxdistance = 99999999999
if usereads != 'cis':
for i in range(self.frags['regions'].shape[0]):
if i not in regions:
filt[self.frags['regions']['start_frag'][i]:self.frags['regions']['stop_frag'][i]] = 0
regions = ['all']
for region in regions:
if region == 'all':
startfrag = 0
stopfrag = self.frags['chr_indices'][-1]
regfilt = filt
else:
startfrag = self.frags['regions']['start_frag'][region]
stopfrag = self.frags['regions']['stop_frag'][region]
regfilt = filt[startfrag:stopfrag]
# create needed arrays
if not self.silent:
print >> sys.stderr, ("\r%s\rLoading needed data...") % (' ' * 80),
mids = self.frags['fragments']['mid'][startfrag:stopfrag]
strands = self.frags['fragments']['strand'][startfrag:stopfrag]
if usereads in ['cis', 'all']:
start_index = self.data['cis_indices'][startfrag]
stop_index = self.data['cis_indices'][stopfrag]
data = self.data['cis_data'][start_index:stop_index, :]
distances = mids[data[:, 1] - startfrag] - mids[data[:, 0] - startfrag]
valid = numpy.where(regfilt[data[:, 0] - startfrag] * regfilt[data[:, 1] - startfrag] *
(distances >= mindistance) * (distances < maxdistance))[0]
data = data[valid, :]
else:
data = None
if usereads in ['trans', 'all']:
trans_data = self.data['trans_data'][...]
valid = numpy.where(filt[trans_data[:, 0]] * filt[trans_data[:, 1]])[0]
trans_data = trans_data[valid, :]
else:
trans_data = None
trans_means = None
# remapped data
rev_mapping = numpy.where(regfilt)[0]
mapping = numpy.zeros(regfilt.shape[0], dtype=numpy.int32) - 1
mapping[rev_mapping] = numpy.arange(rev_mapping.shape[0])
if not data is None:
data[:, 0] = mapping[data[:, 0] - startfrag]
data[:, 1] = mapping[data[:, 1] - startfrag]
if not trans_data is None:
trans_data[:, 0] = mapping[trans_data[:, 0]]
trans_data[:, 1] = mapping[trans_data[:, 1]]
mids = mids[rev_mapping]
strands = strands[rev_mapping]
if not self.silent:
print >> sys.stderr, ("\r%s\rChecking for fragment interaction count...") % (' ' * 80),
# precalculate interaction distance means for all included interactions
if not data is None:
counts = data[:, 2].astype(numpy.float64)
else:
counts = None
if not trans_data is None:
trans_counts = trans_data[:, 2].astype(numpy.float64)
else:
trans_counts = None
trans_means = None
distance_means = None
if remove_distance:
if not self.silent:
print >> sys.stderr, ("\r%s\rPrecalculating distances...") % (' ' * 80),
if usereads != 'cis':
trans_mean = numpy.sum(trans_counts).astype(numpy.float64)
ffrags = numpy.where(strands == 0)[0]
rfrags = numpy.where(strands == 1)[0]
interactions = ffrags.shape[0] * rfrags.shape[0]
all_ints = self.frags['fragments']['region'][rev_mapping]
fints = all_ints[ffrags]
rints = all_ints[rfrags]
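                # exclude same-region (cis) forward-reverse pairs from the
                # count of possible trans pairs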
interactions -= numpy.sum(
numpy.bincount(fints, minlength=self.frags['regions'].shape[0]) *
numpy.bincount(rints, minlength=self.frags['regions'].shape[0]))
trans_mean /= interactions
                trans_means = numpy.empty(trans_data.shape[0], dtype=numpy.float32)
                trans_means.fill(trans_mean)
if not data is None:
distance_means = numpy.zeros(data.shape[0], dtype=numpy.float32)
findices = numpy.r_[0, numpy.bincount(fints)]
rindices = numpy.r_[0, numpy.bincount(rints)]
for i in range(1, findices.shape[0]):
findices[i] += findices[i - 1]
rindices[i] += rindices[i - 1]
for i in range(findices.shape[0] - 1):
if findices[i] < findices[i + 1] and rindices[i] < rindices[i + 1]:
                            # distances are stored in log space and
                            # exponentiated below when applied to counts
                            distance_means[:] = (-self.gamma *
                                numpy.log(mids[data[:, 1]] - mids[data[:, 0]]) +
                                self.region_means[all_ints[data[:, 0]]])
else:
distance_means = numpy.zeros(data.shape[0], dtype=numpy.float32)
distance_means[:] = (-self.gamma * numpy.log(mids[data[:, 1]] - mids[data[:, 0]]) +
self.region_means[region])
if precorrect:
if not self.silent:
print >> sys.stderr, ("\r%s\rFinding binning corrections...") % (' ' * 80),
if not data is None:
if distance_means is None:
distance_means = numpy.ones(data.shape[0], dtype=numpy.float32)
_optimize.find_binning_correction_adjustment(distance_means,
data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
if not trans_data is None:
if trans_means is None:
trans_means = numpy.ones(trans_data.shape[0], dtype=numpy.float32)
_optimize.find_binning_correction_adjustment(trans_means,
trans_data,
self.binning_corrections,
self.binning_correction_indices,
self.binning_num_bins,
self.binning_frag_indices)
if not distance_means is None:
counts /= numpy.exp(distance_means)
if not trans_means is None:
trans_counts /= numpy.exp(trans_means)
if not self.silent:
print >> sys.stderr, ("\r%s\rFinding fend corrections...") % (' ' * 80),
        # add pseudo-count diagonal
if data is None:
data = numpy.zeros((rev_mapping.shape[0], 2), dtype=numpy.int32)
data[:, 0] = numpy.arange(rev_mapping.shape[0])
data[:, 1] = numpy.arange(rev_mapping.shape[0])
counts = numpy.ones(data.shape[0], dtype=numpy.float64) * 0.5
else:
temp = numpy.zeros((rev_mapping.shape[0], 3), dtype=numpy.int32)
temp[:, 0] = numpy.arange(rev_mapping.shape[0])
temp[:, 1] = numpy.arange(rev_mapping.shape[0])
data = numpy.vstack((data, temp))
            counts = numpy.hstack((counts, numpy.ones(temp.shape[0], dtype=numpy.float64) * 0.5))
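        # the 0.5-count diagonal keeps every row of the interaction matrix
        # non-empty, so the balancing iteration below always has support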
# calculate corrections
corrections = numpy.ones((rev_mapping.shape[0], 1), dtype=numpy.float64)
g = 0.9
eta = etamax = 0.1
tol = 1e-12
stop_tol = tol * 0.5
rt = tol ** 2.0
delta = 0.1
Delta = 3
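        # Knight-Ruiz balancing parameters: 'tol' sets the outer convergence
        # criterion on the residual, 'eta'/'etamax' control the inner conjugate
        # gradient tolerance, and 'delta'/'Delta' keep the iterates away from
        # the boundary of the positive cone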
v = numpy.zeros((corrections.shape[0], 1), dtype=numpy.float64)
w = numpy.zeros((corrections.shape[0], 1), dtype=numpy.float64)
_optimize.calculate_v(data, trans_data, counts, trans_counts, corrections, v)
rk = 1.0 - v
rho_km1 = numpy.dot(rk.T, rk)[0, 0]
rho_km2 = rho_km1
rold = rout = rho_km1
i = MVP = 0
while rout > rt:
i += 1
k = 0
y = numpy.ones((rev_mapping.shape[0], 1), dtype=numpy.float64)
innertol = max(eta ** 2.0 * rout, rt)
while rho_km1 > innertol:
k += 1
if k == 1:
Z = rk / v
p = numpy.copy(Z)
rho_km1 = numpy.dot(rk.T, Z)
else:
beta = rho_km1 / rho_km2
p = Z + beta * p
# Update search direction efficiently
w.fill(0.0)
_optimize.calculate_w(data, trans_data, counts, trans_counts, corrections, p, w)
w += v * p
alpha = rho_km1 / numpy.dot(p.T, w)[0, 0]
ap = alpha * p
# Test distance to boundary of cone
ynew = y + ap
if numpy.amin(ynew) <= delta:
if delta == 0:
break
ind = numpy.where(ap < 0.0)[0]
gamma = numpy.amin((delta - y[ind]) / ap[ind])
y += gamma * ap
break
if numpy.amax(ynew) >= Delta:
ind = numpy.where(ynew > Delta)[0]
gamma = numpy.amin((Delta - y[ind]) / ap[ind])
y += gamma * ap
break
                y = numpy.copy(ynew)
"""
# Event source for MAGIC calibrated data files.
# Requires uproot package (https://github.com/scikit-hep/uproot).
"""
import re
import uproot
import logging
import scipy.interpolate
import numpy as np
from decimal import Decimal
from enum import Enum, auto
from astropy.coordinates import Angle
from astropy import units as u
from astropy.time import Time
from ctapipe.io.eventsource import EventSource
from ctapipe.io.datalevels import DataLevel
from ctapipe.core import Container, Field
from ctapipe.core.traits import Bool
from ctapipe.coordinates import CameraFrame
from ctapipe.containers import (
ArrayEventContainer,
SimulatedEventContainer,
SimulatedShowerContainer,
SimulationConfigContainer,
PointingContainer,
TelescopePointingContainer,
TelescopeTriggerContainer,
MonitoringCameraContainer,
    PedestalContainer,
    PixelStatusContainer,
)
from ctapipe.instrument import (
TelescopeDescription,
SubarrayDescription,
OpticsDescription,
CameraDescription,
CameraReadout,
)
from .version import __version__
from .constants import (
MC_STEREO_TRIGGER_PATTERN,
PEDESTAL_TRIGGER_PATTERN,
DATA_STEREO_TRIGGER_PATTERN
)
__all__ = ['MAGICEventSource', '__version__']
LOGGER = logging.getLogger(__name__)
degrees_per_hour = 15.0
seconds_per_hour = 3600.
msec2sec = 1e-3
nsec2sec = 1e-9
# MAGIC telescope positions in m wrt. to the center of CTA simulations
# MAGIC_TEL_POSITIONS = {
# 1: [-27.24, -146.66, 50.00] * u.m,
# 2: [-96.44, -96.77, 51.00] * u.m
# }
# MAGIC telescope positions in m wrt. to the center of MAGIC simulations, from
# CORSIKA and reflector input card
MAGIC_TEL_POSITIONS = {
1: [31.80, -28.10, 0.00] * u.m,
2: [-31.80, 28.10, 0.00] * u.m
}
# Magnetic field values at the MAGIC site (taken from CORSIKA input cards)
# Reference system is the CORSIKA one, where x-axis points to magnetic north
# i.e. B y-component is 0
# MAGIC_Bdec is the magnetic declination i.e. angle between magnetic and
# geographic north, negative if pointing westwards, positive if pointing
# eastwards
# MAGIC_Binc is the magnetic field inclination
MAGIC_Bx = u.Quantity(29.5, u.uT)
MAGIC_Bz = u.Quantity(23.0, u.uT)
MAGIC_Btot = np.sqrt(MAGIC_Bx**2+MAGIC_Bz**2)
MAGIC_Bdec = u.Quantity(-7.0, u.deg).to(u.rad)
MAGIC_Binc = u.Quantity(np.arctan2(-MAGIC_Bz.value, MAGIC_Bx.value), u.rad)
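# For these values, MAGIC_Btot = sqrt(29.5**2 + 23.0**2) ~= 37.4 uT and
# MAGIC_Binc = arctan2(-23.0, 29.5) ~= -0.662 rad (about -37.9 deg)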
# MAGIC telescope description
OPTICS = OpticsDescription.from_name('MAGIC')
MAGICCAM = CameraDescription.from_name("MAGICCam")
pulse_shape_lo_gain = np.array([0., 1., 2., 1., 0.])
pulse_shape_hi_gain = np.array([1., 2., 3., 2., 1.])
pulse_shape = np.vstack((pulse_shape_lo_gain, pulse_shape_hi_gain))
MAGICCAM.readout = CameraReadout(
camera_name='MAGICCam',
sampling_rate=u.Quantity(1.64, u.GHz),
reference_pulse_shape=pulse_shape,
reference_pulse_sample_width=u.Quantity(0.5, u.ns)
)
MAGICCAM.geometry.frame = CameraFrame(focal_length=OPTICS.equivalent_focal_length)
GEOM = MAGICCAM.geometry
MAGIC_TEL_DESCRIPTION = TelescopeDescription(
name='MAGIC', tel_type='MAGIC', optics=OPTICS, camera=MAGICCAM)
MAGIC_TEL_DESCRIPTIONS = {1: MAGIC_TEL_DESCRIPTION, 2: MAGIC_TEL_DESCRIPTION}
class MARSDataLevel(Enum):
"""
Enum of the different MARS Data Levels
"""
CALIBRATED = auto() # Calibrated images in charge and time (no waveforms)
STAR = auto() # Cleaned images, with Hillas parametrization
SUPERSTAR = auto() # Stereo parameters reconstructed
MELIBEA = auto() # Reconstruction of hadronness, event direction and energy
class MissingDriveReportError(Exception):
"""
Exception raised when a subrun does not have drive reports.
"""
def __init__(self, message):
self.message = message
class MAGICEventSource(EventSource):
"""
EventSource for MAGIC calibrated data.
This class operates with the MAGIC data subrun-wise for calibrated data.
Attributes
----------
current_run : MarsCalibratedRun
Object containing the info needed to fill the ctapipe Containers
datalevel : DataLevel
Data level according to the definition in ctapipe
file_ : uproot.ReadOnlyFile
A ROOT file opened with uproot
is_mc : bool
Flag indicating real or simulated data
mars_datalevel : int
Data level according to MARS convention
metadata : dict
Dictionary containing metadata
run_numbers : int
Run number of the file
simulation_config : SimulationConfigContainer
Container filled with the information about the simulation
telescope : int
The number of the telescope
use_pedestals : bool
Flag indicating if pedestal events should be returned by the generator
"""
use_pedestals = Bool(
default_value=False,
help=(
        'If true, extract pedestal events instead of cosmic events.'
),
    ).tag(config=True)
def __init__(self, input_url=None, config=None, parent=None, **kwargs):
"""
Constructor
Parameters
----------
config: traitlets.loader.Config
Configuration specified by config file or cmdline arguments.
Used to set traitlet values.
Set to None if no configuration to pass.
parent : ctapipe.core.Tool
Tool executable that is calling this component.
Passes the correct logger to the component.
Set to None if no Tool to pass.
kwargs: dict
Additional parameters to be passed.
NOTE: The file mask of the data to read can be passed with
the 'input_url' parameter.
"""
super().__init__(input_url=input_url, config=config, parent=parent, **kwargs)
# Retrieving the list of run numbers corresponding to the data files
self.file_ = uproot.open(self.input_url.expanduser())
run_info = self.parse_run_info()
self.run_numbers = run_info[0]
self.is_mc = run_info[1]
self.telescope = run_info[2]
self.mars_datalevel = run_info[3]
self.metadata = self.parse_metadata_info()
# Retrieving the data level (so far HARDCODED Sorcerer)
self.datalevel = DataLevel.DL0
if self.is_mc:
self.simulation_config = self.parse_simulation_header()
if not self.is_mc:
self.is_stereo, self.is_sumt = self.parse_data_info()
# # Setting up the current run with the first run present in the data
# self.current_run = self._set_active_run(run_number=0)
self.current_run = None
self._subarray_info = SubarrayDescription(
name='MAGIC',
tel_positions=MAGIC_TEL_POSITIONS,
tel_descriptions=MAGIC_TEL_DESCRIPTIONS
)
if self.allowed_tels:
self._subarray_info = self._subarray_info.select_subarray(self.allowed_tels)
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Releases resources (e.g. open files).
Parameters
----------
exc_type : Exception
Class of the exception
exc_val : BaseException
Type of the exception
exc_tb : TracebackType
The traceback
"""
self.close()
def close(self):
"""
Closes open ROOT file.
"""
self.file_.close()
@staticmethod
def is_compatible(file_path):
"""
This method checks if the specified file mask corresponds
to MAGIC data files. The result will be True only if all
the files are of ROOT format and contain an 'Events' tree.
Parameters
----------
file_path: str
Path to file
Returns
-------
bool:
True if the masked files are MAGIC data runs, False otherwise.
"""
is_magic_root_file = True
try:
with uproot.open(file_path) as input_data:
mandatory_trees = ['Events', 'RunHeaders', 'RunTails']
trees_in_file = [tree in input_data for tree in mandatory_trees]
if not all(trees_in_file):
is_magic_root_file = False
except ValueError:
# uproot raises ValueError if the file is not a ROOT file
is_magic_root_file = False
return is_magic_root_file
@staticmethod
def get_run_info_from_name(file_name):
"""
This internal method extracts the run number and
type (data/MC) from the specified file name.
Parameters
----------
file_name : str
A file name to process.
Returns
-------
run_number: int
The run number of the file.
is_mc: Bool
Flag to tag MC files
telescope: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
Raises
------
IndexError
            If the run number and type (data/MC) cannot be identified from the file name.
"""
mask_data_calibrated = r"\d{6}_M(\d+)_(\d+)\.\d+_Y_.*"
mask_data_star = r"\d{6}_M(\d+)_(\d+)\.\d+_I_.*"
mask_data_superstar = r"\d{6}_(\d+)_S_.*"
mask_data_melibea = r"\d{6}_(\d+)_Q_.*"
mask_mc_calibrated = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_Y_.*"
mask_mc_star = r"GA_M(\d)_za\d+to\d+_\d_(\d+)_I_.*"
mask_mc_superstar = r"GA_za\d+to\d+_\d_S_.*"
mask_mc_melibea = r"GA_za\d+to\d+_\d_Q_.*"
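        # Illustrative (hypothetical) file names matching these masks:
        #   20210314_M1_05095172.001_Y_CrabNebula-W0.40+035.root -> mask_data_calibrated
        #   GA_M2_za05to35_8_1234567_Y_w0.root -> mask_mc_calibrated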
if re.findall(mask_data_calibrated, file_name):
parsed_info = re.findall(mask_data_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = False
elif re.findall(mask_data_star, file_name):
parsed_info = re.findall(mask_data_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = False
elif re.findall(mask_data_superstar, file_name):
parsed_info = re.findall(mask_data_superstar, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.SUPERSTAR
is_mc = False
elif re.findall(mask_data_melibea, file_name):
parsed_info = re.findall(mask_data_melibea, file_name)
telescope = None
run_number = int(parsed_info[0])
datalevel = MARSDataLevel.MELIBEA
is_mc = False
elif re.findall(mask_mc_calibrated, file_name):
parsed_info = re.findall(mask_mc_calibrated, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.CALIBRATED
is_mc = True
elif re.findall(mask_mc_star, file_name):
parsed_info = re.findall(mask_mc_star, file_name)
telescope = int(parsed_info[0][0])
run_number = int(parsed_info[0][1])
datalevel = MARSDataLevel.STAR
is_mc = True
elif re.findall(mask_mc_superstar, file_name):
parsed_info = re.findall(mask_mc_superstar, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.SUPERSTAR
is_mc = True
elif re.findall(mask_mc_melibea, file_name):
parsed_info = re.findall(mask_mc_melibea, file_name)
telescope = None
run_number = None
datalevel = MARSDataLevel.MELIBEA
is_mc = True
else:
raise IndexError(
                'Cannot identify the run number and type (data/MC) of the file '
                '{:s}'.format(file_name))
return run_number, is_mc, telescope, datalevel
def parse_run_info(self):
"""
Parses run info from the TTrees in the ROOT file
Returns
-------
run_number: int
The run number of the file
is_mc: Bool
Flag to tag MC files
telescope_number: int
Number of the telescope
datalevel: MARSDataLevel
Data level according to MARS
"""
runinfo_array_list = [
'MRawRunHeader.fRunNumber',
'MRawRunHeader.fRunType',
'MRawRunHeader.fTelescopeNumber',
]
run_info = self.file_['RunHeaders'].arrays(
runinfo_array_list, library="np")
run_number = int(run_info['MRawRunHeader.fRunNumber'][0])
run_type = int(run_info['MRawRunHeader.fRunType'][0])
telescope_number = int(run_info['MRawRunHeader.fTelescopeNumber'][0])
# a note about run numbers:
# mono data has run numbers starting with 1 or 2 (telescope dependent)
# stereo data has run numbers starting with 5
# if both telescopes are taking data with no L3,
# also in this case run number starts with 5 (e.g. muon runs)
# Here the data types (from MRawRunHeader.h)
# std data = 0
# pedestal = 1 (_P_)
# calibration = 2 (_C_)
# domino calibration = 3 (_L_)
# linearity calibration = 4 (_N_)
# point run = 7
# monteCarlo = 256
# none = 65535
mc_data_type = 256
if run_type == mc_data_type:
is_mc = True
else:
is_mc = False
events_tree = self.file_['Events']
melibea_trees = ['MHadronness', 'MStereoParDisp', 'MEnergyEst']
superstar_trees = ['MHillas_1', 'MHillas_2', 'MStereoPar']
star_trees = ['MHillas']
datalevel = MARSDataLevel.CALIBRATED
events_keys = events_tree.keys()
trees_in_file = [tree in events_keys for tree in melibea_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.MELIBEA
trees_in_file = [tree in events_keys for tree in superstar_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.SUPERSTAR
trees_in_file = [tree in events_keys for tree in star_trees]
if all(trees_in_file):
datalevel = MARSDataLevel.STAR
return run_number, is_mc, telescope_number, datalevel
def parse_data_info(self):
"""
Check if data is stereo/mono and std trigger/SUMT
Returns
-------
is_stereo: Bool
True if stereo data, False if mono
is_sumt: Bool
True if SUMT data, False if std trigger
"""
prescaler_mono_nosumt = [1, 1, 0, 1, 0, 0, 0, 0]
prescaler_mono_sumt = [0, 1, 0, 1, 0, 1, 0, 0]
prescaler_stereo = [0, 1, 0, 1, 0, 0, 0, 1]
# L1_table_mono = "L1_4NN"
# L1_table_stereo = "L1_3NN"
L3_table_nosumt = "L3T_L1L1_100_SYNC"
L3_table_sumt = "L3T_SUMSUM_100_SYNC"
trigger_tree = self.file_["Trigger"]
L3T_tree = self.file_["L3T"]
# here we take the 2nd element (if possible) because sometimes
# the first trigger report has still the old prescaler values from a previous run
try:
prescaler_array = trigger_tree["MTriggerPrescFact.fPrescFact"].array(library="np")
except AssertionError:
LOGGER.warning("No prescaler info found. Will assume standard stereo data.")
is_stereo = True
is_sumt = False
return is_stereo, is_sumt
prescaler_size = prescaler_array.size
if prescaler_size > 1:
prescaler = prescaler_array[1]
else:
prescaler = prescaler_array[0]
        # the prescaler may come back as a numpy array, so compare element-wise
        if np.array_equal(prescaler, prescaler_mono_nosumt) or np.array_equal(prescaler, prescaler_mono_sumt):
            is_stereo = False
        elif np.array_equal(prescaler, prescaler_stereo):
            is_stereo = True
        else:
            is_stereo = True
is_sumt = False
if is_stereo:
# here we take the 2nd element for the same reason as above
# L3Table is empty for mono data i.e. taken with one telescope only
# if both telescopes take data with no L3, L3Table is filled anyway
L3Table_array = L3T_tree["MReportL3T.fTablename"].array(library="np")
L3Table_size = L3Table_array.size
if L3Table_size > 1:
L3Table = L3Table_array[1]
else:
L3Table = L3Table_array[0]
if L3Table == L3_table_sumt:
is_sumt = True
elif L3Table == L3_table_nosumt:
is_sumt = False
else:
is_sumt = False
else:
            if np.array_equal(prescaler, prescaler_mono_sumt):
is_sumt = True
return is_stereo, is_sumt
@staticmethod
def decode_version_number(version_encoded):
"""
Decodes the version number from an integer
Parameters
----------
version_encoded : int
Version number encoded as integer
Returns
-------
version_decoded: str
Version decoded as major.minor.patch
"""
major_version = version_encoded >> 16
minor_version = (version_encoded % 65536) >> 8
patch_version = (version_encoded % 65536) % 256
version_decoded = f'{major_version}.{minor_version}.{patch_version}'
return version_decoded
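        # Worked example with a hypothetical code: 0x020d05 >> 16 == 2,
        # (0x020d05 % 65536) >> 8 == 13 and (0x020d05 % 65536) % 256 == 5,
        # so decode_version_number(0x020d05) returns '2.13.5'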
def parse_metadata_info(self):
"""
Parse metadata information from ROOT file
Returns
-------
metadata: dict
Dictionary containing the metadata information:
- run number
- real or simulated data
- telescope number
- subrun number
- source RA and DEC
- source name (real data only)
- observation mode (real data only)
- MARS version
- ROOT version
"""
metadatainfo_array_list_runheaders = [
'MRawRunHeader.fSubRunIndex',
'MRawRunHeader.fSourceRA',
'MRawRunHeader.fSourceDEC',
'MRawRunHeader.fSourceName[80]',
'MRawRunHeader.fObservationMode[60]',
]
metadatainfo_array_list_runtails = [
'MMarsVersion_sorcerer.fMARSVersionCode',
'MMarsVersion_sorcerer.fROOTVersionCode',
]
metadata = dict()
metadata['run_number'] = self.run_numbers
metadata['is_simulation'] = self.is_mc
metadata['telescope'] = self.telescope
meta_info_runh = self.file_['RunHeaders'].arrays(
metadatainfo_array_list_runheaders, library="np"
)
metadata['subrun_number'] = int(meta_info_runh['MRawRunHeader.fSubRunIndex'][0])
metadata['source_ra'] = meta_info_runh['MRawRunHeader.fSourceRA'][0] / \
seconds_per_hour * degrees_per_hour * u.deg
metadata['source_dec'] = meta_info_runh['MRawRunHeader.fSourceDEC'][0] / \
seconds_per_hour * u.deg
if not self.is_mc:
src_name_array = meta_info_runh['MRawRunHeader.fSourceName[80]'][0]
metadata['source_name'] = "".join([chr(item) for item in src_name_array if item != 0])
obs_mode_array = meta_info_runh['MRawRunHeader.fObservationMode[60]'][0]
metadata['observation_mode'] = "".join([chr(item) for item in obs_mode_array if item != 0])
meta_info_runt = self.file_['RunTails'].arrays(
metadatainfo_array_list_runtails, library="np"
)
mars_version_encoded = int(meta_info_runt['MMarsVersion_sorcerer.fMARSVersionCode'][0])
root_version_encoded = int(meta_info_runt['MMarsVersion_sorcerer.fROOTVersionCode'][0])
metadata['mars_version_sorcerer'] = self.decode_version_number(mars_version_encoded)
metadata['root_version_sorcerer'] = self.decode_version_number(root_version_encoded)
return metadata
def parse_simulation_header(self):
"""
Parse the simulation information from the RunHeaders tree.
Returns
-------
SimulationConfigContainer
Container filled with simulation information
Notes
-----
Information is extracted from the RunHeaders tree within the ROOT file.
Within it, the MMcCorsikaRunHeader and MMcRunHeader branches are used.
Here below the units of the members extracted, for reference:
* fSlopeSpec: float
* fELowLim, fEUppLim: GeV
* fCorsikaVersion: int
* fHeightLev[10]: centimeter
* fAtmosphericModel: int
* fRandomPointingConeSemiAngle: deg
* fImpactMax: centimeter
* fNumSimulatedShowers: int
* fShowerThetaMax, fShowerThetaMin: deg
* fShowerPhiMax, fShowerPhiMin: deg
* fCWaveUpper, fCWaveLower: nanometer
"""
run_header_tree = self.file_['RunHeaders']
spectral_index = run_header_tree['MMcCorsikaRunHeader.fSlopeSpec'].array(library="np")[0]
e_low = run_header_tree['MMcCorsikaRunHeader.fELowLim'].array(library="np")[0]
e_high = run_header_tree['MMcCorsikaRunHeader.fEUppLim'].array(library="np")[0]
corsika_version = run_header_tree['MMcCorsikaRunHeader.fCorsikaVersion'].array(library="np")[0]
site_height = run_header_tree['MMcCorsikaRunHeader.fHeightLev[10]'].array(library="np")[0][0]
atm_model = run_header_tree['MMcCorsikaRunHeader.fAtmosphericModel'].array(library="np")[0]
if self.mars_datalevel in [MARSDataLevel.CALIBRATED, MARSDataLevel.STAR]:
view_cone = run_header_tree['MMcRunHeader.fRandomPointingConeSemiAngle'].array(library="np")[0]
max_impact = run_header_tree['MMcRunHeader.fImpactMax'].array(library="np")[0]
n_showers = np.sum(run_header_tree['MMcRunHeader.fNumSimulatedShowers'].array(library="np"))
max_zd = run_header_tree['MMcRunHeader.fShowerThetaMax'].array(library="np")[0]
min_zd = run_header_tree['MMcRunHeader.fShowerThetaMin'].array(library="np")[0]
max_az = run_header_tree['MMcRunHeader.fShowerPhiMax'].array(library="np")[0]
min_az = run_header_tree['MMcRunHeader.fShowerPhiMin'].array(library="np")[0]
max_wavelength = run_header_tree['MMcRunHeader.fCWaveUpper'].array(library="np")[0]
min_wavelength = run_header_tree['MMcRunHeader.fCWaveLower'].array(library="np")[0]
elif self.mars_datalevel in [MARSDataLevel.SUPERSTAR, MARSDataLevel.MELIBEA]:
view_cone = run_header_tree['MMcRunHeader_1.fRandomPointingConeSemiAngle'].array(library="np")[0]
max_impact = run_header_tree['MMcRunHeader_1.fImpactMax'].array(library="np")[0]
n_showers = np.sum(run_header_tree['MMcRunHeader_1.fNumSimulatedShowers'].array(library="np"))
max_zd = run_header_tree['MMcRunHeader_1.fShowerThetaMax'].array(library="np")[0]
min_zd = run_header_tree['MMcRunHeader_1.fShowerThetaMin'].array(library="np")[0]
max_az = run_header_tree['MMcRunHeader_1.fShowerPhiMax'].array(library="np")[0]
min_az = run_header_tree['MMcRunHeader_1.fShowerPhiMin'].array(library="np")[0]
max_wavelength = run_header_tree['MMcRunHeader_1.fCWaveUpper'].array(library="np")[0]
min_wavelength = run_header_tree['MMcRunHeader_1.fCWaveLower'].array(library="np")[0]
return SimulationConfigContainer(
corsika_version=corsika_version,
energy_range_min=u.Quantity(e_low, u.GeV).to(u.TeV),
energy_range_max=u.Quantity(e_high, u.GeV).to(u.TeV),
prod_site_alt=u.Quantity(site_height, u.cm).to(u.m),
spectral_index=spectral_index,
num_showers=n_showers,
shower_reuse=1,
# shower_reuse not written in the magic root file, but since the
# sim_events already include shower reuse we artificially set it
# to 1 (actually every shower reused 5 times for std MAGIC MC)
shower_prog_id=1,
prod_site_B_total=MAGIC_Btot,
prod_site_B_declination=MAGIC_Bdec,
prod_site_B_inclination=MAGIC_Binc,
max_alt=u.Quantity((90. - min_zd), u.deg).to(u.rad),
min_alt=u.Quantity((90. - max_zd), u.deg).to(u.rad),
max_az=u.Quantity(max_az, u.deg).to(u.rad),
min_az=u.Quantity(min_az, u.deg).to(u.rad),
max_viewcone_radius=view_cone * u.deg,
min_viewcone_radius=0.0 * u.deg,
max_scatter_range=u.Quantity(max_impact, u.cm).to(u.m),
min_scatter_range=0.0 * u.m,
atmosphere=atm_model,
corsika_wlen_min=min_wavelength * u.nm,
corsika_wlen_max=max_wavelength * u.nm,
)
def _set_active_run(self, run_number):
"""
This internal method sets the run that will be used for data loading.
Parameters
----------
run_number: int
The run number to use.
Returns
-------
        run: dict
            A dictionary holding the run number, the count of events read so far
            and, for calibrated data, a MarsCalibratedRun object under the 'data' key.
"""
run = dict()
run['number'] = run_number
run['read_events'] = 0
if self.mars_datalevel == MARSDataLevel.CALIBRATED:
run['data'] = MarsCalibratedRun(self.file_, self.is_mc)
return run
@property
def subarray(self):
return self._subarray_info
@property
def is_simulation(self):
return self.is_mc
@property
def datalevels(self):
return (self.datalevel, )
@property
def obs_ids(self):
# ToCheck: will this be compatible in the future, e.g. with merged MC files
return [self.run_numbers]
def _generator(self):
"""
The default event generator. Return the stereo event
generator instance.
Returns
-------
"""
if self.mars_datalevel == MARSDataLevel.CALIBRATED:
if self.use_pedestals:
return self._pedestal_event_generator(telescope=f"M{self.telescope}")
else:
return self._mono_event_generator(telescope=f"M{self.telescope}")
def _stereo_event_generator(self):
"""
Stereo event generator. Yields DataContainer instances, filled
with the read event data.
Returns
-------
"""
counter = 0
# Data container - is initialized once, and data is replaced within it after each yield
data = ArrayEventContainer()
# Telescopes with data:
tels_in_file = ["m1", "m2"]
tels_with_data = [1, 2]
# Loop over the available data runs
for run_number in self.run_numbers:
# Removing the previously read data run from memory
if self.current_run is not None:
if 'data' in self.current_run:
del self.current_run['data']
# Setting the new active run (class MarsRun object)
self.current_run = self._set_active_run(run_number)
# Set monitoring data:
if not self.is_mc:
monitoring_data = self.current_run['data'].monitoring_data
for tel_i, tel_id in enumerate(tels_in_file):
monitoring_camera = MonitoringCameraContainer()
pedestal_info = PedestalContainer()
badpixel_info = PixelStatusContainer()
pedestal_info.sample_time = Time(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalUnix'], format='unix', scale='utc'
)
# hardcoded number of pedestal events averaged over:
pedestal_info.n_events = 500
pedestal_info.charge_mean = []
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Mean'])
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Mean'])
pedestal_info.charge_mean.append(monitoring_data['M{:d}'.format(
tel_i + 1)]['PedestalFromExtractorRndm']['Mean'])
pedestal_info.charge_std = []
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractorRndm']['Rms'])
t_range = Time(monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfoUnixRange'], format='unix', scale='utc')
badpixel_info.hardware_failing_pixels = monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfo']
badpixel_info.sample_time_range = t_range
monitoring_camera.pedestal = pedestal_info
monitoring_camera.pixel_status = badpixel_info
data.mon.tels_with_data = [1, 2]
data.mon.tel[tel_i + 1] = monitoring_camera
else:
assert self.current_run['data'].mcheader_data['M1'] == self.current_run['data'].mcheader_data['M2'], "Simulation configurations are different for M1 and M2 !!!"
data.mcheader.num_showers = self.current_run['data'].mcheader_data['M1']['sim_nevents']
data.mcheader.shower_reuse = self.current_run['data'].mcheader_data['M1']['sim_reuse']
data.mcheader.energy_range_min = (self.current_run['data'].mcheader_data['M1']['sim_emin']).to(u.TeV) # GeV->TeV
data.mcheader.energy_range_max = (self.current_run['data'].mcheader_data['M1']['sim_emax']).to(u.TeV) # GeV->TeV
data.mcheader.spectral_index = self.current_run['data'].mcheader_data['M1']['sim_eslope']
data.mcheader.max_scatter_range = (self.current_run['data'].mcheader_data['M1']['sim_max_impact']).to(u.m) # cm->m
data.mcheader.max_viewcone_radius = (self.current_run['data'].mcheader_data['M1']['sim_conesemiangle']).to(u.deg)# deg->deg
if data.mcheader.max_viewcone_radius != 0.:
data.mcheader.diffuse = True
else:
data.mcheader.diffuse = False
# Loop over the events
for event_i in range(self.current_run['data'].n_stereo_events):
# Event and run ids
event_order_number = self.current_run['data'].stereo_ids[event_i][0]
event_id = self.current_run['data'].event_data['M1']['stereo_event_number'][event_order_number]
obs_id = self.current_run['number']
# Reading event data
event_data = self.current_run['data'].get_stereo_event_data(event_i)
data.meta['origin'] = 'MAGIC'
data.meta['input_url'] = self.input_url
data.meta['max_events'] = self.max_events
# Event counter
data.count = counter
data.index.obs_id = obs_id
data.index.event_id = event_id
# Setting up the R0 container
data.r0.tel.clear()
# Setting up the R1 container
data.r1.tel.clear()
# Setting up the DL0 container
data.dl0.tel.clear()
# Setting up the DL1 container
data.dl1.tel.clear()
pointing = PointingContainer()
# Filling the DL1 container with the event data
for tel_i, tel_id in enumerate(tels_in_file):
# Creating the telescope pointing container
pointing_tel = TelescopePointingContainer()
pointing_tel.azimuth = np.deg2rad(
event_data['{:s}_pointing_az'.format(tel_id)]) * u.rad
pointing_tel.altitude = np.deg2rad(
90 - event_data['{:s}_pointing_zd'.format(tel_id)]) * u.rad
# pointing.ra = np.deg2rad(
# event_data['{:s}_pointing_ra'.format(tel_id)]) * u.rad
# pointing.dec = np.deg2rad(
# event_data['{:s}_pointing_dec'.format(tel_id)]) * u.rad
pointing.tel[tel_i + 1] = pointing_tel
# Adding trigger id (MAGIC nomenclature)
data.r0.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
data.r1.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
data.dl0.tel[tel_i + 1].trigger_type = self.current_run['data'].event_data['M1']['trigger_pattern'][event_order_number]
# Adding event charge and peak positions per pixel
data.dl1.tel[tel_i +
1].image = event_data['{:s}_image'.format(tel_id)]
data.dl1.tel[tel_i +
1].peak_time = event_data['{:s}_pulse_time'.format(tel_id)]
pointing.array_azimuth = np.deg2rad(event_data['m1_pointing_az']) * u.rad
pointing.array_altitude = np.deg2rad(90 - event_data['m1_pointing_zd']) * u.rad
pointing.array_ra = np.deg2rad(event_data['m1_pointing_ra']) * u.rad
pointing.array_dec = np.deg2rad(event_data['m1_pointing_dec']) * u.rad
data.pointing = pointing
if not self.is_mc:
for tel_i, tel_id in enumerate(tels_in_file):
data.trigger.tel[tel_i + 1] = TelescopeTriggerContainer(
time=Time(event_data[f'{tel_id}_unix'], format='unix', scale='utc')
)
else:
data.mc.energy = event_data['true_energy'] * u.GeV
data.mc.alt = (np.pi/2 - event_data['true_zd']) * u.rad
# check meaning of 7deg transformation (I.Vovk)
data.mc.az = -1 * \
(event_data['true_az'] - np.deg2rad(180 - 7)) * u.rad
data.mc.shower_primary_id = 1 - \
event_data['true_shower_primary_id']
data.mc.h_first_int = event_data['true_h_first_int'] * u.cm
# adding a 7deg rotation between the orientation of corsika (x axis = magnetic north) and MARS (x axis = geographical north) frames
# magnetic north is 7 deg westward w.r.t. geographical north
                rot_corsika = 7 * u.deg
                data.mc.core_x = (event_data['true_core_x'] * np.cos(rot_corsika) -
                                  event_data['true_core_y'] * np.sin(rot_corsika)) * u.cm
                data.mc.core_y = (event_data['true_core_x'] * np.sin(rot_corsika) +
                                  event_data['true_core_y'] * np.cos(rot_corsika)) * u.cm
# Setting the telescopes with data
data.r0.tels_with_data = tels_with_data
data.r1.tels_with_data = tels_with_data
data.dl0.tels_with_data = tels_with_data
data.trigger.tels_with_trigger = tels_with_data
yield data
counter += 1
return
def _mono_event_generator(self, telescope):
"""
Mono event generator. Yields DataContainer instances, filled
with the read event data.
Parameters
----------
telescope: str
The telescope for which to return events. Can be either "M1" or "M2".
Returns
-------
"""
counter = 0
telescope = telescope.upper()
# Data container - is initialized once, and data is replaced after each yield
data = ArrayEventContainer()
# Telescopes with data:
tels_in_file = ["M1", "M2"]
if telescope not in tels_in_file:
raise ValueError(f"Specified telescope {telescope} is not in the allowed list {tels_in_file}")
tel_i = tels_in_file.index(telescope)
tels_with_data = [tel_i + 1, ]
# Removing the previously read data run from memory
if self.current_run is not None:
if 'data' in self.current_run:
del self.current_run['data']
# Setting the new active run
self.current_run = self._set_active_run(self.run_numbers)
# Set monitoring data:
if not self.is_mc:
monitoring_data = self.current_run['data'].monitoring_data
monitoring_camera = MonitoringCameraContainer()
pedestal_info = PedestalContainer()
badpixel_info = PixelStatusContainer()
pedestal_info.sample_time = Time(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalUnix'], format='unix', scale='utc'
)
pedestal_info.n_events = 500 # hardcoded number of pedestal events averaged over
pedestal_info.charge_mean = []
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Mean'])
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Mean'])
pedestal_info.charge_mean.append(monitoring_data['M{:d}'.format(
tel_i + 1)]['PedestalFromExtractorRndm']['Mean'])
pedestal_info.charge_std = []
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractorRndm']['Rms'])
t_range = Time(monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfoUnixRange'], format='unix', scale='utc')
badpixel_info.hardware_failing_pixels = monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfo']
badpixel_info.sample_time_range = t_range
monitoring_camera.pedestal = pedestal_info
monitoring_camera.pixel_status = badpixel_info
data.mon.tel[tel_i + 1] = monitoring_camera
if telescope == 'M1':
n_events = self.current_run['data'].n_mono_events_m1
else:
n_events = self.current_run['data'].n_mono_events_m2
# Loop over the events
for event_i in range(n_events):
# Event and run ids
event_order_number = self.current_run['data'].mono_ids[telescope][event_i]
event_id = self.current_run['data'].event_data[telescope]['stereo_event_number'][event_order_number]
obs_id = self.current_run['number']
# Reading event data
event_data = self.current_run['data'].get_mono_event_data(event_i, telescope=telescope)
data.meta['origin'] = 'MAGIC'
data.meta['input_url'] = self.input_url
data.meta['max_events'] = self.max_events
data.trigger.event_type = self.current_run['data'].event_data[telescope]['trigger_pattern'][event_order_number]
data.trigger.tels_with_trigger = tels_with_data
if self.allowed_tels:
data.trigger.tels_with_trigger = np.intersect1d(
data.trigger.tels_with_trigger,
self.subarray.tel_ids,
assume_unique=True
)
if not self.is_mc:
data.trigger.tel[tel_i + 1] = TelescopeTriggerContainer(
time=Time(event_data['unix'], format='unix', scale='utc')
)
# Event counter
data.count = counter
data.index.obs_id = obs_id
data.index.event_id = event_id
# Setting up the R0 container
data.r0.tel.clear()
data.r1.tel.clear()
data.dl0.tel.clear()
data.dl1.tel.clear()
data.pointing.tel.clear()
# Creating the telescope pointing container
pointing = PointingContainer()
pointing_tel = TelescopePointingContainer(
azimuth=np.deg2rad(event_data['pointing_az']) * u.rad,
altitude=np.deg2rad(90 - event_data['pointing_zd']) * u.rad,)
pointing.tel[tel_i + 1] = pointing_tel
pointing.array_azimuth = np.deg2rad(event_data['pointing_az']) * u.rad
pointing.array_altitude = np.deg2rad(90 - event_data['pointing_zd']) * u.rad
pointing.array_ra = np.deg2rad(event_data['pointing_ra']) * u.rad
pointing.array_dec = np.deg2rad(event_data['pointing_dec']) * u.rad
data.pointing = pointing
# Adding event charge and peak positions per pixel
data.dl1.tel[tel_i + 1].image = event_data['image']
data.dl1.tel[tel_i + 1].peak_time = event_data['pulse_time']
if self.is_mc:
# check meaning of 7deg transformation (I.Vovk)
# adding a 7deg rotation between the orientation of corsika (x axis = magnetic north) and MARS (x axis = geographical north) frames
# magnetic north is 7 deg westward w.r.t. geographical north
data.simulation = SimulatedEventContainer()
data.simulation.shower = SimulatedShowerContainer(
energy=u.Quantity(event_data['true_energy'], u.GeV),
alt=Angle((np.pi/2 - event_data['true_zd']), u.rad),
az=Angle(-1 * (event_data['true_az'] - (np.pi/2 + MAGIC_Bdec.value)), u.rad),
shower_primary_id=(1 - event_data['true_shower_primary_id']),
h_first_int=u.Quantity(event_data['true_h_first_int'], u.cm),
core_x=u.Quantity((event_data['true_core_x']*np.cos(-MAGIC_Bdec) - event_data['true_core_y']*np.sin(-MAGIC_Bdec)).value, u.cm),
core_y=u.Quantity((event_data['true_core_x']*np.sin(-MAGIC_Bdec) + event_data['true_core_y']*np.cos(-MAGIC_Bdec)).value, u.cm),
)
yield data
counter += 1
return
def _pedestal_event_generator(self, telescope):
"""
Pedestal event generator. Yields DataContainer instances, filled
with the read event data.
Parameters
----------
telescope: str
The telescope for which to return events. Can be either "M1" or "M2".
Returns
-------
"""
counter = 0
telescope = telescope.upper()
# Data container - is initialized once, and data is replaced after each yield
data = ArrayEventContainer()
# Telescopes with data:
tels_in_file = ["M1", "M2"]
if telescope not in tels_in_file:
raise ValueError(f"Specified telescope {telescope} is not in the allowed list {tels_in_file}")
tel_i = tels_in_file.index(telescope)
tels_with_data = [tel_i + 1, ]
# Removing the previously read data run from memory
if self.current_run is not None:
if 'data' in self.current_run:
del self.current_run['data']
# Setting the new active run
self.current_run = self._set_active_run(self.run_numbers)
monitoring_data = self.current_run['data'].monitoring_data
monitoring_camera = MonitoringCameraContainer()
pedestal_info = PedestalContainer()
badpixel_info = PixelStatusContainer()
pedestal_info.sample_time = Time(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalUnix'], format='unix', scale='utc'
)
pedestal_info.n_events = 500 # hardcoded number of pedestal events averaged over
pedestal_info.charge_mean = []
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Mean'])
pedestal_info.charge_mean.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Mean'])
pedestal_info.charge_mean.append(monitoring_data['M{:d}'.format(
tel_i + 1)]['PedestalFromExtractorRndm']['Mean'])
pedestal_info.charge_std = []
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFundamental']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractor']['Rms'])
pedestal_info.charge_std.append(
monitoring_data['M{:d}'.format(tel_i + 1)]['PedestalFromExtractorRndm']['Rms'])
t_range = Time(monitoring_data['M{:d}'.format(tel_i + 1)]['badpixelinfoUnixRange'], format='unix', scale='utc')
badpixel_info.hardware_failing_pixels = monitoring_data['M{:d}'.format(
tel_i + 1)]['badpixelinfo']
badpixel_info.sample_time_range = t_range
monitoring_camera.pedestal = pedestal_info
monitoring_camera.pixel_status = badpixel_info
data.mon.tel[tel_i + 1] = monitoring_camera
if telescope == 'M1':
n_events = self.current_run['data'].n_pedestal_events_m1
else:
n_events = self.current_run['data'].n_pedestal_events_m2
# Loop over the events
for event_i in range(n_events):
# Event and run ids
event_order_number = self.current_run['data'].pedestal_ids[telescope][event_i]
event_id = self.current_run['data'].event_data[telescope]['stereo_event_number'][event_order_number]
obs_id = self.current_run['number']
# Reading event data
event_data = self.current_run['data'].get_pedestal_event_data(
event_i, telescope=telescope)
data.meta['origin'] = 'MAGIC'
data.meta['input_url'] = self.input_url
data.meta['max_events'] = self.max_events
data.trigger.event_type = self.current_run['data'].event_data[telescope]['trigger_pattern'][event_order_number]
data.trigger.tels_with_trigger = tels_with_data
if self.allowed_tels:
data.trigger.tels_with_trigger = np.intersect1d(
data.trigger.tels_with_trigger,
self.subarray.tel_ids,
assume_unique=True,)
if not self.is_mc:
# Adding the event arrival time
data.trigger.tel[tel_i + 1] = TelescopeTriggerContainer(
time=Time(event_data['unix'], format='unix', scale='utc')
)
# Event counter
data.count = counter
data.index.obs_id = obs_id
data.index.event_id = event_id
# Setting up the R0 container
data.r0.tel.clear()
data.r1.tel.clear()
data.dl0.tel.clear()
data.dl1.tel.clear()
data.pointing.tel.clear()
# Creating the telescope pointing container
pointing = PointingContainer()
pointing_tel = TelescopePointingContainer(
azimuth=np.deg2rad(event_data['pointing_az']) * u.rad,
altitude=np.deg2rad(90 - event_data['pointing_zd']) * u.rad,)
pointing.tel[tel_i + 1] = pointing_tel
pointing.array_azimuth = np.deg2rad(event_data['pointing_az']) * u.rad
pointing.array_altitude = np.deg2rad(90 - event_data['pointing_zd']) * u.rad
pointing.array_ra = np.deg2rad(event_data['pointing_ra']) * u.rad
pointing.array_dec = np.deg2rad(event_data['pointing_dec']) * u.rad
data.pointing = pointing
# Adding event charge and peak positions per pixel
data.dl1.tel[tel_i + 1].image = event_data['image']
data.dl1.tel[tel_i + 1].peak_time = event_data['pulse_time']
yield data
counter += 1
return
class MarsCalibratedRun:
"""
This class implements reading of the event data from a single MAGIC calibrated run.
"""
def __init__(self, uproot_file, is_mc):
"""
Constructor of the class. Defines the run to use and the camera pixel arrangement.
Parameters
----------
        uproot_file: uproot.ReadOnlyFile
            A file opened by uproot via uproot.open(file_name)
        is_mc: bool
            Flag indicating if the file contains simulated events
"""
self.n_camera_pixels = GEOM.n_pixels
self.file_name = uproot_file.file_path
self.is_mc = is_mc
        if '_M1_' in self.file_name:
            m1_data = self.load_events(
                uproot_file, self.is_mc, self.n_camera_pixels)
            m2_data = self.load_events(
                None, self.is_mc, self.n_camera_pixels)
        elif '_M2_' in self.file_name:
            m1_data = self.load_events(
                None, self.is_mc, self.n_camera_pixels)
            m2_data = self.load_events(
                uproot_file, self.is_mc, self.n_camera_pixels)
        else:
            raise ValueError(
                f"Cannot infer the telescope (M1/M2) from file name {self.file_name}")
# Getting the event data
self.event_data = dict()
self.event_data['M1'] = m1_data[0]
self.event_data['M2'] = m2_data[0]
# Getting the monitoring data
self.monitoring_data = dict()
self.monitoring_data['M1'] = m1_data[1]
self.monitoring_data['M2'] = m2_data[1]
# Detecting pedestal events
self.pedestal_ids = self._find_pedestal_events()
# Detecting stereo events
self.stereo_ids = self._find_stereo_events()
# Detecting mono events
if self.is_mc:
self.mono_ids = self._find_stereo_mc_events()
else:
self.mono_ids = self._find_mono_events()
@property
def n_events_m1(self):
return len(self.event_data['M1']['unix'])
@property
def n_events_m2(self):
return len(self.event_data['M2']['unix'])
@property
def n_stereo_events(self):
return len(self.stereo_ids)
@property
def n_mono_events_m1(self):
return len(self.mono_ids['M1'])
@property
def n_mono_events_m2(self):
return len(self.mono_ids['M2'])
@property
def n_pedestal_events_m1(self):
return len(self.pedestal_ids['M1'])
@property
def n_pedestal_events_m2(self):
return len(self.pedestal_ids['M2'])
@staticmethod
def load_events(uproot_file, is_mc, n_camera_pixels):
"""
        This method loads events and monitoring data from the given uproot file and returns them as a dictionary.
Parameters
----------
        uproot_file: uproot.ReadOnlyFile or None
            A MAGIC calibrated file opened by uproot, or None to get empty containers.
is_mc: boolean
Specify whether Monte Carlo (True) or data (False) events are read
n_camera_pixels: int
Number of MAGIC camera pixels (not hardcoded, but specified solely via ctapipe.instrument.CameraGeometry)
Returns
-------
dict:
            A dictionary with the event properties: charge / arrival time data, trigger, direction etc.
"""
event_data = dict()
event_data['charge'] = []
event_data['arrival_time'] = []
event_data['trigger_pattern'] = np.array([], dtype=np.int32)
event_data['stereo_event_number'] = np.array([], dtype=np.int32)
event_data['pointing_zd'] = np.array([])
event_data['pointing_az'] = np.array([])
event_data['pointing_ra'] = np.array([])
event_data['pointing_dec'] = np.array([])
event_data['unix'] = np.array([])
# monitoring information (updated from time to time)
monitoring_data = dict()
monitoring_data['badpixelinfo'] = []
monitoring_data['badpixelinfoUnixRange'] = []
monitoring_data['PedestalUnix'] = np.array([])
monitoring_data['PedestalFundamental'] = dict()
monitoring_data['PedestalFundamental']['Mean'] = []
monitoring_data['PedestalFundamental']['Rms'] = []
monitoring_data['PedestalFromExtractor'] = dict()
monitoring_data['PedestalFromExtractor']['Mean'] = []
monitoring_data['PedestalFromExtractor']['Rms'] = []
monitoring_data['PedestalFromExtractorRndm'] = dict()
monitoring_data['PedestalFromExtractorRndm']['Mean'] = []
monitoring_data['PedestalFromExtractorRndm']['Rms'] = []
event_data['file_edges'] = [0]
# if no file in the list (e.g. when reading mono information), then simply
# return empty dicts/array
if uproot_file is None:
return event_data, monitoring_data
drive_data = dict()
drive_data['mjd'] = np.array([])
drive_data['zd'] = np.array([])
drive_data['az'] = np.array([])
drive_data['ra'] = np.array([])
drive_data['dec'] = np.array([])
evt_common_list = [
'MArrivalTime.fData',
'MCerPhotEvt.fPixels.fPhot',
'MRawEvtHeader.fDAQEvtNumber',
'MRawEvtHeader.fStereoEvtNumber',
'MTriggerPattern.fPrescaled',
'MTriggerPattern.fSkipEvent',
]
# Separately, because only used with pre-processed MARS data
# to create MPointingPos container
pointing_array_list = [
'MPointingPos.fZd',
'MPointingPos.fAz',
'MPointingPos.fRa',
'MPointingPos.fDec',
'MPointingPos.fDevZd',
'MPointingPos.fDevAz',
'MPointingPos.fDevHa',
'MPointingPos.fDevDec',
]
# Info only applicable for data:
time_array_list = [
'MTime.fMjd',
'MTime.fTime.fMilliSec',
'MTime.fNanoSec',
]
drive_array_list = [
'MReportDrive.fMjd',
'MReportDrive.fCurrentZd',
'MReportDrive.fCurrentAz',
'MReportDrive.fRa',
'MReportDrive.fDec'
]
pedestal_array_list = [
'MTimePedestals.fMjd',
'MTimePedestals.fTime.fMilliSec',
'MTimePedestals.fNanoSec',
'MPedPhotFundamental.fArray.fMean',
'MPedPhotFundamental.fArray.fRms',
'MPedPhotFromExtractor.fArray.fMean',
'MPedPhotFromExtractor.fArray.fRms',
'MPedPhotFromExtractorRndm.fArray.fMean',
'MPedPhotFromExtractorRndm.fArray.fRms'
]
# Info only applicable for MC:
mc_list = [
'MMcEvt.fEnergy',
'MMcEvt.fTheta',
'MMcEvt.fPhi',
'MMcEvt.fPartId',
'MMcEvt.fZFirstInteraction',
'MMcEvt.fCoreX',
'MMcEvt.fCoreY'
]
input_file = uproot_file
events = input_file['Events'].arrays(evt_common_list, library="np")
# Reading the info common to MC and real data
charge = events['MCerPhotEvt.fPixels.fPhot']
arrival_time = events['MArrivalTime.fData']
trigger_pattern = events['MTriggerPattern.fPrescaled']
stereo_event_number = events['MRawEvtHeader.fStereoEvtNumber']
if not is_mc:
# Reading event timing information:
event_times = input_file['Events'].arrays(time_array_list, library="np")
# Computing the event arrival time
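            # Decimal is used because a float64 cannot hold a unix timestamp
            # (~1.6e9 s) together with sub-microsecond offsets; summing the
            # day, millisecond and nanosecond parts as Decimal preserves the
            # full timing resolution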
event_obs_day = Time(event_times['MTime.fMjd'], format='mjd', scale='utc')
event_obs_day = np.round(event_obs_day.to_value(format='unix', subfmt='float'))
event_obs_day = np.array([Decimal(str(x)) for x in event_obs_day])
event_millisec = np.round(event_times['MTime.fTime.fMilliSec'] * msec2sec, 3)
event_millisec = np.array([Decimal(str(x)) for x in event_millisec])
event_nanosec = np.round(event_times['MTime.fNanoSec'] * nsec2sec, 7)
event_nanosec = np.array([Decimal(str(x)) for x in event_nanosec])
event_unix = event_obs_day + event_millisec + event_nanosec
event_data['unix'] = np.concatenate((event_data['unix'], event_unix))
badpixelinfo = input_file['RunHeaders']['MBadPixelsCam.fArray.fInfo'].array(
uproot.interpretation.jagged.AsJagged(
uproot.interpretation.numerical.AsDtype(np.dtype('>i4'))
), library="np")[0].reshape((4, 1183), order='F')
            # now we have 4 axes:
            # 0th axis: empty (?)
            # 1st axis: Unsuitable pixels
            # 2nd axis: Uncalibrated pixels (says why pixel is unsuitable)
            # 3rd axis: Bad hardware pixels (says why pixel is unsuitable)
            # Each axis contains a 32bit integer encoding more information about the
            # specific problem, see MARS software, MBadPixelsPix.h
            # take the unsuitable-pixel axis (index 1)
            unsuitable_pix_bitinfo = badpixelinfo[1][:n_camera_pixels]
            # extract the 'unsuitable' flag (bit 1) for each pixel:
            unsuitable_pix = ((unsuitable_pix_bitinfo & 0x2) >> 1).astype(bool)
monitoring_data['badpixelinfo'].append(unsuitable_pix)
# save time interval of badpixel info:
monitoring_data['badpixelinfoUnixRange'].append([event_unix[0], event_unix[-1]])
# try to read Pedestals tree (soft fail if not present)
try:
pedestal_info = input_file['Pedestals'].arrays(pedestal_array_list, library="np")
pedestal_obs_day = Time(pedestal_info['MTimePedestals.fMjd'], format='mjd', scale='utc')
pedestal_obs_day = np.round(pedestal_obs_day.to_value(format='unix', subfmt='float'))
pedestal_obs_day = np.array([Decimal(str(x)) for x in pedestal_obs_day])
pedestal_millisec = np.round(pedestal_info['MTimePedestals.fTime.fMilliSec'] * msec2sec, 3)
pedestal_millisec = np.array([Decimal(str(x)) for x in pedestal_millisec])
pedestal_nanosec = np.round(pedestal_info['MTimePedestals.fNanoSec'] * nsec2sec, 7)
pedestal_nanosec = np.array([Decimal(str(x)) for x in pedestal_nanosec])
pedestal_unix = pedestal_obs_day + pedestal_millisec + pedestal_nanosec
monitoring_data['PedestalUnix'] = np.concatenate((monitoring_data['PedestalUnix'], pedestal_unix))
n_pedestals = len(pedestal_unix)
for quantity in ['Mean', 'Rms']:
for i_pedestal in range(n_pedestals):
monitoring_data['PedestalFundamental'][quantity].append(
pedestal_info[f'MPedPhotFundamental.fArray.f{quantity}'][i_pedestal][:n_camera_pixels])
monitoring_data['PedestalFromExtractor'][quantity].append(
pedestal_info[f'MPedPhotFromExtractor.fArray.f{quantity}'][i_pedestal][:n_camera_pixels])
monitoring_data['PedestalFromExtractorRndm'][quantity].append(
pedestal_info[f'MPedPhotFromExtractorRndm.fArray.f{quantity}'][i_pedestal][:n_camera_pixels])
except KeyError:
LOGGER.warning(
"Pedestals tree not present in file. Cleaning algorithm may fail.")
# Getting the telescope drive info
drive = input_file['Drive'].arrays(drive_array_list, library="np")
drive_mjd = drive['MReportDrive.fMjd']
drive_zd = drive['MReportDrive.fCurrentZd']
drive_az = drive['MReportDrive.fCurrentAz']
drive_ra = drive['MReportDrive.fRa'] * degrees_per_hour
drive_dec = drive['MReportDrive.fDec']
drive_data['mjd'] = np.concatenate((drive_data['mjd'], drive_mjd))
drive_data['zd'] = np.concatenate((drive_data['zd'], drive_zd))
drive_data['az'] = np.concatenate((drive_data['az'], drive_az))
drive_data['ra'] = np.concatenate((drive_data['ra'], drive_ra))
drive_data['dec'] = np.concatenate((drive_data['dec'], drive_dec))
if len(drive_mjd) < 3:
LOGGER.warning(f"File {uproot_file.file_path} has only {len(drive_mjd)} drive reports.")
if len(drive_mjd) == 0:
raise MissingDriveReportError(f"File {uproot_file.file_path} does not have any drive report. Check if it was merpped correctly.")
# Reading pointing information (in units of degrees):
if is_mc:
# Retrieving the telescope pointing direction
pointing = input_file['Events'].arrays(pointing_array_list, library="np")
pointing_zd = pointing['MPointingPos.fZd'] - \
pointing['MPointingPos.fDevZd']
pointing_az = pointing['MPointingPos.fAz'] - \
pointing['MPointingPos.fDevAz']
# N.B. the positive sign here, as HA = local sidereal time - ra
pointing_ra = (pointing['MPointingPos.fRa'] +
pointing['MPointingPos.fDevHa']) * degrees_per_hour
pointing_dec = pointing['MPointingPos.fDec'] - \
pointing['MPointingPos.fDevDec']
# check for bit flips in the stereo event ID:
        d_x = np.diff(stereo_event_number.astype(np.int64))
dx_flip_ids_before = np.where(d_x < 0)[0]
dx_flip_ids_after = dx_flip_ids_before + 1
dx_flipzero_ids_first = np.where(d_x == 0)[0]
dx_flipzero_ids_second = dx_flipzero_ids_first + 1
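        # e.g. stereo numbers [10, 11, 4, 12] give d_x = [1, -7, 8]: the
        # negative step flags positions 1 ('before') and 2 ('after')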
if not is_mc:
pedestal_ids = np.where(
trigger_pattern == PEDESTAL_TRIGGER_PATTERN)[0]
# sort out pedestals events from zero-difference steps:
dx_flipzero_ids_second = np.array(
list(set(dx_flipzero_ids_second) - set(pedestal_ids)))
            dx_flip_ids_after = np.array(np.union1d(
                dx_flip_ids_after, dx_flipzero_ids_second), dtype=np.int64)
else:
# for MC, sort out stereo_event_number = 0:
orphan_ids = np.where(stereo_event_number == 0)[0]
dx_flip_ids_after = np.array(
list(set(dx_flip_ids_after) - set(orphan_ids)))
dx_flip_ids_before = dx_flip_ids_after - 1
max_total_jumps = 100
if len(dx_flip_ids_before) > 0:
LOGGER.warning("Warning: detected %d bitflips in file %s. Flag affected events as unsuitable" % (
len(dx_flip_ids_before), uproot_file.file_path))
total_jumped_events = 0
for i in dx_flip_ids_before:
trigger_pattern[i] = -1
trigger_pattern[i+1] = -1
if not is_mc:
jumped_events = int(stereo_event_number[i]) - int(stereo_event_number[i+1])
total_jumped_events += jumped_events
LOGGER.warning(
f"Jump of L3 number backward from {stereo_event_number[i]} to "
f"{stereo_event_number[i+1]}; total jumped events so far: "
f"{total_jumped_events}"
)
if total_jumped_events > max_total_jumps:
LOGGER.warning(
f"More than {max_total_jumps} in stereo trigger number; "
f"you may have to match events by timestamp at a later stage."
)
event_data['charge'].append(charge)
event_data['arrival_time'].append(arrival_time)
event_data['trigger_pattern'] = np.concatenate(
(event_data['trigger_pattern'], trigger_pattern))
event_data['stereo_event_number'] = np.concatenate(
(event_data['stereo_event_number'], stereo_event_number))
if is_mc:
event_data['pointing_zd'] = np.concatenate(
(event_data['pointing_zd'], pointing_zd))
event_data['pointing_az'] = np.concatenate(
(event_data['pointing_az'], pointing_az))
event_data['pointing_ra'] = np.concatenate(
(event_data['pointing_ra'], pointing_ra))
event_data['pointing_dec'] = np.concatenate(
(event_data['pointing_dec'], pointing_dec))
mc_info = input_file['Events'].arrays(mc_list, library="np")
# N.B.: For MC, there is only one subrun, so do not need to 'append'
event_data['true_energy'] = mc_info['MMcEvt.fEnergy']
event_data['true_zd'] = mc_info['MMcEvt.fTheta']
event_data['true_az'] = mc_info['MMcEvt.fPhi']
event_data['true_shower_primary_id'] = mc_info['MMcEvt.fPartId']
event_data['true_h_first_int'] = mc_info['MMcEvt.fZFirstInteraction']
event_data['true_core_x'] = mc_info['MMcEvt.fCoreX']
event_data['true_core_y'] = mc_info['MMcEvt.fCoreY']
event_data['file_edges'].append(len(event_data['trigger_pattern']))
if not is_mc:
monitoring_data['badpixelinfo'] = np.array(monitoring_data['badpixelinfo'])
monitoring_data['badpixelinfoUnixRange'] = np.array(monitoring_data['badpixelinfoUnixRange'])
# sort monitoring data:
order = np.argsort(monitoring_data['PedestalUnix'])
monitoring_data['PedestalUnix'] = monitoring_data['PedestalUnix'][order]
for quantity in ['Mean', 'Rms']:
monitoring_data['PedestalFundamental'][quantity] = np.array(
monitoring_data['PedestalFundamental'][quantity])
monitoring_data['PedestalFromExtractor'][quantity] = np.array(
monitoring_data['PedestalFromExtractor'][quantity])
monitoring_data['PedestalFromExtractorRndm'][quantity] = np.array(
monitoring_data['PedestalFromExtractorRndm'][quantity])
# get only drive reports with unique times, otherwise interpolation fails.
drive_mjd_unique, unique_indices = np.unique(
drive_data['mjd'],
return_index=True
)
drive_zd_unique = drive_data['zd'][unique_indices]
drive_az_unique = drive_data['az'][unique_indices]
drive_ra_unique = drive_data['ra'][unique_indices]
drive_dec_unique = drive_data['dec'][unique_indices]
first_drive_report_time = Time(drive_mjd_unique[0], scale='utc', format='mjd')
last_drive_report_time = Time(drive_mjd_unique[-1], scale='utc', format='mjd')
LOGGER.warning(f"Interpolating events information from {len(drive_data['mjd'])} drive reports.")
LOGGER.warning(f"Drive reports available from {first_drive_report_time.iso} to {last_drive_report_time.iso}.")
# Creating azimuth and zenith angles interpolators
drive_zd_pointing_interpolator = scipy.interpolate.interp1d(
drive_mjd_unique, drive_zd_unique, fill_value="extrapolate")
drive_az_pointing_interpolator = scipy.interpolate.interp1d(
drive_mjd_unique, drive_az_unique, fill_value="extrapolate")
# Creating RA and DEC interpolators
drive_ra_pointing_interpolator = scipy.interpolate.interp1d(
drive_mjd_unique, drive_ra_unique, fill_value="extrapolate")
drive_dec_pointing_interpolator = scipy.interpolate.interp1d(
drive_mjd_unique, drive_dec_unique, fill_value="extrapolate")
# Interpolating the drive pointing to the event time stamps
event_mjd = Time(event_data['unix'], format='unix', scale='utc').to_value(format='mjd', subfmt='float')
event_data['pointing_zd'] = drive_zd_pointing_interpolator(event_mjd)
event_data['pointing_az'] = drive_az_pointing_interpolator(event_mjd)
event_data['pointing_ra'] = drive_ra_pointing_interpolator(event_mjd)
event_data['pointing_dec'] = drive_dec_pointing_interpolator(event_mjd)
return event_data, monitoring_data
def _find_pedestal_events(self):
"""
This internal method identifies the IDs (order numbers) of the
pedestal events in the run.
Returns
-------
dict:
A dictionary of pedestal event IDs in M1/2 separately.
"""
pedestal_ids = dict()
for telescope in self.event_data:
ped_triggers = np.where(
self.event_data[telescope]['trigger_pattern'] == PEDESTAL_TRIGGER_PATTERN)
pedestal_ids[telescope] = ped_triggers[0]
return pedestal_ids
def _find_stereo_events(self):
"""
        This internal method identifies stereo events in the run.
Returns
-------
list:
A list of pairs (M1_id, M2_id) corresponding to stereo events in the run.
"""
stereo_ids = []
n_m1_events = len(self.event_data['M1']['stereo_event_number'])
n_m2_events = len(self.event_data['M2']['stereo_event_number'])
if (n_m1_events == 0) or (n_m2_events == 0):
return stereo_ids
if not self.is_mc:
stereo_m1_data = self.event_data['M1']['stereo_event_number'][np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]
stereo_m2_data = self.event_data['M2']['stereo_event_number'][np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]
# find common values between M1 and M2 stereo events, see https://numpy.org/doc/stable/reference/generated/numpy.intersect1d.html
stereo_numbers = np.intersect1d(stereo_m1_data, stereo_m2_data)
# find indices of the stereo event numbers in original stereo event numbers arrays, see
# https://stackoverflow.com/questions/12122639/find-indices-of-a-list-of-values-in-a-numpy-array
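            # N.B. np.searchsorted requires sorted input; stereo event numbers are
            # assumed to increase monotonically within a run here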
m1_ids = np.searchsorted(self.event_data['M1']['stereo_event_number'], stereo_numbers)
m2_ids = np.searchsorted(self.event_data['M2']['stereo_event_number'], stereo_numbers)
# make list of tuples, see https://stackoverflow.com/questions/2407398/how-to-merge-lists-into-a-list-of-tuples
stereo_ids = list(zip(m1_ids, m2_ids))
else:
stereo_m1_data = self.event_data['M1']['stereo_event_number'][np.where(self.event_data['M1']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN)]
stereo_m2_data = self.event_data['M2']['stereo_event_number'][np.where(self.event_data['M2']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN)]
# remove events with 0 stereo number, which are mono events
stereo_m1_data = stereo_m1_data[np.where(stereo_m1_data != 0)]
stereo_m2_data = stereo_m2_data[np.where(stereo_m2_data != 0)]
stereo_numbers = np.intersect1d(stereo_m1_data, stereo_m2_data)
            # because of IDs equal to 0, we must find indices in a slightly different way
# see https://stackoverflow.com/questions/8251541/numpy-for-every-element-in-one-array-find-the-index-in-another-array
index_m1 = np.argsort(self.event_data['M1']['stereo_event_number'])
index_m2 = np.argsort(self.event_data['M2']['stereo_event_number'])
sort_stereo_events_m1 = self.event_data['M1']['stereo_event_number'][index_m1]
sort_stereo_events_m2 = self.event_data['M2']['stereo_event_number'][index_m2]
sort_index_m1 = np.searchsorted(sort_stereo_events_m1, stereo_numbers)
sort_index_m2 = np.searchsorted(sort_stereo_events_m2, stereo_numbers)
m1_ids = np.take(index_m1, sort_index_m1)
m2_ids = np.take(index_m2, sort_index_m2)
stereo_ids = list(zip(m1_ids, m2_ids))
return stereo_ids
def _find_stereo_mc_events(self):
"""
This internal methods identifies stereo events in the run.
Returns
-------
list:
A list of pairs (M1_id, M2_id) corresponding to stereo events in the run.
"""
mono_ids = dict()
mono_ids['M1'] = []
mono_ids['M2'] = []
m1_ids = np.argwhere(self.event_data['M1']['stereo_event_number'])
m2_ids = np.argwhere(self.event_data['M2']['stereo_event_number'])
mono_ids['M1'] = list(m1_ids.flatten())
mono_ids['M2'] = list(m2_ids.flatten())
return mono_ids
def _find_mono_events(self):
"""
        This internal method identifies the IDs (order numbers) of the
        mono events in the run.
        Returns
        -------
        dict:
            A dictionary of mono event IDs in M1/2 separately.
"""
mono_ids = dict()
mono_ids['M1'] = []
mono_ids['M2'] = []
n_m1_events = len(self.event_data['M1']['stereo_event_number'])
n_m2_events = len(self.event_data['M2']['stereo_event_number'])
if not self.is_mc:
if (n_m1_events != 0) and (n_m2_events != 0):
m1_data = self.event_data['M1']['stereo_event_number'][np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]
m2_data = self.event_data['M2']['stereo_event_number'][np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)]
m1_ids_data = np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
m2_ids_data = np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
stereo_numbers = np.intersect1d(m1_data, m2_data)
m1_ids_stereo = np.searchsorted(self.event_data['M1']['stereo_event_number'], stereo_numbers)
m2_ids_stereo = np.searchsorted(self.event_data['M2']['stereo_event_number'], stereo_numbers)
# remove ids that have stereo trigger from the array of ids of data events
# see: https://stackoverflow.com/questions/52417929/remove-elements-from-one-array-if-present-in-another-array-keep-duplicates-nu
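                # for each data id, locate it among the sorted stereo ids; if the
                # stereo id found at that slot differs from the data id, the event is mono-only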
sidx1 = m1_ids_stereo.argsort()
idx1 = np.searchsorted(m1_ids_stereo,m1_ids_data,sorter=sidx1)
idx1[idx1==len(m1_ids_stereo)] = 0
m1_ids_mono = m1_ids_data[m1_ids_stereo[sidx1[idx1]] != m1_ids_data]
sidx2 = m2_ids_stereo.argsort()
idx2 = np.searchsorted(m2_ids_stereo,m2_ids_data,sorter=sidx2)
idx2[idx2==len(m2_ids_stereo)] = 0
m2_ids_mono = m2_ids_data[m2_ids_stereo[sidx2[idx2]] != m2_ids_data]
mono_ids['M1'] = m1_ids_mono.tolist()
mono_ids['M2'] = m2_ids_mono.tolist()
elif (n_m1_events != 0) and (n_m2_events == 0):
m1_ids_data = np.where(self.event_data['M1']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
mono_ids['M1'] = m1_ids_data.tolist()
elif (n_m1_events == 0) and (n_m2_events != 0):
m2_ids_data = np.where(self.event_data['M2']['trigger_pattern'] == DATA_STEREO_TRIGGER_PATTERN)[0]
mono_ids['M2'] = m2_ids_data.tolist()
else:
# just find ids where event stereo number is 0 (which is given to mono events) and pattern is MC trigger
m1_mono_mask = np.logical_and(self.event_data['M1']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN, self.event_data['M1']['stereo_event_number'] == 0)
m2_mono_mask = np.logical_and(self.event_data['M2']['trigger_pattern'] == MC_STEREO_TRIGGER_PATTERN, self.event_data['M2']['stereo_event_number'] == 0)
m1_ids = np.where(m1_mono_mask == True)[0].tolist()
            m2_ids = np.where(m2_mono_mask == True)[0].tolist()
            mono_ids['M1'] = m1_ids
            mono_ids['M2'] = m2_ids
        return mono_ids
"""Metadata for a single table."""
import copy
import json
import logging
import numpy as np
import pandas as pd
import rdt
from faker import Faker
from sdv.constraints.base import Constraint
from sdv.constraints.errors import MissingConstraintColumnError
from sdv.metadata.errors import MetadataError, MetadataNotFittedError
from sdv.metadata.utils import strings_from_regex
LOGGER = logging.getLogger(__name__)
class Table:
"""Table Metadata.
    The Metadata class provides a unified layer of abstraction over the metadata
    of a single Table, including all the details necessary to handle this
    table's data: the data types, the fields containing PII, and the
    constraints that affect the data.
Args:
name (str):
Name of this table. Optional.
field_names (list[str]):
List of names of the fields that need to be modeled
and included in the generated output data. Any additional
fields found in the data will be ignored and will not be
included in the generated output.
If ``None``, all the fields found in the data are used.
field_types (dict[str, dict]):
            Dictionary specifying the data types and subtypes
of the fields that will be modeled. Field types and subtypes
combinations must be compatible with the SDV Metadata Schema.
field_transformers (dict[str, str]):
            Dictionary specifying which transformers to use for each field.
Available transformers are:
* ``integer``: Uses a ``NumericalTransformer`` of dtype ``int``.
* ``float``: Uses a ``NumericalTransformer`` of dtype ``float``.
* ``categorical``: Uses a ``CategoricalTransformer`` without gaussian noise.
* ``categorical_fuzzy``: Uses a ``CategoricalTransformer`` adding gaussian noise.
* ``one_hot_encoding``: Uses a ``OneHotEncodingTransformer``.
* ``label_encoding``: Uses a ``LabelEncodingTransformer``.
* ``boolean``: Uses a ``BooleanTransformer``.
* ``datetime``: Uses a ``DatetimeTransformer``.
anonymize_fields (dict[str, str]):
Dict specifying which fields to anonymize and what faker
category they belong to.
primary_key (str):
Name of the field which is the primary key of the table.
constraints (list[Constraint, dict]):
List of Constraint objects or dicts.
dtype_transformers (dict):
Dictionary of transformer templates to be used for the
different data types. The keys must be any of the `dtype.kind`
values, `i`, `f`, `O`, `b` or `M`, and the values must be
either RDT Transformer classes or RDT Transformer instances.
model_kwargs (dict):
            Dictionary specifying the kwargs that need to be used in
each tabular model when working on this table. This dictionary
contains as keys the name of the TabularModel class and as
values a dictionary containing the keyword arguments to use.
This argument exists mostly to ensure that the models are
fitted using the same arguments when the same Table is used
to fit different model instances on different slices of the
same table.
sequence_index (str):
Name of the column that acts as the order index of each
sequence. The sequence index column can be of any type that can
be sorted, such as integer values or datetimes.
entity_columns (list[str]):
Names of the columns which identify different time series
sequences. These will be used to group the data in separated
training examples.
context_columns (list[str]):
The columns in the dataframe which are constant within each
group/entity. These columns will be provided at sampling time
(i.e. the samples will be conditioned on the context variables).
rounding (int, str or None):
Define rounding scheme for ``NumericalTransformer``. If set to an int, values
will be rounded to that number of decimal places. If ``None``, values will not
be rounded. If set to ``'auto'``, the transformer will round to the maximum number
of decimal places detected in the fitted data. Defaults to ``'auto'``.
min_value (int, str or None):
Specify the minimum value the ``NumericalTransformer`` should use. If an integer
is given, sampled data will be greater than or equal to it. If the string ``'auto'``
is given, the minimum will be the minimum value seen in the fitted data. If ``None``
is given, there won't be a minimum. Defaults to ``'auto'``.
max_value (int, str or None):
Specify the maximum value the ``NumericalTransformer`` should use. If an integer
is given, sampled data will be less than or equal to it. If the string ``'auto'``
is given, the maximum will be the maximum value seen in the fitted data. If ``None``
is given, there won't be a maximum. Defaults to ``'auto'``.
"""
_hyper_transformer = None
_fakers = None
_constraint_instances = None
_fields_metadata = None
fitted = False
_ANONYMIZATION_MAPPINGS = dict()
_TRANSFORMER_TEMPLATES = {
'integer': rdt.transformers.NumericalTransformer(dtype=int),
'float': rdt.transformers.NumericalTransformer(dtype=float),
'categorical': rdt.transformers.CategoricalTransformer,
'categorical_fuzzy': rdt.transformers.CategoricalTransformer(fuzzy=True),
'one_hot_encoding': rdt.transformers.OneHotEncodingTransformer,
'label_encoding': rdt.transformers.LabelEncodingTransformer,
'boolean': rdt.transformers.BooleanTransformer,
'datetime': rdt.transformers.DatetimeTransformer(strip_constant=True),
}
_DTYPE_TRANSFORMERS = {
'i': 'integer',
'f': 'float',
'O': 'one_hot_encoding',
'b': 'boolean',
'M': 'datetime',
}
_DTYPES_TO_TYPES = {
'i': {
'type': 'numerical',
'subtype': 'integer',
},
'f': {
'type': 'numerical',
'subtype': 'float',
},
'O': {
'type': 'categorical',
},
'b': {
'type': 'boolean',
},
'M': {
'type': 'datetime',
}
}
_TYPES_TO_DTYPES = {
('categorical', None): 'object',
('boolean', None): 'bool',
('numerical', None): 'float',
('numerical', 'float'): 'float',
('numerical', 'integer'): 'int',
('datetime', None): 'datetime64',
('id', None): 'int',
('id', 'integer'): 'int',
('id', 'string'): 'str'
}
def _get_faker(self, category):
"""Return the faker object to anonymize data.
Args:
category (str or tuple):
Fake category to use. If a tuple is passed, the first element is
the category and the rest are additional arguments for the Faker.
Returns:
function:
Faker function to generate new fake data instances.
Raises:
ValueError:
                A ``ValueError`` is raised if the requested faker category doesn't exist.
"""
if isinstance(category, (tuple, list)):
category, *args = category
else:
args = tuple()
try:
faker_method = getattr(Faker(), category)
if not args:
return faker_method
def faker():
return faker_method(*args)
return faker
except AttributeError:
raise ValueError('Category "{}" couldn\'t be found on faker'.format(category))
def _update_transformer_templates(self, rounding, min_value, max_value):
default_numerical_transformer = self._TRANSFORMER_TEMPLATES['integer']
if (rounding != default_numerical_transformer.rounding
or min_value != default_numerical_transformer.min_value
or max_value != default_numerical_transformer.max_value):
custom_int = rdt.transformers.NumericalTransformer(
dtype=int, rounding=rounding, min_value=min_value, max_value=max_value)
custom_float = rdt.transformers.NumericalTransformer(
dtype=float, rounding=rounding, min_value=min_value, max_value=max_value)
self._transformer_templates.update({
'integer': custom_int,
'float': custom_float
})
def __init__(self, name=None, field_names=None, field_types=None, field_transformers=None,
anonymize_fields=None, primary_key=None, constraints=None,
dtype_transformers=None, model_kwargs=None, sequence_index=None,
entity_columns=None, context_columns=None, rounding=None, min_value=None,
max_value=None):
self.name = name
self._field_names = field_names
self._field_types = field_types or {}
self._field_transformers = field_transformers or {}
self._anonymize_fields = anonymize_fields or {}
self._model_kwargs = model_kwargs or {}
self._primary_key = primary_key
self._sequence_index = sequence_index
self._entity_columns = entity_columns or []
self._context_columns = context_columns or []
self._constraints = constraints or []
self._dtype_transformers = self._DTYPE_TRANSFORMERS.copy()
self._transformer_templates = self._TRANSFORMER_TEMPLATES.copy()
self._update_transformer_templates(rounding, min_value, max_value)
if dtype_transformers:
self._dtype_transformers.update(dtype_transformers)
def __repr__(self):
return 'Table(name={}, field_names={})'.format(self.name, self._field_names)
def get_model_kwargs(self, model_name):
"""Return the required model kwargs for the indicated model.
Args:
model_name (str):
Qualified Name of the model for which model kwargs
are needed.
Returns:
dict:
Keyword arguments to use on the indicated model.
"""
return copy.deepcopy(self._model_kwargs.get(model_name))
def set_model_kwargs(self, model_name, model_kwargs):
"""Set the model kwargs used for the indicated model."""
self._model_kwargs[model_name] = model_kwargs
def _get_field_dtype(self, field_name, field_metadata):
field_type = field_metadata['type']
field_subtype = field_metadata.get('subtype')
dtype = self._TYPES_TO_DTYPES.get((field_type, field_subtype))
if not dtype:
raise MetadataError(
'Invalid type and subtype combination for field {}: ({}, {})'.format(
field_name, field_type, field_subtype)
)
return dtype
def get_fields(self):
"""Get fields metadata.
Returns:
dict:
Dictionary of fields metadata for this table.
"""
return copy.deepcopy(self._fields_metadata)
def get_dtypes(self, ids=False):
"""Get a ``dict`` with the ``dtypes`` for each field of the table.
Args:
ids (bool):
Whether or not to include the id fields. Defaults to ``False``.
Returns:
dict:
Dictionary that contains the field names and data types.
"""
dtypes = dict()
for name, field_meta in self._fields_metadata.items():
field_type = field_meta['type']
if ids or (field_type != 'id'):
dtypes[name] = self._get_field_dtype(name, field_meta)
return dtypes
def _build_fields_metadata(self, data):
"""Build all the fields metadata.
Args:
data (pandas.DataFrame):
Data to be analyzed.
Returns:
dict:
Dict of valid fields.
Raises:
ValueError:
If a column from the data analyzed is an unsupported data type
"""
fields_metadata = dict()
for field_name in self._field_names:
if field_name not in data:
raise ValueError('Field {} not found in given data'.format(field_name))
field_meta = self._field_types.get(field_name)
if field_meta:
dtype = self._get_field_dtype(field_name, field_meta)
else:
dtype = data[field_name].dtype
field_template = self._DTYPES_TO_TYPES.get(dtype.kind)
if field_template is None:
msg = 'Unsupported dtype {} in column {}'.format(dtype, field_name)
raise ValueError(msg)
field_meta = copy.deepcopy(field_template)
field_transformer = self._field_transformers.get(field_name)
if field_transformer:
field_meta['transformer'] = field_transformer
else:
                field_meta['transformer'] = self._dtype_transformers.get(np.dtype(dtype).kind)
            fields_metadata[field_name] = field_meta
        return fields_metadata
# Copyright (c) AIRBUS and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from .cgp import CGP
from .evaluator import Evaluator
from joblib import Parallel, delayed
class CGPES:
def __init__(self, num_offsprings, mutation_rate_nodes, mutation_rate_outputs, father, evaluator, folder='genomes', num_cpus = 1):
self.num_offsprings = num_offsprings
self.mutation_rate_nodes = mutation_rate_nodes
self.mutation_rate_outputs = mutation_rate_outputs
self.father = father
#self.num_mutations = int(len(self.father.genome) * self.mutation_rate)
self.evaluator = evaluator
self.num_cpus = num_cpus
self.folder = folder
if self.num_cpus > 1:
self.evaluator_pool = []
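            # one evaluator clone per offspring so parallel workers do not share state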
for i in range(self.num_offsprings):
self.evaluator_pool.append(self.evaluator.clone())
def run(self, num_iteration):
if not os.path.isdir(self.folder):
os.mkdir(self.folder)
self.logfile = open(self.folder + '/out.txt', 'w')
self.current_fitness = self.evaluator.evaluate(self.father, 0)
self.father.save(self.folder + '/cgp_genome_0_' + str(self.current_fitness) + '.txt')
self.offsprings = np.empty(self.num_offsprings, dtype=CGP)
        self.offspring_fitnesses = np.zeros(self.num_offsprings, dtype=float)
import numpy as np
import est_dir
def test_1():
"""Test that design matrix has first column of all ones."""
n = 10
m = 5
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.array([2.01003596, 3.29020466, 2.96499689, 0.93333668,
3.33078812])
matrix = est_dir.quad_func_params(1, 10, m)
func_args = (minimizer, matrix, 0, 5)
no_vars = m
region = 0.1
act_design, y, positions, func_evals = (est_dir.compute_random_design
(n, m, centre_point, no_vars,
f, func_args, region))
assert(positions.shape == (m,))
assert(func_evals == n)
assert(y.shape == (n, ))
full_act_design = np.ones((act_design.shape[0], act_design.shape[1] + 1))
full_act_design[:, 1:] = act_design
assert(np.all(full_act_design[:, 0] == np.ones(n)))
assert(np.all(full_act_design[:, 1:] == act_design))
def test_2():
"""
Test outputs of compute_direction_LS(), with m=10,
no_vars = 10 and F-test p-value is greater than 0.1.
"""
np.random.seed(91)
m = 10
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.repeat(1.1, m)
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_LS
(m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 16)
assert(direction == False)
def test_3():
"""
Test outputs of compute_direction_LS(), with m=10,
no_vars=10 and F-test p-value is smaller than 0.1.
"""
np.random.seed(96)
m = 10
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_LS
(m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 16)
assert(direction.shape == (m,))
assert(np.where(direction == 0)[0].shape[0] == 0)
assert(np.max(abs(direction)) == 1)
pos_max = np.argmax(direction)
for j in range(no_vars):
if j != pos_max:
assert(abs(direction[j]) <= 1)
def test_4():
"""
Test outputs of compute_direction_LS(), with m=100,
no_vars=10 and F-test p-value is smaller than 0.1.
"""
np.random.seed(96)
m = 100
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_LS
(m, centre_point, f, func_args,
no_vars, region))
assert(func_evals >= 16)
assert(direction.shape == (m,))
assert(np.where(direction == 0)[0].shape[0] == (m-no_vars))
assert(np.max(abs(direction)) == 1)
pos_max = np.argmax(direction)
for j in range(no_vars):
if j != pos_max:
assert(abs(direction[j]) <= 1)
def test_5():
"""
Test outputs of compute_direction_LS(), with m=100,
no_vars=10 and F-test p-value is greater than 0.1.
"""
np.random.seed(100)
m = 100
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.repeat(1.1, m)
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_LS
(m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 160)
assert(direction == False)
def test_6():
m = 20
no_vars = 4
set_all_positions = np.arange(m)
positions = np.array([[1, 3, 5, 7],
[0, 2, 10, 15],
[4, 12, 19, 6],
[8, 9, 17, 11],
[13, 14, 16, 18]])
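    # the loop below verifies that the rows of `positions` are mutually disjoint,
    # i.e. each batch of no_vars coordinates is drawn without replacement from range(m)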
index = 0
while True:
set_all_positions = np.setdiff1d(set_all_positions, positions[index])
for k in range(index + 1):
for i in range(no_vars):
assert(positions[k][i] not in set_all_positions)
index += 1
if index >= np.floor(m / no_vars):
break
def test_7():
"""
Test outputs of compute_direction_XY(), with n=16, m=10,
no_vars=m.
"""
np.random.seed(91)
n = 16
m = 10
no_vars = m
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_XY
(n, m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 16)
assert(direction.shape == (m,))
assert(np.where(direction == 0)[0].shape[0] == 0)
assert(np.max(abs(direction)) == 1)
pos_max = np.argmax(direction)
for j in range(no_vars):
if j != pos_max:
assert(abs(direction[j]) <= 1)
def test_8():
"""
Test outputs of compute_direction_XY(), with n=16, m=100,
no_vars=m.
"""
np.random.seed(91)
n = 16
m = 100
no_vars = m
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_XY
(n, m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 16)
assert(direction.shape == (m,))
assert(np.where(direction == 0)[0].shape[0] == 0)
assert(np.max(abs(direction)) == 1)
pos_max = np.argmax(direction)
for j in range(no_vars):
if j != pos_max:
assert(abs(direction[j]) <= 1)
def test_9():
"""
Test outputs of calc_first_phase_RSM_LS(), where F-test
p-value is less than 0.1.
"""
np.random.seed(96)
m = 10
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
init_func_val = f(centre_point, *func_args)
const_back = 0.5
forward_tol = 1000000
back_tol = 0.000001
const_forward = (1 / const_back)
step = 1
no_vars = 10
(upd_point,
f_new,
total_func_evals_step,
total_func_evals_dir,
flag) = (est_dir.calc_first_phase_RSM_LS
(centre_point, init_func_val, f, func_args,
m, const_back, back_tol, const_forward,
forward_tol, step, no_vars, region))
assert(upd_point.shape == (m, ))
assert(f_new < init_func_val)
assert(total_func_evals_step > 0)
assert(total_func_evals_dir == 16)
assert(flag == True)
def test_10():
"""
Test outputs of calc_first_phase_RSM_LS(), where F-test
p-value is greater than 0.1.
"""
np.random.seed(100)
n = 16
m = 100
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.repeat(1.1, m)
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
init_func_val = f(centre_point, *func_args)
const_back = 0.5
forward_tol = 1000000
back_tol = 0.000001
const_forward = (1 / const_back)
step = 1
(upd_point,
f_new,
total_func_evals_step,
total_func_evals_dir,
flag) = (est_dir.calc_first_phase_RSM_LS
(centre_point, init_func_val, f, func_args,
m, const_back, back_tol, const_forward,
forward_tol, step, no_vars, region))
assert(np.all(np.round(upd_point, 5) == np.round(centre_point, 5)))
assert(f_new == init_func_val)
assert(total_func_evals_step == 0)
assert(total_func_evals_dir == n * (int(m / no_vars)))
assert(flag == False)
def test_11():
"""
Test outputs of calc_first_phase_RSM_XY().
"""
np.random.seed(91)
n = 16
m = 100
no_vars = m
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
init_func_val = f(centre_point, *func_args)
const_back = 0.5
forward_tol = 1000000
back_tol = 0.000001
const_forward = (1 / const_back)
step = 1
(upd_point,
f_new,
total_func_evals_step,
total_func_evals_dir) = (est_dir.calc_first_phase_RSM_XY
(centre_point, init_func_val, f, func_args,
n, m, const_back, back_tol,
const_forward, forward_tol, step, no_vars,
region))
assert(upd_point.shape == (m, ))
assert(f_new < init_func_val)
assert(total_func_evals_step > 0)
assert(total_func_evals_dir == n)
def test_12():
"""
Test outputs of divide_abs_max_value(). That is, ensure signs of the
updated direction are the same as previous direction, and ensure
computation is correct.
"""
direction = np.array([0.8, 0.2, -0.04, 0.5, -0.6, -0.95])
new_direction = est_dir.divide_abs_max_value(direction)
assert(np.all(np.sign(direction) == np.sign(new_direction)))
assert(np.all(np.round(new_direction * np.max(abs(direction)), 2)
== np.round(direction, 2)))
def test_13():
"""
Test outputs of compute_direction_MP().
"""
np.random.seed(91)
n = 16
m = 100
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
direction, func_evals = (est_dir.compute_direction_MP
(n, m, centre_point, f, func_args,
no_vars, region))
assert(func_evals == 16)
assert(direction.shape == (m,))
assert(np.where(direction == 0)[0].shape[0] == m - no_vars)
assert(np.max(abs(direction)) == 1)
pos_max = np.argmax(direction)
for j in range(no_vars):
if j != pos_max:
assert(abs(direction[j]) <= 1)
np.random.seed(91)
n = 16
m = 100
no_vars = 10
f = est_dir.quad_f_noise
minimizer = np.ones((m,))
centre_point = np.random.uniform(-2, 2, (m,))
matrix = est_dir.quad_func_params(1, 1, m)
func_args = (minimizer, matrix, 0, 1)
region = 0.1
act_design, y, positions, func_evals = (est_dir.compute_random_design
(n, m, centre_point, no_vars,
f, func_args, region))
full_act_design = np.ones((act_design.shape[0], act_design.shape[1] + 1))
full_act_design[:, 1:] = act_design
direction2 = np.zeros((m,))
est = (np.linalg.pinv(full_act_design) @ y)
direction2[positions] = est_dir.divide_abs_max_value(est[1:])
assert(full_act_design.shape == (n, no_vars+1))
    assert(np.all(full_act_design != 0))
import cv2 as cv
import pyautogui
import numpy as np
cam = cv.VideoCapture(0)
#range for red color
lower_red = np.array([150, 30, 30])
upper_red = np.array([190, 255, 255])
#range for green color
lower_green = np.array([50,100,100])
upper_green = np.array([80,255,255])
#range for blue color
lower_blue = np.array([100, 60, 60])
upper_blue = np.array([140, 255, 255])
while(True):
ret,frame = cam.read()
frame = cv.flip(frame,1)
#Smoothen the image
image_smooth = cv.GaussianBlur(frame,(7,7),0)
#Define Region of interest
mask = np.zeros_like(frame)
mask[100:350,100:350] = [255,255,255]
image_roi = cv.bitwise_and(image_smooth,mask)
cv.rectangle(frame,(50,50),(350,350),(0,0,255),2)
cv.line(frame,(150,50),(150,350),(0,0,255),1)
cv.line(frame,(250,50),(250,350),(0,0,255),1)
cv.line(frame,(50,150),(350,150),(0,0,255),1)
cv.line(frame,(50,250),(350,250),(0,0,255),1)
#Threshold the image for red color
image_hsv = cv.cvtColor(image_roi,cv.COLOR_BGR2HSV)
image_threshold = cv.inRange(image_hsv,lower_red,upper_red)
#Find Contours
contours,heirarchy = cv.findContours(image_threshold, \
cv.RETR_TREE, \
cv.CHAIN_APPROX_NONE)
#Find the index of the largest contour
if(len(contours)!=0):
areas = [cv.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
"""
Miscellaneous and sundry plotting functions for to please your visual cortex
"""
import typing
import warnings
from pathlib import Path
from typing import Dict, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xarray as xr
from matplotlib import cm, colors, gridspec, image, transforms
from matplotlib.backends.backend_pdf import PdfPages
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import stats
from skimage.measure import label, regionprops
from statsmodels.stats.weightstats import DescrStatsW
from tqdm.auto import tqdm
from pharedox import constants
from pharedox import data_analysis as da
def imshow_r_stack(
imgs: xr.DataArray,
profile_data: xr.DataArray,
output_dir: Union[str, Path],
per_animal_cmap: bool = True,
fl_wvl: str = "410",
cmap: str = "coolwarm",
width: int = 80,
height: int = 30,
colorbar=True,
):
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
    center = (np.array(imgs.shape[-2:]) / 2).astype(int)
wpad = int(width / 2)
hpad = int(height / 2)
for tp in tqdm(imgs.timepoint.values, leave=False, desc="timepoint"):
for pair in tqdm(imgs.pair.values, leave=False, desc="pair"):
filepath = output_dir.joinpath(f"timepoint={tp}_pair={pair}.pdf")
with PdfPages(filepath) as pdf:
i = 0
for animal in tqdm(imgs.animal.values, desc="animal", leave=False):
fig, ax = plt.subplots()
selector = dict(animal=animal, timepoint=tp, pair=pair)
im, cbar = imshow_ratio_normed(
imgs.sel(wavelength="r", **selector),
imgs.sel(wavelength=fl_wvl, **selector),
profile_data=profile_data.sel(wavelength="r", **selector),
prob=0.999,
colorbar=colorbar,
i_min=0,
i_max=3000,
cmap=cmap,
ax=ax,
)
ax.set_xlim(center[1] - wpad, center[1] + wpad)
ax.set_ylim(center[0] - hpad, center[0] + hpad)
ax.set_title(str(selector))
pdf.savefig()
if (i % 20) == 0:
plt.close("all")
i += 1
def generate_wvl_pair_timepoint_profile_plots(data: xr.DataArray, ignored_wvls=None):
"""
    For each wavelength, pair, and timepoint in the given data, plots one
    line per animal, colored by strain.
Parameters
----------
data
ignored_wvls
"""
if ignored_wvls is None:
ignored_wvls = ["TL"]
strains = np.unique(data.strain.values)
cmap = plt.get_cmap("Set2")
colormap = dict(zip(strains, cmap.colors))
wvls = list(map(lambda x: x.lower(), data.wavelength.values))
for wvl in ignored_wvls:
try:
wvls.remove(wvl.lower())
except ValueError:
continue
for wvl in wvls:
for pair in data.pair.values:
for tp in data.timepoint.values:
fig, ax = plt.subplots()
for strain in strains:
strain_data = data.where(data["strain"] == strain, drop=True)
ax.plot(
strain_data.sel(wavelength=wvl, pair=pair, timepoint=tp).T,
color=colormap[strain],
alpha=0.5,
)
title = f"wavelength = {wvl} ; pair = {pair} ; timepoint = {tp}"
ax.set_title(title)
ax.legend(
[
plt.Line2D([0], [0], color=color, lw=4)
for color in cmap.colors[: len(strains)]
],
strains,
)
yield title, fig
def generate_avg_wvl_pair_profile_plots(
data: xr.DataArray, ignored_wvls: typing.List[str] = None
):
"""
    For each wavelength, pair, and timepoint in the given data, this function plots
    the mean value across animals for each strain as a line, with shaded regions
    showing the 95% confidence intervals.
Parameters
----------
    ignored_wvls : list of str
        wavelengths to exclude from plotting
    data : xr.DataArray
        the profile data whose per-strain means and confidence bounds are plotted
"""
if ignored_wvls is None:
ignored_wvls = ["TL"]
strains = np.unique(data.strain.values)
cmap = plt.get_cmap("Set2")
colormap = dict(zip(strains, cmap.colors))
wvls = list(map(lambda x: x.lower(), data.wavelength.values))
for wvl in ignored_wvls:
try:
wvls.remove(wvl.lower())
except ValueError:
continue
for wvl in wvls:
for pair in data.pair.values:
for tp in data.timepoint.values:
fig, ax = plt.subplots()
for strain in np.unique(data.strain.values):
strain_data = data.where(data["strain"] == strain, drop=True)
plot_profile_avg_with_bounds(
strain_data.sel(wavelength=wvl, pair=pair, timepoint=tp),
label=strain,
ax=ax,
color=colormap[strain],
)
title = f"wavelength = {wvl} ; pair = {pair} ; timepoint = {tp}"
ax.set_title(title)
ax.legend()
yield title, fig
def plot_err_with_region_summaries(
data: xr.DataArray,
measure_regions: Dict,
display_regions=None,
ax=None,
profile_color="black",
label=None,
):
st_color = "k"
mv_color = "tab:red"
if ax is None:
_, ax = plt.subplots()
if display_regions is None:
display_regions = measure_regions
df = da.fold_v_point_table(data, measure_regions)
df_avgs = df.reset_index().groupby("region").agg(["mean", "sem"]).reset_index()
xs = np.linspace(0, 1, data.position.size)
plot_profile_avg_with_sem_bounds(
100 * da.fold_error(data), xs=xs, ax=ax, color=profile_color, label=label
)
for region, region_err_mean, region_err_sem in zip(
df_avgs["region"],
df_avgs["fold_error_region"][1]["mean"],
df_avgs["fold_error_region"][1]["sem"],
):
try:
ax.axhline(
100 * region_err_mean,
*display_regions[region],
color=profile_color,
alpha=1,
lw=2,
solid_capstyle="butt",
)
ax.errorbar(
x=np.mean(display_regions[region]),
y=100 * region_err_mean,
yerr=100 * region_err_sem,
color=profile_color,
elinewidth=0.5,
capsize=1,
capthick=0.5,
)
        except KeyError:
continue
ax.set_xlim(0, 1)
add_regions_to_axis(
ax, display_regions, alpha=0.3, hide_labels=True, skip=["medial_axis"]
)
ax.set_xlabel("position along midline")
def plot_stage_layout(
image_data: xr.DataArray, pair: int = 0
) -> sns.axisgrid.FacetGrid:
"""
Shows a scatter plot where each point is an animal located on the imaging stage and
the points are colored by strain.
A useful visualization to make sure that the strain map is accurate.
.. image:: _static/plot_stage_layout.png
Parameters
----------
image_data : xr.DataArray
The image data acquired by metamorph.
pair : int
The image pair to display
Returns
-------
seaborn.axisgrid.FacetGrid
The grid object returned by seaborns's lmplot
See Also
--------
io.load_tiff_as_hyperstack
seaborn.lmplot
"""
df = pd.DataFrame(
dict(
            stage_x=image_data.sel(wavelength="410", pair=pair).stage_x,
            stage_y=image_data.sel(wavelength="410", pair=pair).stage_y,
            strain=image_data.sel(wavelength="410", pair=pair).strain,
)
)
return sns.lmplot(x="stage_x", y="stage_y", data=df, hue="strain", fit_reg=False)
def ecdf_(data):
"""Compute ECDF"""
x = np.sort(data)
n = x.size
y = np.arange(1, n + 1) / n
return x, y
def cdf_plot(data, *args, **kwargs):
"""
Plot a CDF, compatible with Seaborn's FacetGrid
data
1-D vector of numbers to plot the CDF of
*args
ignored
**kwargs
keyword arguments passed onto ``plt.step``
"""
x, y = ecdf_(data)
plt.step(x, y, **kwargs)
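# usage sketch (hypothetical dataframe): sns.FacetGrid(df, hue="strain").map(cdf_plot, "value")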
def add_regions_to_axis(
ax,
regions: dict,
skip=None,
label_dist_bottom_percent: float = 0.03,
label_x_offset_percent: float = 0.005,
alpha: float = 0.03,
hide_labels: bool = False,
xs=None,
color="black",
**kwargs,
):
"""
    Annotate the given axis with shaded vertical spans for each named region.
Parameters
----------
ax
the axis to add the regions to
regions
the region dictionary, formatted as such::
{
'pm3': [1, 10],
'pm4': [12, 30],
...
}
skip
the regions to skip plotting
label_dist_bottom_percent
the distance from the bottom of the axis that the region labels should be placed, expressed as a percentage of the axis height
label_x_offset_percent
the distance from the left of the region annotation, expressed as a percentage of the axis length
alpha
the opacity of the region annotations (0 = transparent, 1=opaque)
hide_labels
if True, does not add labels to regions
kwargs
these will be passed onto ``ax.axvspan``
"""
if skip is None:
skip = []
min_y, max_y = ax.get_ylim()
min_x, max_x = ax.get_xlim()
text_y = ((max_y - min_y) * label_dist_bottom_percent) + min_y
text_x_offset = (max_x - min_x) * label_x_offset_percent
for region, bounds in regions.items():
if region in skip:
continue
ax.axvspan(
bounds[0], bounds[1], alpha=alpha, color=color, linewidth=0, **kwargs
)
if not hide_labels:
ax.annotate(region, xy=(bounds[0] + text_x_offset, text_y))
def add_region_bars_to_axis(
ax, regions, skip=None, bar_height=8, bar_width=1, fontsize=3
):
if skip is None:
skip = []
for region, region_bounds in regions.items():
if region in skip:
continue
yy = -0.01
ax.annotate(
"",
xy=(region_bounds[0], yy),
xycoords=("data", "axes fraction"),
xytext=(region_bounds[1], yy),
textcoords=("data", "axes fraction"),
arrowprops=dict(
arrowstyle="-",
connectionstyle=f"bar,armA=-{bar_height},armB=-{bar_height},fraction=0.0",
capstyle="butt",
joinstyle="miter",
lw=bar_width,
),
annotation_clip=False,
)
ax.annotate(
region,
xy=((region_bounds[0] + region_bounds[1]) / 2, yy - 0.08),
xycoords=("data", "axes fraction"),
ha="center",
fontsize=fontsize,
)
ax.xaxis.labelpad = 25
def plot_profile_avg_with_bounds(
data,
ax=None,
confint_alpha=0.05,
label=None,
xs=None,
axis=0,
bounds: str = "ci",
**kwargs,
):
"""
    Plot the mean of ``data`` with shaded uncertainty bounds: either a
    t-confidence interval ("ci") or the standard error of the mean ("sem").
Parameters
----------
data
ax
confint_alpha
label
kwargs
Returns
-------
"""
with np.errstate(invalid="ignore"):
mean = np.nanmean(data, axis=0)
sem = stats.sem(data)
bounds_map = {
"ci": DescrStatsW(data).tconfint_mean(alpha=confint_alpha),
"sem": (mean - sem, mean + sem),
}
if ax is None:
ax = plt.gca()
if xs is None:
try:
# if the data is an xr.DataArray
xs = data.position
        except AttributeError:
# if it's a numpy array
xs = np.arange(len(data))
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax.plot(xs, np.nanmean(data, axis=axis), label=label, **kwargs)
lower, upper = bounds_map[bounds]
kwargs.pop("linestyle", None)
kwargs.pop("linewidth", None)
kwargs.pop("lw", None)
ax.fill_between(xs, lower, upper, alpha=0.3, lw=0, **kwargs)
return ax
def imgs_to_rgb(
imgs,
r_min,
r_max,
cmap="coolwarm",
i_min=0,
i_max=None,
i_wvls=["410", "470"],
ratio_numerator="410",
ratio_denominator="470",
):
    if i_max is None:
        i_max = np.max(imgs.sel(wavelength=i_wvls))
try:
R = imgs.sel(wavelength="R")
except KeyError:
R = imgs.sel(wavelength=ratio_numerator) / imgs.sel(
wavelength=ratio_denominator
)
norm_ratio = colors.Normalize(vmin=r_min, vmax=r_max)
cmap = cm.get_cmap(cmap)
img_rgba = cmap(norm_ratio(R))
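    # "value-correct" the ratio colors: below, the HSV value channel is replaced
    # with the normalized fluorescence intensity so dim pixels render dark
    # regardless of their ratio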
norm_fl = colors.Normalize(vmin=i_min, vmax=i_max, clip=True)
hsv_img = colors.rgb_to_hsv(img_rgba[..., :3]) # ignore the "alpha" channel
hsv_img[..., -1] = norm_fl(imgs.sel(wavelength=i_wvls).max(dim="wavelength"))
img_rgba = colors.hsv_to_rgb(hsv_img)
return img_rgba
def imshow_ratio_normed(
ratio_img,
fl_img,
profile_data=None,
prob=0.999,
cmap="coolwarm",
r_min=None,
r_max=None,
i_min=0,
i_max=None,
clip=True,
ax=None,
colorbar=False,
colorbar_kwargs_dict={},
**imshow_kwargs,
):
"""
Show the given ratio image, first converting to HSV and setting the "V" (value)
channel to be the given (normalized) intensity image
Parameters
----------
ratio_img
the ratio image to display
fl_img
the fluorescent intensity image with which to "value-correct" the ratio image.
A good choice here is the max value of both intensity channels used in the
ratio.
profile_data
the midline profile data corresponding to the ratio image. This is used to
center and to choose min/max values for the ratio colormap.
prob
The "confidence interval" around the center of the ratio values to include in
the colormap. For example, 0.95 translates to a min/max of
mean(ratio) +/- (1.96*std(ratio))
cmap
The colormap used to display the ratio image. Diverging colormaps are a good
        choice here (default is coolwarm).
r_min
The minimum value for the ratio colormap. If None, uses the `prob` parameter
(see its description), and requires `profile_data`.
r_max
The maximum value for the ratio colormap. If None, uses the `prob` parameter
(see its description), and requires `profile_data`.
i_min
The intensity to map to 0 in the value channel
i_max
The intensity to map to 1 in the value channel
clip
Whether or not the value channel should be clipped to [0, 1] before converting
back to RGB. Leaving this as True is a sane default.
ax
If given, the image is plotted on this axis. If ``None``, this function uses the
pyplot interface.
colorbar
show the colorbar or not
    imshow_kwargs
        keyword arguments that will be passed along to the ``imshow`` function
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Convert ratio to RGB
if profile_data is None:
if (r_min is None) or (r_max is None):
raise ValueError(
"r_min and r_max must be set if profile_data is not given"
)
else:
r_mean = np.mean(profile_data)
            r_std = np.std(profile_data)
import cv2
from time import perf_counter
import numpy as np
import tensorrt as trt
import pycuda.driver as cuda
import pycuda.autoinit
import os
THIS_DIR = os.path.dirname(os.path.realpath(__file__))
CWD = os.getcwd()
# Simple helper data class that's a little nicer to use than a 2-tuple.
class HostDeviceMem(object):
def __init__(self, host_mem, device_mem):
self.host = host_mem
self.device = device_mem
def __str__(self):
return "Host:\n" + str(self.host) + "\nDevice:\n" + str(self.device)
def __repr__(self):
return self.__str__()
class YOLOV4(object):
if CWD == THIS_DIR:
_defaults = {
"engine_path": "trt_weights/yolov4_1_608_608.trt",
"classes_path": 'data/coco.names',
"thresh": 0.5,
"nms_thresh": 0.4,
"model_image_size": (608,608) # must follow trt size
}
else:
_defaults = {
"engine_path": "pytorch_YOLOv4/trt_weights/yolov4_1_608_608.trt",
"classes_path": 'pytorch_YOLOv4/data/coco.names',
"thresh": 0.5,
"nms_thresh": 0.4,
"model_image_size": (608,608) # must follow trt size
}
def __init__(self, bgr=True, **kwargs):
self.__dict__.update(self._defaults) # set up default values
# for portability between keras-yolo3/yolo.py and this
if 'model_path' in kwargs:
kwargs['weights'] = kwargs['model_path']
if 'score' in kwargs:
kwargs['thresh'] = kwargs['score']
self.__dict__.update(kwargs) # update with user overrides
self.trt_engine = self.get_engine(self.engine_path)
self.trt_context = self.trt_engine.create_execution_context()
self.max_batch_size = self.trt_engine.max_batch_size
self.class_names = self._get_class()
self.num_classes = len(self.class_names)
self.bgr = bgr
# warm up
self._detect([np.zeros((10,10,3), dtype=np.uint8)])
print('Warmed up!')
def _get_class(self):
with open(self.classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
@staticmethod
def get_engine(engine_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_path))
with open(engine_path, "rb") as f, trt.Runtime(trt.Logger(min_severity=trt.Logger.ERROR)) as runtime:
return runtime.deserialize_cuda_engine(f.read())
# Allocates all buffers required for an engine, i.e. host/device inputs/outputs.
@staticmethod
def allocate_buffers(engine):
inputs = []
outputs = []
bindings = []
stream = cuda.Stream()
for binding in engine:
size = trt.volume(engine.get_binding_shape(binding))
dtype = trt.nptype(engine.get_binding_dtype(binding))
# Allocate host and device buffers
host_mem = cuda.pagelocked_empty(size, dtype)
device_mem = cuda.mem_alloc(host_mem.nbytes)
# Append the device buffer to device bindings.
bindings.append(int(device_mem))
# Append to the appropriate list.
if engine.binding_is_input(binding):
inputs.append(HostDeviceMem(host_mem, device_mem))
else:
outputs.append(HostDeviceMem(host_mem, device_mem))
return inputs, outputs, bindings, stream
# This function is generalized for multiple inputs/outputs.
# inputs and outputs are expected to be lists of HostDeviceMem objects.
@staticmethod
def trt_inference(context, bindings, inputs, outputs, stream):
# Transfer input data to the GPU.
[cuda.memcpy_htod_async(inp.device, inp.host, stream) for inp in inputs]
# Run inference.
context.execute_async(bindings=bindings, stream_handle=stream.handle)
# Transfer predictions back from the GPU.
[cuda.memcpy_dtoh_async(out.host, out.device, stream) for out in outputs]
# Synchronize the stream
stream.synchronize()
# Return only the host outputs.
return [out.host for out in outputs]
def _detect(self, list_of_imgs):
if self.bgr:
list_of_imgs = [ cv2.cvtColor(img, cv2.COLOR_BGR2RGB) for img in list_of_imgs ]
resized = [np.array(cv2.resize(img, self.model_image_size)) for img in list_of_imgs]
        images = np.stack(resized, axis=0)
import warnings
import numpy as np
from simtk import unit
from tqdm import tqdm
from benchmark import simulation_parameters
from benchmark.integrators import LangevinSplittingIntegrator
from benchmark.testsystems import NonequilibriumSimulator
from benchmark.testsystems import water_cluster_rigid, alanine_constrained
from benchmark.testsystems.bookkeepers import get_state_as_mdtraj
from multiprocessing import Pool
n_processes = 32
# experiment variables
testsystems = {
"alanine_constrained": alanine_constrained,
"water_cluster_rigid": water_cluster_rigid,
}
splittings = {"OVRVO": "O V R V O",
"ORVRO": "O R V R O",
"RVOVR": "R V O V R",
"VRORV": "V R O R V",
}
marginals = ["configuration", "full"]
dt_range = np.array([0.1] + list(np.arange(0.5, 8.001, 0.5))) * unit.femtosecond
# constant parameters
collision_rate = 1.0 / unit.picoseconds
temperature = simulation_parameters['temperature']
def n_steps_(dt, n_collisions=1, max_steps=1000):
"""Heuristic for how many steps are needed to reach steady state:
should run at least long enough to have n_collisions full "collisions"
with the bath.
This corresponds to more discrete steps when dt is small, and fewer discrete steps
when dt is large.
Examples:
n_steps_(dt=1fs) = 1000
n_steps_(dt=2fs) = 500
n_steps_(dt=4fs) = 250
n_steps_(dt=8fs) = 125
"""
return min(max_steps, int((n_collisions / collision_rate) / dt))
# adaptive inner-loop params
inner_loop_initial_size = 50
inner_loop_batch_size = 1
inner_loop_stdev_threshold = 0.01
inner_loop_max_samples = 50000
# adaptive outer-loop params
outer_loop_initial_size = 50
outer_loop_batch_size = 100
outer_loop_stdev_threshold = inner_loop_stdev_threshold
outer_loop_max_samples = 1000
def stdev_log_rho_pi(w):
"""Approximate the standard deviation of the estimate of log < e^{-w} >_{x; \Lambda}
Parameters
----------
w : unitless (kT) numpy array of work samples
Returns
-------
stdev : float
Notes
-----
This will be an underestimate esp. when len(w) is small or stdev_log_rho_pi is large.
"""
assert(type(w) != unit.Quantity) # assert w is unitless
assert(type(w) == np.ndarray) # assert w is a numpy array
# use leading term in taylor expansion: anecdotally, looks like it's in good agreement with
# bootstrapped uncertainty estimates up to ~0.5-0.75, then becomes an increasingly bad underestimate
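    # delta method: Var[log m] ~= Var[m] / m**2 with m = mean(exp(-w)) and
    # Var[m] = Var[exp(-w)] / n, giving std ~= std(exp(-w)) / (mean(exp(-w)) * sqrt(n))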
return np.std(np.exp(-w)) / (np.mean(np.exp(-w)) * np.sqrt(len(w)))
def stdev_kl_div(outer_samples):
"""Approximate the stdev of the estimate of E_rho log_rho_pi"""
# TODO: Propagate uncertainty from the estimates of log_rho_pi
# currently, just use standard error of mean of log_rho_pi_s
log_rho_pi_s = np.array([np.log(np.mean(np.exp(-sample['Ws']))) for sample in outer_samples])
return np.std(log_rho_pi_s) / np.sqrt(len(log_rho_pi_s))
def estimate_from_work_samples(work_samples):
"""Returns an estimate of log(rho(x) / pi(x)) from unitless work_samples initialized at x"""
return np.log(np.mean(np.exp(-np.array(work_samples))))
def inner_sample(noneq_sim, x, v, n_steps, marginal="full"):
if marginal == "full":
pass
elif marginal == "configuration":
v = noneq_sim.sample_v_given_x(x)
else:
raise (Exception("marginal must be `full` or `configuration`"))
return noneq_sim.accumulate_shadow_work(x, v, n_steps)['W_shad']
def collect_inner_samples_naive(x, v, noneq_sim, marginal="full", n_steps=1000, n_inner_samples=100):
"""Collect a fixed number of noneq trajectories starting from x,v"""
Ws = np.zeros(n_inner_samples)
for i in range(n_inner_samples):
Ws[i] = inner_sample(noneq_sim, x, v, n_steps, marginal)
return Ws
def collect_inner_samples_until_threshold(x, v, noneq_sim, marginal="full", initial_size=100, batch_size=5,
n_steps=1000, threshold=0.1, max_samples=1000):
"""Collect up to max_samples trajectories, potentially fewer if stdev of estimated log(rho(x,v) / pi(x,v)) is below threshold."""
Ws = []
# collect initial samples
for _ in range(initial_size):
Ws.append(inner_sample(noneq_sim, x, v, n_steps, marginal))
# keep adding batches until either stdev threshold is reached or budget is reached
while (stdev_log_rho_pi(np.array(Ws)) > threshold) and (len(Ws) <= (max_samples - batch_size)):
for _ in range(batch_size):
Ws.append(inner_sample(noneq_sim, x, v, n_steps, marginal))
# warn user if stdev threshold was not met
if (stdev_log_rho_pi(np.array(Ws)) > threshold):
message = "stdev_log_rho_pi(Ws) > threshold\n({:.3f} > {:.3f})".format(stdev_log_rho_pi(np.array(Ws)), threshold)
warnings.warn(message, RuntimeWarning)
    return np.array(Ws)
import os
import unittest
from unittest.util import safe_repr
import numpy as np
import time
from torch.utils.data import TensorDataset
import torch
from util import randomize_in_place
from config import CNNConfig
from DataHolder import DataHolder
from cnn import train_model_img_classification, CNN
def run_test(testClass):
"""
Function to run all the tests from a class of tests.
:param testClass: class for testing
:type testClass: unittest.TesCase
"""
suite = unittest.TestLoader().loadTestsFromTestCase(testClass)
unittest.TextTestRunner(verbosity=2).run(suite)
class TestEP4(unittest.TestCase):
"""
Class that test the functions from basic_functions module
"""
@classmethod
def setUpClass(cls):
        raw_X = np.load("self_driving_pi_car_data/train_data.npy")
from collections import OrderedDict
from scipy.spatial.transform import Rotation as R
from scipy.spatial.transform import Slerp
import numpy as np
import json
import multiprocessing as mp
from tqdm import tqdm
def pretty_print(ob):
print(json.dumps(ob, indent=4))
def euler_to_rot(angles):
# Euler ZYX to Rot
# Note that towr has (x, y, z) order
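    # builds R = Rz(z) @ Ry(y) @ Rx(x) element by element in row-major order;
    # the final reshape to 3x3 is assumed from the function's contract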
x = angles[0]
y = angles[1]
z = angles[2]
ret = np.array([
np.cos(y) * np.cos(z),
np.cos(z) * np.sin(x) * np.sin(y) - np.cos(x) * np.sin(z),
np.sin(x) * np.sin(z) + np.cos(x) * np.cos(z) * np.sin(y),
        np.cos(y) * np.sin(z),
        np.cos(x) * np.cos(z) + np.sin(x) * np.sin(y) * np.sin(z),
        np.cos(x) * np.sin(y) * np.sin(z) - np.cos(z) * np.sin(x),
        -np.sin(y),
        np.cos(y) * np.sin(x),
        np.cos(x) * np.cos(y)
    ]).reshape(3, 3)
    return ret
import numpy
from matplotlib import pyplot
from surrogates.kernels import MCMCSimulation
from surrogates.kernels.samplers.hmc import Hamiltonian
from surrogates.models.simple import UnconditionedModel
from surrogates.utils.distributions import Normal
from surrogates.utils.file import change_directory
from surrogates.utils.plotting import plot_corner, plot_log_p, plot_trace
def main():
std_devs = {"a": 0.05, "b": 50.0, "c": 5000.0}
priors = {
"a": Normal(numpy.array([0.0]), numpy.array([std_devs["a"]])),
"b": Normal(numpy.array([100.0]), numpy.array([std_devs["b"]])),
"c": Normal(numpy.array([0.0]), numpy.array([std_devs["c"]])),
}
model = UnconditionedModel(priors)
# Construct and run the simulation object.
initial_parameters = {
"a": numpy.array([0.0]),
"b": | numpy.array([0.0]) | numpy.array |
from KASD.initializers import initializers, serialize as _serialize_initializer, get as _get_initializer
from KASD.regularizers import regularizers, serialize as _serialize_regularizer, get as _get_regularizer
from KASD.constraints import constraints, serialize as _serialize_constraint, get as _get_constraint
from KASD.activations import activations
from ..initializers import randomize as _randomize_initializer
from ..regularizers import randomize as _randomize_regularizer
from ..constraints import randomize as _randomize_constraint
from ..utils.math import factors
from ..utils.rand_funcs import *
from .. import Process, run
from collections import deque
from copy import deepcopy
import numpy as np
data_formats = ['channels_last', 'channels_first']
paddings = ['valid', 'causal', 'same']
interpolations = ['nearest', 'bilinear']
implementations = [1, 2]
merge_modes = ['sum', 'mul', 'concat', 'ave']
def regularizer_(reg):
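    # round-trip: resolve the identifier, serialize to a config dict,
    # then randomize the serialized config in place before returning it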
if reg is None: return None
reg = _get_regularizer(reg)
reg = _serialize_regularizer(reg)
_randomize_regularizer(reg)
return reg
def initializer_(init):
if init is None: return None
init = _get_initializer(init)
init = _serialize_initializer(init)
_randomize_initializer(init)
return init
def constraint_(const, input_shape):
if const is None: return None
const = _get_constraint(const)
const = _serialize_constraint(const)
_randomize_constraint(const, input_shape)
return const
default_ranges = {
'Dense/units': [1, 128],
'Dropout/seed': [1, 1024],
'RepeatVector/n': [1, 64],
'ActivityRegularization/l1': [-1.0, 1.0],
'ActivityRegularization/l2': [-1.0, 1.0],
'SpatialDropout1D/seed': [1, 1024],
'SpatialDropout2D/seed': [1, 1024],
'SpatialDropout3D/seed': [1, 1024],
'Conv1D/filters': [1, 128],
'Conv2D/filters': [1, 128],
'SeparableConv1D/filters': [1, 128],
'SeparableConv1D/depth_multiplier': [1, 32],
'SeparableConv2D/filters': [1, 128],
'SeparableConv2D/depth_multiplier': [1, 32],
'DepthwiseConv2D/filters': [1, 128],
'DepthwiseConv2D/depth_multiplier': [1, 32],
'Conv2DTranspose/filters': [1, 128],
'Conv3D/filters': [1, 128],
'Conv3DTranspose/filters': [1, 128],
'UpSampling1D/size': [2, 32],
'UpSampling2D/size': ([2, 32], [2, 32]),
'UpSampling3D/size': ([2, 32], [2, 32], [2, 32]),
'ZeroPadding1D/padding': (([0, 32], [0, 32]),),
'ZeroPadding2D/padding': (([0, 32], [0, 32]), ([0, 32], [0, 32])),
'ZeroPadding3D/padding': (([0, 32], [0, 32]), ([0, 32], [0, 32]), ([0, 32], [0, 32])),
'SimpleRNN/units': [1, 128],
'GRU/units': [1, 128],
'LSTM/units': [1, 128],
'SimpleRNNCell/units': [1, 128],
'GRUCell/units': [1, 128],
'LSTMCell/units': [1, 128],
'CuDNNGRU/units': [1, 128],
'CuDNNLSTM/units': [1, 128],
'BatchNormalization/momentum': [-10, 10],
'BatchNormalization/epsilon': [1e-5, 1e-2],
'GaussianNoise/stddev': [1e-3, 10],
'AlphaDropout/seed': [1, 1024],
'LeakyReLU/alpha': [0, 16],
'ELU/alpha': [0, 16],
'ThresholdedReLU/theta': [0, 10],
'ReLU/threshold/max_value': [0, 16],
'ReLU/negative_slope': [0, 16],
'ConvLSTM2D/filters': [1, 128],
'ConvLSTM2DCell/filters': [1, 128]}
###Layer Samples###
def _sample_null(serial, attributes=[], ranges=default_ranges): pass
InputLayer_sample=Add_sample=Subtract_sample=Multiply_sample=Average_sample=Maximum_sample=Minimum_sample=Concatenate_sample=Lambda_sample=_sample_null
def Dot_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def normalize():
return bool(np.random.randint(0, 2))
run(queue, attributes, locals())
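# Usage sketch for the sampler protocol above (the serial dict below is
# hypothetical): each @Process-decorated attribute function is queued, and
# run(...) evaluates the queued samplers for the requested attributes.
#
#   serial = {'class_name': 'Dot', 'config': {...},
#             'input': ..., 'input_shape': (None, 8), 'output_shape': (None, 1)}
#   Dot_sample(serial, attributes=['normalize'])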
def Dense_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def units():
return np.random.randint(ranges['Dense/units'][0], ranges['Dense/units'][1]+1)
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice())
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Activation_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def activation():
return activations.choice()
run(queue, attributes, locals())
def Dropout_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def rate():
return np.random.sample()
@Process(serial, queue)
def noise_shape():
return noise_shape_(input_shape)
@Process(serial, queue)
def seed():
return np.random.randint(ranges['Dropout/seed'][0], ranges['Dropout/seed'][1]+1)
run(queue, attributes, locals())
def Flatten_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats+[None])
run(queue, attributes, locals())
def Reshape_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def target_shape():
_factors = factors(np.prod(input_shape[1:]), dims=np.array(output_shape[1:]).shape[0])
if not isinstance(_factors, (list, np.ndarray)):
_factors = np.array([[_factors]])
_factors = np.concatenate((_factors, np.flip(_factors, axis=-1)))
return tuple(_factors[np.random.randint(0, _factors.shape[0])].tolist())
run(queue, attributes, locals())
def Permute_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def dims():
return tuple(np.random.permutation(np.arange(np.array(input_shape[1:]).shape[0])+1).tolist())
run(queue, attributes, locals())
def RepeatVector_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def n():
return np.random.randint(ranges['RepeatVector/n'][0], ranges['RepeatVector/n'][1]+1)
run(queue, attributes, locals())
def ActivityRegularization_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def l1():
return np.random.uniform(ranges['ActivityRegularization/l1'][0], ranges['ActivityRegularization/l1'][1])
@Process(serial, queue)
def l2():
return np.random.uniform(ranges['ActivityRegularization/l2'][0], ranges['ActivityRegularization/l2'][1])
run(queue, attributes, locals())
def Masking_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def mask_value():
return np.random.sample()
run(queue, attributes, locals())
def SpatialDropout1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def rate():
return np.random.sample()
@Process(serial, queue)
def noise_shape():
return noise_shape_(input_shape)
@Process(serial, queue)
def seed():
return np.random.randint(ranges['SpatialDropout1D/seed'][0], ranges['SpatialDropout1D/seed'][1]+1)
run(queue, attributes, locals())
def SpatialDropout2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def rate():
return np.random.sample()
@Process(serial, queue)
def noise_shape():
return noise_shape_(input_shape)
@Process(serial, queue)
def seed():
return np.random.randint(ranges['SpatialDropout2D/seed'][0], ranges['SpatialDropout2D/seed'][1]+1)
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def SpatialDropout3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def rate():
return np.random.sample()
@Process(serial, queue)
def noise_shape():
return noise_shape_(input_shape)
@Process(serial, queue)
def seed():
return np.random.randint(ranges['SpatialDropout3D/seed'][0], ranges['SpatialDropout3D/seed'][1]+1)
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def Conv1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv1D/filters'][0], ranges['Conv1D/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats) if padding() != 'causal' else 'channels_last'
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Conv2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv2D/filters'][0], ranges['Conv2D/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def SeparableConv1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['SeparableConv1D/filters'][0], ranges['SeparableConv1D/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def depth_multiplier():
return np.random.randint(ranges['SeparableConv1D/depth_multiplier'][0], ranges['SeparableConv1D/depth_multiplier'][1]+1)
@Process(serial, queue)
def depthwise_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def pointwise_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def depthwise_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def pointwise_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def depthwise_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def pointwise_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def SeparableConv2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['SeparableConv2D/filters'][0], ranges['SeparableConv2D/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def depth_multiplier():
return np.random.randint(ranges['SeparableConv2D/depth_multiplier'][0], ranges['SeparableConv2D/depth_multiplier'][1]+1)
@Process(serial, queue)
def depthwise_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def pointwise_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def depthwise_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def pointwise_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def depthwise_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def pointwise_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def DepthwiseConv2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def depth_multiplier():
return np.random.randint(ranges['DepthwiseConv2D/depth_multiplier'][0], ranges['DepthwiseConv2D/depth_multiplier'][1]+1)
@Process(serial, queue)
def depthwise_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def depthwise_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def depthwise_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Conv2DTranspose_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv2DTranspose/filters'][0], ranges['Conv2DTranspose/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return (min(dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))),)*2 #assert dilation_rate[0] == dilation_rate[1]
@Process(serial, queue)
def output_padding():
return output_padding_(strides())
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Conv3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv3D/filters'][0], ranges['Conv3D/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides(), dilation_rate=dilation_rate())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), dilation_rate=dilation_rate(), null=np.random.randint(0, 2))
@Process(serial, queue)
def dilation_rate():
return dilation_rate_(input_shape, data_format(), kernel_size(), strides(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Conv3DTranspose_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv3DTranspose/filters'][0], ranges['Conv3DTranspose/filters'][1]+1)
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), null=np.random.randint(0, 2))
@Process(serial, queue)
def output_padding():
return output_padding_(strides())
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def Cropping1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def cropping():
return cropping_(input_shape, 'channels_last')[0]
run(queue, attributes, locals())
def Cropping2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def cropping():
return cropping_(input_shape, data_format())
run(queue, attributes, locals())
def Cropping3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def cropping():
return cropping_(input_shape, data_format())
run(queue, attributes, locals())
def UpSampling1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def size():
return np.random.randint(ranges['UpSampling1D/size'][0], ranges['UpSampling1D/size'][1]+1)
run(queue, attributes, locals())
def UpSampling2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def size():
return tuple([np.random.randint(size[0], size[1]+1) for size in ranges['UpSampling2D/size']])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def interpolation():
return np.random.choice(interpolations)
run(queue, attributes, locals())
def UpSampling3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def size():
return tuple([np.random.randint(size[0], size[1]+1) for size in ranges['UpSampling3D/size']])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def ZeroPadding1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def padding():
return tuple([(np.random.randint(padding[0][0], padding[0][1]+1), np.random.randint(padding[1][0], padding[1][1]+1)) for padding in ranges['ZeroPadding1D/padding']])[0]
run(queue, attributes, locals())
def ZeroPadding2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def padding():
return tuple([(np.random.randint(padding[0][0], padding[0][1]+1), np.random.randint(padding[1][0], padding[1][1]+1)) for padding in ranges['ZeroPadding2D/padding']])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def ZeroPadding3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def padding():
return tuple([(np.random.randint(padding[0][0], padding[0][1]+1), np.random.randint(padding[1][0], padding[1][1]+1)) for padding in ranges['ZeroPadding3D/padding']])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def MaxPooling1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def MaxPooling2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def MaxPooling3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def AveragePooling1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def AveragePooling2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def AveragePooling3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def pool_size():
return pool_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_pooling_(input_shape, data_format(), pool_size())
@Process(serial, queue)
def padding():
return np.random.choice(np.delete(paddings, [1])) #removes 'causal'
run(queue, attributes, locals())
def GlobalMaxPooling1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def GlobalMaxPooling2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def GlobalMaxPooling3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def GlobalAveragePooling2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def GlobalAveragePooling1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def GlobalAveragePooling3D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
run(queue, attributes, locals())
def LocallyConnected1D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv1D/filters'][0], ranges['Conv1D/filters'][1]+1)
@Process(serial, queue)
def padding():
return 'valid'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice())
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def LocallyConnected2D_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def filters():
return np.random.randint(ranges['Conv1D/filters'][0], ranges['Conv1D/filters'][1]+1)
@Process(serial, queue)
def padding():
return 'valid'
@Process(serial, queue)
def data_format():
return np.random.choice(data_formats)
@Process(serial, queue)
def kernel_size():
return kernel_size_(input_shape, data_format(), strides())
@Process(serial, queue)
def strides():
return strides_(input_shape, data_format(), kernel_size(), null=np.random.randint(0, 2))
@Process(serial, queue)
def activation():
return activations.choice(include=[None])
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['2DMatrix'])) #removes Identity
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
run(queue, attributes, locals())
def RNN_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def cell():
def rand_cell(cell):
cell['input'] = input_
cell['input_shape'] = input_shape
cell['output_shape'] = output_shape
sample_functions[cell['class_name']](cell, attributes=attributes, ranges=ranges)
del cell['input'], cell['input_shape'], cell['output_shape']
cells = deepcopy(serial['config']['cell'])
        if isinstance(cells, list):
            for c in cells:
                rand_cell(c)
        else:
            rand_cell(cells)
return cells
@Process(serial, queue)
def return_sequences():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def return_state():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def go_backwards():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def stateful():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def unroll():
return bool(np.random.randint(0, 2))
run(queue, attributes, locals())
def SimpleRNN_sample(serial, attributes=[], ranges=default_ranges):
input_, input_shape, output_shape, queue = serial['input'], serial['input_shape'], serial['output_shape'], deque([])
@Process(serial, queue)
def units():
return np.random.randint(ranges['SimpleRNN/units'][0], ranges['SimpleRNN/units'][1]+1)
@Process(serial, queue)
def activation():
return activations.choice()
@Process(serial, queue)
def use_bias():
return bool(np.random.randint(0, 2))
@Process(serial, queue)
def kernel_initializer():
return initializer_(initializers.choice())
@Process(serial, queue)
def recurrent_initializer():
return initializer_(initializers.choice())
@Process(serial, queue)
def bias_initializer():
return initializer_(initializers.choice(exclude=initializers.labels['>=2DMatrix'])) #removes Identity and Orthogonal
@Process(serial, queue)
def kernel_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def recurrent_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def bias_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def activity_regularizer():
return regularizer_(regularizers.choice(include=[None]))
@Process(serial, queue)
def kernel_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def recurrent_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def bias_constraint():
return constraint_(constraints.choice(include=[None]), input_shape)
@Process(serial, queue)
def dropout():
return np.random.sample()
@Process(serial, queue)
def recurrent_dropout():
        return np.random.sample()
    run(queue, attributes, locals())
import os
import multiprocessing
from . import data_stream
from . import layers
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
import umap
from sklearn.manifold import TSNE
from tensorflow import keras
from tensorflow.keras import Input, losses
from tensorflow.keras.layers import Flatten, Dense, Concatenate
class voxel_model() :
def __init__(self, pPathToData, pCSVTrainFile = None, pCSVTestFile = None, pBatchSize = 32, pName = '', pIsUniformNorm = True) :
self.mBatchSize = pBatchSize
self.mModel = None
self.mName = pName
self.mWeightFile = None
self.mVoxDim = (32, 32, 32)
self.mPathToData = pPathToData
self.isUniformNorm = pIsUniformNorm
self.mRootFolder = os.path.join(os.getcwd(), 'python', 'metadata')
if not os.path.exists(self.mRootFolder) :
os.mkdir(self.mRootFolder)
self.mRootFolder = os.path.join(self.mRootFolder, pName + '_voxel_' + str(self.mVoxDim[0]), '')
if not os.path.exists(self.mRootFolder) :
os.mkdir(self.mRootFolder)
if pCSVTrainFile is not None :
self.mTrain_csv = pd.read_csv(pCSVTrainFile)
self.mTest_csv = pd.read_csv(pCSVTestFile)
X_train, y_train, labels = self.__csvToDataset(self.mTrain_csv)
X_test, y_test, labels = self.__csvToDataset(self.mTest_csv)
self.mLabels = labels
self.mTrainGen = data_stream.dataStream_VX(X_train, y_train, self.mVoxDim, self.mBatchSize, self.mPathToData, self.isUniformNorm)
self.mValidGen = data_stream.dataStream_VX(X_test, y_test, self.mVoxDim, self.mBatchSize, self.mPathToData, self.isUniformNorm)
self.mNumClasses = labels.shape[0]
def build_network(self) :
inputGrid = Input(shape=self.mVoxDim)
grid = tf.expand_dims(inputGrid, -1)
if self.isUniformNorm :
grid = layers.voxel_encoder(
name = '32_to_8',
filters = 2,
kernel_size = 8,
stride = 4,
padding = 'same',
pool_size = 8,
pool_stride = 4,
pool_padding = 'same')(grid)
else :
grid = layers.voxel_encoder(
name = '32_to_8',
filters = 3,
kernel_size = 8,
stride = 4,
padding = 'same',
pool_size = 8,
pool_stride = 4,
pool_padding = 'same')(grid)
features = Flatten(name='features_grid')(grid)
if self.isUniformNorm :
inputAR = Input(shape=(8,3))
features_ar = layers.point_encoder(name = 'point_enc')(inputAR)
features_ar = Flatten(name='features_ar')(features_ar)
features = Concatenate(name='features_global')([features, features_ar])
inputs = [inputGrid, inputAR,]
else :
features = Flatten(name='features_global')(features)
inputs = [inputGrid,]
classification = layers.classifier(
name = 'classifier',
units = 32,
drop_rate = 0.3,
num_classes = self.mNumClasses)(features)
self.mModel = keras.Model(inputs=inputs, outputs=[classification], name="bim_vox")
self.mModel.summary()
def __compile_model(self) :
self.mModel.compile(
loss=losses.CategoricalCrossentropy(),
metrics=['accuracy',],
optimizer=tf.keras.optimizers.Adam())
def train_model(self, pNumEpochs) :
self.__compile_model()
termination_cb = keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, min_delta=0.02, mode='min', verbose=1)
checkpoint_cb = keras.callbacks.ModelCheckpoint(self.mRootFolder, monitor='val_loss', mode='min', save_best_only=True, save_weights_only=True)
history = self.mModel.fit(x = self.mTrainGen,
validation_data = self.mValidGen,
callbacks=[termination_cb, checkpoint_cb,],
epochs = pNumEpochs,
verbose = 1,)
self.__export_history(history)
self.__export_metrics()
print('Exporting model...')
keras.utils.plot_model(self.mModel, to_file=os.path.join(self.mRootFolder, "voxel_" + str(self.mVoxDim[0]) + ".png"), show_shapes=True, dpi=300)
self.__export_features(self.mTrainGen)
def __get_features_from_layer(self, pLayerName, pStream) :
features_model = keras.Model(inputs=self.mModel.input, outputs=self.mModel.get_layer(pLayerName).output)
features = features_model.predict(x = pStream)
return features
def __export_features(self, pStream) :
print('Exporting features...')
np.savez_compressed(os.path.join(self.mRootFolder, 'features'),
a=self.__get_features_from_layer('features_global', pStream),
b=pStream.y,
c=self.mLabels)
def __export_history(self, history) :
print('Exporting train history...')
fig = plt.figure()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Train/Val Loss')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['train', 'val'])
plt.xticks(np.arange(len(history.history['loss'])))
plt.grid()
fig.savefig(os.path.join(self.mRootFolder, 'history.pdf'), bbox_inches='tight')
plt.clf()
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Train/Val accuracy')
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.legend(['train', 'val'])
plt.xticks(np.arange(len(history.history['accuracy'])))
plt.grid()
fig.savefig(os.path.join(self.mRootFolder, 'accuracy.pdf'), bbox_inches='tight')
def __export_metrics(self) :
print('Exporting metrics...')
self.mModel.load_weights(self.mRootFolder)
predictions = self.mModel.predict(x = self.mTrainGen, verbose=1)
y_pred = np.argmax(predictions, axis=1)
        y_true = np.argmax(self.mTrainGen.y, axis=1)
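        # Sketch of the remaining export, assuming a confusion-matrix heatmap
        # built from the y_true/y_pred computed above (the original body is
        # truncated here):
        cm = confusion_matrix(y_true, y_pred)
        fig = plt.figure()
        sns.heatmap(cm, annot=True, fmt='d',
                    xticklabels=self.mLabels, yticklabels=self.mLabels)
        fig.savefig(os.path.join(self.mRootFolder, 'confusion_matrix.pdf'), bbox_inches='tight')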
"""
General utilities including numpy extensions and graph utils.
"""
from typing import Iterable, List, Union
import numba
import numpy as np
import scipy.sparse as sp
import warnings
from scipy.sparse.csgraph import minimum_spanning_tree, connected_components
from sklearn.model_selection import train_test_split
__all__ = [
'cartesian_product',
'edges_to_sparse',
'train_val_test_split_adjacency',
'train_val_test_split_tabular',
'sort_nodes',
'sparse_feeder',
'gumbel_sample_random_walks',
'edge_cover',
'construct_line_graph',
'sample_random_walks_per_node',
'sample_random_walks_numba',
]
def cartesian_product(x: np.ndarray, y: np.ndarray) -> np.ndarray:
"""
Form the cartesian product (i.e. all pairs of values) between two arrays.
Parameters
----------
x
Left array in the cartesian product. Shape [Nx]
y
Right array in the cartesian product. Shape [Ny]
Returns
-------
np.ndarray
        Cartesian product. Shape [Nx * Ny, 2]
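    Examples
    --------
    A quick check of the pairing order produced by the meshgrid construction
    in the function body:

    >>> cartesian_product(np.array([1, 2]), np.array([3, 4]))
    array([[1, 3],
           [1, 4],
           [2, 3],
           [2, 4]])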
"""
return np.array(np.meshgrid(x, y)).T.reshape(-1, 2)
def edges_to_sparse(edges: np.ndarray, num_nodes: int, weights: np.ndarray = None) -> sp.csr_matrix:
"""Create a sparse adjacency matrix from an array of edge indices and (optionally) values.
Parameters
----------
edges
Array with each row storing indices of an edge as (u, v). Shape [num_edges, 2]
num_nodes
Number of nodes in the resulting graph.
weights
Weights of the edges. If None, all edges weights are set to 1. Shape [num_edges]
Returns
-------
sp.csr_matrix
Adjacency matrix in CSR format.
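    Examples
    --------
    Two directed edges on three nodes (default weights of 1):

    >>> A = edges_to_sparse(np.array([[0, 1], [1, 2]]), num_nodes=3)
    >>> A.toarray()
    array([[0., 1., 0.],
           [0., 0., 1.],
           [0., 0., 0.]])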
"""
if weights is None:
weights = np.ones(edges.shape[0])
return sp.coo_matrix((weights, (edges[:, 0], edges[:, 1])), shape=(num_nodes, num_nodes)).tocsr()
def train_val_test_split_tabular(
*arrays: Iterable[Union[np.ndarray, sp.spmatrix]],
train_size: float = 0.5,
val_size: float = 0.3,
test_size: float = 0.2,
stratify: np.ndarray = None,
random_state: int = None
) -> List[Union[np.ndarray, sp.spmatrix]]:
"""Split the arrays or matrices into random train, validation and test subsets.
Parameters
----------
*arrays
Allowed inputs are lists, numpy arrays or scipy-sparse matrices with the same length / shape[0].
train_size
Proportion of the dataset included in the train split.
val_size
Proportion of the dataset included in the validation split.
test_size
Proportion of the dataset included in the test split.
stratify
If not None, data is split in a stratified fashion, using this as the class labels.
random_state
        random_state is the seed used by the random number generator.
Returns
-------
list, length=3 * len(arrays)
List containing train-validation-test split of inputs.
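    Examples
    --------
    Splitting ten rows with the default 0.5/0.3/0.2 proportions:

    >>> X = np.arange(10).reshape(10, 1)
    >>> X_train, X_val, X_test = train_val_test_split_tabular(X, random_state=0)
    >>> len(X_train), len(X_val), len(X_test)
    (5, 3, 2)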
"""
if len(set(array.shape[0] for array in arrays)) != 1:
raise ValueError("Arrays must have equal first dimension.")
idx = np.arange(arrays[0].shape[0])
idx_train_and_val, idx_test = train_test_split(idx,
random_state=random_state,
train_size=(train_size + val_size),
test_size=test_size,
stratify=stratify)
if stratify is not None:
stratify = stratify[idx_train_and_val]
idx_train, idx_val = train_test_split(idx_train_and_val,
random_state=random_state,
train_size=(train_size / (train_size + val_size)),
test_size=(val_size / (train_size + val_size)),
stratify=stratify)
result = []
for X in arrays:
result.append(X[idx_train])
result.append(X[idx_val])
result.append(X[idx_test])
return result
def train_val_test_split_adjacency(A, p_val=0.10, p_test=0.05, random_state=0, neg_mul=1,
every_node=True, connected=False, undirected=False,
use_edge_cover=True, set_ops=True, asserts=False):
"""Create edge and non-edge train, validation and test sets.
Split the edges of the adjacency matrix into train, validation and test edges.
Randomly sample validation and test non-edges.
Parameters
----------
A : scipy.sparse.spmatrix
Sparse unweighted adjacency matrix
p_val : float
Percentage of validation edges. Default p_val=0.10
p_test : float
Percentage of test edges. Default p_test=0.05
random_state : int
Seed for numpy.random. Default seed=0
neg_mul : int
What multiplicity of negative samples (non-edges) to have in the test/validation set
w.r.t the number of edges, i.e. len(non-edges) = L * len(edges). Default neg_mul=1
every_node : bool
Make sure each node appears at least once in the train set. Default every_node=True
connected : bool
Make sure the training graph is still connected after the split
undirected : bool
Whether to make the split undirected, that is if (i, j) is in val/test set then (j, i) is there as well.
Default undirected=False
use_edge_cover: bool
Whether to use (approximate) edge_cover to find the minimum set of edges that cover every node.
Only active when every_node=True. Default use_edge_cover=True
set_ops : bool
        Whether to use set operations to construct the test zeros. Default set_ops=True
Otherwise use a while loop.
asserts : bool
Unit test like checks. Default asserts=False
Returns
-------
train_ones : array-like, shape [n_train, 2]
Indices of the train edges
val_ones : array-like, shape [n_val, 2]
Indices of the validation edges
val_zeros : array-like, shape [n_val, 2]
Indices of the validation non-edges
test_ones : array-like, shape [n_test, 2]
Indices of the test edges
test_zeros : array-like, shape [n_test, 2]
Indices of the test non-edges
"""
assert p_val + p_test > 0
assert A.max() == 1 # no weights
assert A.min() == 0 # no negative edges
assert A.diagonal().sum() == 0 # no self-loops
assert not np.any(A.sum(0).A1 + A.sum(1).A1 == 0) # no dangling nodes
is_undirected = (A != A.T).nnz == 0
if undirected:
        assert is_undirected  # make sure the graph is actually undirected (A is symmetric)
A = sp.tril(A).tocsr() # consider only upper triangular
A.eliminate_zeros()
else:
if is_undirected:
            warnings.warn('Graph appears to be undirected. Did you forget to set undirected=True?')
np.random.seed(random_state)
E = A.nnz
N = A.shape[0]
s_train = int(E * (1 - p_val - p_test))
idx = np.arange(N)
# hold some edges so each node appears at least once
if every_node:
if connected:
assert connected_components(A)[0] == 1 # make sure original graph is connected
A_hold = minimum_spanning_tree(A)
else:
A.eliminate_zeros() # makes sure A.tolil().rows contains only indices of non-zero elements
d = A.sum(1).A1
if use_edge_cover:
hold_edges = edge_cover(A)
# make sure the training percentage is not smaller than len(edge_cover)/E when every_node is set to True
min_size = hold_edges.shape[0]
if min_size > s_train:
raise ValueError('Training percentage too low to guarantee every node. Min train size needed is {:.2f}.'
.format(min_size / E))
else:
# make sure the training percentage is not smaller than N/E when every_node is set to True
if N > s_train:
raise ValueError('Training percentage too low to guarantee every node. Min train size needed is {:.2f}.'
.format(N / E))
hold_edges_d1 = np.column_stack(
(idx[d > 0], np.row_stack(map(np.random.choice, A[d > 0].tolil().rows))))
if np.any(d == 0):
hold_edges_d0 = np.column_stack((np.row_stack(map(np.random.choice, A[:, d == 0].T.tolil().rows)),
idx[d == 0]))
hold_edges = np.row_stack((hold_edges_d0, hold_edges_d1))
else:
hold_edges = hold_edges_d1
if asserts:
assert np.all(A[hold_edges[:, 0], hold_edges[:, 1]])
assert len(np.unique(hold_edges.flatten())) == N
A_hold = edges_to_sparse(hold_edges, N)
A_hold[A_hold > 1] = 1
A_hold.eliminate_zeros()
A_sample = A - A_hold
s_train = s_train - A_hold.nnz
else:
A_sample = A
idx_ones = np.random.permutation(A_sample.nnz)
ones = np.column_stack(A_sample.nonzero())
train_ones = ones[idx_ones[:s_train]]
test_ones = ones[idx_ones[s_train:]]
# return back the held edges
if every_node:
train_ones = np.row_stack((train_ones, np.column_stack(A_hold.nonzero())))
n_test = len(test_ones) * neg_mul
if set_ops:
# generate slightly more completely random non-edge indices than needed and discard any that hit an edge
        # much faster compared to a while loop
# in the future: estimate the multiplicity (currently fixed 1.3/2.3) based on A_obs.nnz
if undirected:
random_sample = np.random.randint(0, N, [int(2.3 * n_test), 2])
random_sample = random_sample[random_sample[:, 0] > random_sample[:, 1]]
else:
random_sample = np.random.randint(0, N, [int(1.3 * n_test), 2])
random_sample = random_sample[random_sample[:, 0] != random_sample[:, 1]]
# discard ones
random_sample = random_sample[A[random_sample[:, 0], random_sample[:, 1]].A1 == 0]
# discard duplicates
random_sample = random_sample[np.unique(random_sample[:, 0] * N + random_sample[:, 1], return_index=True)[1]]
# only take as much as needed
test_zeros = np.row_stack(random_sample)[:n_test]
assert test_zeros.shape[0] == n_test
else:
test_zeros = []
while len(test_zeros) < n_test:
i, j = np.random.randint(0, N, 2)
if A[i, j] == 0 and (not undirected or i > j) and (i, j) not in test_zeros:
test_zeros.append((i, j))
test_zeros = np.array(test_zeros)
# split the test set into validation and test set
s_val_ones = int(len(test_ones) * p_val / (p_val + p_test))
s_val_zeros = int(len(test_zeros) * p_val / (p_val + p_test))
val_ones = test_ones[:s_val_ones]
test_ones = test_ones[s_val_ones:]
val_zeros = test_zeros[:s_val_zeros]
test_zeros = test_zeros[s_val_zeros:]
if undirected:
# put (j, i) edges for every (i, j) edge in the respective sets and form back original A
symmetrize = lambda x: np.row_stack((x, np.column_stack((x[:, 1], x[:, 0]))))
train_ones = symmetrize(train_ones)
val_ones = symmetrize(val_ones)
val_zeros = symmetrize(val_zeros)
test_ones = symmetrize(test_ones)
test_zeros = symmetrize(test_zeros)
A = A.maximum(A.T)
if asserts:
set_of_train_ones = set(map(tuple, train_ones))
assert train_ones.shape[0] + test_ones.shape[0] + val_ones.shape[0] == A.nnz
assert (edges_to_sparse(np.row_stack((train_ones, test_ones, val_ones)), N) != A).nnz == 0
assert set_of_train_ones.intersection(set(map(tuple, test_ones))) == set()
assert set_of_train_ones.intersection(set(map(tuple, val_ones))) == set()
assert set_of_train_ones.intersection(set(map(tuple, test_zeros))) == set()
assert set_of_train_ones.intersection(set(map(tuple, val_zeros))) == set()
assert len(set(map(tuple, test_zeros))) == len(test_ones) * neg_mul
assert len(set(map(tuple, val_zeros))) == len(val_ones) * neg_mul
assert not connected or connected_components(A_hold)[0] == 1
assert not every_node or ((A_hold - A) > 0).sum() == 0
return train_ones, val_ones, val_zeros, test_ones, test_zeros
def sort_nodes(z, deg=None):
"""Sort the nodes such that consecutive nodes belong to the same cluster.
Clusters are sorted from smallest to largest.
Optionally also sort by node degrees within each cluster.
Parameters
----------
z : array-like, shape [n_samples]
The cluster indicators (labels)
deg : array-like, shape [n_samples]
Degree of each node
Returns
-------
o : array-like, shape [n_samples]
Indices of the nodes that give the desired sorting
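    Examples
    --------
    The singleton cluster 0 comes first, then cluster 1, then cluster 2:

    >>> sort_nodes(np.array([1, 1, 0, 2, 2, 2]))
    array([2, 0, 1, 3, 4, 5])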
"""
_, idx, cnts = np.unique(z, return_counts=True, return_inverse=True)
counts = cnts[idx]
if deg is None:
return np.lexsort((z, counts))
else:
return np.lexsort((deg, z, counts))
def sparse_feeder(M):
"""Convert a sparse matrix to the format suitable for feeding as a tf.SparseTensor.
Parameters
----------
M : sp.spmatrix
Matrix to convert.
Returns
-------
indices : array-like, shape [num_edges, 2]
Indices of the nonzero elements.
values : array-like, shape [num_edges]
Values of the nonzero elements.
shape : tuple
Shape of the matrix.
"""
M = sp.coo_matrix(M)
return np.vstack((M.row, M.col)).T, M.data, M.shape
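# A minimal usage sketch (hypothetical 2x2 matrix):
# >>> M = sp.coo_matrix(np.array([[0, 2], [3, 0]]))
# >>> indices, values, shape = sparse_feeder(M)
# >>> indices.tolist(), values.tolist(), shape
# ([[0, 1], [1, 0]], [2, 3], (2, 2))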
def gumbel_sample_random_walks(A, walks_per_node, walk_length, random_state=None):
"""Sample random walks from a given graph using the Gumbel trick.
Parameters
----------
A : sp.spmatrix
Sparse adjacency matrix
walks_per_node : int
The number of random walks from each node.
walk_length : int
The length of each random walk.
random_state : int or None
Random seed for the numpy RNG.
Returns
-------
random_walks : array-like, shape [N*r, l]
The sampled random walks
"""
if random_state is not None:
np.random.seed(random_state)
num_nodes = A.shape[0]
samples = []
    prev_nodes = np.random.permutation(np.repeat(np.arange(num_nodes), walks_per_node))
from typing import Optional
import numpy as np
from dataset.augmentors import BaseAugmentor
class DualAugmentor(BaseAugmentor):
"""
A class that applies 2 augmentors in parallel and creates a batch for SimCLR-based experiments.
"""
def __init__(self, augmentor1: Optional[BaseAugmentor], augmentor2: Optional[BaseAugmentor]):
assert issubclass(type(augmentor1), BaseAugmentor) or augmentor1 is None
assert issubclass(type(augmentor2), BaseAugmentor) or augmentor2 is None
self._augmentor1: BaseAugmentor = augmentor1
self._augmentor2: BaseAugmentor = augmentor2
def augment_single(self, x: np.ndarray) -> np.ndarray:
raise NotImplementedError("Dual augmentor overwrites augment_batch, use that instead")
def augment_batch(self, batch: np.ndarray) -> np.ndarray:
# We directly overwrite augment_batch instead of augment_single because of the nature of the augmentor.
# NOTE - Important to copy, otherwise it's the same batch for both augmentation channels
batch1: np.ndarray = batch.copy()
batch2: np.ndarray = batch.copy()
if self._augmentor1 is not None:
batch1 = self._augmentor1.augment_batch(batch1)
if self._augmentor2 is not None:
batch2 = self._augmentor2.augment_batch(batch2)
        return np.vstack((batch1, batch2))
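# A minimal usage sketch (hypothetical batch; with both augmentors set to None the
# two views are plain copies, so the output batch simply doubles in size):
# >>> dual = DualAugmentor(None, None)
# >>> dual.augment_batch(np.zeros((8, 32))).shape
# (16, 32)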
#!/usr/bin/env python3
import numpy as np
import sys, os
from netCDF4 import Dataset
import netCDF4
import h5py
from osgeo import osr, ogr
import csv
import json
import re
import glob
import uuid
import pkg_resources
import warnings
from datetime import datetime
def write_atl14meta(dst,fileout,ncTemplate,args):
# setup basic dictionary of attributes to touch
root_info={'asas_release':'SET_BY_PGE', 'date_created':'', 'fileName':'', 'geospatial_lat_max':0., \
'geospatial_lat_min':0., 'geospatial_lon_max':0., 'geospatial_lon_min':0., \
'netcdfversion':'', 'history':'SET_BY_PGE', \
'identifier_product_format_version':'SET_BY_PGE', 'time_coverage_duration':0., \
'time_coverage_end':'', 'time_coverage_start':'', 'uuid':''}
# copy attributes, dimensions, variables, and groups from template
if 'ATL15' in os.path.basename(fileout):
ncTemplate = ncTemplate.replace('atl14','atl15')
with Dataset(ncTemplate,'r') as src:
# copy attributes
for name in src.ncattrs():
dst.setncattr(name, src.getncattr(name))
# copy dimensions
for name, dimension in src.dimensions.items():
dst.createDimension(
                name, (len(dimension) if not dimension.isunlimited() else None))
# copy variables
for name, variable in src.variables.items():
x = dst.createVariable(name, variable.datatype, variable.dimensions)
dst.variables[name][:] = src.variables[name][:]
# copy groups, recursively
for grp in walktree(src):
for child in grp:
dg = dst.createGroup(child.path)
for name in child.ncattrs():
dg.setncattr(name,child.getncattr(name))
for name, dimension in child.dimensions.items():
dg.createDimension(name, (len(dimension) if not dimension.isunlimited() else None))
for name, variable in child.variables.items():
x = dg.createVariable(name, variable.datatype, variable.dimensions)
dg.variables[name][:] = child.variables[name][:]
# build ATL11 lineage
set_lineage(dst,root_info,args)
# lat/lon bounds
set_geobounds(dst,fileout,root_info)
# set file and date attributes
root_info.update({'netcdfversion': netCDF4.__netcdf4libversion__})
root_info.update({'uuid': str(uuid.uuid4())})
dst['METADATA/DatasetIdentification'].setncattr('uuid', str(uuid.uuid4()).encode('ASCII'))
dateval = str(datetime.now().date())
dateval = dateval+'T'+str(datetime.now().time())+'Z'
root_info.update({'date_created': dateval})
dst['METADATA/DatasetIdentification'].setncattr('creationDate', str(datetime.now().date()))
root_info.update({'fileName': os.path.basename(fileout)})
dst['METADATA/DatasetIdentification'].setncattr('fileName', os.path.basename(fileout))
# apply dict of root level attributes
for key, keyval in root_info.items():
dst.setncattr(key, keyval)
# To recursively step through groups
def walktree(top):
yield top.groups.values()
for value in top.groups.values():
yield from walktree(value)
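# A minimal usage sketch (hypothetical file): visit every group at any depth.
# >>> with Dataset('template.nc', 'r') as src:
# ...     for grp in walktree(src):
# ...         for child in grp:
# ...             print(child.path)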
def set_lineage(dst,root_info,args):
tilepath = args.tiles_dir
atl11path = args.ATL11_lineage_dir
# list of lineage attributes
lineage = []
# regular expression for extracting ATL11 parameters
rx = re.compile(r'(ATL\d{2})_(\d{4})(\d{2})_(\d{2})(\d{2})_(\d{3})_(\d{2})(.*?).h5$')
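    # e.g. (hypothetical filename): rx.findall('ATL11_007603_0310_005_01.h5')
    # -> [('ATL11', '0076', '03', '03', '10', '005', '01', '')]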
# For each tile:
min_start_delta_time = np.finfo(np.float64()).max
    max_end_delta_time = np.finfo(np.float64()).min
ATL11_files=set()
for tile in glob.iglob(os.path.join(tilepath,'*.h5')):
with h5py.File(tile,'r') as h5f:
inputs=str(h5f['/meta/'].attrs['input_files'])
if inputs[0]=='b':
inputs=inputs[1:]
ATL11_files.update(inputs.replace("'",'').split(','))
for FILE in ATL11_files:
# extract parameters from filename
PRD,TRK,GRAN,SCYC,ECYC,RL,VERS,AUX = rx.findall(FILE).pop()
with h5py.File(os.path.join(atl11path,FILE),'r') as fileID:
# extract ATL11 attributes from files
UUID = fileID['METADATA']['DatasetIdentification'].attrs['uuid'].decode('utf-8')
SGEOSEG = fileID['ancillary_data/start_geoseg'][0]
EGEOSEG = fileID['ancillary_data/end_geoseg'][0]
SORBIT = fileID['ancillary_data/start_orbit'][0]
EORBIT = fileID['ancillary_data/end_orbit'][0]
sdeltatime = fileID['ancillary_data/start_delta_time'][0]
edeltatime = fileID['ancillary_data/end_delta_time'][0]
# track earliest and latest delta time and UTC
if sdeltatime < min_start_delta_time:
sUTCtime = fileID['ancillary_data/data_start_utc'][0].decode('utf-8')
min_start_delta_time = sdeltatime
if edeltatime > max_end_delta_time:
eUTCtime = fileID['ancillary_data/data_end_utc'][0].decode('utf-8')
max_end_delta_time = edeltatime
# merge attributes as a tuple
attrs = (FILE,PRD,int(TRK),int(GRAN),int(SCYC),int(ECYC),int(VERS),UUID,int(SGEOSEG),
int(EGEOSEG),int(SORBIT),int(EORBIT))
# add attributes to list, if not already present
if attrs not in lineage:
lineage.append(attrs)
# reduce to unique lineage attributes (no repeat files)
# sorted(set(lineage))
# sort and set lineage attributes
slineage = sorted(lineage,key=lambda x: (x[0]))
dst['METADATA/Lineage/ATL11'].setncattr('fileName',list(zip(*slineage))[0])
dst['METADATA/Lineage/ATL11'].setncattr('shortName',list(zip(*slineage))[1])
dst['METADATA/Lineage/ATL11'].setncattr('start_rgt',list(zip(*slineage))[2])
dst['METADATA/Lineage/ATL11'].setncattr('end_rgt',list(zip(*slineage))[2])
dst['METADATA/Lineage/ATL11'].setncattr('start_region',list(zip(*slineage))[3])
dst['METADATA/Lineage/ATL11'].setncattr('end_region',list(zip(*slineage))[3])
dst['METADATA/Lineage/ATL11'].setncattr('start_cycle',list(zip(*slineage))[4])
dst['METADATA/Lineage/ATL11'].setncattr('end_cycle',list(zip(*slineage))[5])
dst['METADATA/Lineage/ATL11'].setncattr('version',list(zip(*slineage))[6])
dst['METADATA/Lineage/ATL11'].setncattr('uuid',list(zip(*slineage))[7])
dst['METADATA/Lineage/ATL11'].setncattr('start_geoseg',list(zip(*slineage))[8])
dst['METADATA/Lineage/ATL11'].setncattr('end_geoseg',list(zip(*slineage))[9])
dst['METADATA/Lineage/ATL11'].setncattr('start_orbit',list(zip(*slineage))[10])
dst['METADATA/Lineage/ATL11'].setncattr('end_orbit',list(zip(*slineage))[11])
# set time attributes
root_info.update({'time_coverage_start': sUTCtime})
root_info.update({'time_coverage_end': eUTCtime})
root_info.update({'time_coverage_duration': max_end_delta_time-min_start_delta_time})
dst['/METADATA/Extent'].setncattr('rangeBeginningDateTime',sUTCtime)
dst['/METADATA/Extent'].setncattr('rangeEndingDateTime',eUTCtime)
# build lat/lon geo boundaries
def set_geobounds(dst,fileout,root_info):
if 'ATL14' in os.path.basename(fileout):
georoot = ''
else:
georoot = '/delta_h'
polar_srs=osr.SpatialReference()
polar_srs.ImportFromEPSG(int(dst[georoot+'/Polar_Stereographic'].getncattr('spatial_epsg')))
ll_srs=osr.SpatialReference()
ll_srs.ImportFromEPSG(4326)
if hasattr(osr,'OAMS_TRADITIONAL_GIS_ORDER'):
ll_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
polar_srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
ct=osr.CoordinateTransformation(polar_srs, ll_srs)
xmin,xmax = (np.min(dst[georoot+'/x']),np.max(dst[georoot+'/x']))
ymin,ymax = (np.min(dst[georoot+'/y']),np.max(dst[georoot+'/y']))
N = 2
dx = (xmax-xmin)/N
dy = (ymax-ymin)/N
multipoint = ogr.Geometry(ogr.wkbMultiPoint)
for x in range(N+1):
for y in range(N+1):
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(ymax - y*dy,xmin + x*dx)
multipoint.AddGeometry(point)
multipoint.Transform(ct)
lonmin,lonmax,latmin,latmax = multipoint.GetEnvelope()
if (lonmin == -180.0) | (lonmax == 180.0):
lonmin,lonmax = (-180.0,180.0)
root_info.update({'geospatial_lon_min': lonmin})
root_info.update({'geospatial_lon_max': lonmax})
root_info.update({'geospatial_lat_min': latmin})
root_info.update({'geospatial_lat_max': latmax})
# set variables and attributes, from JSON polygons, if present
try:
region = os.path.basename(fileout).split("_")[1]
polyfile = pkg_resources.resource_filename('ATL1415','resources/region_extent_polygons.json')
with open (polyfile) as poly_f:
poly_data = poly_f.read()
reg_poly = region+'_poly'
poly = json.loads(poly_data)
x = [row[0] for row in poly[reg_poly]]
y = [row[1] for row in poly[reg_poly]]
dst['/orbit_info'].variables['bounding_polygon_dim1'][:] = np.arange(1,np.size(x)+1)
dst['/orbit_info'].variables['bounding_polygon_lon1'][:] = np.array(x)[:]
dst['/orbit_info'].variables['bounding_polygon_lat1'][:] = np.array(y)[:]
    except Exception:
        warnings.filterwarnings("always")
        warnings.warn("Region polygon JSON unavailable; falling back to envelope corner points", DeprecationWarning)
dst['/orbit_info'].variables['bounding_polygon_dim1'][:] = np.arange(1,4+1)
dst['/orbit_info'].variables['bounding_polygon_lon1'][:] = np.array([lonmin,lonmax,lonmax,lonmin])[:]
        dst['/orbit_info'].variables['bounding_polygon_lat1'][:] = np.array([latmax,latmax,latmin,latmin])[:]
# -*- coding: utf-8 -*-
"""peaks_des.py
"""
import argparse
import gc
import healpy as hp
import logging
import multiprocessing
import numpy as np
import os
import time
from util import configure_logger
from util import ang_dist
def worker_peaks(map, bin_count, logbin_count, bin_range):
logger = logging.getLogger(__name__)
    factor = 1 - np.sum(map == hp.UNSEEN) / len(map)  # assumed normalization: fraction of pixels that are not UNSEEN
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 14:34:28 2019
@author: bwc
"""
# standard imports
import numpy as np
import matplotlib.pyplot as plt
# custom imports
import apt_fileio
import plotting_stuff
import peak_param_determination as ppd
from histogram_functions import bin_dat
import scipy.interpolate
import image_registration.register_images
import sel_align_m2q_log_xcorr
import scipy.interpolate
import time
import m2q_calib
import initElements_P3
from voltage_and_bowl import do_voltage_and_bowl
import voltage_and_bowl
import colorcet as cc
import matplotlib._color_data as mcd
def extents(f):
delta = f[1] - f[0]
return [f[0] - delta/2, f[-1] + delta/2]
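# A minimal sketch (hypothetical bin centers): pad by half a bin on each side.
# >>> extents(np.array([0.0, 1.0, 2.0]))
# [-0.5, 2.5]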
def create_histogram(ys,cts_per_slice=2**10,y_roi=None,delta_y=1.6e-3):
num_y = int(np.ceil(np.abs(np.diff(y_roi))/delta_y/2)*2) # even number
# num_ly = int(2**np.round(np.log2(np.abs(np.diff(ly_roi))/delta_ly)))-1 # closest power of 2
print('number of points in ly = ',num_y)
num_x = int(ys.size/cts_per_slice)
xs = np.arange(ys.size)
N,x_edges,y_edges = np.histogram2d(xs,ys,bins=[num_x,num_y],range=[[1,ys.size],y_roi],density=False)
return (N,x_edges,y_edges)
def edges_to_centers(*edges):
centers = []
for es in edges:
centers.append((es[0:-1]+es[1:])/2)
if len(centers)==1:
centers = centers[0]
return centers
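# A minimal sketch (hypothetical edges): a single edge array returns bare centers.
# >>> edges_to_centers(np.array([0.0, 1.0, 2.0]))
# array([0.5, 1.5])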
plt.close('all')
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R44_02203-v01.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[100000::10]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
ax = plotting_stuff.plot_TOF_vs_time(tof_bcorr,epos,2)
# Plot histogram for steel
fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=321,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[400,600],cts_per_slice=2**10,delta_y=0.5)
ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0, 6000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[425,475],cts_per_slice=2**10,delta_y=0.5)
ax2.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\metal_not_wandering.svg', format='svg', dpi=600)
#
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v02_allVfromAnn.epos"
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
ax = plotting_stuff.plot_TOF_vs_time(tof_bcorr,epos,2)
# Plot histogram for sio2
fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=4321,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.5)
ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0000, 8000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[280,360],cts_per_slice=2**9,delta_y=.5)
ax2.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_NUV_wandering.svg', format='svg', dpi=600)
#
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R20_07080-v01.epos"
epos = apt_fileio.read_epos_numpy(fn)
#epos = epos[165000:582000]
plotting_stuff.plot_TOF_vs_time(epos['tof'],epos,1,clearFigure=True,user_ylim=[0,1000])
# Voltage and bowl correct ToF data
p_volt = np.array([])
p_bowl = np.array([])
t_i = time.time()
tof_corr, p_volt, p_bowl = do_voltage_and_bowl(epos,p_volt,p_bowl)
print("time to voltage and bowl correct: "+str(time.time()-t_i)+" seconds")
# Only apply bowl correction
tof_bcorr = voltage_and_bowl.mod_geometric_bowl_correction(p_bowl,epos['tof'],epos['x_det'],epos['y_det'])
ax = plotting_stuff.plot_TOF_vs_time(tof_bcorr,epos,2)
# Plot histogram for sio2
fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=54321,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[320,380],cts_per_slice=2**9,delta_y=.5)
ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(epos['v_dc'],'-',
linewidth=2,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='applied voltage (volts)',ylim=[0000, 5000],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[320,380],cts_per_slice=2**9,delta_y=.5)
ax2.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_EUV_wandering.svg', format='svg', dpi=600)
## Plot histogram for sio2
#fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=654321,dpi=100)
#plt.clf()
#ax1,ax2, ax3 = fig.subplots(1,3,sharey=True)
#N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[0,1000],cts_per_slice=2**10,delta_y=.125)
##ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
## extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
## interpolation='bilinear')
#
#event_idx_range_ref = [10000, 20000]
#event_idx_range_mov = [70000, 80000]
#
#x_centers = edges_to_centers(x_edges)
#idxs_ref = (x_centers>=event_idx_range_ref[0]) & (x_centers<=event_idx_range_ref[1])
#idxs_mov = (x_centers>=event_idx_range_mov[0]) & (x_centers<=event_idx_range_mov[1])
#
#ref_hist = np.sum(N[idxs_ref,:],axis=0)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#y_centers = edges_to_centers(y_edges)
#sc = 300
#
#
#ax1.set(xlim=[84, 96])
#ax2.set(xlim=[348,362])
#ax3.set(xlim=[498,512])
#
#
#ax1.plot(y_centers,ref_hist+mov_hist+2*sc)
#ax2.plot(y_centers,ref_hist+mov_hist+2*sc)
#ax3.plot(y_centers,ref_hist+mov_hist+2*sc)
#
#
#ax1.plot(y_centers,mov_hist+5*sc)
#ax2.plot(y_centers,mov_hist+5*sc)
#ax3.plot(y_centers,mov_hist+5*sc)
#
#N,x_edges,y_edges = create_histogram(1.003*tof_bcorr,y_roi=[0,1000],cts_per_slice=2**10,delta_y=.125)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#
#
#ax1.plot(y_centers,ref_hist+6*sc)
#ax2.plot(y_centers,ref_hist+6*sc)
#ax3.plot(y_centers,ref_hist+6*sc)
#
#
#ax1.plot(y_centers,mov_hist+4*sc)
#ax2.plot(y_centers,mov_hist+4*sc)
#ax3.plot(y_centers,mov_hist+4*sc)
#
#
#ax1.plot(y_centers,mov_hist+ref_hist+1*sc)
#ax2.plot(y_centers,mov_hist+ref_hist+1*sc)
#ax3.plot(y_centers,mov_hist+ref_hist+1*sc)
#
#N,x_edges,y_edges = create_histogram(1.006*tof_bcorr,y_roi=[0,1000],cts_per_slice=2**10,delta_y=.125)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#
#ax1.plot(y_centers,mov_hist+3*sc)
#ax2.plot(y_centers,mov_hist+3*sc)
#ax3.plot(y_centers,mov_hist+3*sc)
#
#
#ax1.plot(y_centers,mov_hist+ref_hist)
#ax2.plot(y_centers,mov_hist+ref_hist)
#ax3.plot(y_centers,mov_hist+ref_hist)
#
#
#
#
#
#fig.tight_layout()
#
#
#fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\correction_idea.svg', format='svg', dpi=600)
#
#def shaded_plot(ax,x,y,idx):
# sc = 250
# cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
#
# xlim = ax.get_xlim()
#
# idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
#
# ax.fill_between(x[idxs], y[idxs]+idx*sc, (idx-0.005)*sc, color=cols[idx])
## ax.plot(x,y+idx*sc, color='k')
# return
#
#
#
#
## Plot histogram for sio2
#fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=654321,dpi=100)
#plt.clf()
#ax1,ax2 = fig.subplots(1,2,sharey=True)
#N,x_edges,y_edges = create_histogram(tof_bcorr,y_roi=[80,400],cts_per_slice=2**10,delta_y=.125)
##ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
## extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
## interpolation='bilinear')
#
#event_idx_range_ref = [10000, 20000]
#event_idx_range_mov = [70000, 80000]
#
#x_centers = edges_to_centers(x_edges)
#idxs_ref = (x_centers>=event_idx_range_ref[0]) & (x_centers<=event_idx_range_ref[1])
#idxs_mov = (x_centers>=event_idx_range_mov[0]) & (x_centers<=event_idx_range_mov[1])
#
#ref_hist = np.sum(N[idxs_ref,:],axis=0)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#y_centers = edges_to_centers(y_edges)
#
#
#ax1.set(xlim=[87, 93])
#ax2.set(xlim=[352,360])
##ax3.set(xlim=[498,512])
#
#
#shaded_plot(ax1,y_centers,ref_hist+mov_hist,2)
#shaded_plot(ax2,y_centers,ref_hist+mov_hist,2)
#
#shaded_plot(ax1,y_centers,mov_hist,5)
#shaded_plot(ax2,y_centers,mov_hist,5)
#
#N,x_edges,y_edges = create_histogram(1.003*tof_bcorr,y_roi=[80,400],cts_per_slice=2**10,delta_y=.125)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#shaded_plot(ax1,y_centers,ref_hist,6)
#shaded_plot(ax2,y_centers,ref_hist,6)
#
#
#shaded_plot(ax1,y_centers,mov_hist,4)
#shaded_plot(ax2,y_centers,mov_hist,4)
#
#
#shaded_plot(ax1,y_centers,mov_hist+ref_hist,1)
#shaded_plot(ax2,y_centers,mov_hist+ref_hist,1)
#
#
#N,x_edges,y_edges = create_histogram(1.006*tof_bcorr,y_roi=[80,400],cts_per_slice=2**10,delta_y=.125)
#mov_hist = np.sum(N[idxs_mov,:],axis=0)
#
#
#shaded_plot(ax1,y_centers,mov_hist,3)
#shaded_plot(ax2,y_centers,mov_hist,3)
#
#
#shaded_plot(ax1,y_centers,mov_hist+ref_hist,0)
#shaded_plot(ax2,y_centers,mov_hist+ref_hist,0)
#
#
#
#fig.tight_layout()
#
#
#fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\correction_idea.svg', format='svg', dpi=600)
def shaded_plot(ax,x,y,idx,col_idx=None):
if col_idx is None:
col_idx = idx
sc = 50
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xlim = ax.get_xlim()
idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
ax.fill_between(x[idxs], y[idxs]+idx*sc, (idx-0.005)*sc, color=cols[col_idx])
# ax.plot(x,y+idx*sc, color='k')
return
# Plot histogram for sio2
fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=654321,dpi=100)
plt.clf()
ax2 = fig.subplots(1,1)
N,x_edges,y_edges = create_histogram(tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
#ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
# extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
# interpolation='bilinear')
event_idx_range_ref = [0, 0+1024]
event_idx_range_mov = [124000, 124000+1024]
x_centers = edges_to_centers(x_edges)
idxs_ref = (x_centers>=event_idx_range_ref[0]) & (x_centers<=event_idx_range_ref[1])
idxs_mov = (x_centers>=event_idx_range_mov[0]) & (x_centers<=event_idx_range_mov[1])
ref_hist = np.sum(N[idxs_ref,:],axis=0)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
y_centers = edges_to_centers(y_edges)
ax2.set(xlim=[290,320])
#ax2.set(xlim=[0, 1000])
#ax3.set(xlim=[498,512])
N,x_edges,y_edges = create_histogram(0.98*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
#shaded_plot(ax2,y_centers,ref_hist+mov_hist,2)
shaded_plot(ax2,y_centers,mov_hist,2,2)
N,x_edges,y_edges = create_histogram(0.99*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
shaded_plot(ax2,y_centers,ref_hist,3,3)
shaded_plot(ax2,y_centers,mov_hist,1,1)
#shaded_plot(ax2,y_centers,mov_hist+ref_hist,1)
N,x_edges,y_edges = create_histogram(1.0*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
shaded_plot(ax2,y_centers,mov_hist,0,col_idx=0)
#shaded_plot(ax2,y_centers,mov_hist+ref_hist,0)
#fig.gca().grid()
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\correction_idea1.svg', format='svg', dpi=600)
cs = np.linspace(0.975, 1.005, 256)
dp = np.zeros_like(cs)
for idx, c in enumerate(cs):
N,x_edges,y_edges = create_histogram(c*tof_corr,y_roi=[80,400],cts_per_slice=2**10,delta_y=0.0625)
mov_hist = np.sum(N[idxs_mov,:],axis=0)
dp[idx] = np.sum((mov_hist/np.sum(mov_hist))*(ref_hist/np.sum(ref_hist)))
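# dp[idx] is the inner product of the two count-normalized histograms, so the best
# scale factor is simply its argmax, e.g. (sketch): best_c = cs[np.argmax(dp)]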
# Plot histogram for sio2
fig = plt.figure(figsize=(2*3.14961,1*3.14961),num=7654321,dpi=100)
plt.clf()
ax1 = fig.subplots(1,1)
ax1.set(xlim=[0.975, 1.005],ylim=[-0.1,1.1])
f = scipy.interpolate.interp1d(cs,dp/np.max(dp))
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xq = [0.98, 0.99017, 1.0]
for idx in [0,1,2]:
ax1.plot(xq[idx],f(xq[idx]),'o',markersize=14,color=cols[2-idx])
ax1.plot(cs,dp/np.max(dp),'k')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\correction_idea2.svg', format='svg', dpi=600)
import sel_align_m2q_log_xcorr_v2
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v03.epos"
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v02.epos"
# fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\GaN epos files\R20_07148-v01.epos" # Mg doped
# fn = fn[:-5]+'_vbm_corr.epos'
epos = apt_fileio.read_epos_numpy(fn)
epos = epos[25000:]
epos = epos[:400000]
fake_tof = np.sqrt((296/312)*epos['m2q']/1.393e-4)
cts_per_slice=2**7
#m2q_roi = [0.9,190]
tof_roi = [0, 1000]
import time
t_start = time.time()
pointwise_scales,piecewise_scales = sel_align_m2q_log_xcorr_v2.get_all_scale_coeffs(epos['m2q'],
m2q_roi=[0.8,80],
cts_per_slice=cts_per_slice,
max_scale=1.15)
t_end = time.time()
print('Total Time = ',t_end-t_start)
fake_tof_corr = fake_tof/np.sqrt(pointwise_scales)
m2q_corr = epos['m2q']/pointwise_scales
# Plot histogram for sio2
fig = plt.figure(figsize=(2*3.14961,2*3.14961),num=87654321,dpi=100)
plt.clf()
ax1, ax2 = fig.subplots(2,1,sharex=True)
N,x_edges,y_edges = create_histogram(fake_tof,y_roi=[280,360],cts_per_slice=cts_per_slice,delta_y=.5)
ax1.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax1.set(ylabel='flight time (ns)')
ax1twin = ax1.twinx()
ax1twin.plot(pointwise_scales,'-',
linewidth=1,
color=mcd.XKCD_COLORS['xkcd:white'])
ax1twin.set(ylabel='correction factor, c',ylim=[0.95, 1.3],xlim=[0, 400000])
N,x_edges,y_edges = create_histogram(fake_tof_corr,y_roi=[280,360],cts_per_slice=cts_per_slice,delta_y=.5)
ax2.imshow(np.log10(1+1*np.transpose(N)), aspect='auto',
extent=extents(x_edges) + extents(y_edges), origin='lower', cmap=cc.cm.CET_L8,
interpolation='bilinear')
ax2.set(xlabel='ion sequence',ylabel='corrected flight time (ns)')
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_NUV_corrected.svg', format='svg', dpi=600)
def shaded_plot(ax,x,y,idx,col_idx=None,min_val=None):
if col_idx is None:
col_idx = idx
if min_val is None:
min_val = np.min(y)
sc = 150
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xlim = ax.get_xlim()
idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
ax.fill_between(x[idxs], y[idxs], min_val, color=cols[col_idx])
# ax.plot(x,y+idx*sc, color='k')
return
fig = plt.figure(constrained_layout=True,figsize=(2*3.14961,2*3.14961),num=87654321,dpi=100)
plt.clf()
gs = plt.GridSpec(2, 3, figure=fig)
ax0 = fig.add_subplot(gs[0, :])
# identical to ax1 = plt.subplot(gs.new_subplotspec((0, 0), colspan=3))
ax1 = fig.add_subplot(gs[1,0:2])
#ax2 = fig.add_subplot(gs[1,1])
ax3 = fig.add_subplot(gs[1,2])
dat = epos['m2q']
user_bin_width = 0.03
user_xlim = [0,65]
ax0.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax0,xs,100*(1+ys),1,min_val=100)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax0,xs,1+ys,0,min_val=1)
ax0.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax0.set_yscale('log')
user_bin_width = 0.01
user_xlim = [13,19]
ax1.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax1,xs,100*(1+ys),1,min_val=100)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax1,xs,1+ys,0,min_val=1)
ax1.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax1.set_yscale('log')
#
#
##user_bin_width = 0.01
#user_xlim = [30,34]
#ax2.set(xlim=user_xlim)
#
#
#dat = m2q_corr
#xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
#shaded_plot(ax2,xs,100*(1+ys),1,min_val=100)
#
#
#dat = epos['m2q']
#xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
#shaded_plot(ax2,xs,1+ys,0,min_val=1)
#
#
#ax2.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
#ax2.set_yscale('log')
#user_bin_width = 0.01
user_xlim = [58,64]
ax3.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax3,xs,100*(1+ys),1,min_val=100)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax3,xs,1+ys,0,min_val=1)
ax3.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax3.set_yscale('log')
ax0.set(ylim=[1,None])
ax1.set(ylim=[1,None])
#ax2.set(ylim=[1,None])
ax3.set(ylim=[1,None])
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\SiO2_NUV_corrected_hist.svg', format='svg', dpi=600)
fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_00504-v56.epos"
#fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\R45_data\R45_04472-v02.epos"
# fn = r"Q:\NIST_Projects\EUV_APT_IMS\BWC\GaN epos files\R20_07148-v01.epos" # Mg doped
# fn = fn[:-5]+'_vbm_corr.epos'
epos = apt_fileio.read_epos_numpy(fn)
#epos = epos[25000:]
#epos = epos[:400000]
cts_per_slice=2**9
import time
t_start = time.time()
pointwise_scales,piecewise_scales = sel_align_m2q_log_xcorr_v2.get_all_scale_coeffs(epos['m2q'],
m2q_roi=[10,250],
cts_per_slice=cts_per_slice,
max_scale=1.15)
t_end = time.time()
print('Total Time = ',t_end-t_start)
m2q_corr = epos['m2q']/pointwise_scales
def shaded_plot(ax,x,y,idx,col_idx=None,min_val=None):
if col_idx is None:
col_idx = idx
if min_val is None:
min_val = np.min(y)
cols = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#8c564b', '#e377c2', '#7f7f7f', '#bcbd22', '#17becf']
xlim = ax.get_xlim()
idxs = np.nonzero((x>=xlim[0]) & (x<=xlim[1]))
ax.fill_between(x[idxs], y[idxs], min_val, color=cols[col_idx])
# ax.plot(x,y+idx*sc, color='k')
return
fig = plt.figure(constrained_layout=True,figsize=(2*3.14961,2*3.14961),num=87654321,dpi=100)
plt.clf()
gs = plt.GridSpec(2, 3, figure=fig)
ax0 = fig.add_subplot(gs[0, :])
# identical to ax1 = plt.subplot(gs.new_subplotspec((0, 0), colspan=3))
ax1 = fig.add_subplot(gs[1,0:2])
#ax2 = fig.add_subplot(gs[1,1])
ax3 = fig.add_subplot(gs[1,2])
dat = epos['m2q']
user_bin_width = 0.03
user_xlim = [0,200]
ax0.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax0,xs,10*(1+ys),1,min_val=10)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax0,xs,1+ys,0,min_val=1)
ax0.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax0.set_yscale('log')
ax0.set(ylim=[10,None])
user_bin_width = 0.01
user_xlim = [45,55]
ax1.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax1,xs,10*(1+ys),1,min_val=10)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax1,xs,1+ys,0,min_val=1)
ax1.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax1.set_yscale('log')
ax1.set(ylim=[10,None])
#
#
##user_bin_width = 0.01
#user_xlim = [30,34]
#ax2.set(xlim=user_xlim)
#
#
#dat = m2q_corr
#xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
#shaded_plot(ax2,xs,100*(1+ys),1,min_val=100)
#
#
#dat = epos['m2q']
#xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
#shaded_plot(ax2,xs,1+ys,0,min_val=1)
#
#
#ax2.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
#ax2.set_yscale('log')
#user_bin_width = 0.01
user_xlim = [168,178]
ax3.set(xlim=user_xlim)
dat = m2q_corr
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax3,xs,10*(1+ys),1,min_val=10)
dat = epos['m2q']
xs, ys = bin_dat(dat,isBinAligned=True,bin_width=user_bin_width,user_roi=user_xlim)
shaded_plot(ax3,xs,1+ys,0,min_val=1)
ax3.set(xlabel='m/z (Da)', ylabel='counts', xlim=user_xlim)
ax3.set_yscale('log')
ax3.set(ylim=[10,None])
fig.tight_layout()
fig.savefig(r'Q:\users\bwc\APT\scale_corr_paper\Ceria_NUV_corrected_hist.svg', format='svg', dpi=600)
ceria_chi2 = [50100017.77823232, 54953866.6417411 , 56968470.41426052,
57832991.31751654, 58136713.37802257, 58103886.08055325,
57387594.45685758, 56278878.21237884, 52715317.92279702,
48064845.44202947, 42888989.38802697, 34852375.17765743,
30543492.44201695]
ceria_slic = [1.6000e+01, 3.2000e+01, 6.4000e+01, 1.2800e+02, 2.5600e+02,
5.1200e+02, 1.0240e+03, 2.0480e+03, 4.0960e+03, 8.1920e+03,
1.6384e+04, 3.2768e+04, 6.5536e+04]
sio2_slic = [1.6000e+01, 3.2000e+01, 6.4000e+01, 1.2800e+02, 2.5600e+02,
5.1200e+02, 1.0240e+03, 2.0480e+03, 4.0960e+03, 8.1920e+03,
1.6384e+04, 3.2768e+04, 6.5536e+04]
sio2_chi2 = [1.14778821e+08, 1.47490976e+08, 1.52686129e+08, 1.51663402e+08,
1.45270347e+08, 1.34437550e+08, 1.18551040e+08, 1.01481358e+08,
8.62360167e+07, 7.45989701e+07, 6.50088595e+07, 4.22995630e+07,
3.71045091e+07]
fig = plt.figure(num=666)
fig.clear()
ax = fig.gca()
ax.plot(sio2_slic,sio2_chi2/np.max(sio2_chi2),'s-',
markersize=8,label='SiO2')
ax.plot(ceria_slic,ceria_chi2/np.max(ceria_chi2),'s-',
        markersize=8,label='ceria')  # label text assumed, mirroring the SiO2 call above
import numpy as np
from scipy import (linalg, optimize as op, stats)
def whiten(X, axis=0):
return (X - np.mean(X, axis=axis))/np.std(X, axis=axis)
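# A minimal sketch: each column is shifted and scaled to zero mean and unit variance.
# >>> whiten(np.array([[1.0, 10.0], [3.0, 30.0]]))
# array([[-1., -1.],
#        [ 1.,  1.]])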
def generate_data(n_samples, n_features, n_latent_factors, n_components,
omega_scale=1, noise_scale=1, random_seed=0, force=None):
rng = np.random.RandomState(random_seed)
A = stats.special_ortho_group.rvs(n_features, random_state=rng)
A = A[:, :n_latent_factors]
AL = linalg.cholesky(A.T @ A)
A = A @ linalg.solve(AL, np.eye(n_latent_factors))
pvals = np.ones(n_components) / n_components
R = np.argmax(rng.multinomial(1, pvals, size=n_samples), axis=1)
pi = np.array([np.sum(R == i) for i in range(n_components)])/n_samples
xi = rng.randn(n_latent_factors, n_components)
omega = np.zeros((n_latent_factors, n_latent_factors, n_components))
if force is not None:
if "xi" in force:
xi = force["xi"]
print("using forced xi")
if "A" in force:
A = force["A"]
print("using forced A")
for i in range(n_components):
omega[(*np.diag_indices(n_latent_factors), i)] = \
rng.gamma(1, scale=omega_scale, size=n_latent_factors)**2
if force is not None:
if "omega" in force:
omega = force["omega"]
print("using forced omega")
scores = np.empty((n_samples, n_latent_factors))
for i in range(n_components):
match = (R == i)
scores[match] = rng.multivariate_normal(xi.T[i], omega.T[i],
size=sum(match))
psi = rng.gamma(1, scale=noise_scale, size=n_features)
noise = np.sqrt(psi) * rng.randn(n_samples, n_features)
X = scores @ A.T + noise
truth = dict(A=A, pi=pi, xi=xi, omega=omega, psi=psi,
noise=noise, R=R, scores=scores)
return (X, truth)
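# A minimal usage sketch (hypothetical sizes):
# >>> X, truth = generate_data(n_samples=500, n_features=10, n_latent_factors=3,
# ...                          n_components=2, random_seed=1)
# >>> X.shape, truth['A'].shape, truth['scores'].shape
# ((500, 10), (10, 3), (500, 3))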
def old_generate_data(n_samples=20, n_features=5, n_latent_factors=3, n_components=2,
omega_scale=1, noise_scale=1, latent_scale=1, random_seed=0):
rng = np.random.RandomState(random_seed)
#A = rng.randn(n_features, n_latent_factors)
sigma_L = np.abs(rng.normal(0, latent_scale))
    from math import factorial
    choose = lambda x, y: factorial(x) // (factorial(y) * factorial(x - y))
M = n_latent_factors * (n_features - n_latent_factors) \
+ choose(n_latent_factors, 2)
beta_lower_triangular = rng.normal(0, sigma_L, size=M)
beta_diag = np.abs(rng.normal(0, latent_scale, size=n_latent_factors))
    A = np.zeros((n_features, n_latent_factors), dtype=float)
"""
Author: <NAME> (<EMAIL>)
Date: May 07, 2020
"""
from __future__ import print_function
import torch
import torch.nn as nn
import math
import numpy as np
class SupConLoss(nn.Module):
"""Supervised Contrastive Learning: https://arxiv.org/pdf/2004.11362.pdf.
It also supports the unsupervised contrastive loss in SimCLR"""
def __init__(self, temperature=0.07, contrast_mode='all',
base_temperature=0.07):
super(SupConLoss, self).__init__()
self.temperature = temperature
self.contrast_mode = contrast_mode
self.base_temperature = base_temperature
def forward(self, features, labels=None, mask=None):
"""Compute loss for model. If both `labels` and `mask` are None,
it degenerates to SimCLR unsupervised loss:
https://arxiv.org/pdf/2002.05709.pdf
Args:
features: hidden vector of shape [bsz, n_views, ...].
labels: ground truth of shape [bsz].
mask: contrastive mask of shape [bsz, bsz], mask_{i,j}=1 if sample j
has the same class as sample i. Can be asymmetric.
Returns:
A loss scalar.
"""
device = (torch.device('cuda')
if features.is_cuda
else torch.device('cpu'))
if len(features.shape) < 3:
raise ValueError('`features` needs to be [bsz, n_views, ...],'
'at least 3 dimensions are required')
if len(features.shape) > 3:
features = features.view(features.shape[0], features.shape[1], -1)
batch_size = features.shape[0]
if labels is not None and mask is not None:
raise ValueError('Cannot define both `labels` and `mask`')
elif labels is None and mask is None:
mask = torch.eye(batch_size, dtype=torch.float32).to(device)
elif labels is not None:
labels = labels.contiguous().view(-1, 1)
if labels.shape[0] != batch_size:
raise ValueError('Num of labels does not match num of features')
mask = torch.eq(labels, labels.T).float().to(device)
else:
mask = mask.float().to(device)
contrast_count = features.shape[1]
contrast_feature = torch.cat(torch.unbind(features, dim=1), dim=0)
if self.contrast_mode == 'one':
anchor_feature = features[:, 0]
anchor_count = 1
elif self.contrast_mode == 'all':
anchor_feature = contrast_feature
anchor_count = contrast_count
else:
raise ValueError('Unknown mode: {}'.format(self.contrast_mode))
# compute logits
anchor_dot_contrast = torch.div(
torch.matmul(anchor_feature, contrast_feature.T),
self.temperature)
# for numerical stability
logits_max, _ = torch.max(anchor_dot_contrast, dim=1, keepdim=True)
logits = anchor_dot_contrast - logits_max.detach()
# tile mask
mask = mask.repeat(anchor_count, contrast_count)
# mask-out self-contrast cases
logits_mask = torch.scatter(
torch.ones_like(mask),
1,
torch.arange(batch_size * anchor_count).view(-1, 1).to(device),
0
)
mask = mask * logits_mask
# compute log_prob
exp_logits = torch.exp(logits) * logits_mask
log_prob = logits - torch.log(exp_logits.sum(1, keepdim=True))
# compute mean of log-likelihood over positive
mean_log_prob_pos = (mask * log_prob).sum(1) / mask.sum(1)
# loss
loss = - (self.temperature / self.base_temperature) * mean_log_prob_pos
loss = loss.view(anchor_count, batch_size).mean()
return loss
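# A minimal usage sketch (hypothetical shapes; the embeddings are assumed to be
# L2-normalized, as in the SupCon setup):
# >>> import torch.nn.functional as F
# >>> feats = F.normalize(torch.randn(8, 2, 128), dim=-1)  # [bsz, n_views, dim]
# >>> labels = torch.randint(0, 4, (8,))
# >>> loss = SupConLoss(temperature=0.07)(feats, labels)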
class LabeledContrastiveLoss(nn.Module):
def __init__(self, temp=0.5, eps=1e-3,
pos_in_denom=False, # True
log_first=True, # False
a_lc=1.0, a_spread=0.0,
noise_rate=0.0, K=-1, lc_norm = False, old_lnew = False,
detect_noise=False, correct_noise=False, num_classes=10):
super().__init__()
self.temp = temp
self.eps = eps
self.log_first = log_first
self.a_lc=a_lc
self.a_spread=a_spread
self.pos_in_denom = pos_in_denom
self.noise_rate = noise_rate
p = 1 - noise_rate + noise_rate * (1. / num_classes)
self.p = p
self.num_classes = num_classes
self.lc_norm = lc_norm
self.K = K
self.old_lnew = old_lnew
self.detect_noise = detect_noise
self.correct_noise = correct_noise
def forward(self, x, labels):
# x has shape batch * num views * dimension
# labels has shape batch * num views
labels = labels.unsqueeze(1).repeat(1, 2)
b, nViews, d = x.size()
vs = torch.split(x,1, dim=1) # images indexed by view
ts = torch.split(labels, 1, dim=1) # labels indexed by view
l = 0.
pairs = nViews*(nViews-1)//2
for ii in range(nViews):
vi = vs[ii].squeeze()
ti = ts[ii].squeeze()
ti_np = np.array([int(label) for label in ti])
for jj in range(ii):
vj = vs[jj].squeeze()
tj = ts[jj].squeeze()
if self.log_first:
if self.old_lnew:
# old implementation of L_new
# don't include these in positives
_mask = ti.unsqueeze(0) != tj.unsqueeze(1)
# num[i,j] is f(xi) * f(xj) / tau, for i,j
if self.lc_norm:
num = torch.einsum('b d, c d -> b c', vi, vj).div(self.temp).div(
torch.norm(vi, dim=1) * torch.norm(vj, dim=1)
)
else:
num = torch.einsum('b d, c d -> b c', vi, vj).div(self.temp)
# _mask_denom is True when yi != yj
_mask_denom = (ti.unsqueeze(0) != tj.unsqueeze(1)).float()
if self.noise_rate > 0.:
_mask_denom[ti.unsqueeze(0) == tj.unsqueeze(1)] = self.noise_rate
_mask_denom[torch.eye(ti.shape[0], dtype=bool)] = 0.
# for numerical stability, see log-sum-exp trick
num_max, _ = torch.max(num, dim=1, keepdim=True)
# log_denom[i,j] is log[exp(f(xi) * f(xj) / tau) +
# + sum_{j in _mask_denom[i]} exp(f(xi) * f(xj) / tau)
log_denom = (
# sum_{j in _mask_denom[i]} exp(f(xi) * f(xj) / tau)
(torch.exp(num - num_max) * _mask_denom).sum(-1, keepdim=True) +
# exp(f(xi) * f(xj) / tau)
torch.exp(num - num_max)
).log() + num_max
log_prob = num - log_denom
if self.noise_rate > 0.:
_mask_mult = (ti.unsqueeze(0) == tj.unsqueeze(1)).float() * (1. - self.noise_rate)
_mask_mult[torch.eye(ti.shape[0], dtype=bool)] = 1.
log_prob = log_prob * _mask_mult
_mask_nans_infs = torch.isnan(log_prob) + torch.isinf(log_prob)
a = -log_prob.masked_fill(
_mask,
math.log(self.eps)
).masked_fill(
_mask_nans_infs,
math.log(self.eps)
).sum(-1).div(_mask.sum(-1))
l += a.mean()
else:
# new implementation of L_new/L_out
# don't include these in positives
_mask = ti.unsqueeze(0) != tj.unsqueeze(1)
# num[i,j] is f(xi) * f(xj) / tau, for i,j
if self.lc_norm:
num = torch.einsum('b d, c d -> b c', vi, vj).div(self.temp).div(
torch.norm(vi, dim=1) * torch.norm(vj, dim=1)
)
else:
num = torch.einsum('b d, c d -> b c', vi, vj).div(self.temp)
if self.detect_noise:
# new implementation of L_new
_mask_same_class = ti.unsqueeze(0) == tj.unsqueeze(1)
_mask_same_class[torch.eye(ti.shape[0], dtype=bool)] = False
_mask_diff_class = ti.unsqueeze(0) != tj.unsqueeze(1)
num_norm = torch.einsum('b d, c d -> b c', vi, vj).div(self.temp).div(
torch.norm(vi, dim=1) * torch.norm(vj, dim=1)
)
num_norm_numpy = num_norm.detach().cpu().numpy()
if (min(torch.sum(_mask_same_class, axis=0)) == 0 or
min(torch.sum(_mask_diff_class, axis=0)) == 0):
# crazy edge case where every element in the batch has the same class??
preds_class_correct = np.ones(ti_np.shape[0], dtype=bool)
else:
same_class_sim = np.average(num_norm_numpy, axis=0,
weights=_mask_same_class.detach().cpu().numpy())
diff_class_sim = np.average(num_norm_numpy, axis=0,
weights=_mask_diff_class.detach().cpu().numpy())
difference = same_class_sim - diff_class_sim
preds_class_correct = np.ones(ti_np.shape[0], dtype=bool)
preds_class_correct[np.argsort(difference)[:int((1-self.p) * ti.shape[0])]] = False
if self.correct_noise:
class_sims = []
for cls in range(self.num_classes):
class_sim = np.mean(
num_norm_numpy, axis=1,
where=np.tile((ti_np == cls), (len(ti_np), 1))
)
class_sims.append(class_sim)
                                class_preds = np.argmax(class_sims, axis=0)
import os
import numpy as np
from torch.utils.data.sampler import Sampler
import sys
import os.path as osp
import torch
import time
def time_now():
return time.strftime('%y-%m-%d %H:%M:%S', time.localtime())
def load_data(input_data_path):
    with open(input_data_path, 'rt') as f:
        data_file_list = f.read().splitlines()
# Get full list of color image and labels
file_image = [s.split(' ')[0] for s in data_file_list]
file_label = [int(s.split(' ')[1]) for s in data_file_list]
return file_image, file_label
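# The index file is assumed to hold one "<image path> <integer label>" pair per
# line, e.g. (hypothetical):
#     cam1/0001/0001.jpg 0
#     cam1/0002/0001.jpg 1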
def GenIdx( train_color_label, train_ir_label):
color_pos = []
    unique_label_color = np.unique(train_color_label)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A miscellaneous collection of basic functions."""
import sys
import copy
import os
import warnings
from collections.abc import Iterable
from pathlib import Path
import tempfile
from scipy.interpolate import interp1d
from scipy.stats import ncx2
import numpy as np
from numpy import histogram2d as histogram2d_np
from numpy import histogram as histogram_np
from astropy.logger import AstropyUserWarning
from astropy import log
from stingray.stats import pds_probability, pds_detection_level
from stingray.stats import z2_n_detection_level, z2_n_probability
from stingray.stats import fold_detection_level, fold_profile_probability
from stingray.pulse.pulsar import _load_and_prepare_TOAs
try:
import pint.toa as toa
import pint
from pint.models import get_model
HAS_PINT = True
except ImportError:
HAS_PINT = False
try:
from skimage.feature import peak_local_max
HAS_SKIMAGE = True
except ImportError:
HAS_SKIMAGE = False
try:
from tqdm import tqdm as show_progress
except ImportError:
def show_progress(a):
return a
from . import (
prange,
array_take,
HAS_NUMBA,
njit,
vectorize,
float32,
float64,
int32,
int64,
)
__all__ = [
"array_take",
"njit",
"prange",
"show_progress",
"z2_n_detection_level",
"z2_n_probability",
"pds_detection_level",
"pds_probability",
"fold_detection_level",
"fold_profile_probability",
"r_in",
"r_det",
"_assign_value_if_none",
"_look_for_array_in_array",
"is_string",
"_order_list_of_arrays",
"mkdir_p",
"common_name",
"hen_root",
"optimal_bin_time",
"gti_len",
"deorbit_events",
"_add_default_args",
"check_negative_numbers_in_args",
"interpret_bintime",
"get_bin_edges",
"compute_bin",
"hist1d_numba_seq",
"hist2d_numba_seq",
"hist3d_numba_seq",
"hist2d_numba_seq_weight",
"hist3d_numba_seq_weight",
"index_arr",
"index_set_arr",
"histnd_numba_seq",
"histogram2d",
"histogram",
"touch",
"log_x",
"get_list_of_small_powers",
"adjust_dt_for_power_of_two",
"adjust_dt_for_small_power",
"memmapped_arange",
"nchars_in_int_value",
]
DEFAULT_PARSER_ARGS = {}
DEFAULT_PARSER_ARGS["loglevel"] = dict(
args=["--loglevel"],
kwargs=dict(
help=(
"use given logging level (one between INFO, "
"WARNING, ERROR, CRITICAL, DEBUG; "
"default:WARNING)"
),
default="WARNING",
type=str,
),
)
DEFAULT_PARSER_ARGS["nproc"] = dict(
args=["--nproc"],
kwargs=dict(help=("Number of processors to use"), default=1, type=int),
)
DEFAULT_PARSER_ARGS["debug"] = dict(
args=["--debug"],
kwargs=dict(
help=("set DEBUG logging level"), default=False, action="store_true"
),
)
DEFAULT_PARSER_ARGS["bintime"] = dict(
args=["-b", "--bintime"],
kwargs=dict(help="Bin time", type=np.longdouble, default=1),
)
DEFAULT_PARSER_ARGS["energies"] = dict(
args=["-e", "--energy-interval"],
kwargs=dict(
help="Energy interval used for filtering",
nargs=2,
type=float,
default=None,
),
)
DEFAULT_PARSER_ARGS["pi"] = dict(
args=["--pi-interval"],
kwargs=dict(
help="PI interval used for filtering",
nargs=2,
type=int,
default=[-1, -1],
),
)
DEFAULT_PARSER_ARGS["deorbit"] = dict(
args=["-p", "--deorbit-par"],
kwargs=dict(
help=(
"Deorbit data with this parameter file (requires PINT installed)"
),
default=None,
type=str,
),
)
DEFAULT_PARSER_ARGS["output"] = dict(
args=["-o", "--outfile"],
kwargs=dict(help="Output file", default=None, type=str),
)
DEFAULT_PARSER_ARGS["usepi"] = dict(
args=["--use-pi"],
kwargs=dict(
help="Use the PI channel instead of energies",
default=False,
action="store_true",
),
)
DEFAULT_PARSER_ARGS["test"] = dict(
args=["--test"],
kwargs=dict(
help="Only used for tests", default=False, action="store_true"
),
)
DEFAULT_PARSER_ARGS["pepoch"] = dict(
args=["--pepoch"],
kwargs=dict(
type=float,
required=False,
help="Reference epoch for timing parameters (MJD)",
default=None,
),
)
def r_in(td, r_0):
"""Calculate incident countrate given dead time and detected countrate."""
tau = 1 / r_0
return 1.0 / (tau - td)
def r_det(td, r_i):
"""Calculate detected countrate given dead time and incident countrate."""
tau = 1 / r_i
return 1.0 / (tau + td)
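# A worked sketch (hypothetical values; dead time in s, rates in counts/s): a
# 100 ct/s incident rate with 2.5 ms dead time is detected at 80 ct/s, and the
# two functions invert each other.
# >>> r_det(2.5e-3, 100.0)   # -> 80.0
# >>> r_in(2.5e-3, 80.0)     # -> 100.0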
def _assign_value_if_none(value, default):
if value is None:
return default
return value
def _look_for_array_in_array(array1, array2):
"""
Examples
--------
>>> _look_for_array_in_array([1, 2], [2, 3, 4])
2
>>> _look_for_array_in_array([1, 2], [3, 4, 5]) is None
True
"""
for a1 in array1:
if a1 in array2:
return a1
return None
def is_string(s):
"""Portable function to answer this question."""
return isinstance(s, str) # NOQA
def _order_list_of_arrays(data, order):
"""
Examples
--------
>>> order = [1, 2, 0]
>>> new = _order_list_of_arrays({'a': [4, 5, 6], 'b':[7, 8, 9]}, order)
>>> np.all(new['a'] == [5, 6, 4])
True
>>> np.all(new['b'] == [8, 9, 7])
True
>>> new = _order_list_of_arrays([[4, 5, 6], [7, 8, 9]], order)
>>> np.all(new[0] == [5, 6, 4])
True
>>> np.all(new[1] == [8, 9, 7])
True
>>> _order_list_of_arrays(2, order) is None
True
"""
if hasattr(data, "items"):
data = dict((i[0], np.asarray(i[1])[order]) for i in data.items())
elif hasattr(data, "index"):
data = [np.asarray(i)[order] for i in data]
else:
data = None
return data
class _empty:
def __init__(self):
pass
def mkdir_p(path):
"""Safe mkdir function."""
return os.makedirs(path, exist_ok=True)
def common_name(str1, str2, default="common"):
"""Strip two strings of the letters not in common.
Filenames must be of same length and only differ by a few letters.
Parameters
----------
str1 : str
str2 : str
Returns
-------
common_str : str
A string containing the parts of the two names in common
Other Parameters
----------------
default : str
The string to return if common_str is empty
Examples
--------
>>> common_name('strAfpma', 'strBfpmb')
'strfpm'
>>> common_name('strAfpma', 'strBfpmba')
'common'
>>> common_name('asdfg', 'qwerr')
'common'
>>> common_name('A_3-50_A.nc', 'B_3-50_B.nc')
'3-50'
"""
if not len(str1) == len(str2):
return default
common_str = ""
# Extract the HEN root of the name (in case they're event files)
str1 = hen_root(str1)
str2 = hen_root(str2)
for i, letter in enumerate(str1):
if str2[i] == letter:
common_str += letter
# Remove leading and trailing underscores and dashes
common_str = common_str.rstrip("_").rstrip("-")
common_str = common_str.lstrip("_").lstrip("-")
if common_str == "":
common_str = default
# log.debug('common_name: %s %s -> %s', str1, str2, common_str)
return common_str
def hen_root(filename):
"""Return the root file name (without _ev, _lc, etc.).
Parameters
----------
filename : str
Examples
--------
>>> fname = "blabla_ev_calib.nc"
>>> hen_root(fname)
'blabla'
>>> fname = "blablu_ev_bli.fits.gz"
>>> hen_root(fname)
'blablu_ev_bli'
>>> fname = "blablu_ev_lc.nc"
>>> hen_root(fname)
'blablu'
>>> fname = "blablu_lc_asrd_ev_lc.nc"
>>> hen_root(fname)
'blablu_lc_asrd'
"""
fname = filename.replace(".gz", "")
fname = os.path.splitext(fname)[0]
todo = True
while todo:
todo = False
for ending in ["_ev", "_lc", "_pds", "_cpds", "_calib"]:
if fname.endswith(ending):
fname = fname[: -len(ending)]
todo = True
return fname
def optimal_bin_time(fftlen, tbin):
"""Vary slightly the bin time to have a power of two number of bins.
Given an FFT length and a proposed bin time, return a bin time
slightly shorter than the original, that will produce a power-of-two number
of FFT bins.
Examples
--------
>>> optimal_bin_time(512, 1.1)
1.0
"""
current_nbin = fftlen / tbin
new_nbin = 2 ** np.ceil(np.log2(current_nbin))
return fftlen / new_nbin
def gti_len(gti):
"""Return the total good time from a list of GTIs.
Examples
--------
>>> gti_len([[0, 1], [2, 4]])
3
"""
return np.sum(np.diff(gti, axis=1))
def simple_orbit_fun_from_parfile(
mjdstart, mjdstop, parfile, ntimes=1000, ephem="DE421", invert=False
):
"""Get a correction for orbital motion from pulsar parameter file.
Parameters
----------
mjdstart, mjdstop : float
Start and end of the time interval where we want the orbital solution
parfile : str
Any parameter file understood by PINT (Tempo or Tempo2 format)
    Other Parameters
----------------
ntimes : int
Number of time intervals to use for interpolation. Default 1000
invert : bool
Invert the solution (e.g. to apply an orbital model instead of
subtracting it)
Returns
-------
correction_mjd : function
Function that accepts times in MJDs and returns the deorbited times.
"""
from scipy.interpolate import interp1d
from astropy import units
if not HAS_PINT:
raise ImportError(
"You need the optional dependency PINT to use this "
"functionality: github.com/nanograv/pint"
)
    mjds = np.linspace(mjdstart, mjdstop, ntimes)
from utils import *
from mpmath import ellipe, ellipk, ellippi
from scipy.integrate import quad
import numpy as np
C1 = 3.0 / 14.0
C2 = 1.0 / 3.0
C3 = 3.0 / 22.0
C4 = 3.0 / 26.0
def J(N, k2, kappa, gradient=False):
# We'll need to solve this with gaussian quadrature
func = (
lambda x: np.sin(x) ** (2 * N) * (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 1.5
)
res = 0.0
for i in range(0, len(kappa), 2):
res += quad(
func, 0.5 * kappa[i], 0.5 * kappa[i + 1], epsabs=1e-12, epsrel=1e-12,
)[0]
if gradient:
# Deriv w/ respect to kappa is analytic
dJdkappa = (
0.5
* (
np.sin(0.5 * kappa) ** (2 * N)
* (np.maximum(0, 1 - np.sin(0.5 * kappa) ** 2 / k2)) ** 1.5
)
* np.repeat([-1, 1], len(kappa) // 2).reshape(1, -1)
)
# Deriv w/ respect to k2 is tricky, need to integrate
func = (
lambda x: (1.5 / k2 ** 2)
* np.sin(x) ** (2 * N + 2)
* (np.maximum(0, 1 - np.sin(x) ** 2 / k2)) ** 0.5
)
dJdk2 = 0.0
for i in range(0, len(kappa), 2):
dJdk2 += quad(
func, 0.5 * kappa[i], 0.5 * kappa[i + 1], epsabs=1e-12, epsrel=1e-12,
)[0]
return res, (dJdk2, dJdkappa)
else:
return res
def pal(bo, ro, kappa, gradient=False):
# TODO
if len(kappa) != 2:
raise NotImplementedError("TODO!")
def func(phi):
c = np.cos(phi)
z = np.minimum(
1 - 1e-12, np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
)
return (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
res, _ = quad(func, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,)
if gradient:
# Deriv w/ respect to kappa is analytic
dpaldkappa = func(kappa - np.pi) * np.repeat([-1, 1], len(kappa) // 2).reshape(
1, -1
)
# Derivs w/ respect to b and r are tricky, need to integrate
def func_bo(phi):
c = np.cos(phi)
z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
return P * ((bo + ro * c) * q + 1.0 / (bo + ro / c))
dpaldbo, _ = quad(
func_bo, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
)
def func_ro(phi):
c = np.cos(phi)
z = np.maximum(1e-12, 1 - ro ** 2 - bo ** 2 - 2 * bo * ro * c)
P = (1.0 - z ** 1.5) / (1.0 - z) * (ro + bo * c) * ro / 3.0
q = 3.0 * z ** 0.5 / (1.0 - z ** 1.5) - 2.0 / (1.0 - z)
return P * ((ro + bo * c) * q + 1.0 / ro + 1.0 / (ro + bo * c))
dpaldro, _ = quad(
func_ro, kappa[0] - np.pi, kappa[1] - np.pi, epsabs=1e-12, epsrel=1e-12,
)
return res, (dpaldbo, dpaldro, dpaldkappa)
else:
return res
def hyp2f1(a, b, c, z, gradient=False):
term = a * b * z / c
value = 1.0 + term
n = 1
while (np.abs(term) > STARRY_2F1_TOL) and (n < STARRY_2F1_MAXITER):
a += 1
b += 1
c += 1
n += 1
term *= a * b * z / c / n
value += term
if n == STARRY_2F1_MAXITER:
raise ValueError("Series for 2F1 did not converge.")
if gradient:
dFdz = a * b / c * hyp2f1(a + 1, b + 1, c + 1, z)
return value, dFdz
else:
return value
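# A quick check against the closed form 2F1(1, 1; 2; z) = -log(1 - z) / z
# (comment sketch):
# >>> abs(hyp2f1(1.0, 1.0, 2.0, 0.5) + np.log(0.5) / 0.5) < 1e-8
# True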
def el2(x, kc, a, b):
"""
Vectorized implementation of the `el2` function from
Bulirsch (1965). In this case, `x` is a *vector* of integration
limits. The halting condition does not depend on the value of `x`,
so it's much faster to evaluate all values of `x` at once!
"""
if kc == 0:
raise ValueError("Elliptic integral diverged because k = 1.")
c = x * x
d = 1 + c
p = np.sqrt((1 + kc * kc * c) / d)
d = x / d
c = d / (2 * p)
z = a - b
i = a
a = (b + a) / 2
y = np.abs(1 / x)
f = 0
l = np.zeros_like(x)
m = 1
kc = np.abs(kc)
for n in range(STARRY_EL2_MAX_ITER):
b = i * kc + b
e = m * kc
g = e / p
d = f * g + d
f = c
i = a
p = g + p
c = (d / p + c) / 2
g = m
m = kc + m
a = (b / m + a) / 2
y = -e / y + y
y[y == 0] = np.sqrt(e) * c[y == 0] * b
if np.abs(g - kc) > STARRY_EL2_CA * g:
kc = np.sqrt(e) * 2
l = l * 2
l[y < 0] = 1 + l[y < 0]
else:
break
if n == STARRY_EL2_MAX_ITER - 1:
raise ValueError(
"Elliptic integral EL2 failed to converge after {} iterations.".format(
STARRY_EL2_MAX_ITER
)
)
l[y < 0] = 1 + l[y < 0]
e = (np.arctan(m / y) + np.pi * l) * a / m
e[x < 0] = -e[x < 0]
return e + c * z
def EllipF(tanphi, k2, gradient=False):
kc2 = 1 - k2
F = el2(tanphi, np.sqrt(kc2), 1, 1)
if gradient:
E = EllipE(tanphi, k2)
p2 = (1 + tanphi ** 2) ** -1
q2 = p2 * tanphi ** 2
dFdtanphi = p2 * (1 - k2 * q2) ** -0.5
dFdk2 = 0.5 * (E / (k2 * kc2) - F / k2 - tanphi * dFdtanphi / kc2)
return F, (dFdtanphi, dFdk2)
else:
return F
def EllipE(tanphi, k2, gradient=False):
kc2 = 1 - k2
E = el2(tanphi, np.sqrt(kc2), 1, kc2)
if gradient:
F = EllipF(tanphi, k2)
p2 = (1 + tanphi ** 2) ** -1
q2 = p2 * tanphi ** 2
dEdtanphi = p2 * (1 - k2 * q2) ** 0.5
dEdk2 = 0.5 * (E - F) / k2
return E, (dEdtanphi, dEdk2)
else:
return E
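# Sanity-check sketch (an assumption, not from the original source): with
# tanphi = tan(phi) and parameter m = k2, these wrappers should match scipy's
# incomplete elliptic integrals of the first and second kind, e.g.
#
#     from scipy.special import ellipkinc, ellipeinc
#     phi = np.array([0.3, 0.7, 1.1]); k2 = 0.5
#     assert np.allclose(EllipF(np.tan(phi), k2), ellipkinc(phi, k2))
#     assert np.allclose(EllipE(np.tan(phi), k2), ellipeinc(phi, k2))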
def rj(x, y, z, p):
"""
Carlson elliptic integral RJ.
<NAME>,
Computing Elliptic Integrals by Duplication,
Numerische Mathematik,
Volume 33, 1979, pages 1-16.
<NAME>, <NAME>,
Algorithm 577, Algorithms for Incomplete Elliptic Integrals,
ACM Transactions on Mathematical Software,
Volume 7, Number 3, pages 398-403, September 1981
https://people.sc.fsu.edu/~jburkardt/f77_src/toms577/toms577.f
"""
# Limit checks
if x < STARRY_CRJ_LO_LIM:
x = STARRY_CRJ_LO_LIM
elif x > STARRY_CRJ_HI_LIM:
x = STARRY_CRJ_HI_LIM
if y < STARRY_CRJ_LO_LIM:
y = STARRY_CRJ_LO_LIM
elif y > STARRY_CRJ_HI_LIM:
y = STARRY_CRJ_HI_LIM
if z < STARRY_CRJ_LO_LIM:
z = STARRY_CRJ_LO_LIM
elif z > STARRY_CRJ_HI_LIM:
z = STARRY_CRJ_HI_LIM
if p < STARRY_CRJ_LO_LIM:
p = STARRY_CRJ_LO_LIM
elif p > STARRY_CRJ_HI_LIM:
p = STARRY_CRJ_HI_LIM
xn = x
yn = y
zn = z
pn = p
sigma = 0.0
power4 = 1.0
for k in range(STARRY_CRJ_MAX_ITER):
mu = 0.2 * (xn + yn + zn + pn + pn)
invmu = 1.0 / mu
xndev = (mu - xn) * invmu
yndev = (mu - yn) * invmu
zndev = (mu - zn) * invmu
pndev = (mu - pn) * invmu
eps = np.max([np.abs(xndev), np.abs(yndev), np.abs(zndev), np.abs(pndev)])
if eps < STARRY_CRJ_TOL:
ea = xndev * (yndev + zndev) + yndev * zndev
eb = xndev * yndev * zndev
ec = pndev * pndev
e2 = ea - 3.0 * ec
e3 = eb + 2.0 * pndev * (ea - ec)
s1 = 1.0 + e2 * (-C1 + 0.75 * C3 * e2 - 1.5 * C4 * e3)
s2 = eb * (0.5 * C2 + pndev * (-C3 - C3 + pndev * C4))
s3 = pndev * ea * (C2 - pndev * C3) - C2 * pndev * ec
            value = 3.0 * sigma + power4 * (s1 + s2 + s3) / (mu * np.sqrt(mu))
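            return value
        # The source was truncated here; the lines below reconstruct the
        # standard Carlson duplication step from the cited reference
        # (Carlson 1979; TOMS Algorithm 577). `rc` is the Carlson degenerate
        # integral R_C, assumed to be defined elsewhere in this module
        # alongside the STARRY_CRJ_* constants and C1..C4.
        xnroot = np.sqrt(xn)
        ynroot = np.sqrt(yn)
        znroot = np.sqrt(zn)
        lamda = xnroot * (ynroot + znroot) + ynroot * znroot
        alfa = pn * (xnroot + ynroot + znroot) + xnroot * ynroot * znroot
        alfa = alfa * alfa
        beta = pn * (pn + lamda) * (pn + lamda)
        sigma = sigma + power4 * rc(alfa, beta)
        power4 = 0.25 * power4
        xn = 0.25 * (xn + lamda)
        yn = 0.25 * (yn + lamda)
        zn = 0.25 * (zn + lamda)
        pn = 0.25 * (pn + lamda)
    raise ValueError(
        "Elliptic integral RJ failed to converge after {} iterations.".format(
            STARRY_CRJ_MAX_ITER
        )
    )
# Sanity-check sketch (an assumption; requires scipy >= 1.8, which exposes the
# Carlson symmetric forms):
#
#     from scipy.special import elliprj
#     assert np.isclose(rj(1.0, 2.0, 3.0, 4.0), elliprj(1.0, 2.0, 3.0, 4.0))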
################################################################################
#
# Copyright (c) 2017 University of Oxford
# Authors:
# <NAME> (<EMAIL>)
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
################################################################################
import os
import re
import numpy as np
from transform import build_se3_transform
from interpolate_poses import interpolate_vo_poses, interpolate_ins_poses
from velodyne import load_velodyne_raw, load_velodyne_binary, velodyne_raw_to_pointcloud
def build_pointcloud(lidar_dir, poses_file, extrinsics_dir, start_time, end_time, origin_time=-1):
"""Builds a pointcloud by combining multiple LIDAR scans with odometry information.
Args:
lidar_dir (str): Directory containing LIDAR scans.
poses_file (str): Path to a file containing pose information. Can be VO or INS data.
extrinsics_dir (str): Directory containing extrinsic calibrations.
start_time (int): UNIX timestamp of the start of the window over which to build the pointcloud.
end_time (int): UNIX timestamp of the end of the window over which to build the pointcloud.
origin_time (int): UNIX timestamp of origin frame. Pointcloud coordinates are relative to this frame.
Returns:
numpy.ndarray: 3xn array of (x, y, z) coordinates of pointcloud
numpy.array: array of n reflectance values or None if no reflectance values are recorded (LDMRS)
Raises:
ValueError: if specified window doesn't contain any laser scans.
IOError: if scan files are not found.
"""
if origin_time < 0:
origin_time = start_time
lidar = re.search('(lms_front|lms_rear|ldmrs|velodyne_left|velodyne_right)', lidar_dir).group(0)
timestamps_path = os.path.join(lidar_dir, os.pardir, lidar + '.timestamps')
timestamps = []
with open(timestamps_path) as timestamps_file:
for line in timestamps_file:
timestamp = int(line.split(' ')[0])
if start_time <= timestamp <= end_time:
timestamps.append(timestamp)
if len(timestamps) == 0:
raise ValueError("No LIDAR data in the given time bracket.")
with open(os.path.join(extrinsics_dir, lidar + '.txt')) as extrinsics_file:
extrinsics = next(extrinsics_file)
G_posesource_laser = build_se3_transform([float(x) for x in extrinsics.split(' ')])
    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
if poses_type in ['ins', 'rtk']:
with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
extrinsics = next(extrinsics_file)
G_posesource_laser = np.linalg.solve(build_se3_transform([float(x) for x in extrinsics.split(' ')]),
G_posesource_laser)
poses = interpolate_ins_poses(poses_file, timestamps, origin_time, use_rtk=(poses_type == 'rtk'))
else:
# sensor is VO, which is located at the main vehicle frame
poses = interpolate_vo_poses(poses_file, timestamps, origin_time)
pointcloud = np.array([[0], [0], [0], [0]])
if lidar == 'ldmrs':
reflectance = None
else:
reflectance = np.empty((0))
for i in range(0, len(poses)):
scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.bin')
if "velodyne" not in lidar:
if not os.path.isfile(scan_path):
continue
            # Open in binary mode so np.fromfile reads the raw doubles
            # consistently across platforms.
            with open(scan_path, 'rb') as scan_file:
                scan = np.fromfile(scan_file, np.double)
scan = scan.reshape((len(scan) // 3, 3)).transpose()
if lidar != 'ldmrs':
# LMS scans are tuples of (x, y, reflectance)
reflectance = np.concatenate((reflectance, np.ravel(scan[2, :])))
scan[2, :] = np.zeros((1, scan.shape[1]))
else:
if os.path.isfile(scan_path):
ptcld = load_velodyne_binary(scan_path)
else:
scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.png')
if not os.path.isfile(scan_path):
continue
ranges, intensities, angles, approximate_timestamps = load_velodyne_raw(scan_path)
ptcld = velodyne_raw_to_pointcloud(ranges, intensities, angles)
reflectance = np.concatenate((reflectance, ptcld[3]))
scan = ptcld[:3]
scan = np.dot(np.dot(poses[i], G_posesource_laser), np.vstack([scan, np.ones((1, scan.shape[1]))]))
pointcloud = np.hstack([pointcloud, scan])
pointcloud = pointcloud[:, 1:]
if pointcloud.shape[1] == 0:
raise IOError("Could not find scan files for given time range in directory " + lidar_dir)
return pointcloud, reflectance
if __name__ == "__main__":
import argparse
import open3d
parser = argparse.ArgumentParser(description='Build and display a pointcloud')
parser.add_argument('--poses_file', type=str, default=None, help='File containing relative or absolute poses')
parser.add_argument('--extrinsics_dir', type=str, default=None,
help='Directory containing extrinsic calibrations')
parser.add_argument('--laser_dir', type=str, default=None, help='Directory containing LIDAR data')
args = parser.parse_args()
lidar = re.search('(lms_front|lms_rear|ldmrs|velodyne_left|velodyne_right)', args.laser_dir).group(0)
timestamps_path = os.path.join(args.laser_dir, os.pardir, lidar + '.timestamps')
with open(timestamps_path) as timestamps_file:
start_time = int(next(timestamps_file).split(' ')[0])
end_time = start_time + 2e7
pointcloud, reflectance = build_pointcloud(args.laser_dir, args.poses_file,
args.extrinsics_dir, start_time, end_time)
if reflectance is not None:
colours = (reflectance - reflectance.min()) / (reflectance.max() - reflectance.min())
colours = 1 / (1 + np.exp(-10 * (colours - colours.mean())))
else:
colours = 'gray'
# Pointcloud Visualisation using Open3D
vis = open3d.Visualizer()
vis.create_window(window_name=os.path.basename(__file__))
render_option = vis.get_render_option()
render_option.background_color = np.array([0.1529, 0.1569, 0.1333], np.float32)
render_option.point_color_option = open3d.PointColorOption.ZCoordinate
coordinate_frame = open3d.geometry.create_mesh_coordinate_frame()
vis.add_geometry(coordinate_frame)
pcd = open3d.geometry.PointCloud()
pcd.points = open3d.utility.Vector3dVector(
-np.ascontiguousarray(pointcloud[[1, 0, 2]].transpose().astype(np.float64)))
    pcd.colors = open3d.utility.Vector3dVector(np.tile(colours[:, np.newaxis], (1, 3)))
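    # Assumed completion (the source is truncated here): register the geometry
    # and start the Open3D event loop; destroy_window() releases GL resources.
    # Note: the np.tile call above assumes reflectance colours (an ndarray);
    # the 'gray' fallback for LDMRS scans would need separate handling.
    vis.add_geometry(pcd)
    vis.run()
    vis.destroy_window()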