prompt | completion | api
---|---|---
stringlengths 19 to 879k | stringlengths 3 to 53.8k | stringlengths 8 to 59
# KCCA Unit Tests
from mvlearn.embed.kcca import KCCA, _zscore, _rowcorr, _listdot, _listcorr, _make_kernel
import numpy as np
import pytest
# Initialize number of samples
nSamples = 1000
np.random.seed(30)
# Define two latent variables (number of samples x 1)
latvar1 = np.random.randn(nSamples,)
latvar2 = np.random.randn(nSamples,)
# Define independent components for each dataset (number of observations x dataset dimensions)
indep1 = np.random.randn(nSamples, 4)
indep2 = np.random.randn(nSamples, 5)
# Create two datasets, with each dimension composed as a sum of 75% one of the latent variables and 25% independent component
data1 = 0.25*indep1 + 0.75*np.vstack((latvar1, latvar2, latvar1, latvar2)).T
data2 = 0.25*indep2 + 0.75*np.vstack((latvar1, latvar2, latvar1, latvar2, latvar1)).T
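# Illustrative check (not part of the original test suite): because corresponding
# columns of data1 and data2 share a latent variable, the two views are already
# strongly correlated before any (k)CCA is applied.
assert np.corrcoef(data1[:, 0], data2[:, 0])[0, 1] > 0.5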
# Split each dataset into a training set and test set (10% of dataset is training data)
train1 = data1[:int(nSamples/10)]
train2 = data2[:int(nSamples/10)]
test1 = data1[int(nSamples/10):]
test2 = data2[int(nSamples/10):]
n_components = 4
# Initialize a linear kCCA class
kcca_l = KCCA(ktype ="linear", reg = 0.001, n_components = n_components)
# Use the methods to find a kCCA mapping and transform the views of data
kcca_ft = kcca_l.fit_transform([train1, train2])
kcca_f = kcca_l.fit([train1, train2])
kcca_t = kcca_l.transform([train1, train2])
# Test that the number of canonical correlations (cancorrs_) equals n_components
def test_numCC_cancorrs_():
assert len(kcca_ft.cancorrs_) == n_components
# Test that the number of weight sets (weights_) equals the number of views
def test_numCC_ws_():
assert len(kcca_ft.weights_) == 2
# Test that the number of component sets (components_) equals the number of views
def test_numCC_comps_():
assert len(kcca_ft.components_) == 2
# Test that validate() runs
def test_validate():
accuracy = kcca_ft.validate([test1, test2])
assert (len(accuracy[0]) == 4 and len(accuracy[1]) == 5)
# Test that weights from fit equal weights from fit_transform
def test_ktype_weights():
    assert all(np.allclose(w_f, w_ft) for w_f, w_ft in zip(kcca_f.weights_, kcca_ft.weights_))
# Test that components from transform equal components from fit_transform
def test_ktype_components():
assert np.allclose(kcca_ft.components_, kcca_t.components_)
# Test that gaussian kernel runs
def test_ktype_gaussian():
kgauss = KCCA(ktype = 'gaussian', reg = 0.0001, n_components = 2, sigma=2.0)
kgauss.fit_transform([train1, train2])
assert len(kgauss.components_) == 2
# Test that polynomial kernel runs
def test_ktype_polynomial():
kpoly = KCCA(ktype = 'poly', reg = 0.0001, n_components = 2, degree=3)
kpoly.fit_transform([train1, train2])
assert len(kpoly.components_) == 2
### Testing helper functions
np.random.seed(30)
a = np.random.uniform(0,1,(10,10))
b = | np.random.uniform(0,1,(10,10)) | numpy.random.uniform |
#
# Solvers
#
import liionpack as lp
from liionpack.solver_utils import _create_casadi_objects as cco
from liionpack.solver_utils import _serial_step as ss
from liionpack.solver_utils import _mapped_step as ms
from liionpack.solver_utils import _serial_eval as se
from liionpack.solver_utils import _mapped_eval as me
import ray
import numpy as np
import time as ticker
from dask.distributed import Client
from tqdm import tqdm
import pybamm
class generic_actor:
def __init__(self):
pass
def setup(
self,
Nspm,
sim_func,
parameter_values,
dt,
inputs,
variable_names,
initial_soc,
nproc,
):
# Casadi specific arguments
        mapped = nproc > 1
self.Nspm = Nspm
# Set up simulation
self.parameter_values = parameter_values
if initial_soc is not None:
if (
(type(initial_soc) in [float, int])
or (type(initial_soc) is list and len(initial_soc) == 1)
or (type(initial_soc) is np.ndarray and len(initial_soc) == 1)
):
_, _ = lp.update_init_conc(parameter_values, initial_soc, update=True)
else:
lp.logger.warning(
"Using a list or an array of initial_soc "
+ "is not supported, please set the initial "
+ "concentrations via inputs"
)
if sim_func is None:
self.simulation = lp.basic_simulation(self.parameter_values)
else:
self.simulation = sim_func(parameter_values)
# Set up integrator
casadi_objs = cco(
inputs, self.simulation, dt, Nspm, nproc, variable_names, mapped
)
self.model = self.simulation.built_model
self.integrator = casadi_objs["integrator"]
self.variables_fn = casadi_objs["variables_fn"]
self.t_eval = casadi_objs["t_eval"]
self.event_names = casadi_objs["event_names"]
self.events_fn = casadi_objs["events_fn"]
self.step_solutions = casadi_objs["initial_solutions"]
self.last_events = None
self.event_change = None
if mapped:
self.step_fn = ms
self.eval_fn = me
else:
self.step_fn = ss
self.eval_fn = se
def step(self, inputs):
# Solver Step
self.step_solutions, self.var_eval, self.events_eval = self.step_fn(
self.simulation.built_model,
self.step_solutions,
inputs,
self.integrator,
self.variables_fn,
self.t_eval,
self.events_fn,
)
return self.check_events()
def evaluate(self, inputs):
self.var_eval = self.eval_fn(
self.simulation.built_model,
self.step_solutions,
inputs,
self.variables_fn,
self.t_eval,
)
def check_events(self):
if self.last_events is not None:
# Compare changes
new_sign = np.sign(self.events_eval)
old_sign = np.sign(self.last_events)
self.event_change = (old_sign * new_sign) < 0
self.last_events = self.events_eval
return np.any(self.event_change)
else:
self.last_events = self.events_eval
return False
def get_event_change(self):
return self.event_change
def get_event_names(self):
return self.event_names
def output(self):
return self.var_eval
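# Illustrative sketch (not part of liionpack): check_events above flags a crossing
# whenever an event expression changes sign between consecutive solver steps, e.g.
# a terminal-voltage cut-off passing through zero.
def _demo_event_sign_change():
    old = np.array([0.5, -0.2])
    new = np.array([-0.1, -0.3])
    # Same rule as generic_actor.check_events: a negative product means the sign flipped
    return (np.sign(old) * np.sign(new)) < 0  # -> array([ True, False])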
@ray.remote(num_cpus=1)
class ray_actor(generic_actor):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class generic_manager:
def __init__(
self,
):
pass
def solve(
self,
netlist,
sim_func,
parameter_values,
experiment,
inputs,
output_variables,
initial_soc,
nproc,
):
self.netlist = netlist
self.sim_func = sim_func
self.parameter_values = parameter_values
self.check_current_function()
# Get netlist indices for resistors, voltage sources, current sources
Ri_map = netlist["desc"].str.find("Ri") > -1
V_map = netlist["desc"].str.find("V") > -1
I_map = netlist["desc"].str.find("I") > -1
Terminal_Node = np.array(netlist[I_map].node1)
Nspm = np.sum(V_map)
self.split_models(Nspm, nproc)
# Generate the protocol from the supplied experiment
protocol = lp.generate_protocol_from_experiment(experiment, flatten=True)
self.dt = experiment.period
Nsteps = len(protocol)
netlist.loc[I_map, ("value")] = protocol[0]
# Solve the circuit to initialise the electrochemical models
V_node, I_batt = lp.solve_circuit_vectorized(netlist)
        # The simulation output variables calculated at each step for each battery
        # must be 0D variables, i.e. battery-wide volume averages (or X-averaged
        # quantities for a 1D model).
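        # For example (exact names depend on the PyBaMM model/version in use):
        #     output_variables = ["Volume-averaged cell temperature [K]"]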
self.variable_names = [
"Terminal voltage [V]",
"Measured battery open circuit voltage [V]",
]
if output_variables is not None:
for out in output_variables:
if out not in self.variable_names:
self.variable_names.append(out)
# variable_names = variable_names + output_variables
Nvar = len(self.variable_names)
# Storage variables for simulation data
self.shm_i_app = np.zeros([Nsteps, Nspm], dtype=float)
self.shm_Ri = | np.zeros([Nsteps, Nspm], dtype=float) | numpy.zeros |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.dict2namedtuple import convert
from ..multiagentenv import MultiAgentEnv
from .custom_scenarios import custom_scenario_registry
import atexit
from operator import attrgetter
from copy import deepcopy
import numpy as np
import enum
import math
from absl import logging
import torch as th
from pysc2 import maps
from pysc2 import run_configs
from pysc2.lib import protocol
from pysc2.lib.units import Neutral, Protoss, Terran, Zerg
from pysc2.lib.units import get_unit_type
from s2clientprotocol import common_pb2 as sc_common
from s2clientprotocol import sc2api_pb2 as sc_pb
from s2clientprotocol import raw_pb2 as r_pb
from s2clientprotocol import debug_pb2 as d_pb
from .map_params import get_map_params, map_present
import sys
import os
difficulties = {
"1": sc_pb.VeryEasy,
"2": sc_pb.Easy,
"3": sc_pb.Medium,
"4": sc_pb.MediumHard,
"5": sc_pb.Hard,
"6": sc_pb.Harder,
"7": sc_pb.VeryHard,
"8": sc_pb.CheatVision,
"9": sc_pb.CheatMoney,
"A": sc_pb.CheatInsane,
}
actions = {
"move": 16, # target: PointOrUnit
"attack": 23, # target: PointOrUnit
"stop": 4, # target: None
"heal": 386, # Unit
}
races = {
"R": sc_common.Random,
"P": sc_common.Protoss,
"T": sc_common.Terran,
"Z": sc_common.Zerg,
}
def get_unit_type_by_name(name):
for race in (Neutral, Protoss, Terran, Zerg):
unit = getattr(race, name, None)
if unit is not None:
return unit
    raise ValueError("Bad unit type {}".format(name))  # .format avoids the f-string SyntaxError on Python < 3.6
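# Illustrative usage (assuming pysc2's unit enums are available):
#     get_unit_type_by_name("Marine") == Terran.Marine   -> True
#     get_unit_type_by_name("Zealot") == Protoss.Zealot  -> True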
class Direction(enum.IntEnum):
NORTH = 0
SOUTH = 1
EAST = 2
WEST = 3
class StarCraft2CustomEnv(MultiAgentEnv):
"""The StarCraft II environment for decentralised multi-agent
micromanagement scenarios.
"""
def __init__(self, **kwargs):
args = kwargs
if isinstance(args, dict):
args = convert(args)
self.obs_timestep_number = False
self.state_timestep_number = False
# Read arguments
self.map_name = args.map_name
assert map_present(self.map_name), \
"map {} not in map registry! please add.".format(self.map_name)
map_params = convert(get_map_params(self.map_name))
self.n_agents = map_params.n_agents
self.n_enemies = map_params.n_enemies
self.episode_limit = map_params.limit
self._move_amount = args.move_amount
self._step_mul = args.step_mul
self.difficulty = args.difficulty
# Observations and state
self.obs_own_health = args.obs_own_health
self.obs_all_health = args.obs_all_health
self.obs_instead_of_state = args.obs_instead_of_state
self.obs_last_action = args.obs_last_action
self.obs_pathing_grid = args.obs_pathing_grid
self.obs_terrain_height = args.obs_terrain_height
self.state_last_action = args.state_last_action
if self.obs_all_health:
self.obs_own_health = True
self.n_obs_pathing = 8
self.n_obs_height = 9
# Rewards args
self.reward_sparse = args.reward_sparse
self.reward_only_positive = args.reward_only_positive
self.reward_negative_scale = args.reward_negative_scale
self.reward_death_value = args.reward_death_value
self.reward_win = args.reward_win
self.reward_defeat = args.reward_defeat
self.reward_scale = args.reward_scale
self.reward_scale_rate = args.reward_scale_rate
# Other
self.continuing_episode = args.continuing_episode
self.seed = args.seed
        self.heuristic_ai = args.heuristic_ai
self.window_size = (2560, 1600)
        self.replay_prefix = args.replay_prefix
        self.replay_dir = getattr(args, "replay_dir", "")  # optional; both attributes are used by save_replay()
self.restrict_actions = True # args.restrict_actions
# For sanity check
        self.debug_inputs = False
        self.debug_rewards = False
        self.debug = getattr(args, "debug", False)  # general debug logging flag used in reset()/step()
# Actions
self.n_actions_no_attack = 6
self.n_actions_move = 4
self.action_representation = getattr(args, "action_representation", "original")
# Configuration related to obs featurisation
self.obs_id_encoding = getattr(args, "obs_id_encoding", "original") # one of: original, metamix
self.obs_decoder = getattr(args, "obs_decoder", None) # None: flatten output!
self.obs_decode_on_the_fly = getattr(args, "obs_decode_on_the_fly", True)
self.obs_grid_shape = list(map(int, getattr(args, "obs_grid_shape", "1x1").split("x"))) # (width, height)
self.obs_resolve_multiple_occupancy = getattr(args, "obs_resolve_multiple_occupancy", False)
self.obs_grid_rasterise = getattr(args, "obs_grid_rasterise", False)
self.obs_grid_rasterise_debug = getattr(args, "obs_grid_rasterise_debug", False)
self.obs_resolve_multiple_occupancy_debug = getattr(args, "obs_resolve_multiple_occupancy_debug", False)
assert not (self.obs_resolve_multiple_occupancy and (self.obs_grid_shape is None)), "obs_grid_shape required!"
if self.obs_decoder is not None:
self.obs_id_encoding = "metamix"
self.rasterise = True
self.obs_resolve_multiple_occupancy = True
# Finalize action setup
self.n_actions = self.n_actions_no_attack + self.n_enemies
if self.action_representation == "output_grid":
self.n_actions_encoding = self.obs_grid_shape[0] * self.obs_grid_shape[1] + self.n_actions_no_attack
elif self.action_representation == "input_grid":
self.n_actions_encoding = self.obs_grid_shape[0] * self.obs_grid_shape[1] + self.n_actions_no_attack
elif self.action_representation == "input_xy":
self.n_actions_encoding = self.obs_grid_shape[0] + self.obs_grid_shape[1] + self.n_actions_no_attack
else:
self.n_actions_encoding = self.n_actions
self.n_available_actions = self.n_actions_no_attack + self.n_enemies
# Map info
self._agent_race = map_params.a_race
self._bot_race = map_params.b_race
self.shield_bits_ally = 1 if self._agent_race == "P" else 0
self.shield_bits_enemy = 1 if self._bot_race == "P" else 0
# NOTE: This means we ALWAYS have a unit type bit! really important for Metamix!
self.unit_type_bits = map_params.unit_type_bits
self.map_type = map_params.map_type
if sys.platform == 'linux':
os.environ['SC2PATH'] = os.path.join(os.getcwd(), "3rdparty", 'StarCraftII')
self.game_version = args.game_version
else:
# Can be derived automatically
self.game_version = None
# Launch the game
self._launch()
self.max_reward = self.n_enemies * self.reward_death_value + self.reward_win
self._game_info = self._controller.game_info()
self._map_info = self._game_info.start_raw
self.map_x = self._map_info.map_size.x
self.map_y = self._map_info.map_size.y
self.map_play_area_min = self._map_info.playable_area.p0
self.map_play_area_max = self._map_info.playable_area.p1
self.max_distance_x = self.map_play_area_max.x - self.map_play_area_min.x
self.max_distance_y = self.map_play_area_max.y - self.map_play_area_min.y
self.terrain_height = np.flip(
np.transpose(np.array(list(self._map_info.terrain_height.data)).reshape(self.map_x, self.map_y)), 1)
self.pathing_grid = np.flip(
np.transpose(np.array(list(self._map_info.pathing_grid.data)).reshape(self.map_x, self.map_y)), 1)
self._episode_count = 0
self._total_steps = 0
self.battles_won = 0
self.battles_game = 0
self.timeouts = 0
self.force_restarts = 0
self.last_stats = None
# Calculate feature sizes
obs_items = {"ally": [], "enemy": [], "own": []}
obs_items["ally"].append(("visible", 1,))
obs_items["ally"].append(("distance_sc2", 1,))
obs_items["ally"].append(("x_sc2", 1,))
obs_items["ally"].append(("y_sc2", 1,))
obs_items["enemy"].append(("attackable", 1,))
obs_items["enemy"].append(("distance_sc2", 1,))
obs_items["enemy"].append(("x_sc2", 1,))
obs_items["enemy"].append(("y_sc2", 1,))
if self.unit_type_bits:
obs_items["ally"].append(("unit_local", self.unit_type_bits))
obs_items["enemy"].append(("unit_local", self.unit_type_bits))
if self.obs_id_encoding == "metamix":
obs_items["ally"].append(("unit_global", 1))
obs_items["enemy"].append(("unit_global", 1))
if self.obs_id_encoding == "metamix":
obs_items["enemy"].append(("visible", 1,))
if self.obs_all_health:
obs_items["ally"].append(("health", 1,))
obs_items["enemy"].append(("health", 1,))
if self.shield_bits_ally:
obs_items["ally"].append(("shield", 1,))
obs_items["enemy"].append(("shield", 1,))
if self.obs_last_action:
obs_items["ally"].append(("last_action", 1,))
obs_items["own"].append(("unit_local", self.unit_type_bits))
if self.obs_id_encoding == "metamix":
obs_items["own"].append(("unit_global", 1,))
if self.obs_own_health:
obs_items["own"].append(("health", 1,))
if self.shield_bits_ally:
obs_items["own"].append(("health", self.shield_bits_ally))
if (self.obs_grid_rasterise and not self.obs_grid_rasterise_debug) \
or (self.obs_resolve_multiple_occupancy and not self.obs_resolve_multiple_occupancy_debug):
obs_items["ally"].append(("distance_grid", 1,))
obs_items["enemy"].append(("distance_grid", 1,))
obs_items["ally"].append(("x_grid", 1,))
obs_items["enemy"].append(("x_grid", 1,))
obs_items["ally"].append(("y_grid", 1,))
obs_items["enemy"].append(("y_grid", 1,))
obs_items["ally"].append(("distance_sc2_raster", 1,))
obs_items["enemy"].append(("distance_sc2_raster", 1,))
obs_items["ally"].append(("x_sc2_raster", 1,))
obs_items["enemy"].append(("x_sc2_raster", 1,))
obs_items["ally"].append(("y_sc2_raster", 1,))
obs_items["enemy"].append(("y_sc2_raster", 1,))
obs_items["ally"].append(("id", 1,))
obs_items["enemy"].append(("id", 1,))
self.nf_al = sum([x[1] for x in obs_items["ally"]])
self.nf_en = sum([x[1] for x in obs_items["enemy"]])
self.nf_own = sum([x[1] for x in obs_items["own"]])
# now calculate feature indices
def _calc_feat_idxs(lst):
ctr = 0
dct = {}
for l in lst:
name = l[0]
length = l[1]
dct[name] = (ctr, ctr + length)
ctr += length
return dct
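        # For example, _calc_feat_idxs([("visible", 1), ("distance_sc2", 1), ("unit_local", 3)])
        # returns {"visible": (0, 1), "distance_sc2": (1, 2), "unit_local": (2, 5)}.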
self.move_feats_len = self.n_actions
self.move_feats_size = self.move_feats_len
self.enemy_feats_size = self.n_enemies * self.nf_en
self.ally_feats_size = (self.n_agents - 1) * self.nf_al
self.own_feats_size = self.nf_own
self.obs_size_base = self.ally_feats_size + self.enemy_feats_size + self.move_feats_size + self.own_feats_size
self.obs_size = self.obs_size_base
idxs = {"ally": _calc_feat_idxs(obs_items["ally"]),
"enemy": _calc_feat_idxs(obs_items["enemy"]),
"own": _calc_feat_idxs(obs_items["own"])}
# create obs access functions
from functools import partial
self.obs_get = partial(StarCraft2CustomEnv._obs_get,
idxs=idxs)
self.obs_set = partial(StarCraft2CustomEnv._obs_set,
idxs=idxs)
self.to_grid_coords = partial(StarCraft2CustomEnv._to_grid_coords,
obs_grid_shape=self.obs_grid_shape,
)
self.from_grid_coords = partial(StarCraft2CustomEnv._from_grid_coords,
obs_grid_shape=self.obs_grid_shape,
)
self.rasterise_grid = partial(StarCraft2CustomEnv._grid_rasterise,
obs_grid_shape=self.obs_grid_shape,
obs_get=self.obs_get,
obs_set=self.obs_set,
to_grid_coords=self.to_grid_coords,
from_grid_coords=self.from_grid_coords,
debug=self.obs_grid_rasterise_debug
)
self.resolve_multiple_occupancy = partial(StarCraft2CustomEnv._multiple_occupancy,
obs_grid_shape=self.obs_grid_shape,
obs_get=self.obs_get,
obs_set=self.obs_set,
to_grid_coords=self.to_grid_coords,
from_grid_coords=self.from_grid_coords,
debug=self.obs_resolve_multiple_occupancy_debug
)
self.obs_explode = partial(StarCraft2CustomEnv._obs_explode,
ally_feats_size=self.ally_feats_size,
enemy_feats_size=self.enemy_feats_size,
move_feats_len=self.move_feats_len,
n_allies=self.n_agents - 1,
n_enemies=self.n_enemies,
nf_al=self.nf_al,
nf_en=self.nf_en, )
self.create_channels = partial(StarCraft2CustomEnv._create_channels,
n_allies=self.n_agents - 1,
n_enemies=self.n_enemies,
obs_grid_shape=self.obs_grid_shape,
obs_get=self.obs_get,
obs_set=self.obs_set,
to_grid_coords=self.to_grid_coords,
from_grid_coords=self.from_grid_coords,
obs_explode=self.obs_explode)
return
def _launch(self):
"""Launch the StarCraft II game."""
self._run_config = run_configs.get(version=self.game_version)
_map = maps.get(self.map_name)
# Setting up the interface
interface_options = sc_pb.InterfaceOptions(raw=True, score=False)
self._sc2_proc = self._run_config.start(window_size=self.window_size)
self._controller = self._sc2_proc.controller
self._bot_controller = self._sc2_proc.controller
# Request to create the game
create = sc_pb.RequestCreateGame(
local_map=sc_pb.LocalMap(
map_path=_map.path,
map_data=self._run_config.map_data(_map.path)),
realtime=False,
random_seed=self.seed)
create.player_setup.add(type=sc_pb.Participant)
create.player_setup.add(type=sc_pb.Computer, race=races[self._bot_race],
difficulty=difficulties[self.difficulty])
self._controller.create_game(create)
join = sc_pb.RequestJoinGame(race=races[self._agent_race],
options=interface_options)
self._controller.join_game(join)
game_info = self._controller.game_info()
map_info = game_info.start_raw
map_play_area_min = map_info.playable_area.p0
map_play_area_max = map_info.playable_area.p1
self.max_distance_x = map_play_area_max.x - map_play_area_min.x
self.max_distance_y = map_play_area_max.y - map_play_area_min.y
self.map_x = map_info.map_size.x
self.map_y = map_info.map_size.y
self.map_center = (self.map_x//2,self.map_y//2)
if map_info.pathing_grid.bits_per_pixel == 1:
vals = np.array(list(map_info.pathing_grid.data)).reshape(
self.map_x, int(self.map_y / 8))
self.pathing_grid = np.transpose(np.array([
[(b >> i) & 1 for b in row for i in range(7, -1, -1)]
for row in vals], dtype=np.bool))
else:
self.pathing_grid = np.invert(np.flip(np.transpose(np.array(
list(map_info.pathing_grid.data), dtype=np.bool).reshape(
self.map_x, self.map_y)), axis=1))
self.terrain_height = np.flip(
np.transpose(np.array(list(map_info.terrain_height.data)).reshape(
self.map_x, self.map_y)), 1) / 255
def _calc_distance_mtx(self):
# Calculate distances of all agents to all agents and enemies (for visibility calculations)
dist_mtx = 1000 * np.ones((self.n_agents, self.n_agents + self.n_enemies))
for i in range(self.n_agents):
for j in range(self.n_agents + self.n_enemies):
if j < i:
continue
elif j == i:
dist_mtx[i, j] = 0.0
else:
unit_a = self.agents[i]
if j >= self.n_agents:
unit_b = self.enemies[j - self.n_agents]
else:
unit_b = self.agents[j]
if unit_a.health > 0 and unit_b.health > 0:
dist = self.distance(unit_a.pos.x, unit_a.pos.y,
unit_b.pos.x, unit_b.pos.y)
dist_mtx[i, j] = dist
if j < self.n_agents:
dist_mtx[j, i] = dist
self.dist_mtx = dist_mtx
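    # Worked example (illustrative): with two agents at (0, 0) and (3, 4), the
    # ally-ally entries are symmetric, dist_mtx[0, 1] == dist_mtx[1, 0] == 5.0,
    # while agent-enemy distances are stored only in the agent's row.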
def reset(self):
"""Reset the environment. Required after each full episode.
Returns initial observations and states.
"""
self._episode_steps = 0
if self._episode_count == 0:
# Launch StarCraft II
self._launch()
try:
self.init_units()
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
self.init_units()
# Information kept for counting the reward
self.death_tracker_ally = np.zeros(self.n_agents)
self.death_tracker_enemy = np.zeros(self.n_enemies)
self.previous_ally_units = None
self.previous_enemy_units = None
self.win_counted = False
self.defeat_counted = False
self.last_action = np.zeros((self.n_agents, self.n_actions))
if self.heuristic_ai:
self.heuristic_targets = [None] * self.n_agents
if self.debug:
logging.debug("Started Episode {}"
.format(self._episode_count).center(60, "*"))
self._calc_distance_mtx()
if self.entity_scheme:
return self.get_entities(), self.get_masks()
return self.get_obs(), self.get_state()
def full_restart(self):
"""Full restart. Closes the SC2 process and launches a new one. """
self._sc2_proc.close()
self._launch()
self.force_restarts += 1
def try_controller_step(self, fn=lambda: None, n_steps=1):
try:
fn()
self._controller.step(n_steps)
self._obs = self._controller.observe()
return True
except (protocol.ProtocolError, protocol.ConnectionError):
self.full_restart()
self._obs = self._controller.observe()
return False
def step(self, actions):
"""A single environment step. Returns reward, terminated, info."""
actions = [int(a) for a in actions[:self.n_agents]]
self.last_action = np.eye(self.n_actions)[np.array(actions)]
# Collect individual actions
sc_actions = []
if self.debug:
logging.debug("Actions".center(60, "-"))
for a_id, action in enumerate(actions):
if not self.heuristic_ai:
agent_action = self.get_agent_action(a_id, action)
else:
agent_action = self.get_agent_action_heuristic(a_id, action)
if agent_action:
sc_actions.append(agent_action)
# Send action request
req_actions = sc_pb.RequestAction(actions=sc_actions)
step_success = self.try_controller_step(lambda: self._controller.actions(req_actions), self._step_mul)
if not step_success:
return 0, True, {}
self._total_steps += 1
self._episode_steps += 1
# Update units
game_end_code = self.update_units()
self._calc_distance_mtx()
terminated = False
reward = self.reward_battle()
info = {"battle_won": False}
if game_end_code is not None:
# Battle is over
terminated = True
self.battles_game += 1
if game_end_code == 1 and not self.win_counted:
self.battles_won += 1
self.win_counted = True
info["battle_won"] = True
if not self.reward_sparse:
reward += self.reward_win
else:
reward = 1
elif game_end_code == -1 and not self.defeat_counted:
self.defeat_counted = True
if not self.reward_sparse:
reward += self.reward_defeat
else:
reward = -1
elif self._episode_steps >= self.episode_limit:
# Episode limit reached
terminated = True
if self.continuing_episode:
info["episode_limit"] = True
self.battles_game += 1
self.timeouts += 1
if self.debug:
logging.debug("Reward = {}".format(reward).center(60, '-'))
if terminated:
self._episode_count += 1
if self.reward_scale:
reward /= self.max_reward / self.reward_scale_rate
return reward, terminated, info
def get_agent_action(self, a_id, action):
"""Construct the action for agent a_id."""
avail_actions = self.get_avail_agent_actions(a_id)
assert avail_actions[action] == 1, \
"Agent {} cannot perform action {}".format(a_id, action)
unit = self.get_unit_by_id(a_id)
tag = unit.tag
x = unit.pos.x
y = unit.pos.y
if action == 0:
# no-op (valid only when dead)
assert unit.health == 0, "No-op only available for dead agents."
if self.debug:
logging.debug("Agent {}: Dead".format(a_id))
return None
elif action == 1:
# stop
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["stop"],
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Stop".format(a_id))
elif action == 2:
# move north
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x, y=y + self._move_amount),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move North".format(a_id))
elif action == 3:
# move south
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x, y=y - self._move_amount),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move South".format(a_id))
elif action == 4:
# move east
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x + self._move_amount, y=y),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move East".format(a_id))
elif action == 5:
# move west
cmd = r_pb.ActionRawUnitCommand(
ability_id=actions["move"],
target_world_space_pos=sc_common.Point2D(
x=x - self._move_amount, y=y),
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {}: Move West".format(a_id))
else:
# attack/heal units that are in range
target_tag = action - self.n_actions_no_attack
if unit.unit_type == Terran.Medivac:
target_id = np.where(self.ally_tags == target_tag)[0].item()
target_unit = self.agents[target_id]
action_name = "heal"
else:
target_id = np.where(self.enemy_tags == target_tag)[0].item()
target_unit = self.enemies[target_id]
action_name = "attack"
action_id = actions[action_name]
target_unit_tag = target_unit.tag
cmd = r_pb.ActionRawUnitCommand(
ability_id=action_id,
target_unit_tag=target_unit_tag,
unit_tags=[tag],
queue_command=False)
if self.debug:
logging.debug("Agent {} {}s unit # {}".format(
a_id, action_name, target_id))
sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
return sc_action
def get_agent_action_heuristic(self, a_id, action):
unit = self.get_unit_by_id(a_id)
tag = unit.tag
target = self.heuristic_targets[a_id]
if unit.unit_type == Terran.Medivac:
if (target is None or self.agents[target].health == 0 or
self.agents[target].health == self.agents[target].health_max):
min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
min_id = -1
for al_id, al_unit in self.agents.items():
if al_unit.unit_type == Terran.Medivac:
continue
if (al_unit.health != 0 and
al_unit.health != al_unit.health_max):
dist = self.distance(unit.pos.x, unit.pos.y,
al_unit.pos.x, al_unit.pos.y)
if dist < min_dist:
min_dist = dist
min_id = al_id
self.heuristic_targets[a_id] = min_id
if min_id == -1:
self.heuristic_targets[a_id] = None
return None
action_id = actions['heal']
target_tag = self.agents[self.heuristic_targets[a_id]].tag
else:
if target is None or self.enemies[target].health == 0:
min_dist = math.hypot(self.max_distance_x, self.max_distance_y)
min_id = -1
for e_id, e_unit in self.enemies.items():
if (unit.unit_type == Terran.Marauder and
e_unit.unit_type == Terran.Medivac):
continue
if e_unit.health > 0:
dist = self.distance(unit.pos.x, unit.pos.y,
e_unit.pos.x, e_unit.pos.y)
if dist < min_dist:
min_dist = dist
min_id = e_id
self.heuristic_targets[a_id] = min_id
action_id = actions['attack']
target_tag = self.enemies[self.heuristic_targets[a_id]].tag
cmd = r_pb.ActionRawUnitCommand(
ability_id=action_id,
target_unit_tag=target_tag,
unit_tags=[tag],
queue_command=False)
sc_action = sc_pb.Action(action_raw=r_pb.ActionRaw(unit_command=cmd))
return sc_action
def reward_battle(self):
"""Reward function when self.reward_spare==False.
Returns accumulative hit/shield point damage dealt to the enemy
+ reward_death_value per enemy unit killed, and, in case
self.reward_only_positive == False, - (damage dealt to ally units
+ reward_death_value per ally unit killed) * self.reward_negative_scale
"""
if self.reward_sparse:
return 0
reward = 0
delta_deaths = 0
delta_ally = 0
delta_enemy = 0
neg_scale = self.reward_negative_scale
# update deaths
for al_id, al_unit in self.agents.items():
if not self.death_tracker_ally[al_id]:
# did not die so far
prev_health = (
self.previous_ally_units[al_id].health +
self.previous_ally_units[al_id].shield
)
if al_unit.health == 0:
# just died
self.death_tracker_ally[al_id] = 1
if not self.reward_only_positive:
delta_deaths -= self.reward_death_value * neg_scale
delta_ally += prev_health * neg_scale
else:
# still alive
delta_ally += neg_scale * (
prev_health - al_unit.health - al_unit.shield
)
for e_id, e_unit in self.enemies.items():
if not self.death_tracker_enemy[e_id]:
prev_health = (
self.previous_enemy_units[e_id].health +
self.previous_enemy_units[e_id].shield
)
if e_unit.health == 0:
self.death_tracker_enemy[e_id] = 1
delta_deaths += self.reward_death_value
delta_enemy += prev_health
else:
delta_enemy += prev_health - e_unit.health - e_unit.shield
        if self.reward_only_positive:
            reward = abs(delta_enemy + delta_deaths)  # abs() guards against negative values caused by shield regeneration
else:
reward = delta_enemy + delta_deaths - delta_ally
return reward
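    # Worked example (illustrative): with reward_death_value = 10 and
    # reward_only_positive = True, an enemy that drops from 30 health + 10 shield
    # to 0 in one step contributes prev_health (40) + reward_death_value (10) = 50
    # to the unscaled reward for that step.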
def get_total_actions(self):
"""Returns the total number of actions an agent could ever take."""
return self.n_actions
@staticmethod
def distance(x1, y1, x2, y2):
"""Distance between two points."""
return math.hypot(x2 - x1, y2 - y1)
def unit_shoot_range(self, agent_id):
"""Returns the shooting range for an agent."""
return 6
def unit_sight_range(self, agent_id):
"""Returns the sight range for an agent."""
return self._sight_range
def save_replay(self):
"""Save a replay."""
prefix = self.replay_prefix or self.map_name
replay_dir = self.replay_dir or ""
replay_path = self._run_config.save_replay(
self._controller.save_replay(), replay_dir=replay_dir, prefix=prefix)
logging.info("Replay saved at: %s" % replay_path)
def unit_max_shield(self, unit):
"""Returns maximal shield for a given unit."""
if unit.unit_type == 74 or unit.unit_type == self.stalker_id:
return 80 # Protoss's Stalker
if unit.unit_type == 73 or unit.unit_type == self.zealot_id:
            return 50  # Protoss's Zealot
if unit.unit_type == 4 or unit.unit_type == self.colossus_id:
return 150 # Protoss's Colossus
def can_move(self, unit, direction):
"""Whether a unit can move in a given direction."""
m = self._move_amount / 2
if direction == Direction.NORTH:
x, y = int(unit.pos.x), int(unit.pos.y + m)
elif direction == Direction.SOUTH:
x, y = int(unit.pos.x), int(unit.pos.y - m)
elif direction == Direction.EAST:
x, y = int(unit.pos.x + m), int(unit.pos.y)
else:
x, y = int(unit.pos.x - m), int(unit.pos.y)
if self.check_bounds(x, y) and self.pathing_grid[x, y]:
return True
return False
def get_surrounding_points(self, unit, include_self=False):
"""Returns the surrounding points of the unit in 8 directions."""
x = int(unit.pos.x)
y = int(unit.pos.y)
ma = self._move_amount
points = [
(x, y + 2 * ma),
(x, y - 2 * ma),
(x + 2 * ma, y),
(x - 2 * ma, y),
(x + ma, y + ma),
(x - ma, y - ma),
(x + ma, y - ma),
(x - ma, y + ma),
]
if include_self:
points.append((x, y))
return points
def check_bounds(self, x, y):
"""Whether a point is within the map bounds."""
return (0 <= x < self.map_x and 0 <= y < self.map_y)
def get_surrounding_pathing(self, unit):
"""Returns pathing values of the grid surrounding the given unit."""
points = self.get_surrounding_points(unit, include_self=False)
vals = [
self.pathing_grid[x, y] if self.check_bounds(x, y) else 1
for x, y in points
]
return vals
def get_surrounding_height(self, unit):
"""Returns height values of the grid surrounding the given unit."""
points = self.get_surrounding_points(unit, include_self=True)
vals = [
self.terrain_height[x, y] if self.check_bounds(x, y) else 1
for x, y in points
]
return vals
def get_masks(self):
"""
Returns:
        1) per-agent observability mask over all entities (unobserved = 1, else 0)
        2) mask of inactive entities (including enemies) over all possible entities
"""
sight_range = np.array(
[self.unit_sight_range(a_i)
for a_i in range(self.n_agents)]).reshape(-1, 1)
obs_mask = (self.dist_mtx > sight_range).astype(np.uint8)
obs_mask_padded = np.ones((self.max_n_agents,
self.max_n_agents + self.max_n_enemies),
dtype=np.uint8)
obs_mask_padded[:self.n_agents,
:self.n_agents] = obs_mask[:, :self.n_agents]
obs_mask_padded[:self.n_agents,
self.max_n_agents:self.max_n_agents + self.n_enemies] = (
obs_mask[:, self.n_agents:]
)
entity_mask = np.ones(self.max_n_agents + self.max_n_enemies,
dtype=np.uint8)
entity_mask[:self.n_agents] = 0
entity_mask[self.max_n_agents:self.max_n_agents + self.n_enemies] = 0
return obs_mask_padded, entity_mask
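    # Worked example (illustrative): with 2 agents, 1 enemy and a sight range of 9,
    # a distance row of [0, 12, 5] for agent 0 gives an observability mask row of
    # [0, 1, 0] before padding: the other agent is out of sight, the enemy is visible.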
def get_entities(self):
"""
Returns list of agent entities and enemy entities in the map (all entities are a fixed size)
All entities together form the global state
For decentralized execution agents should only have access to the
entities specified by get_masks()
"""
all_units = list(self.agents.items()) + list(self.enemies.items())
nf_entity = self.get_entity_size()
center_x = self.map_x / 2
center_y = self.map_y / 2
com_x = sum(unit.pos.x for u_i, unit in all_units) / len(all_units)
com_y = sum(unit.pos.y for u_i, unit in all_units) / len(all_units)
max_dist_com = max(self.distance(unit.pos.x, unit.pos.y, com_x, com_y)
for u_i, unit in all_units)
entities = []
avail_actions = self.get_avail_actions()
for u_i, unit in all_units:
entity = np.zeros(nf_entity, dtype=np.float32)
# entity tag
if u_i < self.n_agents:
tag = self.ally_tags[u_i]
else:
tag = self.enemy_tags[u_i - self.n_agents]
entity[tag] = 1
ind = self.max_n_agents + self.max_n_enemies
# available actions (if user controlled entity)
if u_i < self.n_agents:
for ac_i in range(self.n_actions - 2):
entity[ind + ac_i] = avail_actions[u_i][2 + ac_i]
ind += self.n_actions - 2
# unit type
if self.unit_type_bits > 0:
type_id = self.unit_type_ids[unit.unit_type]
entity[ind + type_id] = 1
ind += self.unit_type_bits
if unit.health > 0: # otherwise dead, return all zeros
# health and shield
if self.obs_all_health or self.obs_own_health:
entity[ind] = unit.health / unit.health_max
if ((self.shield_bits_ally > 0 and u_i < self.n_agents) or
(self.shield_bits_enemy > 0 and
u_i >= self.n_agents)):
entity[ind + 1] = unit.shield / unit.shield_max
ind += 1 + int(self.shield_bits_ally or
self.shield_bits_enemy)
# x-y positions
entity[ind] = (unit.pos.x - center_x) / self.max_distance_x
entity[ind + 1] = (unit.pos.y - center_y) / self.max_distance_y
entity[ind + 2] = (unit.pos.x - com_x) / max_dist_com
entity[ind + 3] = (unit.pos.y - com_y) / max_dist_com
ind += 4
if self.obs_pathing_grid:
entity[
ind:ind + self.n_obs_pathing
] = self.get_surrounding_pathing(unit)
ind += self.n_obs_pathing
if self.obs_terrain_height:
entity[ind:] = self.get_surrounding_height(unit)
entities.append(entity)
# pad entities to fixed number across episodes (for easier batch processing)
if u_i == self.n_agents - 1:
entities += [np.zeros(nf_entity, dtype=np.float32)
for _ in range(self.max_n_agents -
self.n_agents)]
elif u_i == self.n_agents + self.n_enemies - 1:
entities += [np.zeros(nf_entity, dtype=np.float32)
for _ in range(self.max_n_enemies -
self.n_enemies)]
return entities
def get_entity_size(self):
nf_entity = self.max_n_agents + self.max_n_enemies # tag
nf_entity += self.n_actions - 2 # available actions minus those that are always available
nf_entity += self.unit_type_bits # unit type
# below are only observed for alive units (else zeros)
if self.obs_all_health or self.obs_own_health:
nf_entity += 1 + int(self.shield_bits_ally or self.shield_bits_enemy) # health and shield
nf_entity += 4 # global x-y coords + rel x-y to center of mass of all agents (normalized by furthest agent's distance)
if self.obs_pathing_grid:
nf_entity += self.n_obs_pathing # local pathing
if self.obs_terrain_height:
nf_entity += self.n_obs_height # local terrain
return nf_entity
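    # Worked example (illustrative): with max_n_agents = 8, max_n_enemies = 8,
    # n_actions = 14, unit_type_bits = 3, health observed, no shields and no
    # pathing/terrain features, nf_entity = 16 + 12 + 3 + 1 + 4 = 36.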
def get_obs_agent(self, agent_id, global_obs=False):
unit = self.get_unit_by_id(agent_id)
move_feats = np.zeros(self.move_feats_len, dtype=np.float32) # exclude no-op & stop
enemy_feats = np.zeros((self.n_enemies, self.nf_en), dtype=np.float32)
ally_feats = np.zeros((self.n_agents - 1, self.nf_al), dtype=np.float32)
own_feats = | np.zeros(self.nf_own, dtype=np.float32) | numpy.zeros |
"""Tests weight_transfer.py."""
# pylint: disable=no-name-in-module
import pytest
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from dogwood.errors import NotADenseLayerError, InvalidExpansionStrategyError
from dogwood.model_expansion import (
expand_dense_layer,
expand_dense_layers,
STRATEGY_ALL_ZERO,
STRATEGY_OUTPUT_ZERO,
STRATEGY_ALL_RANDOM,
are_symmetric_dense_neurons,
clone_layer,
)
from tests.conftest import (
MNIST_IMAGE_SHAPE,
MICRO_HIDDEN_LEN,
MICRO_OUTPUT_LEN,
)
def test_are_symmetric_dense_neurons_one_neuron(
micro_symmetry_model: Sequential,
) -> None:
"""Tests that one neuron is always considered symmetric.
:param micro_symmetry_model: The small model used in weight symmetry tests.
"""
for layer_name in "dense_1", "dense_2":
for neuron_idx in range(
micro_symmetry_model.get_layer(layer_name).units
):
assert are_symmetric_dense_neurons(
micro_symmetry_model, layer_name, {neuron_idx}
)
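def _demo_identical_neurons_have_identical_outputs() -> None:
    """Illustrative sketch (not part of dogwood's test suite): two dense neurons
    with identical incoming weights produce identical pre-activations for any
    input, which is the intuition behind the symmetry checks in this module."""
    rng = np.random.default_rng(0)
    inputs = rng.normal(size=(5, 3))
    weights = rng.normal(size=(3, 1))
    pre_activations = inputs @ np.hstack([weights, weights])  # two identical neurons
    assert np.allclose(pre_activations[:, 0], pre_activations[:, 1])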
@pytest.mark.slowtest
def test_are_symmetric_dense_neurons_symmetric(
micro_symmetry_model: Sequential,
micro_symmetry_dataset: tuple[np.ndarray, np.ndarray],
) -> None:
"""Tests that deliberately symmetric neurons are considered symmetric.
:param micro_symmetry_model: The small model used in weight symmetry tests.
:param micro_symmetry_dataset: The training dataset for the model.
"""
# Deliberately set the weights of neurons 3 and 4 in dense_1.
weights = micro_symmetry_model.get_weights()
# Weights in.
weights[0][:, 2:4] = | np.ones((2, 2)) | numpy.ones |
# -*- coding: iso-8859-15 -*-
#
# This software was written by <NAME> (<NAME>)
# Copyright <NAME>
# All rights reserved
# This software is licenced under a 3-clause BSD style license
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#Redistributions of source code must retain the above copyright notice,
#this list of conditions and the following disclaimer.
#
#Redistributions in binary form must reproduce the above copyright notice,
#this list of conditions and the following disclaimer in the documentation
#and/or other materials provided with the distribution.
#
#Neither the name of the University College London nor the names
#of the code contributors may be used to endorse or promote products
#derived from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
#THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
#PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
#CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
#EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
#PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
#WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
#ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
# Developed by <NAME> (MSSL/UCL)
# uvotpy
# (c) 2009-2017, see Licence
from future.builtins import str
from future.builtins import input
from future.builtins import range
__version__ = '2.9.0 20171209'
import sys
import optparse
import numpy as np
import matplotlib.pyplot as plt
try:
from astropy.io import fits as pyfits
from astropy import wcs
except:
import pyfits
import re
import warnings
try:
import imagestats
except:
import stsci.imagestats as imagestats
import scipy
from scipy import interpolate
from scipy.ndimage import convolve
from scipy.signal import boxcar
from scipy.optimize import leastsq
from scipy.special import erf
from numpy import polyfit, polyval
'''
try:
#from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
import uvotplot
import uvotmisc
import uvotwcs
import rationalfit
import mpfit
import uvotio
except:
pass
'''
from uvotmisc import interpgrid, uvotrotvec, rdTab, rdList
from generate_USNOB1_cat import get_usnob1_cat
import datetime
import os
if __name__ != '__main__':
anchor_preset = list([None,None])
bg_pix_limits = list([-100,-70,70,100])
bg_lower_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
bg_upper_ = list([None,None]) # (offset, width) in pix, e.g., [20,30], default [50,50]
offsetlimit = None
#set Global parameters
status = 0
do_coi_correction = True # if not set, disable coi_correction
tempnames = list()
tempntags = list()
cval = -1.0123456789
interactive = True
update_curve = True
contour_on_img = False
give_result = False # with this set, a call to getSpec returns all data
give_new_result = False
use_rectext = False
background_method = 'boxcar' # alternatives 'splinefit' 'boxcar'
background_smoothing = [50,7] # 'boxcar' default smoothing in dispersion and across dispersion in pix
background_interpolation = 'linear'
trackcentroiding = True # default (= False will disable track y-centroiding)
global trackwidth
trackwidth = 2.5 # width of extraction region in sigma (alternative default = 1.0) 2.5 was used for flux calibration.
bluetrackwidth = 1.3 # multiplier width of non-order-overlapped extraction region [not yet active]
write_RMF = False
background_source_mag = 18.0
zeroth_blim_offset = 1.0
coi_half_width = None
slit_width = 200
_PROFILE_BACKGROUND_ = False  # start with severe sigma-clip of background, before going to smoothing
today_ = datetime.date.today()
datestring = today_.isoformat()[0:4]+today_.isoformat()[5:7]+today_.isoformat()[8:10]
fileversion=1
calmode=True
typeNone = type(None)
senscorr = True # do sensitivity correction
print(66*"=")
print("uvotpy module uvotgetspec version=",__version__)
print("<NAME> (c) 2009-2017, see uvotpy licence.")
print("please use reference provided at http://github.com/PaulKuin/uvotpy")
print(66*"=","\n")
def getSpec(RA,DEC,obsid, ext, indir='./', wr_outfile=True,
outfile=None, calfile=None, fluxcalfile=None,
use_lenticular_image=True,
offsetlimit=None, anchor_offset=None, anchor_position=[None,None],
background_lower=[None,None], background_upper=[None,None],
background_template=None,
fixed_angle=None, spextwidth=13, curved="update",
fit_second=False, predict2nd=True, skip_field_src=False,
optimal_extraction=False, catspec=None,write_RMF=write_RMF,
get_curve=None,fit_sigmas=True,get_sigma_poly=False,
lfilt1=None, lfilt1_ext=None, lfilt2=None, lfilt2_ext=None,
wheelpos=None, interactive=interactive, sumimage=None, set_maglimit=None,
plot_img=True, plot_raw=True, plot_spec=True, zoom=True, highlight=False,
uvotgraspcorr_on=True, ank_c_0offset = False,
update_pnt=True, ifmotion=False, motion_file=None, anchor_x_offset=False,
replace=None,ifextended=False, singleside_bkg = False, fixwidth = False,
clobber=False, chatter=1):
'''Makes all the necessary calls to reduce the data.
Parameters
----------
ra, dec : float
The Sky position (J2000) in **decimal degrees**
obsid : str
The observation ID number as a **String**. Typically that is
something like "00032331001" and should be part of your
grism filename which is something like "sw00032331001ugu_dt.img"
ext : int
number of the extension to process
kwargs : dict
optional keyword arguments, possible values are:
- **fit_second** : bool
        fit the second order. Off by default since it sometimes causes problems when
        the orders overlap completely. Useful for spectra in the top part of the detector.
- **background_lower** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **background_upper** : list
instead of default background list offset from spectrum as list
of two numbers, like [20, 40]. Distance relative to spectrum
- **offsetlimit** : None,int,[center,range]
Default behaviour is to determine automatically any required offset from
the predicted anchor position to the spectrum, and correct for that.
The automated method may fail in the case of a weak spectrum and strong zeroth
or first order next to the spectrum. Two methods are provided:
(1) provide a number which will be used to limit the allowed offset. If
within that limit no peak is identified, the program will stop and require
you to provide a manual offset value. Try small numbers like 1, -1, 3, etc..
(2) if you already know the approximate y-location of the spectrum at the
anchor x-position in the rotated small image strip around the spectrum, you
can give this with a small allowed range for fine tuning as a list of two
parameter values. The first value in the list must be the y-coordinate
(by default the spectrum falls close to y=100 pixels), the second parameter
the allowed adjustment to a peak value in pixels. For example, [105,2].
This will require no further interactive input, and the spectrum will be
extracted using that offset.
- **wheelpos**: {160,200,955,1000}
filter wheel position for the grism filter mode used. Helpful for
forcing Vgrism or UVgrism input when both are present in the directory.
160:UV Clocked, 200:UV Nominal, 955:V clocked, 1000:V nominal
- **zoom** : bool
when False, the whole extracted region is displayed, including zeroth
order when present.
- **clobber** : bool
When True, overwrite earlier output (see also outfile)
- **write_RMF** : bool
When True, write the rmf file (will take extra time due to large matrix operations)
- **use_lenticular_image** : bool
When True and a lenticular image is present, it is used. If False,
the grism image header WCS-S system will be used for the astrometry,
with an automatic call to uvotgraspcorr for refinement.
- **sumimage** : str
        Name of a summed image generated using ``sum_Extimage()``; the spectrum
        will be extracted from the summed image.
- **wr_outfile** : bool
If False, no output file is written
- **outfile** : path, str
Name of output file, other than automatically generated.
- **calfile** : path, str
calibration file name
- **fluxcalfile** : path, str
flux calibration file name or "CALDB" or None
- **predict2nd** : bool
predict the second order flux from the first. Overestimates in centre a lot.
- **skip_field_src** : bool
        if True do not locate zeroth order positions. Can be used if the absence
        of an internet connection or USNO-B1 server problems cause failures.
- **optimal_extraction** : bool, obsolete
        Do not use. Better results are obtained with the other implementation.
- **catspec** : path
optional full path to the catalog specification file for uvotgraspcorr.
- **get_curve** : bool or path
True: activate option to supply the curvature coefficients of all
orders by hand.
path: filename with coefficients of curvature
- **uvotgraspcorr_on** : bool
enable/disable rerun of uvotgraspcorr to update the WCS keywords
- **update_pnt** : bool
enable/disable update of the WCS keywords from the attitude file
(this is done prior to running uvotgraspcorr is that is enabled)
- **fit_sigmas** : bool
fit the sigma of trackwidths if True (not implemented, always on)
- **get_sigma_poly** : bool
option to supply the polynomial for the sigma (not implemented)
- **lfilt1**, **lfilt2** : str
        names of the lenticular filters before and after the grism exposure
(now supplied by fileinfo())
- **lfilt1_ext**, **lfilt2_ext** : int
extension of the lenticular filter (now supplied by fileinfo())
- **plot_img** : bool
plot the first figure with the det image
- **plot_raw** : bool
plot the raw spectrum data
- **plot_spec** : bool
plot the flux spectrum
- **highlight** : bool
add contours to the plots to highlight contrasts
- **chatter** : int
verbosity of program
- **set_maglimit** : int
        specify a magnitude limit to search for background sources in the USNO-B1 catalog
- **background_template** : numpy 2D array
        User-provided background template that will be used instead of
        determining the background. Must be in counts. Size and alignment
        must exactly match the detector image.
Returns
-------
None, (give_result=True) compounded data (Y0, Y1, Y2, Y3, Y4) which
are explained in the code, or (give_new_result=True) a data dictionary.
Notes
-----
**Quick Start**
`getSpec(ra,dec,obsid, ext,)`
should produce plots and output files
**Which directory?**
The program needs to be started from the CORRECT data directory.
    The attitude file [e.g., "sw<OBSID>pat.fits"] is needed!
A link or copy of the attitude file needs to be present in the directory
or "../../auxil/" directory as well.
**Global parameters**
These parameters can be reset, e.g., during a (i)python session, before calling getSpec.
- **trackwidth** : float
        width of the spectral extraction in units of sigma. The default is trackwidth = 2.5.
        The alternative trackwidth = 1.0 gives better results for weak sources
        or spectra with nearby contamination, but the flux calibration and
        coincidence-loss correction are currently inconsistent for that value.
        When using trackwidth=1.0, rescale the flux to match trackwidth=2.5,
        the value used for the flux calibration and coincidence-loss correction
        (see the example below).
      - **give_result** : bool
        leave set to False; when True, a call to getSpec returns all the
        intermediate results (see Returns).
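        For example, to use the narrow extraction slit and rescale afterwards:
            from uvotpy import uvotgetspec
            uvotgetspec.trackwidth = 1.0   # reset the module-level default before calling getSpec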
When the extraction slit is set to be straight ``curved="straight"`` it cuts off the UV part of the
spectrum for spectra located in the top left and bottom right of the image.
History
-------
Version 2011-09-22 NPMK(MSSL) : handle case with no lenticular filter observation
Version 2012-01-15 NPMK(MSSL) : optimal extraction is no longer actively supported until further notice
Version 2013-10-23 NPMK(MSSL) : fixed bug so uvotgraspcorr gives same accuracy as lenticular filter
Version 2014-01-01 NPMK(MSSL) : aperture correction for background added; output dictionary
    Version 2014-07-23 NPMK(MSSL) : coi-correction using new calibrated coi-box and factor
Version 2014-08-04 NPMK(MSSL/UCL): expanded offsetlimit parameter with list option to specify y-range.
Version 2015-12-03 NPMK(MSSL/UCL): change input parameter 'get_curve' to accept a file name with coefficients
Version 2016-01-16 NPMK(MSSL/UCL): added options for background; disable automated centroiding of spectrum
Example
-------
from uvotpy.uvotgetspec import getSpec
from uvotpy import uvotgetspec
import os, shutil
indir1 = os.getenv('UVOTPY') +'/test'
indir2 = os.getcwd()+'/test/UVGRISM/00055900056/uvot/image'
shutil.copytree(indir1, os.getcwd()+'/test' )
getSpec( 254.7129625, 34.3148667, '00055900056', 1, offsetlimit=1,indir=indir2, clobber=True )
'''
# (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
# (Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra = Y0
#
#( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
# (C_1,C_2,img), hdr,m1,m2,aa,wav1 ) = Y1
#
#fit,(coef0,coef1,coef2,coef3),(bg_zeroth,bg_first,bg_second,bg_third),(borderup,borderdown),apercorr,expospec=Y2
#
#counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
#
#wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
#
# where,
#
#(present0,present1,present2,present3),(q0,q1,q2,q3), \
# (y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
# (y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
# (x,xstart,xend,sp_all,quality,co_back) = fit
#
    # dis = dispersion with zero at ~260nm[UV]/420nm[V] ; spnet = background-subtracted spectrum from 'spnetimg'
# angle = rotation-angle used to extract 'extimg' ; anker = first order anchor position in DET coordinates
# anker2 = second order anker X,Y position ; anker_field = Xphi,Yphy input angles with respect to reference
# ank_c = X,Y position of axis of rotation (anker) in 'extimg'
# bg = mean background, smoothed, with sources removed
# bg1 = one-sided background, sources removed, smoothed ; bg2 = same for background opposite side
# extimg = image extracted of source and background, 201 pixels wide, all orders.
# spimg = image centered on first order position ; spnetimg = background-subtracted 'spimg'
# offset = offset of spectrum from expected position based on 'anchor' at 260nm[UVG]/420nm[VG], first order
# C_1 = dispersion coefficients [python] first order; C_2 = same for second order
# img = original image ;
# WC_lines positions for selected WC star lines ; hdr = header for image
# m1,m2 = index limits spectrum ; aa = indices spectrum (e.g., dis[aa])
# wav1 = wavelengths for dis[aa] first order (combine with spnet[aa])
#
# when wr_outfile=True the program produces a flux calibrated output file by calling uvotio.
# [fails if output file is already present and clobber=False]
#
# The background must be consistent with the width of the spectrum summed.
from uvotio import fileinfo, rate2flux, readFluxCalFile
from uvotplot import plot_ellipsoid_regions
    if (type(RA) == np.ndarray) | (type(DEC) == np.ndarray):
raise IOError("RA, and DEC arguments must be of float type ")
if type(offsetlimit) == list:
if len(offsetlimit) != 2:
raise IOError("offsetlimit list must be [center, distance from center] in pixels")
get_curve_filename = None
a_str_type = type(curved)
if chatter > 4 :
print ("\n*****\na_str_type = ",a_str_type)
print ("value of get_curve = ",get_curve)
print ("type of parameter get_curve is %s\n"%(type(get_curve)) )
print ("type curved = ",type(curved))
if type(get_curve) == a_str_type:
# file name: check this file is present
if os.access(get_curve,os.F_OK):
get_curve_filename = get_curve
get_curve = True
else:
raise IOError(
"ERROR: get_curve *%s* is not a boolean value nor the name of a file that is on the disk."
%(get_curve) )
elif type(get_curve) == bool:
if get_curve:
get_curve_filename = None
print("requires input of curvature coefficients")
elif type(get_curve) == type(None):
get_curve = False
else:
raise IOError("parameter get_curve should by type str or bool, but is %s"%(type(get_curve)))
# check environment
CALDB = os.getenv('CALDB')
    if not CALDB:
print('WARNING: The CALDB environment variable has not been set')
HEADAS = os.getenv('HEADAS')
    if not HEADAS:
print('WARNING: The HEADAS environment variable has not been set')
print('That is needed for the calls to uvot Ftools ')
#SCAT_PRESENT = os.system('which scat > /dev/null')
#if SCAT_PRESENT != 0:
# print('WARNING: cannot locate the scat program \nDid you install WCSTOOLS ?\n')
SESAME_PRESENT = os.system('which sesame > /dev/null')
#if SESAME_PRESENT != 0:
# print 'WARNING: cannot locate the sesame program \nDid you install the cdsclient tools?\n'
# fix some parameters
framtime = 0.0110329 # all grism images are taken in unbinned mode
splineorder=3
getzmxmode='spline'
smooth=50
testparam=None
msg = "" ; msg2 = "" ; msg4 = ""
attime = datetime.datetime.now()
logfile = 'uvotgrism_'+obsid+'_'+str(ext)+'_'+'_'+attime.isoformat()[0:19]+'.log'
if type(fluxcalfile) == bool: fluxcalfile = None
tempnames.append(logfile)
tempntags.append('logfile')
tempnames.append('rectext_spectrum.img')
tempntags.append('rectext')
lfiltnames=np.array(['uvw2','uvm2','uvw1','u','b','v','wh'])
ext_names =np.array(['uw2','um2','uw1','uuu','ubb','uvv','uwh'])
filestub = 'sw'+obsid
histry = ""
for x in sys.argv: histry += x + " "
Y0 = None
Y2 = None
Y3 = None
Y4 = None
Yfit = {}
Yout = {"coi_level":None} # output dictionary (2014-01-01; replace Y0,Y1,Y2,Y3)
lfilt1_aspcorr = "not initialized"
lfilt2_aspcorr = "not initialized"
qflag = quality_flags()
ZOpos = None
# parameters getSpec()
Yout.update({'indir':indir,'obsid':obsid,'ext':ext})
Yout.update({'ra':RA,'dec':DEC,'wheelpos':wheelpos})
if type(sumimage) == typeNone:
if background_template is not None:
# convert background_template to a dictionary
background_template = {'template':np.asarray(background_template),
'sumimg':False}
try:
ext = int(ext)
except:
print("fatal error in extension number: must be an integer value")
# locate related lenticular images
specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile = \
fileinfo(filestub,ext,directory=indir,wheelpos=wheelpos,chatter=chatter)
# set some flags and variables
lfiltinput = (lfilt1 != None) ^ (lfilt2 != None)
lfiltpresent = lfiltinput | (lfilt1_ != None) | (lfilt2_ != None)
if (type(lfilt1_) == typeNone) & (type(lfilt2_) == typeNone):
# ensure the output is consistent with no lenticular filter solution
use_lenticular_image = False
# translate
filt_id = {"wh":"wh","v":"vv","b":"bb","u":"uu","uvw1":"w1","uvm2":"m2","uvw2":"w2"}
lfiltflag = False
if ((type(lfilt1) == typeNone)&(type(lfilt1_) != typeNone)):
lfilt1 = lfilt1_
lfilt1_ext = lfilt1_ext_
if chatter > 0: print("lenticular filter 1 from search lenticular images"+lfilt1+"+"+str(lfilt1_ext))
lfiltflag = True
lfilt1_aspcorr = None
try:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
except:
hdu_1 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt1]+"_sk.img.gz",lfilt1_ext)
lfilt1_aspcorr = hdu_1["ASPCORR"]
if ((type(lfilt2) == typeNone)&(type(lfilt2_) != typeNone)):
lfilt2 = lfilt2_
lfilt2_ext = lfilt2_ext_
if chatter > 0: print("lenticular filter 2 from search lenticular images"+lfilt2+"+"+str(lfilt2_ext))
lfiltflag = True
lfilt2_aspcorr = None
try:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
except:
hdu_2 = pyfits.getheader(indir+"/sw"+obsid+"u"+filt_id[lfilt2]+"_sk.img.gz",lfilt2_ext)
lfilt2_aspcorr = hdu_2["ASPCORR"]
# report
if chatter > 4:
msg2 += "getSpec: image parameter values\n"
msg2 += "ra, dec = (%6.1f,%6.1f)\n" % (RA,DEC)
msg2 += "filestub, extension = %s[%i]\n"% (filestub, ext)
if lfiltpresent & use_lenticular_image:
msg2 += "first/only lenticular filter = "+lfilt1+" extension first filter = "+str(lfilt1_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt1_aspcorr)
if lfilt2_ext != None:
msg2 += "second lenticular filter = "+lfilt2+" extension second filter = "+str(lfilt2_ext)+'\n'
msg2 += " Aspect correction keyword : %s\n"%(lfilt2_aspcorr)
if not use_lenticular_image:
msg2 += "anchor position derived without lenticular filter\n"
msg2 += "spectrum extraction preset width = "+str(spextwidth)+'\n'
#msg2 += "optimal extraction "+str(optimal_extraction)+'\n'
hdr = pyfits.getheader(specfile,int(ext))
if chatter > -1:
msg += '\nuvotgetspec version : '+__version__+'\n'
msg += ' Position RA,DEC : '+str(RA)+' '+str(DEC)+'\n'
msg += ' Start date-time : '+str(hdr['date-obs'])+'\n'
msg += ' grism file : '+specfile.split('/')[-1]+'['+str(ext)+']\n'
msg += ' attitude file : '+attfile.split('/')[-1]+'\n'
if lfiltpresent & use_lenticular_image:
if ((lfilt1 != None) & (lfilt1_ext != None)):
msg += ' lenticular file 1: '+lfilt1+'['+str(lfilt1_ext)+']\n'
msg += ' aspcorr: '+lfilt1_aspcorr+'\n'
if ((lfilt2 != None) & (lfilt2_ext != None)):
msg += ' lenticular file 2: '+lfilt2+'['+str(lfilt2_ext)+']\n'
msg += ' aspcorr: '+lfilt2_aspcorr+'\n'
if not use_lenticular_image:
msg += "anchor position derived without lenticular filter\n"
if not 'ASPCORR' in hdr: hdr['ASPCORR'] = 'UNKNOWN'
Yout.update({'hdr':hdr})
tstart = hdr['TSTART']
tstop = hdr['TSTOP']
wheelpos = hdr['WHEELPOS']
expo = hdr['EXPOSURE']
expmap = [hdr['EXPOSURE']]
Yout.update({'wheelpos':wheelpos})
if 'FRAMTIME' not in hdr:
# compute the frametime from the CCD deadtime and deadtime fraction
#deadc = hdr['deadc']
#deadtime = 600*285*1e-9 # 600ns x 285 CCD lines seconds
#framtime = deadtime/(1.0-deadc)
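#   e.g. deadtime = 600e-9*285 = 1.71e-4 s; with a typical UVOT deadc of ~0.9845
#   this gives 1.71e-4/(1-0.9845) ~ 0.0110 s, i.e. the default value set below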
framtime = 0.0110329
hdr['framtime'] = (framtime, 'frame time computed from deadc')
Yout.update({'hdr':hdr})
if chatter > 1:
print("frame time computed from deadc - added to hdr")
print("with a value of ",hdr['framtime']," ",Yout['hdr']['framtime'])
if not 'detnam' in hdr:
hdr['detnam'] = str(hdr['wheelpos'])
msg += ' exposuretime : %7.1f \n'%(expo)
maxcounts = 1.1 * expo/framtime
if chatter > 0:
msg += ' wheel position : '+str(wheelpos)+'\n'
msg += ' roll angle : %5.1f\n'% (hdr['pa_pnt'])
msg += 'coincidence loss version: 2 (2014-07-23)\n'
msg += '======================================\n'
try:
if ( (np.abs(RA - hdr['RA_OBJ']) > 0.4) ^ (np.abs(DEC - hdr['DEC_OBJ']) > 0.4) ):
sys.stderr.write("\nWARNING: It looks like the input RA,DEC and target position in header are different fields\n")
except (RuntimeError, TypeError, NameError, KeyError):
pass
msg2 += " cannot read target position from header for verification\n"
if lfiltinput:
# the lenticular filter(s) were specified on the command line.
# check that the lenticular image and grism image are close enough in time.
if type(lfilt1_ext) == typeNone:
lfilt1_ext = int(ext)
lpos = np.where( np.array([lfilt1]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile1 = filestub+lnam[0]+'_sk.img'
hdr_l1 = pyfits.getheader(lfile1,lfilt1_ext)
tstart1 = hdr_l1['TSTART']
tstop1 = hdr_l1['TSTOP']
if not ( (np.abs(tstart-tstop1) < 20) ^ (np.abs(tstart1-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile1+" matches the grism image\n")
if lfilt2 != None:
if type(lfilt2_ext) == typeNone:
lfilt2_ext = lfilt1_ext+1
lpos = np.where( np.array([lfilt2]) == lfiltnames )
if len(lpos[0]) < 1: sys.stderr.write("WARNING: illegal name for the lenticular filter\n")
lnam = ext_names[lpos]
lfile2 = filestub+lnam[0]+'_sk.img'
hdr_l2 = pyfits.getheader(lfile2,lfilt2_ext)
tstart2 = hdr_l2['TSTART']
tstop2 = hdr_l2['TSTOP']
if not ( (np.abs(tstart-tstop2) < 20) ^ (np.abs(tstart2-tstop) < 20) ):
sys.stderr.write("WARNING: check that "+lfile2+" matches the grism image\n")
if (not lfiltpresent) | (not use_lenticular_image):
method = "grism_only"
else:
method = None
if not senscorr: msg += "WARNING: No correction for sensitivity degradation applied.\n"
# get the USNO-B1 catalog data for the field, & find the zeroth orders
if (not skip_field_src):
if chatter > 2: print("============== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
# retrieve the input angle relative to the boresight
Xphi, Yphi, date1, msg3, lenticular_anchors = findInputAngle( RA, DEC, filestub, ext,
uvotgraspcorr_on=uvotgraspcorr_on, update_pnt=update_pnt, msg="", \
wheelpos=wheelpos, lfilter=lfilt1, lfilter_ext=lfilt1_ext, \
lfilt2=lfilt2, lfilt2_ext=lfilt2_ext, method=method, \
attfile=attfile, catspec=catspec, indir=indir, chatter=chatter)
Yout.update({"Xphi":Xphi,"Yphi":Yphi})
Yout.update({'lenticular_anchors':lenticular_anchors})
# read the anchor and dispersion out of the wavecal file
anker, anker2, C_1, C_2, angle, calibdat, msg4 = getCalData(Xphi,Yphi,wheelpos, date1, \
calfile=calfile, chatter=chatter)
hdrr = pyfits.getheader(specfile,int(ext))
if (hdrr['aspcorr'] == 'UNKNOWN') & (not lfiltpresent):
msg += "WARNING: No aspect solution found. Anchor uncertainty large.\n"
msg += "first order anchor position on detector in det coordinates:\n"
msg += "anchor1=(%8.2f,%8.2f)\n" % (anker[0],anker[1])
msg += "first order dispersion polynomial (distance anchor, \n"
msg += " highest term first)\n"
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order anchor position on detector in det coordinates:\n"
msg += "anchor2=(%8.2f,%8.2f)\n" % (anker2[0],anker2[1])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
#sys.stderr.write( "first order anchor = %s\n"%(anker))
#sys.stderr.write( "second order anchor = %s\n"%(anker2))
msg += "first order dispersion = %s\n"%(str(C_1))
msg += "second order dispersion = %s\n"%(str(C_2))
if chatter > 1:
sys.stderr.write( "first order dispersion = %s\n"%(str(C_1)) )
sys.stderr.write( "second order dispersion = %s\n"%(str(C_2)) )
msg += "lenticular filter anchor positions (det)\n"
msg += msg3
# override angle
if fixed_angle != None:
msg += "WARNING: overriding calibration file angle for extracting \n\t"\
"spectrum cal: "+str(angle)+'->'+str(fixed_angle)+" \n"
angle = fixed_angle
# override anchor position in det pixel coordinates
if anchor_position[0] != None:
cal_anker = anker
anker = np.array(anchor_position)
msg += "overriding anchor position with value [%8.1f,%8.1f]\n" % (anker[0],anker[1])
anker2 = anker2 -cal_anker + anker
msg += "overriding anchor position 2nd order with value [%8.1f,%8.1f]\n"%(anker2[0],anker2[1])
anker_field = np.array([Xphi,Yphi])
theta=np.zeros(5)+angle # use the angle from first order everywhere.
C_0 = np.zeros(3) # not in calibration file. Use uvotcal/zemax to get.
C_3 = np.zeros(3)
Cmin1 = np.zeros(3)
msg += "field coordinates:\n"
msg += "FIELD=(%9.4f,%9.4f)\n" % (Xphi,Yphi)
# order distance between anchors
dist12 = np.sqrt( (anker[0]-anker2[0])**2 + (anker[1]-anker2[1])**2 )
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
Yout.update({"anker":anker,"anker2":anker2,"C_1":C_1,"C_2":C_2,"theta":angle,"dist12":dist12})
# determine x,y locations of certain wavelengths on the image
# TBD: add curvature
if wheelpos < 500:
wavpnt = np.arange(1700,6800,slit_width)
else:
wavpnt = np.arange(2500,6600,slit_width)
dispnt=pixdisFromWave(C_1,wavpnt) # pixel distance to anchor
if chatter > 0: msg2 += 'first order angle at anchor point: = %7.1f\n'%(angle)
crpix = crpix1,crpix2 = hdr['crpix1'],hdr['crpix2']
crpix = np.array(crpix) # centre of image
ankerimg = anker - np.array([1100.5,1100.5])+crpix
xpnt = ankerimg[0] + dispnt*np.cos((180-angle)*np.pi/180)
ypnt = ankerimg[1] + dispnt*np.sin((180-angle)*np.pi/180)
msg += "1st order anchor on image at (%7.1f,%7.1f)\n"%(ankerimg[0],ankerimg[1])
if chatter > 4: msg += "Found anchor point; now extracting spectrum.\n"
if chatter > 2: print("==========Found anchor point; now extracting spectrum ========")
if type(offsetlimit) == typeNone:
if wheelpos > 300:
offsetlimit = 9
sys.stdout.write("automatically set the value for the offsetlimit = "+str(offsetlimit)+'\n')
# find position zeroth order on detector from WCS-S after update from uvotwcs
#if 'hdr' not in Yout:
# hdr = pyfits.getheader(specfile,int(ext))
# Yout.update({'hdr':hdr})
zero_xy_imgpos = [-1,-1]
if chatter > 1: print("zeroth order position on image...")
try:
wS =wcs.WCS(header=hdr,key='S',relax=True,)
zero_xy_imgpos = wS.wcs_world2pix([[RA,DEC]],0)
print("position not corrected for SIP = ", zero_xy_imgpos[0][0],zero_xy_imgpos[0][1])
zero_xy_imgpos = wS.sip_pix2foc(zero_xy_imgpos, 0)[0]
if chatter > 1:
"print zeroth order position on image:",zero_xy_imgpos
except:
pass
Yout.update({'zeroxy_imgpos':zero_xy_imgpos})
# provide some checks on background inputs:
if background_lower[0] != None:
background_lower = np.abs(background_lower)
if np.sum(background_lower) >= (slit_width-10):
background_lower = [None,None]
msg += "WARNING: background_lower set too close to edge image\n Using default\n"
if background_upper[0] != None:
background_upper = np.abs(background_upper)
if np.sum(background_upper) >= (slit_width-10):
background_upper = [None,None]
msg += "WARNING: background_upper set too close to edge image\n Using default\n"
# in case of summary file:
if (not skip_field_src) & (ZOpos == None):
if chatter > 2: print("DEBUG 802 ================== locate zeroth orders due to field sources =============")
if wheelpos > 500: zeroth_blim_offset = 2.5
try:
ZOpos = find_zeroth_orders(filestub, ext, wheelpos,indir=indir,
set_maglimit=set_maglimit,clobber="yes", chatter=chatter, )
except:
if type(sumimage) == typeNone:
print ("exception to call find_zeroth_orders : skip_field_src = ",skip_field_src)
pass
# use for the ftools the downloaded usnob1 catalog in file "search.ub1" using the
# catspec parameter in the calls
if os.access('catalog.spec',os.F_OK) & (catspec == None):
catspec= 'catalog.spec'
if (not skip_field_src):
Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector = ZOpos
pivot_ori=np.array([(ankerimg)[0],(ankerimg)[1]])
Y_ZOpos={"Xim":Xim,"Yim":Yim,"Xa":Xa,"Yb":Yb,"Thet":Thet,"b2mag":b2mag,
"matched":matched,"ondetector":ondetector}
Yout.update({"ZOpos":Y_ZOpos})
else:
Yout.update({"ZOpos":None})
# find background, extract straight slit spectrum
if chatter > 3 : print ("DEBUG 827 compute background")
if sumimage != None:
# initialize parameters for extraction summed extracted image
print('reading summed image file : '+sumimage)
print('ext label for output file is set to : ', ext)
Y6 = sum_Extimage (None, sum_file_name=sumimage, mode='read')
extimg, expmap, exposure, wheelpos, C_1, C_2, dist12, anker, \
(coef0, coef1,coef2,coef3,sig0coef,sig1coef,sig2coef,sig3coef), hdr = Y6
if background_template != None:
background_template = {'extimg': background_template,
'sumimg': True}
if (background_template['extimg'].size != extimg.size):
print("ERROR")
print("background_template.size=",background_template['extimg'].size)
print("extimg.size=",extimg.size)
raise IOError("The template does not match the sumimage dimensions")
msg += "order distance 1st-2nd anchors :\n"
msg += "DIST12=%7.1f\n" % (dist12)
for k in range(len(C_1)):
msg += "DISP1_"+str(k)+"=%12.4e\n" % (C_1[k])
msg += "second order dispersion polynomial (distance anchor2,\n"
msg += " highest term first)\n"
for k in range(len(C_2)):
msg += "DISP2_"+str(k)+"=%12.4e\n" % (C_2[k])
print("first order anchor = ",anker)
print("first order dispersion = %s"%(str(C_1)))
print("second order dispersion = %s"%(str(C_2)))
tstart = hdr['tstart']
ank_c = [100,500,0,2000]
if type(offsetlimit) == typeNone:
offset = 0
elif type(offsetlimit) == list:
offset = offsetlimit[0]-96
ank_c[0] = offsetlimit[0]
else:
offset = offsetlimit # for sumimage used offsetlimit to set the offset
ank_c[0] = 96+offsetlimit
dis = np.arange(-500,1500)
img = extimg
# get background
bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra = findBackground(extimg,
background_lower=background_lower,
background_upper=background_upper,)
if singleside_bkg == 'bg1':
bg2 = bg1
elif singleside_bkg == 'bg2':
bg1 = bg2
else:
pass
skip_field_src = True
spnet = bg1 # placeholder
expo = exposure
maxcounts = exposure/0.01
anker2 = anker + [dist12,0]
spimg,spnetimg,anker_field = None, None, (0.,0.)
m1,m2,aa,wav1 = None,None,None,None
if type(outfile) == typeNone:
outfile='sum_image_'
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef} )
Yout.update({"anker":anker,"anker2":None,
"C_1":C_1,"C_2":C_2,
"Xphi":0.0,"Yphi":0.0,
"wheelpos":wheelpos,"dist12":dist12,
"hdr":hdr,"offset":offset})
Yout.update({"background_1":bg1,"background_2":bg2})
dropout_mask = None
Yout.update({"zeroxy_imgpos":[1000,1000]})
else:
# default extraction
if chatter > 2 : print ("DEBUG 894 default extraction")
# start with a quick straight slit extraction
exSpIm = extractSpecImg(specfile,ext,ankerimg,angle,spwid=spextwidth,
background_lower=background_lower, background_upper=background_upper,
template = background_template, x_offset = anchor_x_offset, ank_c_0offset=ank_c_0offset,
offsetlimit=offsetlimit, replace=replace, chatter=chatter, singleside_bkg=singleside_bkg)
dis = exSpIm['dis']
spnet = exSpIm['spnet']
bg = exSpIm['bg']
bg1 = exSpIm['bg1']
bg2 = exSpIm['bg2']
bgsig = exSpIm['bgsigma']
bgimg = exSpIm['bgimg']
bg_limits_used = exSpIm['bg_limits_used']
bgextra = exSpIm['bgextras']
extimg = exSpIm['extimg']
spimg = exSpIm['spimg']
spnetimg = exSpIm['spnetimg']
offset = exSpIm['offset']
ank_c = exSpIm['ank_c']
if background_template != None:
background_template ={"extimg":exSpIm["template_extimg"]}
Yout.update({"template":exSpIm["template_extimg"]})
if exSpIm['dropouts']:
dropout_mask = exSpIm['dropout_mask']
else: dropout_mask = None
Yout.update({"background_1":bg1,"background_2":bg2})
#msg += "1st order anchor offset from spectrum = %7.1f\n"%(offset)
#msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],ank_c[0])
calibdat = None # free the memory
if chatter > 2: print("============ straight slit extraction complete =================")
if np.max(spnet) < maxcounts: maxcounts = 2.0*np.max(spnet)
# initial limits spectrum (pixels)
m1 = ank_c[1]-400
if wheelpos > 500: m1 = ank_c[1]-370
if m1 < 0: m1 = 0
if m1 < (ank_c[2]+30): m1 = ank_c[2]+30
m2 = ank_c[1]+2000
if wheelpos > 500: m2 = ank_c[1]+1000
if m2 >= len(dis): m2 = len(dis)-2
if m2 > (ank_c[3]-40): m2=(ank_c[3]-40)
aa = list(range(int(m1),int(m2)))
wav1 = polyval(C_1,dis[aa])
# get grism det image
img = pyfits.getdata(specfile, ext)
if isinstance(replace,np.ndarray):
img = replace
try:
offset = np.asarray(offset).item()
except:
pass
Yout.update({"offset":offset})
Zbg = bg, bg1, bg2, bgsig, bgimg, bg_limits_used, bgextra
net = extimg-bgextra[-1]
var = extimg.copy()
dims = np.asarray( img.shape )
dims = np.array([dims[1],dims[0]])
dims2 = np.asarray(extimg.shape)
dims2 = np.array([dims2[1],dims2[0]])
msg += "Lower background from y = %i pix\nLower background to y = %i pix\n" % (bg_limits_used[0],bg_limits_used[1])
msg += "Upper background from y = %i pix\nUpper background to y = %i pix\n" % (bg_limits_used[2],bg_limits_used[3])
msg += "TRACKWID =%4.1f\n" % (trackwidth)
# collect some results:
if sumimage == None:
Y0 = (specfile, lfilt1_, lfilt1_ext_, lfilt2_, lfilt2_ext_, attfile), (method), \
(Xphi, Yphi, date1), (dist12, ankerimg, ZOpos), expmap, bgimg, bg_limits_used, bgextra
else:
Y0 = None, None, None, (dist12, None, None), expmap, bgimg, bg_limits_used, bgextra
angle = 0.0
# curvature from input (TBD how - placeholder with raw_input)
# choose input coef or pick from plot
# choose order to do it for
if (get_curve & interactive) | (get_curve & (get_curve_filename != None)):
if chatter > 3 : print ("DEBUG 978 get user-provided curve coefficients and extract spectrum")
spextwidth = None
# grab coefficients
poly_1 = None
poly_2 = None
poly_3 = None
if get_curve_filename == None:
try:
poly_1 = eval(input("give coefficients of first order polynomial array( [X^3,X^2,X,C] )"))
poly_2 = eval(input("give coefficients of second order polynomial array( [X^2,X,C] )"))
poly_3 = eval(input("give coefficients of third order polynomial array( [X,C] )"))
except:
print("failed")
if (type(poly_1) != list) | (type(poly_2) != list) | (type(poly_3) != list):
print("poly_1 type = ",type(poly_1))
print("poly_2 type = ",type(poly_2))
print("poly_3 type = ",type(poly_3))
raise IOError("the coefficients must be a list")
poly_1 = np.asarray(poly_1)
poly_2 = np.asarray(poly_2)
poly_3 = np.asarray(poly_3)
else:
try:
curfile = rdList(get_curve_filename)
poly_1 = np.array(curfile[0][0].split(','),dtype=float)
poly_2 = np.array(curfile[1][0].split(','),dtype=float)
poly_3 = np.array(curfile[2][0].split(','),dtype=float)
except:
print("There seems to be a problem when readin the coefficients out of the file")
print("The format is a list of coefficient separated by comma's, highest order first")
print("The first line for the first order")
print("The second line for the secons order")
print("The third line for the third order")
print("like, \n1.233e-10,-7.1e-7,3.01e-3,0.0.\n1.233e-5,-2.3e-2,0.03.0\n1.7e-1,0.9\n")
print(get_curve_filename)
print(curfile)
print(poly_1)
print(poly_2)
print(poly_3)
raise IOError("ERROR whilst reading curvature polynomial from file\n")
print("Curvature coefficients were read in...\npoly_1: %s \npoly_2: %s \npoly_3: %s \n"%
(poly_1,poly_2,poly_3))
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown), apercorr, expospec, msg, curved \
= curved_extraction(
extimg, ank_c, anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
predict_second_order=predict2nd,
background_template=background_template,
angle=angle, offset=offset,
poly_1=poly_1, poly_2=poly_2, poly_3=poly_3,
msg=msg, curved=curved,
outfull=True, expmap=expmap,
fit_second=fit_second,
fit_third=fit_second,
C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
# fit_sigmas parameter needs passing
(present0,present1,present2,present3),(q0,q1,q2,q3), (
y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),(
y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),(
x,xstart,xend,sp_all,quality,co_back) = fitorder
# update the anchor y-coordinate
if chatter > 3 : print ("DEBUG 1048 update anchor coordinate\noriginal ank_c=%s\ny1=%s"%(ank_c,y1))
ank_c[0] = y1[int(ank_c[1])]
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
# curvature from calibration
if spextwidth != None:
if chatter > 3 : print ("DEBUG 1067 get curve coefficients from cal file and extract spectrum ")
fitorder, cp2, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,\
bg_second,bg_third), (borderup,borderdown) , apercorr, expospec, msg, curved \
= curved_extraction(
extimg,ank_c,anker, wheelpos,
ZOpos=ZOpos, skip_field_sources=skip_field_src,
offsetlimit=offsetlimit,
background_lower=background_lower,
background_upper=background_upper, \
background_template=background_template,\
angle=angle, offset=offset,
outfull=True, expmap=expmap,
msg = msg, curved=curved,
fit_second=fit_second,
fit_third=fit_second, C_1=C_1,C_2=C_2,dist12=dist12,
dropout_mask=dropout_mask, ifmotion=ifmotion,
obsid=obsid,indir=indir,motion_file=motion_file,
ank_c_0offset=ank_c_0offset,
chatter=chatter,ifextended=ifextended,
fixwidth=fixwidth)
(present0,present1,present2,present3),(q0,q1,q2,q3), \
(y0,dlim0L,dlim0U,sig0coef,sp_zeroth,co_zeroth),(
y1,dlim1L,dlim1U,sig1coef,sp_first,co_first),\
(y2,dlim2L,dlim2U,sig2coef,sp_second,co_second),(
y3,dlim3L,dlim3U,sig3coef,sp_third,co_third),\
(x,xstart,xend,sp_all,quality,co_back) = fitorder
Yfit.update({"coef0":coef0,"coef1":coef1,"coef2":coef2,"coef3":coef3,
"bg_zeroth":bg_zeroth,"bg_first":bg_first,"bg_second":bg_second,"bg_third":bg_third,
"borderup":borderup,"borderdown":borderdown,
"sig0coef":sig0coef,"sig1coef":sig1coef,"sig2coef":sig2coef,"sig3coef":sig3coef,
"present0":present0,"present1":present1,"present2":present2,"present3":present3,
"q0":q0,"q1":q1,"q2":q2,"q3":q3,
"y0":y0,"dlim0L":dlim0L,"dlim0U":dlim0U,"sp_zeroth":sp_zeroth,"bg_zeroth":bg_zeroth,"co_zeroth":co_zeroth,
"y1":y1,"dlim1L":dlim1L,"dlim1U":dlim1U,"sp_first": sp_first, "bg_first": bg_first, "co_first": co_first,
"y2":y2,"dlim2L":dlim2L,"dlim2U":dlim2U,"sp_second":sp_second,"bg_second":bg_second,"co_second":co_second,
"y3":y3,"dlim3L":dlim3L,"dlim3U":dlim3U,"sp_third": sp_third, "bg_third": bg_third, "co_third":co_third,
"x":x,"xstart":xstart,"xend":xend,"sp_all":sp_all,"quality":quality,"co_back":co_back,
"apercorr":apercorr,"expospec":expospec})
ank_c[0] = y1[int(ank_c[1])]
Yout.update({"ank_c":ank_c,"extimg":extimg,"expmap":expmap})
msg += "orders present:"
if present0: msg += "0th order, "
if present1: msg += "first order"
if present2: msg += ", second order"
if present3: msg += ", third order "
print('1224 CCCCCCCCCCCCC', coef1)
print(RA,DEC)
print(anker)
print(ank_c)
msg += '\nparametrized order curvature:\n'
if present0:
for k in range(len(coef0)):
msg += "COEF0_"+str(k)+"=%12.4e\n" % (coef0[k])
if present1:
for k in range(len(coef1)):
msg += "COEF1_"+str(k)+"=%12.4e\n" % (coef1[k])
if present2:
for k in range(len(coef2)):
msg += "COEF2_"+str(k)+"=%12.4e\n" % (coef2[k])
if present3:
for k in range(len(coef3)):
msg += "COEF3_"+str(k)+"=%12.4e\n" % (coef3[k])
msg += '\nparametrized width slit:\n'
if present0:
for k in range(len(sig0coef)):
msg += "SIGCOEF0_"+str(k)+"=%12.4e\n" % (sig0coef[k])
if present1:
for k in range(len(sig1coef)):
msg += "SIGCOEF1_"+str(k)+"=%12.4e\n" % (sig1coef[k])
if present2:
for k in range(len(sig2coef)):
msg += "SIGCOEF2_"+str(k)+"=%12.4e\n" % (sig2coef[k])
if present3:
for k in range(len(sig3coef)):
msg += "SIGCOEF3_"+str(k)+"=%12.4e\n" % (sig3coef[k])
if chatter > 3 : print ("DEBUG 1142 done spectral extraction, now calibrate")
offset = ank_c[0]-slit_width/2
msg += "best fit 1st order anchor offset from spectrum = %7.1f\n"%(offset)
msg += "anchor position in rotated extracted spectrum (%6.1f,%6.1f)\n"%(ank_c[1],y1[int(ank_c[1])])
msg += msg4
Yout.update({"offset":offset})
#2012-02-20 moved updateFitorder to curved_extraction
#if curved == "update":
# fit = fitorder2
#else:
# fit = fitorder
fit = fitorder
if optimal_extraction:
# development dropped, since mod8 causes slit width oscillations
# also requires a good second order flux and coi calibration for
# possible further development of order splitting.
# results are not consistent now.
print("Starting optimal extraction: This can take a few minutes ......\n\t "\
"........\n\t\t .............")
Y3 = get_initspectrum(net,var,fit,160,ankerimg,C_1=C_1,C_2=C_2,dist12=dist12,
predict2nd=predict2nd,
chatter=1)
counts, variance, borderup, borderdown, (fractions,cnts,vars,newsigmas) = Y3
# need to test that C_2 is valid here
if predict2nd:
Y4 = predict_second_order(dis,(sp_first-bg_first), C_1,C_2, dist12, quality,dlim1L, dlim1U,wheelpos)
wav2p, dis2p, flux2p, qual2p, dist12p = Y4[0]
# retrieve the effective area
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=1,arf=fluxcalfile,msg=msg,chatter=chatter)
EffArea1 = Y7[:-1]
msg = Y7[-1]
Y7 = readFluxCalFile(wheelpos,anchor=anker,spectralorder=2,arf=None,msg=msg,chatter=chatter)
if type(Y7) == tuple:
EffArea2 = Y7[:-1]
else:
if type(Y7) != typeNone: msg = Y7
EffArea2 = None
# note that the output differs depending on parameters given, i.e., arf, anchor
Yout.update({"effarea1":EffArea1,"effarea2":EffArea2})
if interactive:
import matplotlib.pyplot as plt
if (plot_img) & (sumimage == None):
#plt.winter()
# make plot of model on image [figure 1]
#xa = np.where( (dis < 1400) & (dis > -300) )
bga = bg.copy()
fig1 = plt.figure(1); plt.clf()
img[img <=0 ] = 1e-16
plt.imshow(np.log(img),vmin=np.log(bga.mean()*0.1),vmax=np.log(bga.mean()*4))
levs = np.array([5,15,30,60,120,360]) * bg.mean()
if highlight: plt.contour(img,levels=levs)
# plot yellow wavelength marker
# TBD : add curvature
plt.plot(xpnt,ypnt,'+k',markersize=14)
if not skip_field_src:
plot_ellipsoid_regions(Xim,Yim,
Xa,Yb,Thet,b2mag,matched,ondetector,
pivot_ori,pivot_ori,dims,17.,)
if zoom:
#plt.xlim(np.max(np.array([0.,0.])),np.min(np.array([hdr['NAXIS1'],ankerimg[0]+400])))
#plt.ylim(np.max(np.array([0.,ankerimg[1]-400 ])), hdr['NAXIS2'])
plt.xlim(0,2000)
plt.ylim(0,2000)
else:
plt.xlim(0,2000)
plt.ylim(0,2000)
plt.savefig(indir+'/'+obsid+'_map.png',dpi=150)
#plt.show()
plt.close()
if (plot_raw):
#plt.winter()
nsubplots = 2
#if not fit_second: nsubplots=3
# make plot of spectrum [figure 2]
fig2 = plt.figure(2); plt.clf()
plt.subplots_adjust(top=1,hspace=0, wspace=0)
# image slice
ax21 = plt.subplot(nsubplots,1,1)
ac = -ank_c[1]
net[net<=0.] = 1e-16
#plt.imshow(np.log10(net),vmin=-0.8,vmax=0.8, #~FIXME:
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower',cmap=plt.cm.winter)
plt.imshow(np.log10(net),vmin=-10,vmax=2,
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')#,cmap=plt.cm.winter)
#plt.imshow(extimg,vmin=0,vmax=50,
# extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
# origin='lower')#,cmap=plt.cm.winter)
if highlight:
plt.contour(np.log10(net),levels=[1,1.3,1.7,2.0,3.0],
extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]),
origin='lower')
#plt.imshow( extimg,vmin= (bg1.mean())*0.1,vmax= (bg1.mean()+bg1.std())*2, extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]) )
#levels = np.array([5,10,20,40,70,90.])
#levels = spnet[ank_c[2]:ank_c[3]].max() * levels * 0.01
#if highlight: plt.contour(net,levels=levels,extent=(ac,ac+extimg.shape[1],0,extimg.shape[0]))
# cross_section_plot:
cp2 = cp2/np.max(cp2)*100
#plt.plot(ac+cp2+ank_c[1],np.arange(len(cp2)),'k',lw=2,alpha=0.6,ds='steps') #~TODO:
# plot zeroth orders
if not skip_field_src:
pivot= np.array([ank_c[1],ank_c[0]-offset])
#pivot_ori=ankerimg
mlim = 17.
if wheelpos > 500: mlim = 15.5
plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,
matched,ondetector,
pivot,pivot_ori,
dims2,mlim,
img_angle=angle-180.0,ax=ax21)
# plot line on anchor location
#plt.plot([ac+ank_c[1],ac+ank_c[1]],[0,slit_width],'k',lw=2)
plt.plot(0,ank_c[0],'kx',markersize=5) #~TODO:
# plot position centre of orders
#if present0: plt.plot(ac+q0[0],y0[q0[0]],'k--',lw=1.2)
#plt.plot( ac+q1[0],y1[q1[0]],'k--',lw=1.2)
#if present2: plt.plot(ac+q2[0],y2[q2[0]],'k--',alpha=0.6,lw=1.2)
#if present3: plt.plot(ac+q3[0],y3[q3[0]],'k--',alpha=0.3,lw=1.2)
# plot borders slit region
if present0:
plt.plot(ac+q0[0],borderup [0,q0[0]],'r-')
plt.plot(ac+q0[0],borderdown[0,q0[0]],'r-')
if present1:
plt.plot(ac+q1[0],borderup [1,q1[0]],'r-',lw=1.2)
plt.plot(ac+q1[0],borderdown[1,q1[0]],'r-',lw=1.2)
if present2:
plt.plot(ac+q2[0],borderup [2,q2[0]],'r-',alpha=0.6,lw=1)
plt.plot(ac+q2[0],borderdown[2,q2[0]],'r-',alpha=0.6,lw=1)
if present3:
plt.plot(ac+q3[0],borderup [3,q3[0]],'r-',alpha=0.3,lw=1.2)
plt.plot(ac+q3[0],borderdown[3,q3[0]],'r-',alpha=0.3,lw=1.2)
# plot limits background
plt_bg = np.ones(len(q1[0]))
if (background_lower[0] == None) & (background_upper[0] == None):
background_lower = [0,50] ; background_upper = [slit_width-50,slit_width]
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
else:
if background_lower[0] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]-background_lower[1]),'-k',lw=1.5 )
elif background_lower[1] != None:
plt.plot(ac+q1[0],plt_bg*(background_lower[1]),'-k',lw=1.5 )
if background_upper[1] != None:
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[0]),'-k',lw=1.5 )
plt.plot(ac+q1[0],plt_bg*(y1[int(ank_c[1])]+background_upper[1]),'-k',lw=1.5 )
elif background_upper[0] != None:
plt.plot(ac+q1[0],plt_bg*(background_upper[0]),'-k',lw=1.5 )
# rescale, title
plt.ylim(0,slit_width)
#plt.ylim(50,150)
if not zoom:
xlim1 = ac+ank_c[2]
xlim2 = ac+ank_c[3]
else:
xlim1 = max(ac+ank_c[2], -420)
xlim2 = min(ac+ank_c[3],1400)
plt.xlim(xlim1,xlim2)
plt.title(obsid+'+'+str(ext))
# first order raw data plot
ax22 = plt.subplot(nsubplots,1,2)
plt.rcParams['legend.fontsize'] = 'small'
if curved == 'straight':
p1, = plt.plot( dis[ank_c[2]:ank_c[3]], spnet[ank_c[2]:ank_c[3]],'k',
ds='steps',lw=0.5,alpha=0.5,label='straight')
p2, = plt.plot( dis[ank_c[2]:ank_c[3]],
spextwidth*(bg1[ank_c[2]:ank_c[3]]+bg2[ank_c[2]:ank_c[3]])*0.5,
'b',alpha=0.5,label='background')
plt.legend([p1,p2],['straight','background'],loc=0,)
if curved != "straight":
p3, = plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'r',ds='steps',label='spectrum')
plt.plot(x[q1[0]],(sp_first-bg_first)[q1[0]],'k',alpha=0.2,ds='steps',label='_nolegend_')
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.5,lw=1.1,ds='steps',label='background')
# bad pixels:
qbad = np.where(quality[q1[0]] > 0)
p4, = plt.plot(x[qbad],(sp_first-bg_first)[qbad],'xk',markersize=4)
#p7, = plt.plot(x[q1[0]],(bg_first)[q1[0]],'r-',alpha=0.3,label='curve_bkg')
# annotation
#plt.legend([p3,p4,p7],['spectrum','suspect','background'],loc=0,)
plt.legend([p3,p7],['spectrum','background'],loc=0,)
maxbg = np.max(bg_first[q1[0]][np.isfinite(bg_first[q1[0]])])
topcnt = 1.2 * np.max([np.max(spnet[q1[0]]),maxbg, np.max((sp_first-bg_first)[q1[0]])])
plt.ylim(np.max([ -20, np.min((sp_first-bg_first)[q1[0]])]), np.min([topcnt, maxcounts]))
if optimal_extraction:
p5, = plt.plot(x[q1[0]],counts[1,q1[0]],'g',alpha=0.5,ds='steps',lw=1.2,label='optimal' )
p6, = plt.plot(x[q1[0]],counts[1,q1[0]],'k',alpha=0.5,ds='steps',lw=1.2,label='_nolegend_' )
p7, = plt.plot(x[q1[0]], bg_first[q1[0]],'y',alpha=0.7,lw=1.1,ds='steps',label='background')
plt.legend([p3,p5,p7],['spectrum','optimal','background'],loc=0,)
topcnt = 1.2 * np.max((sp_first-bg_first)[q1[0]])
ylim1,ylim2 = -10, np.min([topcnt, maxcounts])
plt.ylim( ylim1, ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('1st order counts')
'''
# plot second order
ax23 = plt.subplot(nsubplots,1,3)
plt.rcParams['legend.fontsize'] = 'small'
#plt.xlim(ank_c[2],ank_c[3])
if fit_second:
if curved != 'straight':
p1, = plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'r',label='spectrum')
plt.plot(x[q2[0]],(sp_second-bg_second)[q2[0]],'k',alpha=0.2,label='_nolegend_')
p7, = plt.plot(x[q2[0]],(bg_second)[q2[0]],'y',alpha=0.7,lw=1.1,label='background')
qbad = np.where(quality[q2[0]] > 0)
p2, = plt.plot(x[qbad],(sp_second-bg_second)[qbad],'+k',alpha=0.3,label='suspect')
plt.legend((p1,p7,p2),('spectrum','background','suspect'),loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
if optimal_extraction:
p3, = plt.plot(x[q2[0]],counts[2,q2[0]],'g',alpha=0.5,ds='steps',label='optimal' )
plt.legend((p1,p7,p2,p3),('spectrum','background','suspect','optimal',),loc=2)
#plt.ylim(np.max([ -10,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
if predict2nd :
p4, = plt.plot(dis2p+dist12,flux2p, ds='steps',label='predicted')
p5, = plt.plot(dis2p[np.where(qual2p != 0)]+dist12,flux2p[np.where(qual2p != 0)],'+k',label='suspect',markersize=4)
if optimal_extraction & fit_second:
plt.legend((p1,p2,p3,p4,p5),('curved','suspect','optimal','predicted','suspect'),loc=2)
#plt.ylim(np.max([ -100,np.min(counts[2,q2[0]]), np.min((sp_second-bg_second)[q2[0]])]),\
# np.min([np.max(counts[2,q2[0]]), np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
elif optimal_extraction:
plt.legend((p1,p7,p4,p5),('curved','background','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
elif fit_second:
plt.legend((p1,p2,p4,p5),('curved','suspect','predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
else:
plt.legend((p4,p5),('predicted','suspect'),loc=2)
plt.ylim(np.max([ -10, np.min((sp_second-bg_second)[q2[0]])]), \
np.min([np.max((sp_second-bg_second)[q2[0]]), maxcounts]))
plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel('2nd order counts')
'''
'''
if fit_second:
ax24 = plt.subplot(nsubplots,1,4)
plt.rcParams['legend.fontsize'] = 'small'
if (len(q3[0]) > 1) & (curved != "xxx"):
p1, = plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'r',label='spectrum')
plt.plot(x[q3[0]],(sp_third-bg_third)[q3[0]],'k',alpha=0.2,label='_nolegend_')
qbad = np.where(quality[q3[0]] > 0)
p2, = plt.plot(x[qbad],(sp_third-bg_third)[qbad],'xk',alpha=0.3,label='suspect')
p3, = plt.plot(x[q3[0]],bg_third[q3[0]],'y',label='background')
plt.legend([p1,p3,p2],['spectrum','background','suspect'],loc=2)
plt.ylim(np.max([ -100, np.min((sp_second-bg_second)[q3[0]])]),\
np.min([np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
if optimal_extraction:
p4, = plt.plot(x[q3[0]],counts[3,q3[0]],'b',alpha=0.5,ds='steps',label='optimal' )
plt.legend([p1,p3,p2,p4],['spectrum','background','suspect','optimal',],loc=2)
#plt.ylim(np.max([ -100,np.min(counts[3,q3[0]]), np.min((sp_second-bg_second)[q3[0]])]),\
# np.min([np.max(counts[3,q3[0]]), np.max((sp_third-bg_third)[q3[0]]), maxcounts]))
plt.ylim( ylim1,ylim2 )
#plt.xlim(ank_c[2]-ank_c[1],ank_c[3]-ank_c[1])
plt.xlim(xlim1,xlim2)
plt.ylabel(u'3rd order counts')
plt.xlabel(u'pixel distance from anchor position')
'''
plt.savefig(indir+'/'+obsid+'_count.png',dpi=150)
#plt.show()
if (plot_spec):
#plt.winter()
# NEED the flux cal applied!
nsubplots = 1
if not fit_second:
nsubplots = 1
fig3 = plt.figure(3)
plt.clf()
wav1 = polyval(C_1,x[q1[0]])
ax31 = plt.subplot(nsubplots,1,1)
if curved != "xxx":
# PSF aperture correction applies on net rate, but background
# needs to be corrected to default trackwidth linearly
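# (2.5 is the default trackwidth, in units of the gaussian sigma of the trace)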
rate1 = ((sp_first[q1[0]]-bg_first[q1[0]] ) * apercorr[1,[q1[0]]]
/expospec[1,[q1[0]]]).flatten()
bkgrate1 = ((bg_first)[q1[0]] * (2.5/trackwidth)
/expospec[1,[q1[0]]]).flatten()
print("computing flux for plot; frametime =",framtime)
flux1,wav1,coi_valid1 = rate2flux(wav1,rate1, wheelpos,
bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]],
#sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, effarea1=EffArea1,
spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker,
#option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
#flux1_err = 0.5*(rate2flux(,,rate+err,,) - rate2flux(,,rate-err,,))
p1, = plt.plot(wav1[np.isfinite(flux1)],flux1[np.isfinite(flux1)],
color='darkred',label=u'curved')
p11, = plt.plot(wav1[np.isfinite(flux1)&(coi_valid1==False)],
flux1[np.isfinite(flux1)&(coi_valid1==False)],'.',
color='lawngreen',
label="too bright")
# PROBLEM quality flags !!!
qbad1 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] < 16))
qbad2 = np.where((quality[np.array(x[q1[0]],dtype=int)] > 0) & (quality[np.array(x[q1[0]],dtype=int)] == qflag.get("bad")))
plt.legend([p1,p11],[u'calibrated spectrum',u'too bright - not calibrated'])
if len(qbad2[0]) > 0:
p2, = plt.plot(wav1[qbad2],flux1[qbad2],
'+k',markersize=4,label=u'bad data')
plt.legend([p1,p2],[u'curved',u'bad data'])
plt.ylabel(u'1st order flux $(erg\ cm^{-2} s^{-1} \AA^{-1})$')
# find reasonable limits flux
get_flux_limit = flux1[int(len(wav1)*0.3):int(len(wav1)*0.7)]
get_flux_limit[get_flux_limit==np.inf] = np.nan
get_flux_limit[get_flux_limit==-np.inf]= np.nan
qf = np.nanmax(get_flux_limit)
if qf > 2e-12:
qf = 2e-12
plt.ylim(0.001*qf,1.2*qf)
plt.xlim(1600,6000)
if optimal_extraction: # no longer supported (2013-04-24)
print("OPTIMAL EXTRACTION IS NO LONGER SUPPORTED")
wav1 = np.polyval(C_1,x[q1[0]])
#flux1 = rate2flux(wav1, counts[1,q1[0]]/expo, wheelpos, spectralorder=1, arf1=fluxcalfile)
flux1,wav1,coi_valid1 = rate2flux(wav1,counts[1,q1[0]]/expo, wheelpos, bkgrate=bkgrate1,
co_sprate = (co_first[q1[0]]/expospec[1,[q1[0]]]).flatten(),
co_bgrate = (co_back [q1[0]]/expospec[1,[q1[0]]]).flatten(),
pixno=x[q1[0]], #sig1coef=sig1coef, sigma1_limits=[2.6,4.0],
arf1=fluxcalfile, arf2=None, spectralorder=1, swifttime=tstart,
#trackwidth = trackwidth,
anker=anker, #option=1, fudgespec=1.32,
frametime=framtime,
debug=False,chatter=1)
p3, = plt.plot(wav1, flux1,'g',alpha=0.5,ds='steps',lw=2,label='optimal' )
p4, = plt.plot(wav1,flux1,'k',alpha=0.5,ds='steps',lw=2,label='_nolegend_' )
#plt.legend([p1,p2,p3],['curved','suspect','optimal'],loc=0,)
plt.legend([p1,p3],['curved','optimal'],loc=0,)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
plt.ylabel(u'1st order count rate')
plt.xlim(np.min(wav1)-10,np.max(wav1))
plt.title(obsid+'+'+str(ext))
'''
if fit_second:
ax32 = plt.subplot(nsubplots,1,2)
plt.plot([1650,3200],[0,1])
plt.text(2000,0.4,'NO SECOND ORDER DATA',fontsize=16)
if curved != 'xxx':
wav2 = polyval(C_2,x[q2[0]]-dist12)
rate2 = ((sp_second[q2[0]]-bg_second[q2[0]])*
apercorr[2,[q2[0]]].flatten()/expospec[2,[q2[0]]].flatten() )
bkgrate2 = ((bg_second)[q2[0]] * (2.5/trackwidth)
/expospec[2,[q2[0]]]).flatten()
flux2,wav2,coi_valid2 = rate2flux(wav2, rate2, wheelpos,
bkgrate=bkgrate2,
co_sprate = (co_second[q2[0]]/expospec[2,[q2[0]]]).flatten(),
co_bgrate = (co_back [q2[0]]/expospec[2,[q2[0]]]).flatten(),
pixno=x[q2[0]],
arf1=fluxcalfile, arf2=None,
frametime=framtime, effarea2=EffArea2,
spectralorder=2,swifttime=tstart,
anker=anker2,
debug=False,chatter=1)
#flux1_err = rate2flux(wave,rate_err, wheelpos, spectralorder=1,)
plt.cla()
print('#############################')
print(wav2[100],flux2[100],wav2,flux2)
p1, = plt.plot(wav2,flux2,'r',label='curved')
plt.plot(wav2,flux2,'k',alpha=0.2,label='_nolegend_')
qbad1 = np.where((quality[np.array(x[q2[0]],dtype=int)] > 0) & (quality[np.array(x[q2[0]],dtype=int)] < 16))
p2, = plt.plot(wav2[qbad1],flux2[qbad1],'+k',markersize=4,label='suspect data')
plt.legend(['uncalibrated','suspect data'])
plt.ylabel(u'estimated 2nd order flux')
plt.xlim(1600,3200)
qf = (flux1 > 0.) & (flux1 < 1.0e-11)
if np.sum(qf[0]) > 0:
plt.ylim( -0.01*np.max(flux1[qf]), 1.2*np.max(flux1[qf]) )
#else: plt.ylim(1e-16,2e-12)
else: plt.ylim(1e-12,1e-11)
# final fix to limits of fig 3,1
y31a,y31b = ax31.get_ylim()
setylim = False
if y31a < 1e-16:
y31a = 1e-16
setylim = True
if y31b > 1e-12:
y31b = 1e-12
setylim = True
if setylim: ax31.set_ylim(bottom=y31a,top=y31b)
#
'''
plt.xlabel(u'$\lambda(\AA)$',fontsize=16)
plt.savefig(indir+'/'+obsid+'_flux.png',dpi=150)
# to plot the three figures
#plt.show()
# output parameter
Y1 = ( (dis,spnet,angle,anker,anker2,anker_field,ank_c), (bg,bg1,bg2,extimg,spimg,spnetimg,offset),
(C_1,C_2,img), hdr,m1,m2,aa,wav1 )
# output parameter
Y2 = fit, (coef0,coef1,coef2,coef3), (bg_zeroth,bg_first,
bg_second,bg_third), (borderup,borderdown), apercorr, expospec
Yout.update({"Yfit":Yfit})
# writing output to a file
#try:
if wr_outfile: # write output file
if ((chatter > 0) & (not clobber)): print("trying to write output files")
import uvotio
if (curved == 'straight') & (not optimal_extraction):
ank_c2 = np.copy(ank_c) ; ank_c2[1] -= m1
F = uvotio.wr_spec(RA,DEC,filestub,ext,
hdr,anker,anker_field[0],anker_field[1],
dis[aa],wav1,
spnet[aa]/expo,bg[aa]/expo,
bg1[aa]/expo,bg2[aa]/expo,
offset,ank_c2,extimg, C_1,
history=None,chatter=1,
clobber=clobber,
calibration_mode=calmode,
interactive=interactive)
elif not optimal_extraction:
if fileversion == 2:
Y = Yout
elif fileversion == 1:
Y = (Y0,Y1,Y2,Y4)
F = uvotio.writeSpectrum(RA,DEC,filestub,ext, Y,
fileoutstub=outfile,
arf1=fluxcalfile, arf2=None,
fit_second=fit_second,
write_rmffile=write_RMF, fileversion=1,
used_lenticular=use_lenticular_image,
history=msg,
calibration_mode=calmode,
chatter=chatter,
clobber=clobber )
elif optimal_extraction:
Y = (Y0,Y1,Y2,Y3,Y4)
F = uvotio.OldwriteSpectrum(RA,DEC,filestub,ext, Y, mode=2,
quality=quality, interactive=False,fileout=outfile,
updateRMF=write_rmffile, \
history=msg, chatter=5, clobber=clobber)
#except (RuntimeError, IOError, ValueError):
# print "ERROR writing output files. Try to call uvotio.wr_spec."
# pass
# clean up fake file
if tempntags.__contains__('fakefilestub'):
filestub = tempnames[tempntags.index('fakefilestub')]
os.system('rm '+indir+filestub+'ufk_??.img ')
# update Figure 3 to use the flux...
# TBD
# write the summary
sys.stdout.write(msg)
sys.stdout.write(msg2)
flog = open(logfile,'a')
flog.write(msg)
flog.write(msg2)
flog.close()
#plt.show()
if give_result: return Y0, Y1, Y2, Y3, Y4
if give_new_result: return Yout
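# Illustrative call sketch (not part of the original pipeline code): a minimal
# invocation of the extraction routine above, assuming it is getSpec(RA, DEC,
# obsid, ext, ...) as the '# parameters getSpec()' comment in the body indicates.
# The coordinates, obsid, directory and offsetlimit below are made-up placeholders.
def _example_getSpec_call():
    ra, dec = 254.7593, 34.3932            # target position in decimal degrees (placeholder)
    obsid, ext = '00032911033', 1          # observation id and image extension (placeholder)
    return getSpec(ra, dec, obsid, ext,
                   indir='./00032911033/uvot/image',
                   wr_outfile=True,                 # write the flux-calibrated output file
                   offsetlimit=[100, 2],            # [centre, search distance] in pixels
                   chatter=1)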
def extractSpecImg(file,ext,anker,angle,anker0=None,anker2=None, anker3=None,\
searchwidth=35,spwid=13,offsetlimit=None, fixoffset=None,
background_lower=[None,None], background_upper=[None,None],
template=None, x_offset = False, ank_c_0offset=False, replace=None,
clobber=True,chatter=2,singleside_bkg=False):
'''
extract the grism image of spectral orders plus background
using the reference point at 2600A in first order.
Parameters
----------
file : str
input file location
ext : int
extension of image
anker : list, ndarray
X,Y coordinates of the 2600A (1) point on the image in image coordinates
angle : float
angle of the spectrum at 2600A in first order from zemax e.g., 28.8
searchwidth : float
find spectrum with this possible offset ( in crowded fields
it should be set to a smaller value)
template : dictionary
template for the background.
use_rectext : bool
If True then the HEADAS uvotimgrism program rectext is used to extract the image
This is a better way than using ndimage.rotate() which does some weird smoothing.
offsetlimit : None, float/int, list
if None, search for y-offset predicted anchor to spectrum using searchwidth
if float/int number, search for offset only up to a distance as given from y=100
if list, two elements, no more. [y-value, delta-y] for search of offset.
if delta-y < 1, fixoffset = y-value.
History
-------
2011-09-05 NPMK changed interpolation in rotate to linear, added a mask image to
make sure to keep track of the new pixel area.
2011-09-08 NPMK incorporated rectext as new extraction and removed interactive plot,
curved, and optimize which are now elsewhere.
2014-02-28 Add template for the background as an option
2014-08-04 add option to provide a 2-element list for the offsetlimit to constrain
the offset search range.
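Example
-------
Illustrative only; the file name and anchor values are made-up placeholders::
    out = extractSpecImg('sw00032911033ugu_dt.img', 1, [1100.5, 1100.5], 28.8,
                         spwid=13, offsetlimit=[100, 2])
    extimg = out['extimg']   # rotated extraction sub-image; out['bg1'], out['bg2'] hold the backgrounds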
'''
import numpy as np
import os, sys
try:
from astropy.io import fits as pyfits
except:
import pyfits
import scipy.ndimage as ndimage
#out_of_img_val = -1.0123456789 now a global
Tmpl = (template != None)
if Tmpl:
if template['sumimg']:
raise IOError("extractSpecImg should not be called when there is sumimage input")
if chatter > 4:
print('extractSpecImg parameters: file, ext, anker, angle')
print(file,ext)
print(anker,angle)
print('searchwidth,chatter,spwid,offsetlimit, :')
print(searchwidth,chatter,spwid,offsetlimit)
img, hdr = pyfits.getdata(file,ext,header=True)
if isinstance(replace,np.ndarray):
img = replace
# wcs_ = wcs.WCS(header=hdr,) # detector coordinates DETX,DETY in mm
# wcsS = wcs.WCS(header=hdr,key='S',relax=True,) # TAN-SIP coordinate type
if Tmpl:
if (img.shape != template['template'].shape) :
print("ERROR")
print("img.shape=", img.shape)
print("background_template.shape=",template['template'].shape)
raise IOError("The templare array does not match the image")
wheelpos = hdr['WHEELPOS']
if chatter > 4: print('wheelpos:', wheelpos)
if not use_rectext:
# now we want to extend the image array and place the anchor at the centre
s1 = 0.5*img.shape[0]
s2 = 0.5*img.shape[1]
d1 = -(s1 - anker[1]) # distance of anker to centre img
d2 = -(s2 - anker[0])
n1 = 2.*abs(d1) + img.shape[0] + 400 # extend img with 2.x the distance of anchor
n2 = 2.*abs(d2) + img.shape[1] + 400
#return img, hdr, s1, s2, d1, d2, n1, n2
if 2*int(n1/2) == int(n1): n1 = n1 + 1
if 2*int(n2/2) == int(n2): n2 = n2 + 1
c1 = n1 / 2 - anker[1]
c2 = n2 / 2 - anker[0]
n1 = int(n1)
n2 = int(n2)
c1 = int(c1)
# Copyright 2022 <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nets.dynamic.base import DynamicNetBase
from utils.functions.misc import deterministic_set, find_sublist_index
# Description of this Recurrent Network of Dynamic Complexity (pages 18-21)
# https://papyrus.bib.umontreal.ca/xmlui/bitstream/handle/1866/26072/Le_Clei_Maximilien_2021_memoire.pdf#page=18
class Net(DynamicNetBase):
def __init__(self, arg_input, arg_output):
self.d_input = arg_input if isinstance(arg_input, int) else None
self.input_net = arg_input if not isinstance(arg_input, int) else None
self.d_output = arg_output if isinstance(arg_output, int) else None
self.output_net = arg_output if not isinstance(arg_output,
int) else None
self.nodes = {'all': [], 'input': [], 'hidden': [], 'output': [],
'receiving': [], 'emitting': [], 'being pruned': [],
'layered': []}
self.nb_nodes_grown = 0
self.architectural_mutations = [self.grow_node,
self.prune_node,
self.grow_connection,
self.prune_connection]
def initialize_architecture(self):
if self.d_input != None:
for _ in range(self.d_input):
self.grow_node('input')
if self.d_output != None:
for _ in range(self.d_output):
self.grow_node('output')
def mutate_parameters(self):
for node in self.nodes['hidden'] + self.nodes['output']:
node.mutate_parameters()
def grow_node(self, type='hidden'):
if type == 'input':
new_input_node = Node('input', self.nb_nodes_grown)
self.nb_nodes_grown += 1
self.nodes['all'].append(new_input_node)
self.nodes['input'].append(new_input_node)
self.nodes['receiving'].append(new_input_node)
if len(self.nodes['layered']) == 0:
self.nodes['layered'].append([])
self.nodes['layered'][0].append(new_input_node)
return new_input_node
elif type == 'output':
new_output_node = Node('output', self.nb_nodes_grown)
self.nb_nodes_grown += 1
self.nodes['all'].append(new_output_node)
self.nodes['output'].append(new_output_node)
while len(self.nodes['layered']) < 2:
self.nodes['layered'].append([])
self.nodes['layered'][-1].append(new_output_node)
return new_output_node
else: # type == 'hidden'
potential_in_nodes = deterministic_set(self.nodes['receiving'])
in_node_1 = np.random.choice(potential_in_nodes)
potential_in_nodes.remove(in_node_1)
if len(potential_in_nodes) != 0:
in_node_2 = np.random.choice(potential_in_nodes)
out_node = np.random.choice(self.nodes['hidden'] +
self.nodes['output'])
new_hidden_node = Node('hidden', self.nb_nodes_grown)
self.nb_nodes_grown += 1
self.grow_connection(in_node_1, new_hidden_node)
if len(potential_in_nodes) != 0:
self.grow_connection(in_node_2, new_hidden_node)
self.grow_connection(new_hidden_node, out_node)
in_node_1_layer = find_sublist_index(in_node_1,
self.nodes['layered'])
out_node_layer = find_sublist_index(out_node,
self.nodes['layered'])
layer_difference = out_node_layer - in_node_1_layer
self.nodes['all'].append(new_hidden_node)
self.nodes['hidden'].append(new_hidden_node)
if abs(layer_difference) > 1:
self.nodes['layered'][in_node_1_layer +
np.sign(layer_difference)].append(new_hidden_node)
else:
if layer_difference == 1:
latest_layer = out_node_layer
else: # layer_difference == -1 or layer_difference == 0:
latest_layer = in_node_1_layer
self.nodes['layered'].insert(latest_layer, [])
self.nodes['layered'][latest_layer].append(new_hidden_node)
def grow_connection(self, in_node=None, out_node=None):
if in_node == None:
potential_in_nodes = deterministic_set(self.nodes['receiving'])
for node in self.nodes['being pruned']:
while node in potential_in_nodes:
potential_in_nodes.remove(node)
if out_node != None:
for node in out_node.in_nodes:
potential_in_nodes.remove(node)
if len(potential_in_nodes) == 0:
return
in_node = np.random.choice(potential_in_nodes)
if out_node == None:
potential_out_nodes = self.nodes['hidden'] + self.nodes['output']
for node in self.nodes['being pruned']:
while node in potential_out_nodes:
potential_out_nodes.remove(node)
for node in in_node.out_nodes:
potential_out_nodes.remove(node)
if len(potential_out_nodes) == 0:
return
out_node = np.random.choice(potential_out_nodes)
in_node.connect_to(out_node)
self.nodes['receiving'].append(out_node)
self.nodes['emitting'].append(in_node)
def prune_node(self, node=None):
if node == None:
if len(self.nodes['hidden']) == 0:
return
node = np.random.choice(self.nodes['hidden'])
if node in self.nodes['being pruned']:
return
self.nodes['being pruned'].append(node)
for out_node in node.out_nodes.copy():
self.prune_connection(node, out_node, node)
for in_node in node.in_nodes.copy():
self.prune_connection(in_node, node, node)
for key in self.nodes:
if key == 'layered':
node_layer = find_sublist_index(node, self.nodes['layered'])
self.nodes['layered'][node_layer].remove(node)
if (node_layer != 0
and node_layer != len(self.nodes['layered']) - 1):
if self.nodes['layered'][node_layer] == []:
self.nodes['layered'].remove(
self.nodes['layered'][node_layer])
else:
while node in self.nodes[key]:
self.nodes[key].remove(node)
def prune_connection(self,
in_node=None,
out_node=None,
calling_node=None):
if in_node == None:
if len(self.nodes['emitting']) == 0:
return
in_node = np.random.choice(self.nodes['emitting'])
if out_node == None:
out_node = np.random.choice(in_node.out_nodes)
connection_was_already_pruned = in_node.disconnect_from(out_node)
if connection_was_already_pruned:
return
self.nodes['receiving'].remove(out_node)
self.nodes['emitting'].remove(in_node)
if in_node != calling_node:
if in_node not in self.nodes['emitting']:
if in_node in self.nodes['input']:
if self.input_net != None:
self.grow_connection(in_node=in_node)
elif in_node in self.nodes['hidden']:
self.prune_node(in_node)
if out_node != calling_node:
if out_node not in self.nodes['receiving']:
if out_node in self.nodes['output']:
if self.output_net != None:
self.grow_connection(out_node=out_node)
elif out_node in self.nodes['hidden']:
self.prune_node(out_node)
for node in [in_node, out_node]:
if (node != calling_node
and node not in self.nodes['being pruned']):
if node in self.nodes['hidden']:
if node.in_nodes == [node] or node.out_nodes == [node]:
self.prune_node(node)
elif node in self.nodes['output']:
if node.in_nodes == [node] and node.out_nodes == [node]:
self.prune_connection(node, node)
# This method is for use when combining with other networks.
def handle(self, source, action, i=None):
if source == self.input_net:
if action == '+ node':
new_input_node = self.grow_node('input')
self.grow_connection(new_input_node, None)
if self.output_net != None:
for output_node in self.nodes['output']:
if output_node not in self.nodes['receiving']:
self.grow_connection(None, output_node)
else: # action == '- node':
self.prune_node(self.nodes['input'][i])
else: # source == self.output_net:
if action == '+ node':
new_output_node = self.grow_node('output')
self.grow_connection(None, new_output_node)
for input_node in self.nodes['input']:
if input_node not in self.nodes['emitting']:
self.grow_connection(input_node, None)
else: # action == '- node':
self.prune_node(self.nodes['output'][i])
def reset(self):
for node in self.nodes['all']:
node.output = np.array([0])
def __call__(self, x):
for x_i, node in zip(x, self.nodes['input']):
node.output = x_i
for layer in range(1, len(self.nodes['layered'])):
for node in self.nodes['layered'][layer]:
node.compute()
for node in self.nodes['layered'][layer]:
node.update()
return [ node.output for node in self.nodes['output'] ]
class Node:
def __init__(self, type, id):
self.id = id
self.in_nodes = []
self.out_nodes = []
self.output = np.array([0])
self.type = type
if self.type != 'input':
self.initialize_parameters()
def __repr__(self):
in_node_ids = tuple([node.id for node in self.in_nodes])
out_node_ids = tuple([node.id for node in self.out_nodes])
if self.type == 'input':
return str(('x',)) + '->' + str(self.id) + '->' + \
str(out_node_ids)
elif self.type == 'hidden':
return str(in_node_ids) + '->' + str(self.id) + '->' + \
str(out_node_ids)
else: # self.type == 'output':
return str(in_node_ids) + '->' + str(self.id) + '->' + \
str(('y',) + out_node_ids)
def initialize_parameters(self):
self.weights = np.empty(0)
if self.type == 'hidden':
self.bias = np.random.randn(1)
else: # self.type == 'output':
self.bias = np.zeros(1)
def mutate_parameters(self):
self.weights += 0.01 * np.random.randn(*self.weights.shape)
self.bias += 0.01 * np.random.randn()
def connect_to(self, node):
new_weight = np.random.randn(1)
node.weights = np.concatenate((node.weights, new_weight))
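# Illustrative usage sketch (hedged; it assumes the remaining Node methods of the
# full module -- the rest of connect_to, plus disconnect_from, compute and update):
#
#   net = Net(3, 2)                                   # 3 input nodes, 2 output nodes
#   net.initialize_architecture()
#   np.random.choice(net.architectural_mutations)()   # apply one random architectural mutation
#   net.mutate_parameters()
#   y = net([np.array([0.5]), np.array([-0.1]), np.array([0.2])])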
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import glob
import pickle as pickle
'''
This file contains handy functions for image processing, more specifically for detecting lane lines.
'''
# need to run this once to get the intrinsic matrix
def get_cal_mtx(test_img, nx = 8, ny = 6, fname_dir = 'camera_cal/calibration*.jpg'):
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny*nx,3), np.float32)
objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(fname_dir)
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny), None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# if any points found
if len(objpoints) > 0:
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, test_img.shape[1::-1], None, None)  # image size as (width, height)
else:
mtx = np.eye(3, dtype=int)
dist = np.zeros(3)
print(dist)
print(mtx)
data = {'mtx': mtx, 'dist': dist}
pickle.dump(data, open("camera_calibration.p", "wb"))
return mtx, dist
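# Example sketch of a one-off calibration run. The image paths below are hypothetical
# placeholders, not files known to exist in this repository.
def _calibration_example():
    test_img = cv2.imread('test_images/test1.jpg')      # hypothetical test frame
    mtx, dist = get_cal_mtx(test_img)                   # chessboard search + calibrateCamera
    return cal_undistort(test_img, mtx, dist)           # reuse mtx/dist for every later frame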
# undistort the image
def cal_undistort(img, mtx, dist):
return cv2.undistort(img, mtx, dist, None, mtx)
# apply HSV and soble filter on image
def applyHSVAndSobelXFilter(img, sobel_kernel=3, s_thresh=(170, 255), sx_thresh=(20, 100), plotVisual = False):
img = np.copy(img)
# Convert to HLS color space and separate the L and S channels
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
l_channel = hls[:,:,1]
s_channel = hls[:,:,2]
# Sobel x
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0, ksize=sobel_kernel) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
combined_binary = np.zeros_like(sxbinary)
# combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
combined_binary = cv2.addWeighted(s_binary, 1, sxbinary, 0.6, 0)
if plotVisual:
# Visualize undistortion
fig, ax = plt.subplots(2, 2, figsize=(20,10))
ax[0,0].imshow(img)
ax[0,0].set_title('Original Image', fontsize=20)
ax[0,1].imshow(sxbinary, cmap='gray')
ax[0,1].set_title('Sobel Grad X', fontsize=20)
ax[1,0].imshow(s_binary, cmap='gray')
ax[1,0].set_title('HLS S-channel filter', fontsize=20)
ax[1,1].imshow(combined_binary, cmap='gray')
ax[1,1].set_title('Combined', fontsize=20)
plt.show()
return combined_binary
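# Example sketch of the thresholding step on an already-undistorted RGB frame. The
# threshold values shown are simply the function defaults made explicit.
def _threshold_example(rgb_image):
    return applyHSVAndSobelXFilter(rgb_image, sobel_kernel=3,
                                   s_thresh=(170, 255), sx_thresh=(20, 100))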
# birds eye view by wrapping image
def warp(img, src, dst, plotVisual=False):
#use cv2.getPerspectiveTransform() to get M, the transform matrix
M = cv2.getPerspectiveTransform(src, dst)
#use cv2.warpPerspective() to warp your image to a top-down view
img_shape = (img.shape[1], img.shape[0])
warped = cv2.warpPerspective(img, M, img_shape, flags=cv2.INTER_LINEAR)
if plotVisual:
fig, ax = plt.subplots(1, 2, figsize=(20,10))
ax[0].set_title('Original Image', fontsize=20)
cv2.polylines(img, [src.astype(np.int32)],True, (1,100,100), thickness=2)
ax[0].imshow(img, cmap='gray')
ax[0].plot(src[0][0], src[0][1], 'r+')
ax[0].plot(src[1][0], src[1][1], 'c^')
ax[0].plot(src[2][0], src[2][1], 'r^')
ax[0].plot(src[3][0], src[3][1], 'g^')
ax[1].imshow(warped, cmap='gray')
ax[1].set_title('Warped', fontsize=20)
plt.show()
return warped, M
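# Example sketch of a birds-eye warp. The source/destination quadrilaterals below are
# illustrative values for a 1280x720 frame, not calibrated numbers from this project.
def _warp_example(binary_image):
    src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
    dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])
    return warp(binary_image, src, dst)   # returns (warped image, transform matrix M)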
# find lane pixels using sliding window search
def find_lane_pixels(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 100
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = | np.array(nonzero[0]) | numpy.array |
import os,sys,glob,time
import obspy
import scipy
import pycwt
import pyasdf
import datetime
import numpy as np
import pandas as pd
from obspy.signal.invsim import cosine_taper
from obspy.signal.regression import linear_regression
from scipy.fftpack import fft,ifft,next_fast_len
from seisgo import stacking as stack
from seisgo.types import CorrData, FFTData
from seisgo import utils
#####
########################################################
################ CROSS-CORRELATE FUNCTIONS ##################
########################################################
def cc_memory(inc_hours,sps,nsta,ncomp,cc_len,cc_step):
"""
Estimates the memory usage with given correlation parameters, assuming float 32.
"""
nseg_chunk = int(np.floor((3600*inc_hours-cc_len)/cc_step))+1
npts_chunk = int(nseg_chunk*cc_len*sps)
memory_size = nsta*ncomp*npts_chunk*4/1024/1024/1024  # 4-byte floats per component, converted to GB
return memory_size
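# Small self-contained sanity check (illustrative numbers only: 20 stations, 3 components,
# 1-hour chunks sampled at 20 sps, 1800-s windows stepped by 900 s).
def _cc_memory_example():
    mem_gb = cc_memory(inc_hours=1, sps=20, nsta=20, ncomp=3, cc_len=1800, cc_step=900)
    return mem_gb   # compare against the RAM available per process before launching a job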
def compute_fft(trace,win_len,step,stainv=None,
freqmin=None,freqmax=None,time_norm='no',freq_norm='no',
smooth=20,smooth_spec=None,misc=dict(),taper_frac=0.05,df=None):
"""
Call FFTData to build the object. This is an alternative of directly call FFTData().
The motivation of this function is to provide an user interface to build FFTData object.
"""
return FFTData(trace=trace,win_len=win_len,step=step,
stainv=stainv,freqmin=freqmin,freqmax=freqmax,time_norm=time_norm,
freq_norm=freq_norm,smooth=smooth,smooth_spec=smooth_spec,misc=misc,
taper_frac=taper_frac,df=df)
#assemble FFT with given asdf file name
def assemble_fft(sfile,win_len,step,freqmin=None,freqmax=None,
time_norm='no',freq_norm='no',smooth=20,smooth_spec=20,
taper_frac=0.05,df=None,exclude_chan=[None],v=True):
#only deal with ASDF format for now.
# retrive station information
ds=pyasdf.ASDFDataSet(sfile,mpi=False,mode='r')
sta_list = ds.waveforms.list()
nsta=len(sta_list)
print('found %d stations in total'%nsta)
fftdata_all=[]
if nsta==0:
print('no data in %s'%sfile);
return fftdata_all
# loop through all stations
print('working on file: '+sfile.split('/')[-1])
for ista in sta_list:
# get station and inventory
try:
inv1 = ds.waveforms[ista]['StationXML']
except Exception as e:
print('abort! no stationxml for %s in file %s'%(ista,sfile))
continue
# get days information: works better than just list the tags
all_tags = ds.waveforms[ista].get_waveform_tags()
if len(all_tags)==0:continue
#----loop through each stream----
for itag in all_tags:
if v:print("FFT for station %s and trace %s" % (ista,itag))
# read waveform data
source = ds.waveforms[ista][itag]
if len(source)==0:continue
# channel info
comp = source[0].stats.channel
if comp[-1] == 'U': comp = comp.replace('U','Z')
#exclude some channels in the exclude_chan list.
if comp in exclude_chan:
print(comp+" is in the exclude_chan list. Skip it!")
continue
fftdata=FFTData(source,win_len,step,stainv=inv1,
time_norm=time_norm,freq_norm=freq_norm,
smooth=smooth,freqmin=freqmin,freqmax=freqmax,
smooth_spec=smooth_spec,taper_frac=taper_frac,df=df)
if fftdata.data is not None:
fftdata_all.append(fftdata)
####
return fftdata_all
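# Example sketch (the ASDF file name is hypothetical; the chunk is assumed to follow the
# layout expected above, i.e. one waveform tag per station with an attached StationXML):
#   fftlist = assemble_fft('RAW/2019_01_01_00_00_00T2019_01_01_12_00_00.h5',
#                          win_len=1800, step=450, freqmin=0.01, freqmax=2)
#   print('%d FFTData objects ready for correlation' % len(fftlist))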
def smooth_source_spect(fft1,cc_method,sn):
'''
this function smooths the amplitude spectrum of the 2D spectral matrix. (used in S1)
PARAMETERS:
---------------------
fft1: source spectrum matrix
cc_method: correlation method, one of 'deconv', 'coherency' or 'xcorr'
sn: number of points used to smooth the amplitude spectrum
RETURNS:
---------------------
sfft1: complex numpy array with normalized spectrum
'''
smoothspect_N = sn #cc_para['smoothspect_N']
N=fft1.shape[0]
Nfft2=fft1.shape[1]
fft1=fft1.reshape(fft1.size)
if cc_method == 'deconv':
#-----normalize single-station cc to z component-----
temp = utils.moving_ave(np.abs(fft1),smoothspect_N)
try:
sfft1 = fft1/temp**2
except Exception:
raise ValueError('smoothed spectrum has zero values')
elif cc_method == 'coherency':
temp = utils.moving_ave(np.abs(fft1),smoothspect_N)
try:
sfft1 = fft1/temp
except Exception:
raise ValueError('smoothed spectrum has zero values')
elif cc_method == 'xcorr':
sfft1 = fft1
else:
raise ValueError('no valid correlation method is selected; choose deconv, coherency or xcorr')
return sfft1.reshape(N,Nfft2)
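# Self-contained sketch of the whitening behaviour on a synthetic spectrum (shapes only,
# no physical meaning), assuming utils.moving_ave accepts a plain 1-D array as used above.
def _whitening_example():
    rng = np.random.default_rng(0)
    spec = rng.standard_normal((4, 64)) + 1j * rng.standard_normal((4, 64))
    white = smooth_source_spect(spec, 'deconv', 10)
    return white.shape   # (4, 64): same layout as the input, amplitudes ~ 1/smoothed spectrum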
#
def do_correlation(sfile,win_len,step,maxlag,cc_method='xcorr',acorr_only=False,
xcorr_only=False,substack=False,substack_len=None,smoothspect_N=20,
maxstd=10,freqmin=None,freqmax=None,time_norm='no',freq_norm='no',
smooth_N=20,exclude_chan=[None],outdir='.',v=True):
"""
Wrapper for computing correlation functions. It includes two key steps: 1) compute and assemble
the FFT of all data in the sfile, into a list of FFTData objects; 2) loop through the FFTData object
list and do correlation (auto or xcorr) for each source-receiver pair.
====RETURNS====
ndata: the number of station-component pairs in the sfile, that have been processed.
"""
if win_len in [1,2,3]:
print("!!!WARNING: you may call do_correlation() in the old way with the 2nd argument as the ncomp info.")
print(" This may cause errors with arguments getting the wrong values. In this version and later,")
print(" ncomp is deprecated. No change for other arguments. This warning will be removed in")
print(" versions v0.7.x and later.")
if acorr_only and xcorr_only:
raise ValueError('acorr_only and xcorr_only CAN NOT all be True.')
tname = sfile.split('/')[-1]
tmpfile = os.path.join(outdir,tname.split('.')[0]+'.tmp')
if not os.path.isdir(outdir):os.makedirs(outdir)
#file to store CC results.
outfile=os.path.join(outdir,tname)
# check whether time chunk been processed or not
if os.path.isfile(tmpfile):
ftemp = open(tmpfile,'r')
alines = ftemp.readlines()
if len(alines) and alines[-1] == 'done':
return 0
else:
ftemp.close()
os.remove(tmpfile)
if os.path.isfile(outfile): os.remove(outfile)
ftmp = open(tmpfile,'w')
##############compute FFT#############
fftdata=assemble_fft(sfile,win_len,step,freqmin=freqmin,freqmax=freqmax,
time_norm=time_norm,freq_norm=freq_norm,smooth=smooth_N,exclude_chan=exclude_chan)
ndata=len(fftdata)
#############PERFORM CROSS-CORRELATION##################
if v: print(tname)
iend=ndata
for iiS in range(ndata):
# get index right for auto/cross correlation
istart=iiS;
src=fftdata[iiS].net+"."+fftdata[iiS].sta
# if acorr_only:iend=np.minimum(iiS+ncomp,ndata)
# if xcorr_only:istart=np.minimum(iiS+ncomp,ndata)
#-----------now loop III for each receiver B----------
for iiR in range(istart,iend):
# if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta))
rcv=fftdata[iiR].net+"."+fftdata[iiR].sta
if (acorr_only and src==rcv) or (xcorr_only and src != rcv) or (not acorr_only and not xcorr_only):
if fftdata[iiS].data is not None and fftdata[iiR].data is not None:
if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta))
corrdata=correlate(fftdata[iiS],fftdata[iiR],maxlag,method=cc_method,substack=substack,
smoothspect_N=smoothspect_N,substack_len=substack_len,
maxstd=maxstd)
if corrdata.data is not None: corrdata.to_asdf(file=outfile)
# create a stamp to show time chunk being done
ftmp.write('done')
ftmp.close()
return ndata
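# Example sketch of a single-chunk run (hypothetical input file and output folder):
#   n = do_correlation('RAW/2019_01_01_00_00_00T2019_01_01_12_00_00.h5',
#                      win_len=1800, step=450, maxlag=200, cc_method='xcorr',
#                      substack=True, outdir='CCF')
#   print('%d station-component traces processed' % n)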
def correlate(fftdata1,fftdata2,maxlag,method='xcorr',substack=False,
substack_len=None,smoothspect_N=20,maxstd=10,terror=0.01):
'''
this function does the cross-correlation in freq domain and has the option to keep sub-stacks of
the cross-correlation if needed. it takes advantage of the linear relationship of ifft, so that
stacking is performed in spectrum domain first to reduce the total number of ifft.
PARAMETERS:
---------------------
fftdata1: FFTData for the source station
fftdata2: FFTData of the receiver station
maxlag: maximum lags to keep in the cross correlation
method: cross-correlation methods selected by the user
terror: 0-1 fraction of timing error in searching for overlapping. The timing error =
terror*dt
RETURNS:
---------------------
corrdata: CorrData object of cross-correlation functions in time domain
'''
corrdata=CorrData()
#check overlapping timestamps before any other processing
#this step is required when there are gaps in the data.
ind1,ind2=utils.check_overlap(fftdata1.time,fftdata2.time,error=terror*fftdata1.dt)
if not len(ind1):
print('no overlapped timestamps in the data.')
return corrdata
#---------- check the existence of earthquakes by std of the data.----------
source_std = fftdata1.std[ind1]
sou_ind = np.where((source_std<maxstd)&(source_std>0)&(np.isnan(source_std)==0))[0]
if not len(sou_ind): return corrdata
receiver_std = fftdata2.std[ind2]
rec_ind = np.where((receiver_std<maxstd)&(receiver_std>0)&(np.isnan(receiver_std)==0))[0]
if not len(rec_ind): return corrdata
bb=np.intersect1d(sou_ind,rec_ind)
if len(bb)==0:return corrdata
bb_data1=[ind1[i] for i in bb]
bb_data2=[ind2[i] for i in bb]
#----load paramters----
dt = fftdata1.dt
cc_len = fftdata1.win_len
cc_step = fftdata1.step
if substack_len is None: substack_len=cc_len
Nfft = fftdata1.Nfft
Nfft2 = Nfft//2
fft1=np.conj(fftdata1.data[bb_data1,:Nfft2]) #get the conjugate of fft1
nwin = fft1.shape[0]
fft2=fftdata2.data[bb_data2,:Nfft2]
timestamp=fftdata1.time[bb_data1]
if method != "xcorr":
fft1 = smooth_source_spect(fft1,method,smoothspect_N)
#------convert all 2D arrays into 1D to speed up--------
corr = np.zeros(nwin*Nfft2,dtype=np.complex64)
corr = fft1.reshape(fft1.size,)*fft2.reshape(fft2.size,)
if method == "coherency":
temp = utils.moving_ave(np.abs(fft2.reshape(fft2.size,)),smoothspect_N)
corr /= temp
corr = corr.reshape(nwin,Nfft2)
if substack:
if substack_len == cc_len:
# choose to keep all fft data for a day
s_corr = np.zeros(shape=(nwin,Nfft),dtype=np.float32) # stacked correlation
ampmax = np.zeros(nwin,dtype=np.float32)
n_corr = np.zeros(nwin,dtype=np.int16) # number of correlations for each substack
t_corr = timestamp # timestamp
crap = np.zeros(Nfft,dtype=np.complex64)
for i in range(nwin):
n_corr[i]= 1
crap[:Nfft2] = corr[i,:]
crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0)
crap[-(Nfft2)+1:] = np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
crap[0]=complex(0,0)
s_corr[i,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))
# remove abnormal data
ampmax = np.max(s_corr,axis=1)
tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
s_corr = s_corr[tindx,:]
t_corr = t_corr[tindx]
n_corr = n_corr[tindx]
else:
# get time information
Ttotal = timestamp[-1]-timestamp[0] # total duration of what we have now
tstart = timestamp[0]
nstack = int(np.round(Ttotal/substack_len))
ampmax = np.zeros(nstack,dtype=np.float32)
s_corr = np.zeros(shape=(nstack,Nfft),dtype=np.float32)
n_corr = np.zeros(nstack,dtype=int)
t_corr = np.zeros(nstack,dtype=float)
crap = np.zeros(Nfft,dtype=np.complex64)
for istack in range(nstack):
# find the indexes of all the windows that start within the current substack interval
itime = np.where( (timestamp >= tstart) & (timestamp < tstart+substack_len) )[0]
if len(itime)==0:tstart+=substack_len;continue
crap[:Nfft2] = np.mean(corr[itime,:],axis=0) # linear average of the correlation
crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2]) # remove the mean in freq domain (spike at t=0)
crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
crap[0]=complex(0,0)
s_corr[istack,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))
n_corr[istack] = len(itime) # number of windows stacks
t_corr[istack] = tstart # save the time stamps
tstart += substack_len
#print('correlation done and stacked at time %s' % str(t_corr[istack]))
# remove abnormal data
ampmax = np.max(s_corr,axis=1)
tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
s_corr = s_corr[tindx,:]
t_corr = t_corr[tindx]
n_corr = n_corr[tindx]
else:
# average daily cross correlation functions
ampmax = np.max(corr,axis=1)
tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
n_corr = nwin
s_corr = np.zeros(Nfft,dtype=np.float32)
t_corr = timestamp[0]
crap = np.zeros(Nfft,dtype=np.complex64)
crap[:Nfft2] = np.mean(corr[tindx],axis=0)
crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2],axis=0)
crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
s_corr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))
# trim the CCFs in [-maxlag maxlag]
t = np.arange(-Nfft2+1, Nfft2)*dt
ind = np.where(np.abs(t) <= maxlag)[0]
if s_corr.ndim==1:
s_corr = s_corr[ind]
elif s_corr.ndim==2:
s_corr = s_corr[:,ind]
### call CorrData to build the object
cc_comp= fftdata1.chan[-1]+fftdata2.chan[-1]
dist,azi,baz = obspy.geodetics.base.gps2dist_azimuth(fftdata1.lat,fftdata1.lon,fftdata2.lat,fftdata2.lon)
corrdata=CorrData(net=[fftdata1.net,fftdata2.net],sta=[fftdata1.sta,fftdata2.sta],\
loc=[fftdata1.loc,fftdata2.loc],chan=[fftdata1.chan,fftdata2.chan],\
lon=[fftdata1.lon,fftdata2.lon],lat=[fftdata1.lat,fftdata2.lat],\
ele=[fftdata1.ele,fftdata2.ele],cc_comp=cc_comp,lag=maxlag,\
dt=fftdata1.dt,cc_len=cc_len,cc_step=cc_step,dist=dist/1000,az=azi,\
baz=baz,time=t_corr,data=s_corr,substack=substack,\
side="A",misc={"cc_method":method,"dist_unit":"km"})
return corrdata
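# Example sketch: correlating two FFTData objects produced by compute_fft()/assemble_fft().
# `fft_src` and `fft_rcv` stand for two entries of such a list (placeholders, not defined here):
#   corr = correlate(fft_src, fft_rcv, maxlag=200, method='coherency', substack=True)
#   if corr.data is not None: corr.to_asdf(file='CCF/pair.h5')   # hypothetical output path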
def do_stacking(ccfiles,pairlist=None,outdir='./STACK',method=['linear'],
rotation=False,correctionfile=None,flag=False,keep_substack=False,
to_egf=False):
# source folder
if pairlist is None:
pairlist,netsta_all=get_stationpairs(ccfiles,False)
if len(ccfiles)==0:
raise IOError('Abort! no available CCF data for stacking')
for s in netsta_all:
tmp = os.path.join(outdir,s)
if not os.path.isdir(tmp):os.mkdir(tmp)
if isinstance(pairlist,str):pairlist=[pairlist]
if not os.path.isdir(outdir):os.makedirs(outdir)
if rotation:
enz_system = ['EE','EN','EZ','NE','NN','NZ','ZE','ZN','ZZ']
rtz_components = ['ZR','ZT','ZZ','RR','RT','RZ','TR','TT','TZ']
for pair in pairlist:
ttr = pair.split('_')
snet,ssta = ttr[0].split('.')
rnet,rsta = ttr[1].split('.')
idir = ttr[0]
# continue when file is done
toutfn = os.path.join(outdir,idir+'/'+pair+'.tmp')
if os.path.isfile(toutfn):continue
if flag:print('assembling all corrdata ...')
t0=time.time()
corrdict_all=dict() #all components for the single station pair
txtract=np.zeros(len(ccfiles),dtype=np.float32)
tmerge=np.zeros(len(ccfiles),dtype=np.float32)
tparameters=None
for i,ifile in enumerate(ccfiles):
# tt00=time.time()
corrdict=extract_corrdata(ifile,pair=pair)
# txtract[i]=time.time()-tt00
if len(list(corrdict.keys()))>0:
comp_list=list(corrdict[pair].keys())
if len(comp_list)==0:
continue
elif len(comp_list) >9:
print(comp_list)
raise ValueError('more than 9 cross-component exists for %s %s! please double check'%(ifile,pair))
### merge same component corrdata.
# tt11=time.time()
for c in comp_list:
#convert corrdata to empirical Green's functions by
#taking the negative time derivative. See types.CorrData.to_egf() for details.
if to_egf:
corrdict[pair][c].to_egf()
if tparameters is None:tparameters=corrdict[pair][c].misc
if c in list(corrdict_all.keys()):
corrdict_all[c].merge(corrdict[pair][c])
else:corrdict_all[c]=corrdict[pair][c]
# tmerge[i]=time.time()-tt11
#
# if flag:print('extract time:'+str(np.sum(txtract)))
# if flag:print('merge time:'+str(np.sum(tmerge)))
t1=time.time()
if flag:print('finished assembling in %6.2fs ...'%(t1-t0))
#get length info from anyone of the corrdata, assuming all corrdata having the same length.
cc_comp=list(corrdict_all.keys()) #final check on number of keys after merging all data.
if len(cc_comp)==0:
if flag:print('continue! no cross components for %s'%(pair))
continue
elif len(cc_comp)<9 and rotation:
if flag:print('continue! not enough cross components for %s to do rotation'%(pair))
continue
elif len(cc_comp) >9:
print(cc_comp)
raise ValueError('more than 9 cross-component exists for %s! please double check'%(pair))
#save data.
outfn = pair+'.h5'
if flag:print('ready to output to %s'%(outfn))
t2=time.time()
# loop through cross-component for stacking
if isinstance(method,str):method=[method]
tparameters['station_source']=ssta
tparameters['station_receiver']=rsta
if rotation: #need to order the components according to enz_system list.
if corrdict_all[cc_comp[0]].substack:
npts_segmt = corrdict_all[cc_comp[0]].data.shape[1]
else:
npts_segmt = corrdict_all[cc_comp[0]].data.shape[0]
bigstack=np.zeros(shape=(9,npts_segmt),dtype=np.float32)
if flag:print('applying stacking and rotation ...')
stack_h5 = os.path.join(outdir,idir+'/'+outfn)
ds=pyasdf.ASDFDataSet(stack_h5,mpi=False)
#codes for ratation option.
for m in method:
data_type = 'Allstack_'+m
bigstack=np.zeros(shape=(9,npts_segmt),dtype=np.float32)
for icomp in range(9):
comp = enz_system[icomp]
indx = np.where(np.array(cc_comp)==comp)[0]
# jump if there are not enough data
dstack,stamps_final=stacking(corrdict_all[cc_comp[indx[0]]],method=m)
bigstack[icomp]=dstack
tparameters['time'] = stamps_final[0]
ds.add_auxiliary_data(data=dstack, data_type=data_type, path=comp,
parameters=tparameters)
# start rotation
if np.all(bigstack==0):continue
bigstack_rotated = do_rotation(bigstack,tparameters,correctionfile,flag)
# write to file
data_type = 'Allstack_'+m
for icomp2 in range(9):
rcomp = rtz_components[icomp2]
if rcomp != 'ZZ':
ds.add_auxiliary_data(data=bigstack_rotated[icomp2], data_type=data_type,
path=rcomp, parameters=tparameters)
if keep_substack:
for ic in cc_comp:
for ii in range(corrdict_all[ic].data.shape[0]):
tparameters2=tparameters
tparameters2['time'] = corrdict_all[ic].time[ii]
data_type = 'T'+str(int(corrdict_all[ic].time[ii]))
ds.add_auxiliary_data(data=corrdict_all[ic].data[ii], data_type=data_type,
path=ic, parameters=tparameters2)
else: #no need to care about the order of components.
stack_h5 = os.path.join(outdir,idir+'/'+outfn)
ds=pyasdf.ASDFDataSet(stack_h5,mpi=False)
if flag:print('applying stacking ...')
for ic in cc_comp:
# write stacked data into ASDF file
dstack,stamps_final=stacking(corrdict_all[ic],method=method)
tparameters['time'] = stamps_final[0]
for i in range(len(method)):
m=method[i]
ds.add_auxiliary_data(data=dstack[i,:], data_type='Allstack_'+m, path=ic,
parameters=tparameters)
if keep_substack:
for ii in range(corrdict_all[ic].data.shape[0]):
tparameters2=tparameters
tparameters2['time'] = corrdict_all[ic].time[ii]
data_type = 'T'+str(int(corrdict_all[ic].time[ii]))
ds.add_auxiliary_data(data=corrdict_all[ic].data[ii], data_type=data_type,
path=ic, parameters=tparameters2)
#
if flag: print('stacking and saving took %6.2fs'%(time.time()-t2))
# write file stamps
ftmp = open(toutfn,'w');ftmp.write('done');ftmp.close()
del corrdict_all
####
def stacking(corrdata,method='linear',par=None):
'''
this function stacks the cross correlation data
PARAMETERS:
----------------------
corrdata: CorrData object.
method: stacking method, could be: linear, robust, pws, acf, or nroot.
par: stacking parameters in a dictionary. See stacking.seisstack() for details.
RETURNS:
----------------------
dstack: 1D matrix of stacked cross-correlation functions over all the segments
cc_time: timestamps of the traces for the stack
'''
if isinstance(method,str):method=[method]
# remove abnormal data
if corrdata.data.ndim==1:
cc_time = [corrdata.time]
# do stacking
dstack = np.zeros((len(method),corrdata.data.shape[0]),dtype=np.float32)
for i in range(len(method)):
m =method[i]
dstack[i,:]=corrdata.data[:]
else:
ampmax = np.max(corrdata.data,axis=1)
tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
nstacks=len(tindx)
dstack=[]
cc_time=[]
if nstacks >0:
# remove ones with bad amplitude
cc_array = corrdata.data[tindx,:]
cc_time = corrdata.time[tindx]
# do stacking
dstack = np.zeros((len(method),corrdata.data.shape[1]),dtype=np.float32)
for i in range(len(method)):
m =method[i]
if nstacks==1: dstack[i,:]=cc_array
else:
dstack[i,:] = stack.seisstack(cc_array,method=m,par=par)
# good to return
return dstack,cc_time
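# Example sketch: stacking one CorrData object (here `cdata`, a placeholder for an instance
# holding substacks) with two operators at once:
#   dstack, times = stacking(cdata, method=['linear', 'robust'])
#   dstack.shape   # (2, npts): one stacked trace per requested method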
def do_rotation(bigstack,parameters,locs,flag):
'''
this function transfers the Green's tensor from a E-N-Z system into a R-T-Z one
PARAMETERS:
-------------------
bigstack: 9 component Green's tensor in E-N-Z system
parameters: dict containing all parameters saved in ASDF file
locs: dict containing station angle info for correction purpose
RETURNS:
-------------------
tcorr: 9 component Green's tensor in R-T-Z system
'''
# load parameter dic
pi = np.pi
azi = parameters['azi']
baz = parameters['baz']
ncomp,npts = bigstack.shape
if ncomp<9:
print('crap did not get enough components')
tcorr=[]
return tcorr
staS = parameters['station_source']
staR = parameters['station_receiver']
if locs is not None:
sta_list = list(locs['station'])
angles = list(locs['angle'])
# get station info from the name of ASDF file
ind = sta_list.index(staS)
acorr = angles[ind]
ind = sta_list.index(staR)
bcorr = angles[ind]
#---angles to be corrected----
cosa = np.cos((azi+acorr)*pi/180)
sina = np.sin((azi+acorr)*pi/180)
cosb = np.cos((baz+bcorr)*pi/180)
sinb = np.sin((baz+bcorr)*pi/180)
else:
cosa = np.cos(azi*pi/180)
sina = np.sin(azi*pi/180)
cosb = | np.cos(baz*pi/180) | numpy.cos |
from __future__ import division
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from numpy.random import sample as rs
from numpy import hstack as hs
from numpy import newaxis as na
from scipy.stats.distributions import norm, uniform
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib as mpl
import seaborn as sns
sns.set(style='white', font_scale=1.8)
clrs = ['#3778bf', '#e74c3c', '#9b59b6', '#319455', '#feb308', '#fd7f23']
def LCA_Model(I1=10, I2=8, I0=2, k=5, B=5, si=1., Z=1, dt=.01, tau=.1, tmax=1.5):
timepoints = np.arange(0, tmax, dt)
ntime = timepoints.size
y1 = np.zeros(ntime)
y2 = np.zeros(ntime)
dx=np.sqrt(si*dt/tau)
E1=si*np.sqrt(dt/tau)*rs(ntime)
E2=si*np.sqrt(dt/tau)*rs(ntime)
onset=100
for i in range(onset, ntime):
y1[i] = y1[i-1] + (I1 + -k*y1[i-1] + -B*y2[i-1]) * dt/tau + E1[i]
y2[i] = y2[i-1] + (I2 + -k*y2[i-1] + -B*y1[i-1]) * dt/tau + E2[i]
y_t = np.array([y1[i], y2[i]])
if np.any(y_t>=Z):
rt = i; act = np.argmax(y_t)
return y1[:i], y2[:i], rt, act
return y1[:i], y2[:i], np.nan, np.nan
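# Self-contained sketch: run one race between the two accumulators with the default inputs.
def _lca_example():
    y1, y2, rt, winner = LCA_Model(I1=10, I2=8, Z=1)
    # rt is the step index of the first threshold crossing (NaN if neither unit reached Z)
    return rt, winner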
def attractor_network(I1=6, I2=3, I0=2, k=.85, B=.28, si=.3, rmax=50, b=30, g=9, Z=20, dt=.001, tau=.05, tmax=1.5):
timepoints = np.arange(0, tmax, dt)
ntime = timepoints.size
r1 = np.zeros(ntime)
r2 = np.zeros(ntime)
dv = np.zeros(ntime)
NInput = lambda x, r: rmax/(1+np.exp(-(x-b)/g))-r
dspace = lambda r1, r2: (r1-r2)/np.sqrt(2)
E1=si*np.sqrt(dt/tau)*rs(ntime)
E2=si*np.sqrt(dt/tau)*rs(ntime)
onset=100
r1[:onset], r2[:onset] = [v[0][:onset] + I0+v[1][:onset] for v in [[r1,E1],[r2,E2]]]
subZ=True
for i in range(onset, ntime):
r1[i] = r1[i-1] + dt/tau * (NInput(I1 + I0 + k*r1[i-1] + -B*r2[i-1], r1[i-1])) + E1[i]
r2[i] = r2[i-1] + dt/tau * (NInput(I2 + I0 + k*r2[i-1] + -B*r1[i-1], r2[i-1])) + E2[i]
dv[i] = (r1[i]-r2[i])/np.sqrt(2)
if np.abs(dv[i])>=Z:
rt = i+1
return r1[:i+1], r2[:i+1], dv[:i+1], rt
rt = i+1
return r1[:i], r2[:i], dv[:i], rt
def simulate_attractor_competition(Imax=12, I0=0.05, k=1.15, B=.6, g=15, b=30, rmax=100, si=6.5, dt=.002, tau=.075, Z=100, ntrials=250):
sns.set(style='white', font_scale=1.8)
f, ax = plt.subplots(1, figsize=(8,7))
cmap = mpl.colors.ListedColormap(sns.blend_palette([clrs[1], clrs[0]], n_colors=ntrials))
Iscale = np.hstack(np.tile(np.linspace(.5*Imax, Imax, ntrials//2)[::-1], 2))
Ivector=np.linspace(-1,1,len(Iscale))
norm = mpl.colors.Normalize(
vmin=np.min(Ivector),
vmax=np.max(Ivector))
sm = mpl.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array([])
for i, I_t in enumerate(Iscale):
if i < (ntrials/2.):
I1 = Imax; I2 = I_t
else:
I1=I_t; I2 = Imax
r1, r2, dv, rt = attractor_network(I1=I1, I2=I2, I0=I0, k=k, B=B, g=g, b=b, rmax=rmax, si=si, dt=dt, tau=tau, Z=Z)
ax.plot(r1, r2, color=sm.to_rgba(Ivector[i]), alpha=.5)
c_ax = plt.colorbar(sm, ax=plt.gca())
c_ax.set_ticks([-1, 1])
c_ax.set_ticklabels(['$I_1<<I_2$', '$I_1>>I_2$'])
ax.plot([0,rmax], [0,rmax], color='k', alpha=.5, linestyle='-', lw=3.5)
_=plt.setp(ax, ylim=[0,rmax], xlim=[0,rmax], xticks=[0,rmax], xticklabels=[0,rmax],
yticks=[0,rmax],yticklabels=[0,rmax], ylabel='$r_1$ (Hz)', xlabel='$r_2$ (Hz)')
def simulate_attractor_behavior(I1=12, I2=9, I0=0.05, k=1.15, B=1., g=12, b=35, rmax=100, si=5., dt=.001, tau=.075, Z=30, ntrials=250):
behavior = np.zeros((ntrials, 3))
for t in range(ntrials):
r1, r2, dv, rt = attractor_network(I1=I1, I2=I2, I0=I0, k=k, B=B, g=g, b=b, rmax=rmax, si=si, dt=dt, tau=tau, Z=Z)
choice=0
acc=0
if dv[-1]>=Z:
choice=1
acc=0
if I1>I2: acc=1
elif dv[-1]<=-Z:
choice=2
if I2>I1: acc=1
elif I2==I1:
acc=.5
behavior[t, :] = choice, acc, rt
return pd.DataFrame(behavior, columns=['choice', 'accuracy', 'rt'], index= | np.arange(ntrials) | numpy.arange |
import sqlite3
import numpy as np
import pdb
from utils.nlp import normalize
# loading databases
domains = ['restaurant', 'hotel', 'attraction', 'train']
# 3 domains do not have DB: 'taxi', 'hospital', 'police']
dbs = {}
for domain in domains:
db = 'data2.0/db/{}-dbase.db'.format(domain)
print("Connect to DB {}".format(db))
conn = sqlite3.connect(db)
c = conn.cursor()
dbs[domain] = c
normalized_dbs = {}
for domain in domains:
db = 'data2.0/db_normalized/{}-dbase.db'.format(domain)
print("Connect to DB {}".format(db))
conn = sqlite3.connect(db)
c = conn.cursor()
normalized_dbs[domain] = c
def get_db_columns(normalized):
if normalized:
database = normalized_dbs
else:
database = dbs
out = {}
for domain, db in database.items():
query = 'select * from {}'.format(domain)
cursor = database[domain].execute(query)
keys = list(map(lambda x: x[0], cursor.description))
out[domain] = keys
return out
def get_all_entities(normalized):
if normalized:
database = normalized_dbs
else:
database = dbs
out = {}
for domain, db in database.items():
query = 'select * from {}'.format(domain)
cursor = database[domain].execute(query)
out[domain] = cursor.fetchall()
return out
normalized_dbs_columns = get_db_columns(True)
dbs_columns = get_db_columns(False)
assert normalized_dbs_columns == dbs_columns
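# Example sketch of a raw lookup against one of the connected cursors. The column/value
# pair ('area' = 'centre') is illustrative and assumed to exist in the restaurant table:
#   rows = dbs['restaurant'].execute("select * from restaurant where area='centre'").fetchall()
#   print(len(rows), 'matching restaurants')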
def one_hot_vector(num, domain, vector):
"""Return number of available entities for particular domain."""
number_of_options = 6
if domain != 'train':
idx = domains.index(domain)
if num == 0:
vector[idx * 6: idx * 6 + 6] = np.array([1, 0, 0, 0, 0,0])
elif num == 1:
vector[idx * 6: idx * 6 + 6] = np.array([0, 1, 0, 0, 0, 0])
elif num == 2:
vector[idx * 6: idx * 6 + 6] = | np.array([0, 0, 1, 0, 0, 0]) | numpy.array |
import pandas as pd
import numpy as np
import dask
import scipy
import time
from functools import partial
from abc import ABCMeta, abstractmethod
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
import point_in_polygon
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, DotProduct, WhiteKernel
import factorialModel
import loadData
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d, griddata
import SSVI
import bootstrapping
#######################################################################################################
class InterpolationModel(factorialModel.FactorialModel):
def __init__(self,
learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName = "./bestInterpolationModel"):
super().__init__(learningRate,
hyperParameters,
nbUnitsPerLayer,
nbFactors,
modelName)
#Build the learner
def buildModel(self):
#raise NotImplementedError()
return
def trainWithSession(self, session, inputTrain, nbEpoch, inputTest = None):
raise NotImplementedError("Not a tensorflow model")
return super().trainWithSession(session,
inputTrain,
nbEpoch,
inputTest = inputTest)
def train(self, inputTrain, nbEpoch, inputTest = None):
#Do nothing
return np.array([0.0])
def evalModelWithSession(self, sess, inputTest):
raise NotImplementedError("Not a tensorflow model")
return super().evalModelWithSession(sess, inputTest)
def evalModel(self, inputTestList):
#No loss since we interpolate exactly
inputTest = inputTestList[0]
coordinates = inputTestList[1]
loss = pd.Series(np.zeros(inputTest.shape[0]), index = inputTest.index)
#Return the inputs as compressed values
inputs = inputTest.apply(lambda x : self.interpolate(x, coordinates.loc[x.name]), axis=1)
#We do not have any factors so we assign a dummy value of 1
factors = pd.DataFrame(np.ones((inputTest.shape[0],self.nbFactors)),
index=inputTest.index)
return loss, inputs, factors
def getWeightAndBiasFromLayer(self, layer):
raise NotImplementedError("Not a tensorflow model")
return super().getWeightAndBiasFromLayer(layer)
#Interpolate or extrapolate certain values given the knowledge of other ones
def interpolate(self, incompleteSurface, coordinates):
raise NotImplementedError()
return pd.Series()
def completeDataTensor(self,
sparseSurfaceList,
initialValueForFactors,
nbCalibrationStep):
# knownValues = sparseSurface.dropna()
# locationToInterpolate = sparseSurface[sparseSurface.isna()].index
sparseSurface = sparseSurfaceList[0]
coordinates = sparseSurfaceList[1]
interpolatedValues = self.interpolate(sparseSurface, coordinates)
#Not a factorial model, we assign a dummy value
bestFactors = np.ones(self.nbFactors)
#Exact inteprolation
calibrationLoss = 0.0
calibrationSerie = pd.Series([calibrationLoss])
#Complete surface with inteporlated values
bestSurface = interpolatedValues
return calibrationLoss, bestFactors, bestSurface, calibrationSerie
#Interpolation does not assume any factors but relies on some known values
def evalSingleDayWithoutCalibrationWithSensi(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a factorial model")
return super().evalSingleDayWithoutCalibrationWithSensi(initialValueForFactors, dataSetList)
def plotInterpolatedSurface(self,valueToInterpolate, calibratedFactors,
colorMapSystem=None,
plotType=None):
raise NotImplementedError("Not a factorial model")
return
def evalInterdependancy(self, fullSurfaceList):
raise NotImplementedError("Not a Factorial model")
return
def evalSingleDayWithoutCalibration(self, initialValueForFactors, dataSetList):
raise NotImplementedError("Not a Factorial model")
return
#ToolBox
#######################################################################################################
def getMaskedPoints(incompleteSurface, coordinates):
return coordinates.loc[incompleteSurface.isna()]
def getMaskMatrix(incompleteSurface):
maskMatrix = incompleteSurface.copy().fillna(True)
maskMatrix.loc[~incompleteSurface.isna()] = False
return maskMatrix
#maskedGrid : surface precising missing value with a NaN
#Assuming indexes and columns are sorted
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def selectPolygonOuterPoints(coordinates):
outerPoints = []
#Group coordinates by first coordinate
splittedCoordinates = {}
for tple in coordinates.values :
if tple[0] not in splittedCoordinates :
splittedCoordinates[tple[0]] = []
splittedCoordinates[tple[0]].append(tple[1])
#Get maximum and minimum for the second dimension
for key in splittedCoordinates.keys():
yMin = np.nanmin(splittedCoordinates[key])
yMax = np.nanmax(splittedCoordinates[key])
outerPoints.append((key,yMin))
outerPoints.append((key,yMax))
return outerPoints
def removeNaNcooridnates(coordinatesList):
isNotNaN = [False if (np.isnan(x[0]) or np.isnan(x[1])) else True for x in coordinatesList]
return coordinatesList[isNotNaN]
#Order a list of vertices to form a polygon
def orderPolygonVertices(outerPointList):
sortedPointList = sorted(outerPointList) #lexicographic sort keeps each (x, y) pair intact
#Points are built as a pair of two points for value in the first dimension
#Hence the polygon starts with points having the first value for the second dimension
#(and order them along the first dimension)
orderedListOfVertices = sortedPointList[::2]
#We then append the remaining points in the reverse order for the second dimension
orderedListOfVertices = orderedListOfVertices + sortedPointList[1::2][::-1]
return orderedListOfVertices
#Select swaption coordinates (expiry, tenor) whose value is known and are on the boundary
#This defined a polygon whose vertices are known values
def buildInnerDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()]
outerPointsList = selectPolygonOuterPoints(coordinatesWithValues)
verticesList = orderPolygonVertices(outerPointsList)
expiryVertices, tenorVectices = zip(*verticesList)
return expiryVertices, tenorVectices
#Select swaption coordinates (expiry, tenor) whose value is known
#and their coordinate corresponds to maximum/minimum value for x axis and y axis
#This defines a quadrilateral
def buildOuterDomainCompletion(incompleteSurface, coordinates):
coordinatesWithValues = coordinates.loc[~incompleteSurface.isna()].values
firstDimValues = list(map(lambda x : x[0], coordinatesWithValues))
secondDimValues = list(map(lambda x : x[1], coordinatesWithValues))
maxExpiry = np.amax(firstDimValues)
minExpiry = np.nanmin(firstDimValues)
maxTenor = np.amax(secondDimValues)
minTenor = np.nanmin(secondDimValues)
expiryVertices = [maxExpiry, maxExpiry, minExpiry, minExpiry, maxExpiry]
tenorVectices = [maxTenor, minTenor, minTenor, maxTenor, maxTenor]
return expiryVertices, tenorVectices
#verticesList : list of vertices defining the polygon
#Points : multiIndex serie for which we want to check the coordinates belongs to the domain defined by the polygon
#Use Winding number algorithm
def areInPolygon(verticesList, points):
return pd.Series(points.map(lambda p : point_in_polygon.wn_PnPoly(p, verticesList) != 0).values,
index = points.index)
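# Self-contained illustration of the winding-number test with a unit square
# (synthetic points, unrelated to any market data).
def _polygon_example():
    square = [(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)]
    pts = pd.Series([(0.5, 0.5), (2.0, 2.0)])
    return areInPolygon(square, pts)   # expected: True for the first point, False for the second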
#Return the list (pandas Dataframe) of points which are located in the domain (as a closed set)
#The closure ( i.e. edge of the domain ) is also returned
#defined by points which are not masked
def areInInnerPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildInnerDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#Return the list (pandas Dataframe) of points which are located in the outer domain (as a closed set)
#Outer domain is delimited by the maximum and minimum coordinates of the known values
#inner domain is delimited by the polygon whose vertices are the known points
#showDomain plots the boundary ( i.e. edge of the domain ) and the points which are inside the quadrilateral
def areInOuterPolygon(incompleteSurface, coordinates, showDomain = False):
#Add the frontier
gridPoints = coordinates.loc[~incompleteSurface.isna()]
#Build polygon from the frontier
expiriesPolygon, tenorsPolygon = buildOuterDomainCompletion(incompleteSurface, coordinates)
polygon = list(zip(expiriesPolygon,tenorsPolygon))
#Search among masked points which ones lie inside the polygon
maskedPoints = getMaskedPoints(incompleteSurface, coordinates)
interiorPoints = areInPolygon(polygon, maskedPoints)
if not interiorPoints.empty :
gridPoints = gridPoints.append(maskedPoints[interiorPoints]).drop_duplicates()
if showDomain :
plt.plot(expiriesPolygon,tenorsPolygon)
plt.xlabel("First dimension")
plt.xlabel("Second dimension")
plt.plot(gridPoints.map(lambda x : x[0]).values,
gridPoints.map(lambda x : x[1]).values,
'ro')
plt.show()
return gridPoints
#######################################################################################################
#Linear interpolation with flat extrapolation
#Assume row are non empty
def interpolateRow(row, coordinates):
definedValues = row.dropna()
if definedValues.size == 1 :
return pd.Series(definedValues.iloc[0] * np.ones_like(row),
index = row.index)
else :
#Flat extrapolation and linear interpolation based on index (Tenor) value
filledRow = row.interpolate(method='index', limit_direction = 'both')
return filledRow
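# Self-contained illustration of the row filler above (synthetic tenors as the index):
# interior gaps are filled linearly in index space, edges are expected to be filled flat.
def _interpolateRow_example():
    row = pd.Series([np.nan, 1.0, np.nan, 3.0, np.nan], index=[1, 2, 5, 10, 20])
    return interpolateRow(row, None)   # expected values: [1.0, 1.0, 1.75, 3.0, 3.0]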
def formatCoordinatesAsArray(coordinateList):
x = np.ravel(list(map(lambda x : x[0], coordinateList)))
y = np.ravel(list(map(lambda x : x[1], coordinateList)))
return np.vstack((x, y)).T
#Linear interpolation combined with Nearest neighbor extrapolation
# drawn from https://github.com/mChataign/DupireNN
def customInterpolator(interpolatedData, formerCoordinates, NewCoordinates):
knownPositions = formatCoordinatesAsArray(formerCoordinates)
xNew = np.ravel(list(map(lambda x : x[0], NewCoordinates)))
yNew = np.ravel(list(map(lambda x : x[1], NewCoordinates)))
# print(type(xNew))
# print(type(yNew))
# print(np.array((xNew, yNew)).T.shape)
# print(type(interpolatedData))
# print(type(knownPositions))
# print()
fInterpolation = griddata(knownPositions,
np.ravel(interpolatedData),
np.array((xNew, yNew)).T,
method = 'linear',
rescale=True)
fExtrapolation = griddata(knownPositions,
np.ravel(interpolatedData),
| np.array((xNew, yNew)) | numpy.array |
import importlib
from PyQt5.QtWidgets import QWidget, QApplication, QPushButton, QLabel, QLineEdit, QVBoxLayout, QMessageBox, QCheckBox, \
QComboBox, QListWidget, QDialog, QFileDialog, QAbstractItemView, QSplitter, QSizePolicy, QAbstractScrollArea, QHBoxLayout, QTextEdit, QShortcut,\
QProgressDialog, QDesktopWidget, QSlider, QTabWidget, QMenuBar, QAction, QTableWidgetSelectionRange, QProgressBar, QMenu, QTableWidgetItem, QTreeWidgetItem
from PyQt5.QtGui import QKeySequence, QFont, QDoubleValidator, QIntValidator
from PyQt5.QtCore import Qt, QProcess
from PyQt5 import uic
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import webbrowser, shutil
from docx import Document
import os
import glob
import sys
import pyqtgraph as pg
from pyqtgraph.dockarea import DockArea, Dock
from PlotWidget import PlotWidget
import copy
import numpy as np
from Data_Dialog import Data_Dialog
# from readData import read1DSAXS
from importlib import import_module, reload
from Fit_Routines import Fit
from tabulate import tabulate
import corner
import numbers
import time
import shutil
from FunctionEditor import FunctionEditor
from MultiInputDialog import MultiInputDialog
import traceback
import pandas as pd
from functools import partial
import pylab as pl
from scipy.stats import chi2
from scipy.interpolate import interp1d
import math
from mplWidget import MplWidget
import statsmodels.api as sm
import Chemical_Formula
import Structure_Factors
import utils
import xraydb
class minMaxDialog(QDialog):
def __init__(self, value, vary=0, minimum=None, maximum=None, expr=None, brute_step=None, parent=None, title=None):
QDialog.__init__(self, parent)
self.value = value
self.vary = vary
if minimum is None:
self.minimum = -np.inf
else:
self.minimum = minimum
if maximum is None:
self.maximum = np.inf
else:
self.maximum = maximum
self.expr = expr
self.brute_step = brute_step
self.createUI()
if title is not None:
self.setWindowTitle(title)
def createUI(self):
self.vblayout = QVBoxLayout(self)
self.layoutWidget = pg.LayoutWidget()
self.vblayout.addWidget(self.layoutWidget)
valueLabel = QLabel('Value')
self.layoutWidget.addWidget(valueLabel)
self.layoutWidget.nextColumn()
self.valueLineEdit = QLineEdit(str(self.value))
self.layoutWidget.addWidget(self.valueLineEdit)
self.layoutWidget.nextRow()
varyLabel = QLabel('Fit')
self.layoutWidget.addWidget(varyLabel)
self.layoutWidget.nextColumn()
self.varyCheckBox = QCheckBox()
self.layoutWidget.addWidget(self.varyCheckBox)
if self.vary>0:
self.varyCheckBox.setCheckState(Qt.Checked)
else:
self.varyCheckBox.setCheckState(Qt.Unchecked)
self.layoutWidget.nextRow()
minLabel = QLabel('Minimum')
self.layoutWidget.addWidget(minLabel)
self.layoutWidget.nextColumn()
self.minimumLineEdit = QLineEdit(str(self.minimum))
self.layoutWidget.addWidget(self.minimumLineEdit)
self.layoutWidget.nextRow()
maxLabel = QLabel('Maximum')
self.layoutWidget.addWidget(maxLabel)
self.layoutWidget.nextColumn()
self.maximumLineEdit = QLineEdit(str(self.maximum))
self.layoutWidget.addWidget(self.maximumLineEdit)
self.layoutWidget.nextRow()
exprLabel = QLabel('Expr')
self.layoutWidget.addWidget(exprLabel)
self.layoutWidget.nextColumn()
self.exprLineEdit = QLineEdit(str(self.expr))
self.layoutWidget.addWidget(self.exprLineEdit)
self.layoutWidget.nextRow()
bruteStepLabel = QLabel('Brute step')
self.layoutWidget.addWidget(bruteStepLabel)
self.layoutWidget.nextColumn()
self.bruteStepLineEdit = QLineEdit(str(self.brute_step))
self.layoutWidget.addWidget(self.bruteStepLineEdit)
self.layoutWidget.nextRow()
self.cancelButton = QPushButton('Cancel')
self.cancelButton.clicked.connect(self.cancelandClose)
self.layoutWidget.addWidget(self.cancelButton)
self.layoutWidget.nextColumn()
self.okButton = QPushButton('OK')
self.okButton.clicked.connect(self.okandClose)
self.layoutWidget.addWidget(self.okButton)
self.okButton.setDefault(True)
def okandClose(self):
# try:
if type(eval(self.valueLineEdit.text())*1.0)==float:
self.value = float(self.valueLineEdit.text())
else:
QMessageBox.warning(self, 'Value Error',
'Please enter floating point number for Value', QMessageBox.Ok)
self.valueLineEdit.setText(str(self.value))
return
if self.varyCheckBox.checkState() == Qt.Checked:
self.vary = 1
else:
self.vary = 0
minimum=self.minimumLineEdit.text()
if '-inf' in minimum:
self.minimum=-np.inf
elif type(eval(self.minimumLineEdit.text())*1.0)==float:
self.minimum=float(self.minimumLineEdit.text())
else:
QMessageBox.warning(self,'Value Error',
'Please enter floating point number for Minimum value',QMessageBox.Ok)
self.minimumLineEdit.setText(str(self.minimum))
return
maximum = self.maximumLineEdit.text()
if 'inf' in maximum:
self.maximum=np.inf
elif type(eval(self.maximumLineEdit.text())*1.0)==float:
self.maximum = float(self.maximumLineEdit.text())
else:
QMessageBox.warning(self, 'Value Error',
'Please enter floating point number for Maximum value', QMessageBox.Ok)
self.maximumLineEdit.setText(str(self.maximum))
return
self.expr=self.exprLineEdit.text()
if self.expr != 'None':
self.vary=0
if self.bruteStepLineEdit.text() != 'None':
self.brute_step = float(self.bruteStepLineEdit.text())
else:
self.brute_step = None
self.accept()
# except:
# QMessageBox.warning(self,'Value Error','Value, Min, Max should be floating point numbers\n\n'+traceback.format_exc(),QMessageBox.Ok)
def cancelandClose(self):
self.reject()
class FitResultDialog(QDialog):
def __init__(self,fit_report,fit_info,parent=None):
QDialog.__init__(self,parent)
self.setWindowTitle('Fit Results')
self.fit_report=fit_report
self.fit_info=fit_info
self.createUI()
self.resize(600,400)
def createUI(self):
self.vblayout=QVBoxLayout(self)
self.layoutWidget=pg.LayoutWidget()
self.vblayout.addWidget(self.layoutWidget)
fitReportLabel=QLabel('Fit Report')
self.layoutWidget.addWidget(fitReportLabel,colspan=2)
self.layoutWidget.nextRow()
self.fitReportTextEdit=QTextEdit()
self.fitReportTextEdit.setText(self.fit_report)
self.layoutWidget.addWidget(self.fitReportTextEdit,colspan=2)
self.layoutWidget.nextRow()
fitInfoLabel=QLabel('Fit Info')
self.layoutWidget.addWidget(fitInfoLabel,colspan=2)
self.layoutWidget.nextRow()
self.fitInfoTextEdit=QTextEdit()
self.fitInfoTextEdit.setText(self.fit_info)
self.layoutWidget.addWidget(self.fitInfoTextEdit,colspan=2)
self.layoutWidget.nextRow()
self.cancelButton=QPushButton('Reject')
self.cancelButton.clicked.connect(self.cancelandClose)
self.layoutWidget.addWidget(self.cancelButton,col=0)
self.okButton=QPushButton('Accept')
self.okButton.clicked.connect(self.okandClose)
self.layoutWidget.addWidget(self.okButton,col=1)
self.okButton.setDefault(True)
def okandClose(self):
self.accept()
def cancelandClose(self):
self.reject()
class XModFit(QWidget):
"""
This widget class is developed to provide an end-user a *Graphical User Interface* by which either they can \
develop their own fitting functions in python or use the existing fitting functions under different categories\
to analyze different kinds of one-dimensional data sets. `LMFIT <https://lmfit.github.io/lmfit-py/>`_ is extensively\
used within this widget.
**Features**
1. Read and fit multiple data files
2. Already available functions are categorized as per the function types and techniques
3. Easy to add more catergories and user-defined functions
4. Once the function is defined properly all the free and fitting parameters will be available within the GUI as tables.
5. An in-built Function editor with a function template is provided.
6. The function editor is enabled with python syntax highlighting.
**Usage**
:class:`Fit_Widget` can be used as stand-alone python fitting package by running it in terminal as::
$python xmodfit.py
.. figure:: Figures/Fit_widget.png
:figwidth: 100%
**Fit Widget** in action.
Also it can be used as a widget with any other python application.
"""
def __init__(self,parent=None):
QWidget.__init__(self,parent)
self.vblayout=QVBoxLayout(self)
self.menuBar = QMenuBar(self)
self.menuBar.setNativeMenuBar(False)
self.create_menus()
self.vblayout.addWidget(self.menuBar,0)
self.mainDock=DockArea(self,parent)
self.vblayout.addWidget(self.mainDock,5)
self.emcee_walker = 100
self.emcee_steps = 100
self.emcee_burn = 0
self.emcee_thin = 1
self.emcee_cores = 1
self.emcee_frac = self.emcee_burn/self.emcee_steps
self.reuse_sampler = False
self.funcDock=Dock('Functions',size=(1,6),closable=False,hideTitle=False)
self.fitDock=Dock('Fit options',size=(1,2),closable=False,hideTitle=False)
self.dataDock=Dock('Data',size=(1,8),closable=False,hideTitle=False)
self.paramDock=Dock('Parameters',size=(2,8),closable=False,hideTitle=False)
self.plotDock=Dock('Data and Fit',size=(5,8),closable=False,hideTitle=False)
self.fitResultDock=Dock('Fit Results',size=(5,8),closable=False,hideTitle=False)
self.mainDock.addDock(self.dataDock)
self.mainDock.addDock(self.fitDock,'bottom')
self.mainDock.addDock(self.paramDock,'right')
self.mainDock.addDock(self.fitResultDock, 'right')
self.mainDock.addDock(self.plotDock,'above',self.fitResultDock)
self.mainDock.addDock(self.funcDock,'above',self.dataDock)
self.special_keys=['x','params','choices','output_params','__mpar__']
self.curr_funcClass={}
self.data={}
self.dlg_data={}
self.plotColIndex={}
self.plotColors={}
self.curDir=os.getcwd()
self.fileNumber=0
self.fileNames={}
self.fchanged=True
self.chisqr='None'
self.format='%.6e'
self.gen_param_items=[]
self.doubleValidator=QDoubleValidator()
self.intValidator=QIntValidator()
self.tApp_Clients={}
self.tModules={}
self.fitMethods={'Levenberg-Marquardt':'leastsq',
'Scipy-Least-Squares':'least_squares',
'Differential-Evolution': 'differential_evolution'}
# 'Brute-Force-Method':'brute',
# 'Nelder-Mead':'nelder',
# 'L-BFGS-B':'lbfgsb',
# 'Powell':'powell',
# 'Congugate-Gradient':'cg',
# 'Newton-CG-Trust-Region':'trust-ncg',
# 'COBLYA':'cobyla',
# 'Truncate-Newton':'tnc',
# 'Exact-Trust-Region':'trust-exact',
# 'Dogleg':'dogleg',
# 'Sequential-Linear-Square':'slsqp',
# 'Adaptive-Memory-Programming':'ampgo',
# 'Maximum-Likelihood-MC-Markov-Chain':'emcee'}
#
self.create_funcDock()
self.create_fitDock()
self.create_dataDock()
self.create_plotDock()
self.create_fitResultDock()
self.update_catagories()
self.create_paramDock()
# self.xminmaxChanged()
self.sfnames=None
self.expressions={}
def create_menus(self):
self.fileMenu = self.menuBar.addMenu('&File')
self.settingsMenu = self.menuBar.addMenu('&Settings')
self.toolMenu = self.menuBar.addMenu('&Tools')
self.helpMenu = self.menuBar.addMenu('&Help')
quit=QAction('Quit',self)
quit.triggered.connect(self.close)
self.fileMenu.addAction(quit)
parFormat=QAction('&Parameter format',self)
parFormat.triggered.connect(self.changeParFormat)
self.settingsMenu.addAction(parFormat)
about=QAction('&About',self)
about.triggered.connect(self.aboutDialog)
self.helpMenu.addAction(about)
toolItems=os.listdir(os.path.join(os.curdir,'Tools'))
self.toolDirs=[]
self.toolApps={}
for item in toolItems:
if '__' not in item:
self.toolDirs.append(self.toolMenu.addMenu('&%s'%item))
tApps=glob.glob(os.path.join(os.curdir,'Tools',item,'*.py'))
for app in tApps:
tname='&'+os.path.basename(os.path.splitext(app)[0])
self.toolApps[tname]=app
tApp=QAction(tname,self)
tApp.triggered.connect(self.launch_tApp)
self.toolDirs[-1].addAction(tApp)
def changeParFormat(self):
dlg=MultiInputDialog(inputs={'Format':self.format},title='Parameter format')
if dlg.exec_():
self.format=dlg.inputs['Format']
try:
self.update_sfit_parameters()
self.update_mfit_parameters_new()
except:
pass
def launch_tApp(self):
tname=self.sender().text()
module_name=".".join(os.path.splitext(self.toolApps[tname])[0].split(os.sep)[1:])
if module_name not in sys.modules:
self.tModules[module_name]=importlib.import_module(module_name)
tmodule=self.tModules[module_name]
if tmodule in self.tApp_Clients:
self.tApp_Clients[tmodule].show()
else:
tclass = getattr(tmodule, tname[1:])
self.tApp_Clients[tmodule]=tclass(self)
self.tApp_Clients[tmodule].setWindowTitle(tname[1:])
self.tApp_Clients[tmodule].show()
# if tname not in self.tApp_Clients or self.tApp_Clients[tname].pid() is None:
# self.tApp_Clients[tname]=QProcess()
# self.tApp_Clients[tname].start('python '+self.toolApps[tname])
# elif self.tApp_Clients[tname].pid()>0:
# QMessageBox.warning(self,'Running...','The tool %s is already running'%tname,QMessageBox.Ok)
# else:
# self.tApp_Clients[tname].start('python ' + self.toolApps[tname])
def aboutDialog(self):
QMessageBox.information(self,'About','Copyright (c) 2021 NSF\'s ChemMAtCARS, University of Chicago.\n\n'
'Developers:\n'
'<NAME> (<EMAIL>)\n'
'<NAME> (<EMAIL>)'
,QMessageBox.Ok)
def create_funcDock(self):
self.funcLayoutWidget=pg.LayoutWidget(self)
row=0
col=0
funcCategoryLabel=QLabel('Function Categories:')
self.funcLayoutWidget.addWidget(funcCategoryLabel,row=row,col=col,colspan=2)
row+=1
col=0
self.addCategoryButton=QPushButton('Create')
self.addCategoryButton.clicked.connect(self.addCategory)
self.funcLayoutWidget.addWidget(self.addCategoryButton,row=row,col=col)
col+=1
self.removeCategoryButton=QPushButton('Remove')
self.removeCategoryButton.clicked.connect(self.removeCategory)
self.funcLayoutWidget.addWidget(self.removeCategoryButton,row=row,col=col)
row+=1
col=0
self.categoryListWidget=QListWidget()
self.categoryListWidget.currentItemChanged.connect(self.update_functions)
self.funcLayoutWidget.addWidget(self.categoryListWidget,row=row,col=col,colspan=2)
row+=1
col=0
funcLabel=QLabel('Functions:')
self.funcLayoutWidget.addWidget(funcLabel,row=row,col=col,colspan=2)
row+=1
col=0
self.addFuncButton=QPushButton('Create')
self.addFuncButton.clicked.connect(self.addFunction)
self.funcLayoutWidget.addWidget(self.addFuncButton,row=row,col=col)
col+=1
self.removeFuncButton=QPushButton('Remove')
self.removeFuncButton.clicked.connect(self.removeFunction)
self.funcLayoutWidget.addWidget(self.removeFuncButton,row=row,col=col)
row+=1
col=0
self.funcListWidget=QListWidget()
self.funcListWidget.setSelectionMode(4)
self.funcListWidget.setContextMenuPolicy(Qt.CustomContextMenu)
self.funcListWidget.customContextMenuRequested.connect(self.funcListRightClicked)
self.funcListWidget.itemSelectionChanged.connect(self.functionChanged)
self.funcListWidget.itemDoubleClicked.connect(self.openFunction)
self.funcLayoutWidget.addWidget(self.funcListWidget,row=row,col=col,colspan=2)
self.funcDock.addWidget(self.funcLayoutWidget)
def funcListRightClicked(self,pos):
popMenu = QMenu()
showDet = QAction("Show Details", self)
addDet = QAction("Upload Details", self)
modDet = QAction("Create/Modify Details", self)
popMenu.addAction(showDet)
popMenu.addAction(addDet)
popMenu.addAction(modDet)
showDet.triggered.connect(self.showDetails)
addDet.triggered.connect(self.addDetails)
modDet.triggered.connect(self.modifyDetails)
popMenu.exec_(self.funcListWidget.mapToGlobal(pos))
def showDetails(self):
url = os.path.join(os.path.curdir, 'Function_Details', self.categoryListWidget.currentItem().text(),
self.funcListWidget.currentItem().text(),'help.pdf')
if os.path.exists(url):
webbrowser.open_new_tab(url)
else:
QMessageBox.warning(self,'File Error','The help files regarding the function details do not exist.',QMessageBox.Ok)
# os.system('C:/Users/mrinalkb/Desktop/ESH738.pdf')
def addDetails(self):
path=os.path.join(os.path.curdir,'Function_Details',self.categoryListWidget.currentItem().text(),self.funcListWidget.currentItem().text())
if os.path.exists(path):
fname = QFileDialog.getOpenFileName(self,caption='Select help file',directory=self.curDir,filter="Help files (*.docx *.pdf)")[0]
tfname=os.path.join(path,'help'+os.path.splitext(fname)[1])
shutil.copy(fname,tfname)
else:
os.makedirs(path)
def modifyDetails(self):
category=self.categoryListWidget.currentItem().text()
function=self.funcListWidget.currentItem().text()
path = os.path.join(os.path.curdir, 'Function_Details', category,
function,'help.docx')
if os.path.exists(path):
webbrowser.open_new_tab(path)
else:
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
doc=Document()
doc.add_heading('Details of %s/%s'%(category,function),0)
module = 'Functions.%s.%s' % (category,function)
text=getattr(self.curr_funcClass[module], function).__init__.__doc__
doc.add_paragraph(text)
doc.save(path)
webbrowser.open_new_tab(path)
def addCategory(self):
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
tdir=QFileDialog.getExistingDirectory(self,'Select a folder','./Functions/',QFileDialog.ShowDirsOnly)
if tdir!='':
cdir=os.path.basename(os.path.normpath(tdir))
fh=open(os.path.join(tdir,'__init__.py'),'w')
fh.write('__all__=[]')
fh.close()
if cdir not in self.categories:
self.categories.append(cdir)
self.categoryListWidget.addItem(cdir)
else:
                QMessageBox.warning(self,'Category error','Category already exists!',QMessageBox.Ok)
def removeCategory(self):
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
self.funcListWidget.clear()
if len(self.categoryListWidget.selectedItems())==1:
ans=QMessageBox.question(self,'Delete warning','Are you sure you would like to delete the category?',
QMessageBox.No,QMessageBox.Yes)
if ans==QMessageBox.Yes:
category=os.path.abspath('./Functions/%s'%self.categoryListWidget.currentItem().text())
#os.rename(category,)
shutil.rmtree(category)
self.categories.remove(self.categoryListWidget.currentItem().text())
self.categoryListWidget.takeItem(self.categoryListWidget.currentRow())
elif len(self.categoryListWidget.selectedItems())>1:
QMessageBox.warning(self,'Warning','Please select only one category at a time to remove',QMessageBox.Ok)
else:
            QMessageBox.warning(self,'Warning','Please select at least one category to remove',QMessageBox.Ok)
def openFunction(self):
dirName=os.path.abspath('./Functions/%s'%self.categoryListWidget.currentItem().text())
funcName=self.funcListWidget.currentItem().text()
try:
if not self.funcEditor.open:
self.funcEditor=FunctionEditor(funcName=funcName,dirName=dirName)
self.funcEditor.setWindowTitle('Function editor')
self.funcEditor.show()
self.funcOpen=self.funcEditor.open
self.funcEditor.closeEditorButton.clicked.connect(self.postAddFunction)
else:
QMessageBox.warning(self,'Warning','You cannot edit two functions together',QMessageBox.Ok)
except:
self.funcEditor=FunctionEditor(funcName=funcName,dirName=dirName)
self.funcEditor.setWindowTitle('Function editor')
self.funcEditor.show()
self.funcEditor.closeEditorButton.clicked.connect(self.postAddFunction)
def addFunction(self):
if len(self.categoryListWidget.selectedItems())==1:
dirName=os.path.abspath('./Functions/%s'%self.categoryListWidget.currentItem().text())
self.funcEditor=FunctionEditor(dirName=dirName)
self.funcEditor.setWindowTitle('Function editor')
self.funcEditor.show()
self.funcEditor.closeEditorButton.clicked.connect(self.postAddFunction)
else:
QMessageBox.warning(self,'Category Error','Please select a Category first',QMessageBox.Ok)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def postAddFunction(self):
if self.funcEditor.funcNameLineEdit.text()!='tmpxyz':
dirName=os.path.abspath('./Functions/%s'%self.categoryListWidget.currentItem().text())
fh=open(os.path.join(dirName,'__init__.py'),'r')
line=fh.readlines()
fh.close()
funcList=eval(line[0].split('=')[1])
funcName=self.funcEditor.funcNameLineEdit.text()
if funcName not in funcList:
funcList.append(funcName)
funcList=sorted(list(set(funcList)),key=str.lower)
os.remove(os.path.join(dirName,'__init__.py'))
fh=open(os.path.join(dirName,'__init__.py'),'w')
fh.write('__all__='+str(funcList))
fh.close()
self.update_functions()
def removeFunction(self):
if len(self.funcListWidget.selectedItems())==1:
            ans=QMessageBox.question(self,'Warning','Are you sure you would like to remove the function?',
                                     QMessageBox.No,QMessageBox.Yes)
if ans==QMessageBox.Yes:
dirName=os.path.abspath('./Functions/%s'%self.categoryListWidget.currentItem().text())
fname=self.funcListWidget.currentItem().text()
fh=open(os.path.join(dirName,'__init__.py'),'r')
line=fh.readlines()
fh.close()
funcList=eval(line[0].split('=')[1])
try:
os.remove(os.path.join(dirName,fname+'.py'))
                    os.remove(os.path.join(dirName,'__init__.py'))
                    # Drop the removed function from __all__ before rewriting __init__.py
                    if fname in funcList:
                        funcList.remove(fname)
                    fh=open(os.path.join(dirName,'__init__.py'),'w')
                    fh.write('__all__='+str(funcList))
fh.close()
self.update_functions()
except:
QMessageBox.warning(self,'Remove error','Cannot remove the function because the function file\
might be open elsewhere.\n\n'+traceback.format_exc(),QMessageBox.Ok)
elif len(self.funcListWidget.selectedItems())>1:
QMessageBox.warning(self,'Warning','Please select only one function at a time to remove',QMessageBox.Ok)
else:
            QMessageBox.warning(self,'Warning','Please select at least one function to remove',QMessageBox.Ok)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def create_dataDock(self):
self.dataLayoutWidget=pg.LayoutWidget(self)
datafileLabel=QLabel('Data files')
self.dataLayoutWidget.addWidget(datafileLabel,colspan=2)
self.dataLayoutWidget.nextRow()
self.addDataButton=QPushButton('Add files')
self.dataLayoutWidget.addWidget(self.addDataButton)
self.addDataButton.clicked.connect(lambda x: self.addData())
self.removeDataButton=QPushButton('Remove Files')
self.dataLayoutWidget.addWidget(self.removeDataButton,col=1)
self.removeDataButton.clicked.connect(self.removeData)
self.removeDataShortCut = QShortcut(QKeySequence.Delete, self)
self.removeDataShortCut.activated.connect(self.removeData)
self.dataLayoutWidget.nextRow()
self.dataListWidget=QListWidget()
self.dataListWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.dataListWidget.itemSelectionChanged.connect(self.dataFileSelectionChanged)
self.dataListWidget.itemDoubleClicked.connect(self.openDataDialog)
self.dataLayoutWidget.addWidget(self.dataListWidget,colspan=2)
self.dataDock.addWidget(self.dataLayoutWidget)
def create_fitDock(self):
self.fitLayoutWidget=pg.LayoutWidget(self)
xminmaxLabel = QLabel('Xmin:Xmax')
self.fitLayoutWidget.addWidget(xminmaxLabel)
self.xminmaxLineEdit = QLineEdit('0:1')
self.xminmaxLineEdit.returnPressed.connect(self.xminmaxChanged)
self.fitLayoutWidget.addWidget(self.xminmaxLineEdit, col=1)
self.fitLayoutWidget.nextRow()
fitMethodLabel = QLabel('Fit Method')
self.fitLayoutWidget.addWidget(fitMethodLabel)
self.fitMethodComboBox = QComboBox()
self.fitMethodComboBox.addItems(list(self.fitMethods.keys()))
self.fitLayoutWidget.addWidget(self.fitMethodComboBox, col=1)
self.fitLayoutWidget.nextRow()
fitScaleLabel = QLabel('Fit Scale')
self.fitLayoutWidget.addWidget(fitScaleLabel)
self.fitScaleComboBox = QComboBox()
self.fitScaleComboBox.addItems(['Linear', 'Linear w/o error', 'Log', 'Log w/o error'])
self.fitLayoutWidget.addWidget(self.fitScaleComboBox, col=1)
self.fitLayoutWidget.nextRow()
fitIterationLabel = QLabel('Fit Iterations')
self.fitLayoutWidget.addWidget(fitIterationLabel)
self.fitIterationLineEdit = QLineEdit('1000')
self.fitLayoutWidget.addWidget(self.fitIterationLineEdit, col=1)
self.fitLayoutWidget.nextRow()
self.fitButton = QPushButton('Fit')
self.fitButton.clicked.connect(lambda x: self.doFit())
self.fitButton.setEnabled(False)
self.unfitButton = QPushButton('Undo fit')
self.unfitButton.clicked.connect(self.undoFit)
self.fitLayoutWidget.addWidget(self.unfitButton)
self.fitLayoutWidget.addWidget(self.fitButton, col=1)
self.fitLayoutWidget.nextRow()
confIntervalMethodLabel=QLabel('Confidence Interval Method')
self.confIntervalMethodComboBox=QComboBox()
self.confIntervalMethodComboBox.addItems(['ChiSqrDist', 'MCMC'])
self.fitLayoutWidget.addWidget(confIntervalMethodLabel)
self.fitLayoutWidget.addWidget(self.confIntervalMethodComboBox,col=1)
self.fitLayoutWidget.nextRow()
self.showConfIntervalButton = QPushButton('Show Param Error')
self.showConfIntervalButton.setDisabled(True)
self.showConfIntervalButton.clicked.connect(self.confInterval_emcee)
self.calcConfInterButton = QPushButton('Calculate Param Error')
self.calcConfInterButton.clicked.connect(self.calcConfInterval)
self.calcConfInterButton.setDisabled(True)
self.fitLayoutWidget.addWidget(self.showConfIntervalButton)
self.fitLayoutWidget.addWidget(self.calcConfInterButton, col=1)
self.fitDock.addWidget(self.fitLayoutWidget)
def dataFileSelectionChanged(self):
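        # Collects the currently selected data files, rebuilds the list of plot names, resets the
        # fitting x-range from the min/max of the selected data, and enables the Fit button only
        # when at least one dataset is selected.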
self.sfnames=[]
self.pfnames=[]
for item in self.dataListWidget.selectedItems():
self.sfnames.append(item.text())
txt=item.text()
self.pfnames=self.pfnames+[txt.split('<>')[0]+':'+key for key in self.data[txt].keys()]
if len(self.sfnames)>0:
self.curDir = os.path.dirname(self.sfnames[-1].split('<>')[1])
xmin=np.min([np.min([np.min(self.data[key][k1]['x']) for k1 in self.data[key].keys()]) for key in self.sfnames])
xmax=np.max([np.max([np.max(self.data[key][k1]['x']) for k1 in self.data[key].keys()]) for key in self.sfnames])
self.xminmaxLineEdit.setText('%0.3f:%0.3f'%(xmin,xmax))
self.xminmaxChanged()
# if len(self.data[self.sfnames[-1]].keys())>1:
# text='{'
# for key in self.data[self.sfnames[-1]].keys():
# text+='"'+key+'":np.linspace(%.3f,%.3f,%d),'%(xmin,xmax,100)
# text=text[:-1]+'}'
# else:
# text='np.linspace(%.3f,%.3f,100)'%(xmin,xmax)
# self.xLineEdit.setText(text)
self.fitButton.setEnabled(True)
else:
self.fitButton.setDisabled(True)
try:
self.update_plot()
except:
pass
# self.update_plot()
# self.xChanged()
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def openDataDialog(self,item):
fnum,fname=item.text().split('<>')
self.dataListWidget.itemSelectionChanged.disconnect()
data_dlg=Data_Dialog(data=self.dlg_data[item.text()],parent=self,expressions=self.expressions[item.text()],plotIndex=self.plotColIndex[item.text()],colors=self.plotColors[item.text()])
data_dlg.setModal(True)
data_dlg.closePushButton.setText('Cancel')
data_dlg.tabWidget.setCurrentIndex(1)
data_dlg.dataFileLineEdit.setText(fname)
if data_dlg.exec_():
self.plotWidget.remove_data(datanames=self.pfnames)
newFname=data_dlg.dataFileLineEdit.text()
if fname==newFname:
self.plotColIndex[item.text()]=data_dlg.plotColIndex
self.plotColors[item.text()]=data_dlg.plotColors
self.dlg_data[item.text()]=copy.copy(data_dlg.data)
self.data[item.text()]=copy.copy(data_dlg.externalData)
self.expressions[item.text()]=data_dlg.expressions
for key in self.data[item.text()].keys():
self.plotWidget.add_data(self.data[item.text()][key]['x'],self.data[item.text()][key]['y'],yerr=self.data[item.text()][key]['yerr'],name='%s:%s'%(fnum,key),color=self.plotColors[item.text()][key])
else:
text = '%s<>%s' % (fnum, newFname)
self.data[text] = self.data.pop(item.text())
self.dlg_data[text] = self.dlg_data.pop(item.text())
item.setText(text)
self.dlg_data[text]=copy.copy(data_dlg.data)
self.data[text]=copy.copy(data_dlg.externalData)
self.plotColIndex[text]=data_dlg.plotColIndex
self.plotColors[text]=data_dlg.plotColors
self.expressions[text]=data_dlg.expressions
for key in self.data[text].keys():
self.plotWidget.add_data(self.data[text][key]['x'], self.data[text][key]['y'], yerr=self.data[text][key][
'yerr'],name='%s:%s'%(fnum,key),color=self.plotColors[text][key])
# self.sfnames = []
# self.pfnames = []
# for item in self.dataListWidget.selectedItems():
# self.sfnames.append(item.text())
# txt=item.text()
# self.pfnames=self.pfnames+[txt.split('<>')[0]+':'+key for key in self.data[txt].keys()]
self.dataFileSelectionChanged()
# self.xChanged()
self.dataListWidget.itemSelectionChanged.connect(self.dataFileSelectionChanged)
#self.update_plot()
def xminmaxChanged(self):
try:
xmin,xmax=self.xminmaxLineEdit.text().split(':')
self.xmin, self.xmax=float(xmin),float(xmax)
self.update_plot()
except:
QMessageBox.warning(self,"Value Error", "Please supply the Xrange in this format:\n xmin:xmax",QMessageBox.Ok)
def doFit(self, fit_method=None, emcee_walker=100, emcee_steps=100,
emcee_cores=1, reuse_sampler=False, emcee_burn=30):
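        # High-level fitting driver: validates that data, a function and at least one fitting
        # parameter are selected, pushes the selected x/y/yerr into self.fit, runs the chosen
        # minimizer (or emcee sampling), and then either updates the parameter tables and saves
        # the fitted curve (on acceptance) or reverts to the pre-fit parameters via undoFit().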
self.tchisqr=1e30
self.xminmaxChanged()
if self.sfnames is None or self.sfnames==[]:
QMessageBox.warning(self,'Data Error','Please select a dataset first before fitting',QMessageBox.Ok)
return
try:
if len(self.fit.fit_params)>0:
pass
else:
                QMessageBox.warning(self, 'Fit Warning', 'Please select at least one parameter to fit', QMessageBox.Ok)
return
except:
QMessageBox.warning(self, 'Fit Function Warning', 'Please select a function to fit', QMessageBox.Ok)
return
if len(self.funcListWidget.selectedItems())==0:
QMessageBox.warning(self, 'Function Error',
'Please select a function first to fit.\n' + traceback.format_exc(), QMessageBox.Ok)
return
# try:
# self.fixedParamTableWidget.cellChanged.disconnect(self.fixedParamChanged)
# self.sfitParamTableWidget.cellChanged.disconnect(self.sfitParamChanged)
# self.mfitParamTableWidget.cellChanged.disconnect(self.mfitParamChanged)
# except:
# QMessageBox.warning(self,'Function Error','Please select a function first to fit.\n'+traceback.format_exc(),QMessageBox.Ok)
# return
if fit_method is None:
self.fit_method=self.fitMethods[self.fitMethodComboBox.currentText()]
else:
self.fit_method=fit_method
if self.fit_method not in ['leastsq','brute','differential_evolution','least_squares','emcee']:
            QMessageBox.warning(self,'Fit Method Warning','This method is under development and will be available '
                                                          'soon. Please use only Levenberg-Marquardt for the time '
                                                          'being.', QMessageBox.Ok)
return
self.fit_scale=self.fitScaleComboBox.currentText()
try:
self.fit.functionCalled.disconnect()
except:
pass
if self.fit_method!='emcee':
self.fit.functionCalled.connect(self.fitCallback)
else:
self.fit.functionCalled.connect(self.fitErrorCallback)
for fname in self.sfnames:
if len(self.data[fname].keys())>1:
x={}
y={}
yerr={}
for key in self.data[fname].keys():
x[key]=self.data[fname][key]['x']
y[key]=self.data[fname][key]['y']
yerr[key]=self.data[fname][key]['yerr']
else:
key=list(self.data[fname].keys())[0]
x=self.data[fname][key]['x']
y=self.data[fname][key]['y']
yerr=self.data[fname][key]['yerr']
# if len(np.where(self.data[fname][key]['yerr']<1e-30)[0])>0:
# QMessageBox.warning(self,'Zero Errorbars','Some or all the errorbars of the selected data are zeros.\
# Please select None for the Errorbar column in the Plot options of the Data_Dialog',QMessageBox.Ok)
# break
# if self.fitScaleComboBox.currentText()=='Log' and len(np.where(self.data[fname]['y']<1e-30)[0])>0:
# posval=np.argwhere(self.fit.y>0)
# self.fit.y=self.data[fname]['y'][posval].T[0]
# self.fit.x=self.data[fname]['x'][posval].T[0]
# self.fit.yerr=self.data[fname]['yerr'][posval].T[0]
self.fit.set_x(x,y=y,yerr=yerr)
#self.update_plot()
self.oldParams=copy.copy(self.fit.params)
self.fit_stopped=False
if self.fit.params['__mpar__']!={}:
self.oldmpar=copy.deepcopy(self.mfitParamData)
try:
self.showFitInfoDlg(emcee_walker=emcee_walker,emcee_steps=emcee_steps, emcee_burn = emcee_burn)
self.runFit(emcee_walker=emcee_walker, emcee_steps=emcee_steps, emcee_burn=emcee_burn,
emcee_cores=emcee_cores, reuse_sampler=reuse_sampler)
if self.fit_stopped:
self.fit.result.params = self.temp_params
#self.fit_report,self.fit_message=self.fit.perform_fit(self.xmin,self.xmax,fit_scale=self.fit_scale,\
# fit_method=self.fit_method,callback=self.fitCallback)
self.fit_info='Fit Message: %s\n'%self.fit_message
self.closeFitInfoDlg()
if self.fit_method != 'emcee':
self.errorAvailable=False
self.emcee_burn=0
self.emcee_steps=100
self.emcee_frac=self.emcee_burn/self.emcee_steps
self.showConfIntervalButton.setDisabled(True)
self.fit.functionCalled.disconnect()
try:
self.sfitParamTableWidget.cellChanged.disconnect()
for i in range(self.mfitParamTabWidget.count()):
mkey = self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
for row in range(self.sfitParamTableWidget.rowCount()):
key=self.sfitParamTableWidget.item(row,0).text()
self.sfitParamTableWidget.item(row,1).setText(self.format%(self.fit.result.params[key].value))
try:
if self.fit.result.params[key].stderr is None:
self.fit.result.params[key].stderr = 0.0
self.sfitParamTableWidget.item(row, 1).setToolTip(
(key + ' = ' + self.format + ' \u00B1 ' + self.format) % \
(self.fit.result.params[key].value,
self.fit.result.params[key].stderr))
except:
pass
self.sfitParamTableWidget.resizeRowsToContents()
self.sfitParamTableWidget.resizeColumnsToContents()
for i in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(i)
for row in range(self.mfitParamTableWidget[mkey].rowCount()):
for col in range(1,self.mfitParamTableWidget[mkey].columnCount()):
parkey=self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
key='__%s_%s_%03d'%(mkey,parkey,row)
self.mfitParamTableWidget[mkey].item(row,col).setText(self.format%(self.fit.result.params[key].value))
if self.fit.result.params[key].stderr is None:
self.fit.result.params[key].stderr = 0.0
self.mfitParamTableWidget[mkey].item(row, col).setToolTip(
(key + ' = ' + self.format + ' \u00B1 ' + self.format) % \
(self.fit.result.params[key].value,
self.fit.result.params[key].stderr))
self.mfitParamTableWidget[mkey].resizeRowsToContents()
self.mfitParamTableWidget[mkey].resizeColumnsToContents()
self.update_plot()
fitResultDlg=FitResultDialog(fit_report=self.fit_report,fit_info=self.fit_info)
#ans=QMessageBox.question(self,'Accept fit results?',self.fit_report,QMessageBox.Yes, QMessageBox.No)
if fitResultDlg.exec_():
for i in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(i)
for row in range(self.mfitParamTableWidget[mkey].rowCount()):
for col in range(1, self.mfitParamTableWidget[mkey].columnCount()):
parkey = self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
key = '__%s_%s_%03d' % (mkey, parkey, row)
self.mfitParamData[mkey][parkey][row] = self.fit.result.params[key].value
ofname=os.path.splitext(fname.split('<>')[1])[0]
header='Data fitted with model: %s on %s\n'%(self.funcListWidget.currentItem().text(),time.asctime())
header+='Fixed Parameters\n'
header+='----------------\n'
for key in self.fit.params.keys():
if key not in self.fit.fit_params.keys() and key not in self.special_keys and key[:2]!='__':
header+=key+'='+str(self.fit.params[key])+'\n'
header+=self.fit_report+'\n'
header+="col_names=['x','y','yerr','yfit']\n"
header+='x \t y\t yerr \t yfit\n'
if type(self.fit.x)==dict:
for key in self.fit.x.keys():
fitdata=np.vstack((self.fit.x[key][self.fit.imin[key]:self.fit.imax[key]+1],
self.fit.y[key][self.fit.imin[key]:self.fit.imax[key]+1],
self.fit.yerr[key][self.fit.imin[key]:self.fit.imax[key]+1],self.fit.yfit[key])).T
np.savetxt(ofname+'_'+key+'_fit.txt',fitdata,header=header,comments='#')
else:
fitdata = np.vstack((self.fit.x[self.fit.imin:self.fit.imax + 1],
self.fit.y[self.fit.imin:self.fit.imax + 1],
self.fit.yerr[self.fit.imin:self.fit.imax + 1],
self.fit.yfit)).T
np.savetxt(ofname + '_fit.txt', fitdata, header=header, comments='#')
self.calcConfInterButton.setEnabled(True)
self.update_plot()
# self.xChanged()
else:
self.undoFit()
self.calcConfInterButton.setDisabled(True)
self.reuse_sampler=False
else:
self.errorAvailable = True
self.reuse_sampler = True
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setEnabled(True)
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setCheckState(Qt.Checked)
self.fit.functionCalled.disconnect()
self.perform_post_sampling_tasks()
# self.showConfIntervalButton.setEnabled(True)
except:
try:
self.closeFitInfoDlg()
except:
pass
QMessageBox.warning(self,'Minimization failed','Some of the parameters have got unreasonable values.\n'+
traceback.format_exc(),QMessageBox.Ok)
self.update_plot()
break
self.sfitParamTableWidget.cellChanged.connect(self.sfitParamChanged)
for i in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[mkey].cellChanged.connect(self.mfitParamChanged_new)
try:
self.fit.functionCalled.disconnect()
except:
pass
def calcConfInterval(self):
if self.confIntervalMethodComboBox.currentText()=='ChiSqrDist':
self.confInterval_ChiSqrDist()
else:
self.confInterval_emcee()
def confInterval_ChiSqrDist(self):
self.fit_method = self.fitMethods[self.fitMethodComboBox.currentText()]
self.confIntervalWidget=QWidget()
self.confIntervalWidget.setWindowModality(Qt.ApplicationModal)
uic.loadUi('./UI_Forms/ConfInterval_ChiSqrDist.ui',self.confIntervalWidget)
self.confIntervalWidget.setWindowTitle("ChiSqrDist Confidence Interval Calculator")
self.chidata={}
fitTableWidget = self.confIntervalWidget.fitParamTableWidget
self.calcErrPushButtons={}
self.errProgressBars={}
self.plotErrPushButtons={}
self.stopCalc=False
for fpar in self.fit.result.params.keys():
if self.fit.fit_params[fpar].vary:
row = fitTableWidget.rowCount()
fitTableWidget.insertRow(row)
fitTableWidget.setCellWidget(row,0,QLabel(fpar))
fitTableWidget.setItem(row,1,QTableWidgetItem(self.format%self.fit.result.params[fpar].value))
if self.fit.result.params[fpar].stderr is not None and self.fit.result.params[fpar].stderr!=0.0:
errper=5*self.fit.result.params[fpar].stderr*100/self.fit.result.params[fpar].value
fitTableWidget.setItem(row,2,QTableWidgetItem('%.3f' % (errper)))
fitTableWidget.setItem(row,3,QTableWidgetItem('%.3f' % (errper)))
else:
fitTableWidget.setItem(row, 2, QTableWidgetItem('%.3f' % 10))
fitTableWidget.setItem(row, 3, QTableWidgetItem('%.3f' % 10))
fitTableWidget.setItem(row,4, QTableWidgetItem('20'))
self.calcErrPushButtons[fpar]=QPushButton('Calculate')
fitTableWidget.setCellWidget(row, 5, self.calcErrPushButtons[fpar])
self.calcErrPushButtons[fpar].clicked.connect(partial(self.calcErrPushButtonClicked,row,fpar))
self.errProgressBars[fpar]=QProgressBar()
fitTableWidget.setCellWidget(row, 6, self.errProgressBars[fpar])
self.confIntervalWidget.fitParamTableWidget.setItem(row, 7, QTableWidgetItem(''))
self.confIntervalWidget.fitParamTableWidget.setItem(row, 8, QTableWidgetItem(''))
self.plotErrPushButtons[fpar]=QPushButton('Plot')
fitTableWidget.setCellWidget(row,9, self.plotErrPushButtons[fpar])
self.plotErrPushButtons[fpar].clicked.connect(partial(self.plotErrPushButtonClicked,row,fpar))
fitTableWidget.resizeColumnsToContents()
self.confIntervalWidget.plotAllPushButton.clicked.connect(self.plotAllErrPushButtonClicked)
self.confIntervalWidget.stopPushButton.clicked.connect(self.stopErrCalc)
self.confIntervalWidget.calcAllPushButton.clicked.connect(self.calcAllErr)
self.confIntervalWidget.saveAllPushButton.clicked.connect(self.saveAllErr)
self.confIntervalWidget.confIntervalSpinBox.valueChanged.connect(self.setTargetChiSqr)
self.confIntervalWidget.saveErrPushButton.clicked.connect(self.saveParIntervalErr)
self.minimafitparameters = copy.copy(self.fit.result.params)
self.confIntervalWidget.showMaximized()
self.left_limit={}
self.right_limit={}
self.min_value={}
self.calcAll=False
def stopErrCalc(self):
self.stopCalc=True
def setTargetChiSqr(self):
self.confInterval = self.confIntervalWidget.confIntervalSpinBox.value()
self.minchisqr = self.fit.result.redchi
self.confIntervalWidget.minChiSqrLineEdit.setText(self.format % self.minchisqr)
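        # Target reduced chi-square at the requested confidence level: the minimum reduced
        # chi-square scaled by chi2.isf(1 - CL, nfree)/nfree, i.e. parameter values whose
        # re-fitted reduced chi-square stays below this target lie inside the confidence region.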
self.targetchisqr = self.fit.result.redchi * chi2.isf((1.0 - self.confInterval * 0.01),
self.fit.result.nfree) / (self.fit.result.nfree)
self.confIntervalWidget.targetChiSqrLineEdit.setText(self.format % self.targetchisqr)
def calcAllErr(self):
self.calcAll=True
self.stopCalc=False
for row in range(self.confIntervalWidget.fitParamTableWidget.rowCount()):
if not self.stopCalc:
fpar=self.confIntervalWidget.fitParamTableWidget.cellWidget(row,0).text()
self.calcErrPushButtonClicked(row,fpar)
else:
return
self.plotAllErrPushButtonClicked()
self.errInfoTable = []
for key in self.chidata.keys():
if self.left_limit[key] is not None and self.right_limit[key] is not None:
self.errInfoTable.append([key, self.min_value[key], self.left_limit[key] - self.min_value[key],
self.right_limit[key] - self.min_value[key]])
elif self.left_limit[key] is None and self.right_limit[key] is not None:
self.errInfoTable.append([key, self.min_value[key], None,
self.right_limit[key] - self.min_value[key]])
            elif self.left_limit[key] is not None and self.right_limit[key] is None:
self.errInfoTable.append([key, self.min_value[key], self.left_limit[key] - self.min_value[key],
None])
else:
self.errInfoTable.append([key, self.min_value[key], None, None])
self.confIntervalWidget.errInfoTextEdit.clear()
self.confIntervalWidget.errInfoTextEdit.setFont(QFont("Courier", 10))
self.confIntervalWidget.errInfoTextEdit.append(tabulate(self.errInfoTable,
headers=["Parameter","Parameter-Value","Left-Error","Right-Error"],
stralign='left',numalign='left',tablefmt='simple'))
self.calcAll=False
def checkMinMaxErrLimits(self,fpar,vmin,vmax):
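        # Checks whether the proposed scan limits are wide enough: with fpar held fixed at vmin
        # (and then at vmax) and all other parameters re-fitted, a limit is considered adequate
        # if the reduced chi-square exceeds the target or the parameter's own bound is reached.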
self.fit.fit_params[fpar].vary=False
for key in self.minimafitparameters: # Putting back the minima parameters
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].value = vmin
fit_report, mesg = self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale,
fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
if self.fit.result.redchi>self.targetchisqr or self.fit.fit_params[fpar].min>vmin:
left_limit_ok=True
else:
left_limit_ok=False
for key in self.minimafitparameters: # Putting back the minima parameters
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].value = vmax
fit_report, mesg = self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale,
fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
if self.fit.result.redchi>self.targetchisqr or self.fit.fit_params[fpar].max<vmax:
right_limit_ok=True
else:
right_limit_ok=False
self.fit.fit_params[fpar].vary=True
return left_limit_ok, right_limit_ok
def calcErrPushButtonClicked(self,row,fpar):
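        # Scans fpar on both sides of its best-fit value (keeping it fixed and re-fitting all
        # other free parameters at each scan point), records the reduced chi-square profile, and
        # interpolates where the profile crosses the target chi-square to obtain the asymmetric
        # left/right confidence limits for fpar.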
self.stopCalc=False
for key in self.minimafitparameters:
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].vary=False
redchi_r=[]
self.errProgressBars[fpar].setMinimum(0)
Nval = int(self.confIntervalWidget.fitParamTableWidget.item(row, 4).text())
self.errProgressBars[fpar].setMaximum(Nval)
#Getting the chi-sqr value at the minima position keeping the value of fpar fixed at the minima position
fit_report, mesg =self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale, fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
self.setTargetChiSqr()
redchi_r.append([self.fit.fit_params[fpar].value, self.fit.result.redchi])
self.errProgressBars[fpar].setValue(1)
value=self.fit.result.params[fpar].value
vmax = value*(1.0+float(self.confIntervalWidget.fitParamTableWidget.item(row, 3).text())/100.0)
vmin = value*(1.0-float(self.confIntervalWidget.fitParamTableWidget.item(row, 2).text())/100.0)
left_limit_ok,right_limit_ok=self.checkMinMaxErrLimits(fpar,vmin,vmax)
self.fit.fit_params[fpar].vary = False
if left_limit_ok and right_limit_ok:
# Fitting the right hand side of the minima starting from the first point after minima
self.min_value[fpar]=value
pvalues=np.linspace(value+(vmax-value)*2/Nval, vmax, int(Nval/2))
i=1
for parvalue in pvalues:
if self.stopCalc:
for key in self.minimafitparameters:
self.fit.fit_params[key].value = self.minimafitparameters[key].value
return
for key in self.minimafitparameters: # Putting back the minima parameters
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].value=parvalue
fit_report, mesg = self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale,
fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
if self.fit.result.success:
redchi_r.append([parvalue,self.fit.result.redchi])
i+=1
self.errProgressBars[fpar].setValue(i)
QApplication.processEvents()
step=(value-vmin)*2/Nval
redchi_l=[redchi_r[0]]
#Fitting the left hand of the minima starting from the minima point
pvalues=np.linspace(value-step, vmin, int(Nval / 2))
for parvalue in pvalues:
if self.stopCalc:
for key in self.minimafitparameters:
self.fit.fit_params[key].value = self.minimafitparameters[key].value
return
for key in self.minimafitparameters: # Putting back the minima parameters
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].value = parvalue
fit_report, mesg = self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale,
fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
if self.fit.result.success:
redchi_l.append([parvalue, self.fit.result.redchi])
i+=1
self.errProgressBars[fpar].setValue(i)
QApplication.processEvents()
chidata=np.array(redchi_r+redchi_l[1:])
self.chidata[fpar]=chidata[chidata[:,0].argsort()]
# Calculating the right-limit by interpolation
rvalues = np.array(redchi_r)
if self.targetchisqr < np.max(rvalues[:, 1]):
fn=interp1d(rvalues[:, 1], rvalues[:, 0],kind='linear')
self.right_limit[fpar] = fn(self.targetchisqr)
self.confIntervalWidget.fitParamTableWidget.item(row, 8).setText(self.format % (self.right_limit[fpar]))
else:
self.right_limit[fpar] = None
self.confIntervalWidget.fitParamTableWidget.item(row, 8).setText('None')
# Calculating the left-limit by interpolation
lvalues = np.array(redchi_l)
if self.targetchisqr < np.max(lvalues[:, 1]):
fn=interp1d(lvalues[:, 1], lvalues[:, 0],kind='linear')
self.left_limit[fpar] = fn(self.targetchisqr)
self.confIntervalWidget.fitParamTableWidget.item(row, 7).setText(self.format % (self.left_limit[fpar]))
else:
self.left_limit[fpar] = None
self.confIntervalWidget.fitParamTableWidget.item(row, 7).setText('None')
self.confIntervalWidget.fitParamTableWidget.resizeColumnsToContents()
# Plotting the data
if not self.calcAll:
self.plotErrPushButtonClicked(row, fpar)
#Showing the Errorbars
self.errInfoTable = []
key=fpar
if self.left_limit[key] is not None and self.right_limit[key] is not None:
self.errInfoTable.append([key, self.min_value[key], self.left_limit[key] - self.min_value[key],
self.right_limit[key] - self.min_value[key]])
elif self.left_limit[key] is None and self.right_limit[key] is not None:
self.errInfoTable.append([key, self.min_value[key], None,
self.right_limit[key] - self.min_value[key]])
            elif self.left_limit[key] is not None and self.right_limit[key] is None:
self.errInfoTable.append([key, self.min_value[key], self.left_limit[key] - self.min_value[key],
None])
else:
self.errInfoTable.append([key, self.min_value[key], None, None])
self.confIntervalWidget.errInfoTextEdit.clear()
self.confIntervalWidget.errInfoTextEdit.setFont(QFont("Courier", 10))
self.confIntervalWidget.errInfoTextEdit.append(tabulate(self.errInfoTable,
headers=["Parameter", "Parameter-Value",
"Left-Error", "Right-Error"],
stralign='left', numalign='left',
tablefmt='simple'))
elif left_limit_ok:
QMessageBox.warning(self,'Limit Warning','Max limit is not enough to reach the target chi-square for %s. Increase the Max limit'%fpar,QMessageBox.Ok)
self.errProgressBars[fpar].setValue(0)
QApplication.processEvents()
else:
QMessageBox.warning(self, 'Limit Warning', 'Min limit is not enough to reach the target chi-square for %s. Increase the Min limit'%fpar, QMessageBox.Ok)
self.errProgressBars[fpar].setValue(0)
QApplication.processEvents()
# Going back to the minimum chi-sqr condition
for key in self.minimafitparameters:
self.fit.fit_params[key].value = self.minimafitparameters[key].value
self.fit.fit_params[fpar].vary = True
fit_report, mesg = self.fit.perform_fit(self.xmin, self.xmax, fit_scale=self.fit_scale,
fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()))
def plotErrPushButtonClicked(self,row,fpar):
if fpar in self.chidata.keys():
mw=MplWidget()
mw.setWindowModality(Qt.ApplicationModal)
subplot=mw.getFigure().add_subplot(111)
subplot.plot(self.chidata[fpar][:, 0], self.chidata[fpar][:, 1], 'r.')
subplot.axhline(self.minchisqr,color='k',lw=1,ls='--')
subplot.axhline(self.targetchisqr,color='k',lw=1,ls='-')
subplot.axvline(self.min_value[fpar],color='b',lw=2,ls='-')
# pl.text(self.min_value[fpar],1.01*self.minchisqr,self.format%self.min_value[fpar],rotation='vertical')
if self.right_limit[fpar] is not None:
subplot.axvline(self.right_limit[fpar],color='b',lw=1,ls='--')
# pl.text(self.right_limit[fpar], 1.01*self.targetchisqr, self.format%self.right_limit[fpar],rotation='vertical')
                right_error = '%.3e' % (self.right_limit[fpar]-self.min_value[fpar])
else:
right_error='None'
if self.left_limit[fpar] is not None:
subplot.axvline(self.left_limit[fpar],color='b',lw=1,ls='--')
# pl.text(self.left_limit[fpar], 1.01*self.targetchisqr, self.format% self.left_limit[fpar],rotation='vertical')
                left_error = '%.3e' % (self.left_limit[fpar]-self.min_value[fpar])
else:
left_error='None'
            subplot.set_title('%.3e$^{%s}_{%s}$'%(self.min_value[fpar], right_error, left_error))
subplot.set_xlabel(fpar)
subplot.set_ylabel('\u03c7$^2$')
mw.getFigure().tight_layout()
mw.draw()
mw.show()
else:
QMessageBox.warning(self, 'Data error', 'No data available for plotting. Calculate first', QMessageBox.Ok)
def plotAllErrPushButtonClicked(self):
pkey=list(self.chidata.keys())
Nplots=len(pkey)
if Nplots>0:
mw=MplWidget()
mw.setWindowModality(Qt.ApplicationModal)
rows=math.ceil(np.sqrt(Nplots))
i=1
for row in range(rows):
for col in range(rows):
if i<=Nplots:
ax=mw.getFigure().add_subplot(rows,rows,i)
ax.plot(self.chidata[pkey[i-1]][:,0],self.chidata[pkey[i-1]][:,1],'r.')
ax.axhline(self.minchisqr, color='k', lw=1, ls='--')
ax.axhline(self.targetchisqr, color='k', lw=1, ls='-')
ax.axvline(self.min_value[pkey[i-1]], color='b', lw=2, ls='-')
# ax[row,col].text(self.min_value[pkey[i-1]], 1.01 * self.minchisqr, self.format % self.min_value[pkey[i-1]],rotation='vertical')
if self.right_limit[pkey[i-1]] is not None:
ax.axvline(self.right_limit[pkey[i-1]], color='b', lw=1, ls='--')
                            right_error='%.3e' % (self.right_limit[pkey[i-1]]-self.min_value[pkey[i-1]])
# ax[row,col].text(self.right_limit[pkey[i-1]], 1.01*self.targetchisqr, self.format % self.right_limit[pkey[i-1]],rotation='vertical')
else:
right_error='None'
if self.left_limit[pkey[i-1]] is not None:
ax.axvline(self.left_limit[pkey[i-1]], color='b', lw=1, ls='--')
                            left_error='%.3e' % (self.left_limit[pkey[i-1]]-self.min_value[pkey[i-1]])
# ax[row, col].text(self.left_limit[pkey[i-1]], 1.01*self.targetchisqr, self.format % self.left_limit[pkey[i-1]],rotation='vertical')
else:
left_error='None'
                        ax.set_title('%.3e$^{%s}_{%s}$'%(self.min_value[pkey[i-1]], right_error,left_error))
ax.set_xlabel(pkey[i-1])
ax.set_ylabel('\u03c7$^2$')
i+=1
mw.getFigure().tight_layout()
mw.draw()
mw.show()
def saveAllErr(self):
fname=QFileDialog.getSaveFileName(self,'Provide prefix of the filename',directory=self.curDir,filter='Chi-Sqr files (*.chisqr)')[0]
if fname!='':
for key in self.chidata.keys():
filename=os.path.splitext(fname)[0]+'_'+key+'.chisqr'
                header='Saved on %s\n'%(time.asctime())
                header+="col_names=['%s','chi-sqr']\n"%key
                header+='%s\tchi-sqr'%key
                np.savetxt(filename,self.chidata[key],header=header)
def saveParIntervalErr(self):
fname = QFileDialog.getSaveFileName(caption='Save Parameter Errors as', filter='Parameter Error files (*.perr)',
directory=self.curDir)[0]
if fname!='':
fh=open(fname,'w')
fh.write('# File saved on %s\n'%time.asctime())
fh.write('# Error calculated using Chi-Sqr-Distribution Method\n')
tlines=tabulate(self.errInfoTable, headers=["Parameter","Parameter-Value","Left-Error","Right-Error"],
stralign='left',numalign='left',tablefmt='simple')
lines=tlines.split('\n')
for i,line in enumerate(lines):
if i<2:
fh.write('#'+line+'\n')
else:
fh.write(' '+line+'\n')
fh.close()
def confInterval_emcee(self):
"""
"""
self.fit_method = self.fitMethods[self.fitMethodComboBox.currentText()]
if not self.errorAvailable:
self.emcee_walker=(self.fit.result.nvarys+1)*5
else:
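            # Reusing an existing sampler: re-estimate the burn-in so that (approximately) the
            # same fraction of the combined old + new chain is discarded. This interpretation is
            # an assumption based on how emcee_frac and emcee_burn are combined below.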
# # try:
tnum=len(self.fit.result.flatchain[self.fit.result.var_names[0]])/self.emcee_walker
self.emcee_frac=self.emcee_burn/(tnum/(1.0-self.emcee_frac))
emcee_burn=tnum*self.emcee_frac/(1.0-self.emcee_frac)
self.emcee_burn=int(emcee_burn+self.emcee_steps*self.emcee_frac)
self.emceeConfIntervalWidget = QWidget()
self.emceeConfIntervalWidget.setWindowModality(Qt.ApplicationModal)
uic.loadUi('./UI_Forms/EMCEE_ConfInterval_Widget.ui', self.emceeConfIntervalWidget)
        self.emceeConfIntervalWidget.setWindowTitle('MCMC Confidence Interval Calculator')
self.emceeConfIntervalWidget.MCMCWalkerLineEdit.setText(str(self.emcee_walker))
self.emceeConfIntervalWidget.MCMCStepsLineEdit.setText(str(self.emcee_steps))
self.emceeConfIntervalWidget.MCMCBurnLineEdit.setText(str(self.emcee_burn))
self.emceeConfIntervalWidget.MCMCThinLineEdit.setText(str(self.emcee_thin))
self.emceeConfIntervalWidget.ParallelCoresLineEdit.setText(str(self.emcee_cores))
if not self.errorAvailable:
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setChecked(False)
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setDisabled(True)
self.reuse_sampler=False
else:
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setChecked(True)
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setDisabled(True)
if self.reuse_sampler:
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setEnabled(True)
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setCheckState(Qt.Checked)
else:
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setCheckState(Qt.Unchecked)
self.emceeConfIntervalWidget.startSamplingPushButton.clicked.connect(self.start_emcee_sampling)
self.emceeConfIntervalWidget.MCMCWalkerLineEdit.returnPressed.connect(self.MCMCWalker_changed)
self.emceeConfIntervalWidget.saveConfIntervalPushButton.clicked.connect(self.saveParameterError)
self.emceeConfIntervalWidget.progressBar.setValue(0)
self.emceeConfIntervalWidget.showMaximized()
if self.errorAvailable:
self.update_emcee_parameters()
self.perform_post_sampling_tasks()
self.cornerPlot()
            self.emceeConfIntervalWidget.tabWidget.setCurrentIndex(4)
def MCMCWalker_changed(self):
self.emceeConfIntervalWidget.reuseSamplerCheckBox.setCheckState(Qt.Unchecked)
self.update_emcee_parameters()
def update_emcee_parameters(self):
self.emcee_walker=int(self.emceeConfIntervalWidget.MCMCWalkerLineEdit.text())
self.emcee_steps=int(self.emceeConfIntervalWidget.MCMCStepsLineEdit.text())
self.emcee_burn=int(self.emceeConfIntervalWidget.MCMCBurnLineEdit.text())
self.emcee_thin = int(self.emceeConfIntervalWidget.MCMCThinLineEdit.text())
if self.emceeConfIntervalWidget.reuseSamplerCheckBox.isChecked():
self.reuse_sampler=True
else:
self.reuse_sampler=False
self.emcee_cores = int(self.emceeConfIntervalWidget.ParallelCoresLineEdit.text())
def start_emcee_sampling(self):
try:
self.emceeConfIntervalWidget.parameterTreeWidget.itemSelectionChanged.disconnect()
except:
pass
self.emceeConfIntervalWidget.parameterTreeWidget.clear()
self.emceeConfIntervalWidget.chainMPLWidget.clear()
self.emceeConfIntervalWidget.correlationMPLWidget.clear()
self.emceeConfIntervalWidget.cornerPlotMPLWidget.clear()
self.emceeConfIntervalWidget.confIntervalTextEdit.clear()
self.update_emcee_parameters()
if not self.errorAvailable:
self.emcee_frac=self.emcee_burn/self.emcee_steps
self.doFit(fit_method='emcee', emcee_walker=self.emcee_walker, emcee_steps=self.emcee_steps,
emcee_cores=self.emcee_cores, reuse_sampler=self.reuse_sampler, emcee_burn=0)
def conf_interv_status(self,params,iterations,residual,fit_scale):
self.confIntervalStatus.setText(self.confIntervalStatus.text().split('\n')[0]+'\n\n {:^s} = {:10d}'.format('Iteration',iterations))
QApplication.processEvents()
def runFit(self, emcee_walker=100, emcee_steps=100, emcee_cores=1, reuse_sampler=False, emcee_burn=30):
self.start_time=time.time()
self.fit_report,self.fit_message=self.fit.perform_fit(self.xmin,self.xmax,fit_scale=self.fit_scale, fit_method=self.fit_method,
maxiter=int(self.fitIterationLineEdit.text()),
emcee_walker=emcee_walker, emcee_steps=emcee_steps,
emcee_cores=emcee_cores, reuse_sampler=reuse_sampler, emcee_burn=emcee_burn)
def showFitInfoDlg(self, emcee_walker=100, emcee_steps=100, emcee_burn=30):
if self.fit_method!='emcee':
self.fitInfoDlg=QDialog(self)
vblayout=QVBoxLayout(self.fitInfoDlg)
self.fitIterLabel=QLabel('Iteration: 0,\t Chi-Sqr: Not Available',self.fitInfoDlg)
vblayout.addWidget(self.fitIterLabel)
self.stopFitPushButton=QPushButton('Stop')
vblayout.addWidget(self.stopFitPushButton)
self.stopFitPushButton.clicked.connect(self.stopFit)
self.fitInfoDlg.setWindowTitle('Please wait for the fitting to be completed')
self.fitInfoDlg.setModal(True)
self.fitInfoDlg.show()
else:
self.emceeConfIntervalWidget.fitIterLabel.setText('Time left (hh:mm:ss): %s'%('N.A.'))
self.emceeConfIntervalWidget.progressBar.setMaximum(emcee_walker*emcee_steps)
self.emceeConfIntervalWidget.progressBar.setMinimum(0)
self.emceeConfIntervalWidget.progressBar.setValue(0)
self.emceeConfIntervalWidget.stopSamplingPushButton.clicked.connect(self.stopFit)
def stopFit(self):
self.fit.fit_abort=True
self.fit_stopped=True
self.reuse_sampler=False
if self.fit_method=='emcee':
self.emceeConfIntervalWidget.stopSamplingPushButton.clicked.disconnect()
def closeFitInfoDlg(self):
self.fitInfoDlg.done(0)
def fitCallback(self,params,iterations,residual,fit_scale):
# self.fitIterLabel.setText('Iteration=%d,\t Chi-Sqr=%.5e'%(iterations,np.sum(residual**2)))
# if np.any(self.fit.yfit):
chisqr=np.sum(residual**2)
if chisqr<self.tchisqr:
self.fitIterLabel.setText('Iteration=%d,\t Chi-Sqr=%.5e' % (iterations,chisqr))
self.temp_params=copy.copy(params)
if type(self.fit.x)==dict:
for key in self.fit.x.keys():
self.plotWidget.add_data(x=self.fit.x[key][self.fit.imin[key]:self.fit.imax[key]+1],y=self.fit.yfit[key],\
name=self.funcListWidget.currentItem().text()+':'+key,fit=True)
self.fit.params['output_params']['Residuals_%s'%key] = {'x': self.fit.x[key][self.fit.imin[key]:self.fit.imax[key]+1],
'y': (self.fit.y[key][self.fit.imin[key]:self.fit.imax[key]+1]-self.fit.yfit[key])
/self.fit.yerr[key][self.fit.imin[key]:self.fit.imax[key]+1]}
else:
self.plotWidget.add_data(x=self.fit.x[self.fit.imin:self.fit.imax + 1], y=self.fit.yfit, \
name=self.funcListWidget.currentItem().text(), fit=True)
# else:
# QMessageBox.warning(self,'Parameter Value Error','One or more fitting parameters has got unphysical values perhaps to make all the yvalues zeros!',QMessageBox.Ok)
# self.fit.fit_abort=True
self.fit.params['output_params']['Residuals']={'x':self.fit.x[self.fit.imin:self.fit.imax + 1],
'y': (self.fit.y[self.fit.imin:self.fit.imax + 1]-self.fit.yfit)/self.fit.yerr[self.fit.imin:self.fit.imax + 1]}
self.tchisqr=chisqr
QApplication.processEvents()
def fitErrorCallback(self, params, iterations, residual, fit_scale):
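        # Progress callback used during emcee sampling: estimates the remaining time from the
        # elapsed time and the number of walker*steps evaluations completed, and updates the
        # progress bar accordingly.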
time_taken=time.time()-self.start_time
frac=iterations/(self.emcee_walker*self.emcee_steps+self.emcee_walker)
time_left=time_taken*(self.emcee_walker*self.emcee_steps+self.emcee_walker-iterations)/iterations
self.emceeConfIntervalWidget.fitIterLabel.setText('Time left (hh:mm:ss): %s'%(time.strftime('%H:%M:%S',time.gmtime(time_left))))
self.emceeConfIntervalWidget.progressBar.setValue(iterations)
QApplication.processEvents()
def perform_post_sampling_tasks(self):
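        # Post-processing of the emcee run: populates the parameter tree with per-walker chains,
        # plots the walker-averaged autocorrelation of each parameter, tabulates an approximate
        # correlation time, and plots the per-walker acceptance fraction.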
self.emceeConfIntervalWidget.progressBar.setValue(self.emcee_walker*self.emcee_steps)
self.emceeConfIntervalWidget.fitIterLabel.setText('Time left (hh:mm:ss): 00:00:00' )
self.chain=self.fit.result.chain
self.chain_shape=self.chain.shape
self.param_chain={}
for i,key in enumerate(self.fit.result.flatchain.keys()):
l1=QTreeWidgetItem([key])
self.param_chain[key]={}
for j in range(self.chain_shape[1]):
self.param_chain[key][j]=self.chain[:,j,i]
l1_child=QTreeWidgetItem(['%s:chain:%d'%(key,j)])
l1.addChild(l1_child)
self.emceeConfIntervalWidget.parameterTreeWidget.addTopLevelItem(l1)
self.emceeConfIntervalWidget.parameterTreeWidget.itemSelectionChanged.connect(self.parameterTreeSelectionChanged)
#Calculating autocorrelation
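        # The correlation time reported below is a rough estimate: the autocorrelation function,
        # averaged over walkers, is summed over its positive part only (a simplification of the
        # integrated autocorrelation time).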
acor={}
Nrows=len(self.param_chain.keys())
self.emceeConfIntervalWidget.correlationMPLWidget.clear()
ax1 = self.emceeConfIntervalWidget.correlationMPLWidget.fig.add_subplot(1, 1, 1)
corr_time=[]
for i,key in enumerate(self.param_chain.keys()):
tcor=[]
for ikey in self.param_chain[key].keys():
tdata=self.param_chain[key][ikey]
res=sm.tsa.acf(tdata,nlags=len(tdata),fft=True)
tcor.append(res)
tcor=np.array(tcor)
acor[key]=np.mean(tcor,axis=0)
ax1.plot(acor[key],'-',label='para=%s'%key)
corr_time.append([key,np.sum(np.where(acor[key]>0,acor[key],0))])
ax1.set_xlabel('Steps')
ax1.set_ylabel('Autocorrelation')
l=ax1.legend(loc='best')
l.set_draggable(True)
self.emceeConfIntervalWidget.correlationMPLWidget.draw()
self.emceeConfIntervalWidget.corrTimeTextEdit.clear()
self.emceeConfIntervalWidget.corrTimeTextEdit.setFont(QFont("Courier", 10))
corr_text = tabulate(corr_time, headers=['Parameter', 'Correlation-time (Steps)'], stralign='left',
numalign='left', tablefmt='simple')
self.emceeConfIntervalWidget.corrTimeTextEdit.append(corr_text)
#Plotting Acceptance Ratio
self.emceeConfIntervalWidget.acceptFracMPLWidget.clear()
ax2=self.emceeConfIntervalWidget.acceptFracMPLWidget.fig.add_subplot(1,1,1)
ax2.plot(self.fit.result.acceptance_fraction,'-')
ax2.set_xlabel('Walkers')
ax2.set_ylabel('Acceptance Ratio')
self.emceeConfIntervalWidget.acceptFracMPLWidget.draw()
self.emceeConfIntervalWidget.calcConfIntervPushButton.clicked.connect(self.cornerPlot)
self.emceeConfIntervalWidget.tabWidget.setCurrentIndex(1)
def cornerPlot(self):
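        # Builds a corner plot of the flattened MCMC chain (excluding __lnsigma), discarding the
        # first 'burn' samples, and reports the median together with the asymmetric errors taken
        # from the (1-p) and p percentiles returned by corner.quantile().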
percentile = self.emceeConfIntervalWidget.percentileDoubleSpinBox.value()
self.emceeConfIntervalWidget.cornerPlotMPLWidget.clear()
names = [name for name in self.fit.result.var_names if name != '__lnsigma']
values = [self.fit.result.params[name].value for name in names]
ndim = len(names)
quantiles=[1.0-percentile/100,0.5,percentile/100]
first=int(self.emceeConfIntervalWidget.MCMCBurnLineEdit.text())
corner.corner(self.fit.result.flatchain[names][first:], labels=names, bins=50, levels=(percentile/100,),
truths=values, quantiles=quantiles, show_titles=True, title_fmt='.6f',
use_math_text=True, title_kwargs={'fontsize': 3 * 12 / ndim},
label_kwargs={'fontsize': 3 * 12 / ndim}, fig=self.emceeConfIntervalWidget.cornerPlotMPLWidget.fig)
for ax3 in self.emceeConfIntervalWidget.cornerPlotMPLWidget.fig.get_axes():
ax3.set_xlabel('')
ax3.set_ylabel('')
ax3.tick_params(axis='y', labelsize=3 * 12 / ndim, rotation=0)
ax3.tick_params(axis='x', labelsize=3 * 12 / ndim)
self.emceeConfIntervalWidget.cornerPlotMPLWidget.draw()
self.emceeConfIntervalWidget.tabWidget.setCurrentIndex(3)
err_quantiles={}
mesg = [['Parameters', 'Value(50%)', 'Left-error(%.3f)'%(100-percentile), 'Right-error(%.3f)'%percentile]]
for name in names:
err_quantiles[name] = corner.quantile(self.fit.result.flatchain[name], quantiles)
l,p,r=err_quantiles[name]
mesg.append([name, p, l - p, r - p])
self.emceeConfIntervalWidget.confIntervalTextEdit.clear()
self.emceeConfIntervalWidget.confIntervalTextEdit.setFont(QFont("Courier", 10))
txt = tabulate(mesg, headers='firstrow', stralign='left', numalign='left', tablefmt='simple')
self.emceeConfIntervalWidget.confIntervalTextEdit.append(txt)
def parameterTreeSelectionChanged(self):
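        # Plots the chains selected in the parameter tree, one subplot per parameter (all sharing
        # the x-axis of the first parameter), with one line per selected walker.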
self.emceeConfIntervalWidget.chainMPLWidget.clear()
chaindata={}
for item in self.emceeConfIntervalWidget.parameterTreeWidget.selectedItems():
key,i=item.text(0).split(':chain:')
try:
chaindata[key].append(int(i))
except:
chaindata[key]=[int(i)]
NRows = len(chaindata.keys())
ax={}
firstkey=list(chaindata.keys())[0]
for j,key in enumerate(chaindata.keys()):
try:
ax[key]=self.emceeConfIntervalWidget.chainMPLWidget.fig.add_subplot(NRows, 1, j+1, sharex=ax[firstkey])
except:
ax[key] = self.emceeConfIntervalWidget.chainMPLWidget.fig.add_subplot(NRows, 1, j+1)
for i in chaindata[key]:
ax[key].plot(self.param_chain[key][i],'-')
ax[key].set_xlabel('MC steps')
ax[key].set_ylabel(key)
self.emceeConfIntervalWidget.chainMPLWidget.draw()
self.emceeConfIntervalWidget.tabWidget.setCurrentIndex(0)
def saveParameterError(self):
fname=QFileDialog.getSaveFileName(caption='Save Parameter Errors as',filter='Parameter Error files (*.perr)',directory=self.curDir)[0]
        if os.path.splitext(fname)[1]=='':
fname=fname+'.perr'
text=self.emceeConfIntervalWidget.confIntervalTextEdit.toPlainText()
fh=open(fname,'w')
        fh.write('# File saved on %s\n'%time.asctime())
fh.write('# Error calculated using MCMC Method\n')
fh.write(text)
fh.close()
def undoFit(self):
try:
self.sfitParamTableWidget.cellChanged.disconnect()
for i in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
for row in range(self.sfitParamTableWidget.rowCount()):
key=self.sfitParamTableWidget.item(row,0).text()
self.sfitParamTableWidget.item(row,1).setText(self.format%(self.oldParams[key]))
self.sfitParamTableWidget.item(row,1).setToolTip((key+' = '+self.format+' \u00B1 '+self.format)% (self.oldParams[key], 0.0))
if self.fit.params['__mpar__']!={}:
for i in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(i)
for row in range(self.mfitParamTableWidget[mkey].rowCount()):
for col in range(1,self.mfitParamTableWidget[mkey].columnCount()):
parkey=self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
key='__%s_%s_%03d'%(mkey,parkey,row)
self.mfitParamTableWidget[mkey].item(row,col).setText(self.format%(self.oldmpar[mkey][parkey][row]))
self.mfitParamTableWidget[mkey].item(row, col).setToolTip((key+' = '+self.format+' \u00B1 '+self.format) % \
(self.oldmpar[mkey][parkey][row], 0.0))
#self.mfitParamData=copy.copy(self.oldmpar)
self.sfitParamTableWidget.cellChanged.connect(self.sfitParamChanged)
for i in range(self.mfitParamTabWidget.count()):
mkey = self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[mkey].cellChanged.connect(self.mfitParamChanged_new)
self.update_plot()
def addData(self,fnames=None):
"""
fnames :List of filenames
"""
if self.dataListWidget.count()==0:
self.fileNumber=0
try:
self.dataListWidget.itemSelectionChanged.disconnect()
except:
pass
#try:
if fnames is None:
fnames,_=QFileDialog.getOpenFileNames(self,caption='Open data files',directory=self.curDir,\
filter='Data files (*.txt *.dat *.chi *.rrf)')
if len(fnames)!=0:
self.curDir=os.path.dirname(fnames[0])
for fname in fnames:
data_key=str(self.fileNumber)+'<>'+fname
data_dlg=Data_Dialog(fname=fname,parent=self)
data_dlg.setModal(True)
data_dlg.closePushButton.setText('Cancel')
if len(fnames)>1:
data_dlg.accept()
else:
data_dlg.exec_()
if data_dlg.acceptData:
self.dlg_data[data_key]=data_dlg.data
self.plotColIndex[data_key]=data_dlg.plotColIndex
self.plotColors[data_key]=data_dlg.plotColors
self.data[data_key]=data_dlg.externalData
self.expressions[data_key]=data_dlg.expressions
for key in self.data[data_key].keys():
self.plotWidget.add_data(self.data[data_key][key]['x'],self.data[data_key][key]['y'],\
yerr=self.data[data_key][key]['yerr'],name='%d:%s'%(self.fileNumber,key),color=self.data[data_key][key]['color'])
self.dataListWidget.addItem(data_key)
self.fileNames[self.fileNumber]=fname
self.fileNumber+=1
# else:
# QMessageBox.warning(self,'Import Error','Data file has been imported before.\
# Please remove the data file before importing again')
# #except:
# # QMessageBox.warning(self,'File error','The file(s) do(es) not look like a data file. Please format it in x,y[,yerr] column format',QMessageBox.Ok)
self.dataListWidget.clearSelection()
self.dataListWidget.itemSelectionChanged.connect(self.dataFileSelectionChanged)
self.dataListWidget.setCurrentRow(self.fileNumber-1)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def removeData(self):
"""
"""
try:
self.dataListWidget.itemSelectionChanged.disconnect()
except:
pass
for item in self.dataListWidget.selectedItems():
fnum,fname=item.text().split('<>')
self.dataListWidget.takeItem(self.dataListWidget.row(item))
for key in self.data[item.text()].keys():
self.plotWidget.remove_data(['%s:%s'%(fnum,key)])
del self.data[item.text()]
del self.expressions[item.text()]
del self.plotColIndex[item.text()]
del self.plotColors[item.text()]
del self.dlg_data[item.text()]
if self.dataListWidget.count()>0:
self.dataFileSelectionChanged()
else:
self.pfnames=[]
self.dataListWidget.itemSelectionChanged.connect(self.dataFileSelectionChanged)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def create_paramDock(self):
self.parSplitter=QSplitter(Qt.Vertical)
self.fixedparamLayoutWidget=pg.LayoutWidget(self)
xlabel=QLabel('x')
self.fixedparamLayoutWidget.addWidget(xlabel)
self.xLineEdit=QLineEdit('np.linspace(0.001,1,100)')
self.fixedparamLayoutWidget.addWidget(self.xLineEdit,col=1)
self.saveSimulatedButton=QPushButton("Save Simulated Curve")
self.saveSimulatedButton.setEnabled(False)
self.saveSimulatedButton.clicked.connect(self.saveSimulatedCurve)
self.fixedparamLayoutWidget.addWidget(self.saveSimulatedButton,col=2)
self.fixedparamLayoutWidget.nextRow()
self.saveParamButton = QPushButton('Save Parameters')
self.saveParamButton.clicked.connect(self.saveParameters)
self.fixedparamLayoutWidget.addWidget(self.saveParamButton,col=1)
self.loadParamButton = QPushButton('Load Parameters')
self.loadParamButton.clicked.connect(lambda x: self.loadParameters(fname=None))
self.fixedparamLayoutWidget.addWidget(self.loadParamButton, col=2)
self.fixedparamLayoutWidget.nextRow()
fixedParamLabel=QLabel('Fixed Parameters')
self.fixedparamLayoutWidget.addWidget(fixedParamLabel, colspan=3)
self.fixedparamLayoutWidget.nextRow()
self.fixedParamTableWidget=pg.TableWidget()
self.fixedParamTableWidget.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
self.fixedParamTableWidget.setEditable(editable=True)
self.fixedParamTableWidget.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
self.fixedparamLayoutWidget.addWidget(self.fixedParamTableWidget,colspan=3)
self.parSplitter.addWidget(self.fixedparamLayoutWidget)
self.sfitparamLayoutWidget=pg.LayoutWidget()
sfitParamLabel=QLabel('Single fitting parameters')
self.sfitparamLayoutWidget.addWidget(sfitParamLabel)
self.sfitparamLayoutWidget.nextRow()
self.sfitParamTableWidget=pg.TableWidget()
self.sfitParamTableWidget.setEditable(editable=True)
self.sfitParamTableWidget.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
self.sfitParamTableWidget.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
#self.sfitParamTableWidget.cellDoubleClicked.connect(self.editFitParam)
self.sfitparamLayoutWidget.addWidget(self.sfitParamTableWidget,colspan=3)
self.sfitparamLayoutWidget.nextRow()
self.sfitLabel=QLabel('')
self.sfitSlider=QSlider(Qt.Horizontal)
self.sfitSlider.setMinimum(1)
self.sfitSlider.setMaximum(1000)
self.sfitSlider.setSingleStep(10)
self.sfitSlider.setTickInterval(10)
self.sfitSlider.setValue(500)
self.sfitparamLayoutWidget.addWidget(self.sfitLabel,col=0,colspan=1)
self.sfitparamLayoutWidget.addWidget(self.sfitSlider,col=1,colspan=2)
self.sfitParamTableWidget.cellClicked.connect(self.update_sfitSlider)
self.parSplitter.addWidget(self.sfitparamLayoutWidget)
self.mfitparamLayoutWidget=pg.LayoutWidget()
        mfitParamLabel=QLabel('Multiple fitting parameters')
self.mfitparamLayoutWidget.addWidget(mfitParamLabel,col=0, colspan=3)
self.mfitparamLayoutWidget.nextRow()
self.mfitParamCoupledCheckBox=QCheckBox('Coupled')
self.mfitParamCoupledCheckBox.setEnabled(False)
self.mfitParamCoupledCheckBox.stateChanged.connect(self.mfitParamCoupledCheckBoxChanged)
self.mfitparamLayoutWidget.addWidget(self.mfitParamCoupledCheckBox,col=0)
self.add_mpar_button=QPushButton('Add')
self.add_mpar_button.clicked.connect(self.add_mpar)
self.add_mpar_button.setDisabled(True)
self.mfitparamLayoutWidget.addWidget(self.add_mpar_button,col=1)
self.remove_mpar_button=QPushButton('Remove')
self.mfitparamLayoutWidget.addWidget(self.remove_mpar_button,col=2)
self.remove_mpar_button.clicked.connect(self.remove_mpar)
self.remove_mpar_button.setDisabled(True)
self.mfitparamLayoutWidget.nextRow()
self.mfitParamTabWidget=QTabWidget()
self.mfitParamTabWidget.currentChanged.connect(self.mfitParamTabChanged)
# self.mfitParamTableWidget=pg.TableWidget(sortable=False)
# self.mfitParamTableWidget.cellDoubleClicked.connect(self.mparDoubleClicked)
# self.mfitParamTableWidget.setEditable(editable=True)
# self.mfitParamTableWidget.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
# self.mfitParamTableWidget.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)
# #self.sfitParamTableWidget.cellDoubleClicked.connect(self.editFitParam)
# self.mfitparamLayoutWidget.addWidget(self.mfitParamTableWidget,colspan=3)
self.mfitparamLayoutWidget.addWidget(self.mfitParamTabWidget,colspan=3)
self.mfitparamLayoutWidget.nextRow()
self.mfitLabel=QLabel('')
self.mfitSlider=QSlider(Qt.Horizontal)
self.mfitSlider.setMinimum(1)
self.mfitSlider.setSingleStep(10)
self.mfitSlider.setTickInterval(10)
self.mfitSlider.setMaximum(1000)
self.mfitSlider.setValue(500)
self.mfitparamLayoutWidget.addWidget(self.mfitLabel,col=0,colspan=1)
self.mfitparamLayoutWidget.addWidget(self.mfitSlider,col=1,colspan=2)
# self.mfitParamTableWidget.cellClicked.connect(self.update_mfitSlider)
# self.mfitparamLayoutWidget.nextRow()
# self.saveParamButton=QPushButton('Save Parameters')
# self.saveParamButton.clicked.connect(self.saveParameters)
# self.mfitparamLayoutWidget.addWidget(self.saveParamButton,col=1)
# self.loadParamButton=QPushButton('Load Parameters')
# self.loadParamButton.clicked.connect(lambda x: self.loadParameters(fname=None))
# self.mfitparamLayoutWidget.addWidget(self.loadParamButton,col=2)
self.parSplitter.addWidget(self.mfitparamLayoutWidget)
self.genparamLayoutWidget=pg.LayoutWidget()
genParameters=QLabel('Generated Parameters')
self.genparamLayoutWidget.addWidget(genParameters,colspan=2)
self.genparamLayoutWidget.nextRow()
self.genParamListWidget=QListWidget()
self.genParamListWidget.setSelectionMode(QAbstractItemView.ExtendedSelection)
self.genParamListWidget.itemSelectionChanged.connect(self.plot_extra_param)
self.genParamListWidget.itemDoubleClicked.connect(self.extra_param_doubleClicked)
#self.genParamListWidget.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
self.genparamLayoutWidget.addWidget(self.genParamListWidget,colspan=2)
self.genparamLayoutWidget.nextRow()
self.saveGenParamButton=QPushButton('Save Generated Parameters')
self.saveGenParamButton.clicked.connect(lambda x:self.saveGenParameters(bfname=None))
self.genparamLayoutWidget.addWidget(self.saveGenParamButton,colspan=2)
self.parSplitter.addWidget(self.genparamLayoutWidget)
self.paramDock.addWidget(self.parSplitter)
def mfitParamTabChanged(self,index):
self.mkey=self.mfitParamTabWidget.tabText(index)
if self.mkey!='':
if self.mfitParamTableWidget[self.mkey].rowCount()==self.mpar_N[self.mkey]:
self.remove_mpar_button.setDisabled(True)
else:
self.remove_mpar_button.setEnabled(True)
def update_sfitSlider(self,row,col):
if col==1:
try:
self.sfitSlider.valueChanged.disconnect()
self.sfitSlider.sliderReleased.disconnect()
except:
pass
key=self.sfitParamTableWidget.item(row,0).text()
self.sfitLabel.setText(key)
self.current_sfit_row=row
value=self.fit.fit_params[key].value
self.sfitSlider.setValue(500)
self.sfitSlider.valueChanged.connect(self.sfitSliderChanged)
self.sfitSlider.sliderReleased.connect(self.sfitSliderReleased)
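    # Note on the slider convention used by the two handlers below: the slider spans 1-1000 with
    # 500 mapped to the parameter's current value; while dragging, the parameter is offset by
    # brute_step*(position-500)/500, and on release it is rescaled by up to +/-20%.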
def sfitSliderChanged(self,value):
if not self.sfitSlider.isSliderDown():
self.sfitSlider.setDisabled(True)
key=self.sfitParamTableWidget.item(self.current_sfit_row,0).text()
pvalue=self.fit.fit_params[key].value+self.fit.fit_params[key].brute_step*(value-500)/500
self.sfitParamTableWidget.item(self.current_sfit_row,1).setText(self.format%pvalue)
QApplication.processEvents()
self.sfitSlider.setEnabled(True)
def sfitSliderReleased(self):
key=self.sfitParamTableWidget.item(self.current_sfit_row,0).text()
pvalue=self.fit.fit_params[key].value*(1+0.2*(self.sfitSlider.value()-500)/500)
self.sfitParamTableWidget.item(self.current_sfit_row,1).setText(self.format%pvalue)
QApplication.processEvents()
def update_mfitSlider(self,row,col):
if col!=0:
try:
self.mfitSlider.valueChanged.disconnect()
self.mfitSlider.sliderReleased.disconnect()
except:
pass
pkey = self.mfitParamTableWidget[self.mkey].horizontalHeaderItem(col).text()
txt = self.mfitParamTableWidget[self.mkey].item(row, col).text()
key = '__%s_%s_%03d' % (self.mkey, pkey, row)
self.mfitLabel.setText(key)
self.current_mfit_row=row
self.current_mfit_col=col
value=self.fit.fit_params[key].value
self.mfitSlider.setValue(500)
self.mfitSlider.valueChanged.connect(self.mfitSliderChanged)
self.mfitSlider.sliderReleased.connect(self.mfitSliderReleased)
def mfitSliderChanged(self,value):
if not self.mfitSlider.isSliderDown():
self.mfitSlider.setDisabled(True)
pkey = self.mfitParamTableWidget[self.mkey].horizontalHeaderItem(self.current_mfit_col).text()
txt = self.mfitParamTableWidget[self.mkey].item(self.current_mfit_row, self.current_mfit_col).text()
key = '__%s_%s_%03d' % (self.mkey, pkey, self.current_mfit_row)
pvalue=self.fit.fit_params[key].value+self.fit.fit_params[key].brute_step*(value-500)/500
self.mfitParamTableWidget[self.mkey].item(self.current_mfit_row,self.current_mfit_col).setText(self.format%pvalue)
QApplication.processEvents()
self.mfitSlider.setEnabled(True)
def mfitSliderReleased(self):
pkey = self.mfitParamTableWidget[self.mkey].horizontalHeaderItem(self.current_mfit_col).text()
txt = self.mfitParamTableWidget[self.mkey].item(self.current_mfit_row, self.current_mfit_col).text()
key = '__%s_%s_%03d' % (self.mkey, pkey, self.current_mfit_row)
pvalue = self.fit.fit_params[key].value * (1 + 0.2 * (self.mfitSlider.value() - 500) / 500)
self.mfitParamTableWidget[self.mkey].item(self.current_mfit_row, self.current_mfit_col).setText(self.format % pvalue)
QApplication.processEvents()
def saveSimulatedCurve(self):
"""
Saves the simulated curve in a user-supplied ascii file
:return:
"""
fname=QFileDialog.getSaveFileName(caption='Save As',filter='Text files (*.dat *.txt)',directory=self.curDir)[0]
if fname!='':
header='Simulated curve generated on %s\n'%time.asctime()
header+='Category:%s\n'%self.curr_category
header+='Function:%s\n'%self.funcListWidget.currentItem().text()
for i in range(self.fixedParamTableWidget.rowCount()):
header += '%s=%s\n' % (
self.fixedParamTableWidget.item(i, 0).text(), self.fixedParamTableWidget.item(i, 1).text())
for i in range(self.sfitParamTableWidget.rowCount()):
header += '%s=%s\n' % (
self.sfitParamTableWidget.item(i, 0).text(), self.sfitParamTableWidget.item(i, 1).text())
for i in range(self.mfitParamTabWidget.count()):
mkey = self.mfitParamTabWidget.tabText(i)
for row in range(self.mfitParamTableWidget[mkey].rowCount()):
vartxt = mkey+'_'+self.mfitParamTableWidget[mkey].item(row, 0).text()
for col in range(1, self.mfitParamTableWidget[mkey].columnCount()):
header += '%s_%s=%s\n' % (vartxt, self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text(),
self.mfitParamTableWidget[mkey].item(row, col).text())
if type(self.fit.x)==dict:
text='col_names=[\'q\','
keys=list(self.fit.x.keys())
data=self.fit.x[keys[0]]
for key in keys:
text+='\''+key+'\','
data=np.vstack((data,self.fit.yfit[key]))
header+=text[:-1]+']\n'
np.savetxt(fname,data.T,header=header,comments='#')
else:
header+='col_names=[\'q\',\'I\']'
np.savetxt(fname,np.vstack((self.fit.x,self.fit.yfit)).T,header=header,comments='#')
else:
pass
def mparDoubleClicked(self,row,col):
mkey=self.mfitParamTabWidget.tabText(self.mfitParamTabWidget.currentIndex())
if col!=0:
try:
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
pkey=self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
key='__%s_%s_%03d'%(mkey,pkey,row)
ovalue=self.fit.fit_params[key].value
ovary=self.fit.fit_params[key].vary
ominimum=self.fit.fit_params[key].min
omaximum=self.fit.fit_params[key].max
oexpr=self.fit.fit_params[key].expr
obrute_step=self.fit.fit_params[key].brute_step
dlg=minMaxDialog(ovalue,vary=ovary,minimum=ominimum,maximum=omaximum,expr=oexpr,brute_step=obrute_step,title=key)
if dlg.exec_():
value,vary,maximum,minimum,expr,brute_step=(dlg.value,dlg.vary,dlg.maximum,dlg.minimum,dlg.expr,dlg.brute_step)
else:
value,vary,maximum,minimum,expr,brute_step=copy.copy(ovalue),copy.copy(ovary),copy.copy(omaximum),copy.copy(ominimum),copy.copy(oexpr),copy.copy(obrute_step)
self.mfitParamTableWidget[mkey].item(row,col).setText(self.format%value)
if vary:
self.mfitParamTableWidget[mkey].item(row, col).setCheckState(Qt.Checked)
else:
self.mfitParamTableWidget[mkey].item(row, col).setCheckState(Qt.Unchecked)
try:
self.mfitParamData[mkey][pkey][row] = value
# self.fit.fit_params[key].set(value=value)
if expr == 'None':
expr = ''
self.fit.fit_params[key].set(value=value, vary=vary, min=minimum, max=maximum, expr=expr,
brute_step=brute_step)
self.update_plot()
except:
self.mfitParamTableWidget[mkey].item(row, col).setText(self.format % ovalue)
self.mfitParamData[mkey][pkey][row] = ovalue
self.fit.fit_params[key].set(value=ovalue, vary=ovary, min=ominimum, max=omaximum, expr=oexpr,
brute_step=brute_step)
self.update_plot()
            QMessageBox.warning(self,'Parameter Error','Some parameter values you just entered are not correct. Please enter the values carefully',QMessageBox.Ok)
self.mfitParamTableWidget[mkey].cellChanged.connect(self.mfitParamChanged_new)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def mfitParamCoupledCheckBoxChanged(self):
if self.mfitParamCoupledCheckBox.isChecked() and self.mfitParamTabWidget.count()>1:
mparRowCounts=[self.mfitParamTableWidget[self.mfitParamTabWidget.tabText(i)].rowCount() for i in range(self.mfitParamTabWidget.count())]
if not all(x == mparRowCounts[0] for x in mparRowCounts):
cur_index=self.mfitParamTabWidget.currentIndex()
cur_key=self.mfitParamTabWidget.tabText(cur_index)
for i in range(self.mfitParamTabWidget.count()):
if i != cur_index:
mkey=self.mfitParamTabWidget.tabText(i)
try:
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
rowCount=self.mfitParamTableWidget[mkey].rowCount()
self.mfitParamTabWidget.setCurrentIndex(i)
if rowCount>mparRowCounts[cur_index]:
self.mfitParamTableWidget[mkey].clearSelection()
self.mfitParamTableWidget[mkey].setRangeSelected(
QTableWidgetSelectionRange(mparRowCounts[cur_index],0,rowCount-1,0),True)
self.remove_uncoupled_mpar()
elif rowCount<mparRowCounts[cur_index]:
for j in range(rowCount,mparRowCounts[cur_index]):
self.mfitParamTableWidget[mkey].clearSelection()
self.mfitParamTableWidget[mkey].setCurrentCell(j-1,0)
self.add_uncoupled_mpar()
self.mfitParamTableWidget[mkey].setSelectionBehavior(QAbstractItemView.SelectItems)
self.mfitParamTabWidget.setCurrentIndex(cur_index)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def add_mpar(self):
if self.mfitParamCoupledCheckBox.isChecked() and self.mfitParamTabWidget.count()>1:
self.add_coupled_mpar()
else:
self.add_uncoupled_mpar()
self.update_plot()
self.remove_mpar_button.setEnabled(True)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def remove_mpar(self):
if self.mfitParamCoupledCheckBox.isChecked() and self.mfitParamTabWidget.count()>1:
self.remove_coupled_mpar()
else:
self.remove_uncoupled_mpar()
self.update_plot()
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def add_coupled_mpar(self):
cur_index=self.mfitParamTabWidget.currentIndex()
mkey = self.mfitParamTabWidget.tabText(cur_index)
if len(self.mfitParamTableWidget[mkey].selectedItems())!=0:
curRow=self.mfitParamTableWidget[mkey].currentRow()
for i in range(self.mfitParamTabWidget.count()):
self.mfitParamTabWidget.setCurrentIndex(i)
tkey=self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[tkey].clearSelection()
self.mfitParamTableWidget[tkey].setCurrentCell(curRow,0)
self.add_uncoupled_mpar()
self.mfitParamTabWidget.setCurrentIndex(cur_index)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def remove_coupled_mpar(self):
cur_index=self.mfitParamTabWidget.currentIndex()
mkey = self.mfitParamTabWidget.tabText(cur_index)
selRows = list(set([item.row() for item in self.mfitParamTableWidget[mkey].selectedItems()]))
if len(selRows) != 0:
for i in range(self.mfitParamTabWidget.count()):
self.mfitParamTabWidget.setCurrentIndex(i)
tkey=self.mfitParamTabWidget.tabText(i)
self.mfitParamTableWidget[tkey].clearSelection()
self.mfitParamTableWidget[tkey].setRangeSelected(
QTableWidgetSelectionRange(selRows[0], 0, selRows[-1], 0), True)
self.remove_uncoupled_mpar()
self.mfitParamTabWidget.setCurrentIndex(cur_index)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def add_uncoupled_mpar(self):
cur_index = self.mfitParamTabWidget.currentIndex()
mkey=self.mfitParamTabWidget.tabText(self.mfitParamTabWidget.currentIndex())
try:
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
NCols=self.mfitParamTableWidget[mkey].columnCount()
if len(self.mfitParamTableWidget[mkey].selectedItems())!=0:
curRow=self.mfitParamTableWidget[mkey].currentRow()
#if curRow!=0:
self.mfitParamTableWidget[mkey].insertRow(curRow)
self.mfitParamTableWidget[mkey].setRow(curRow,self.mfitParamData[mkey][curRow])
self.mfitParamData[mkey]=np.insert(self.mfitParamData[mkey],curRow,self.mfitParamData[mkey][curRow],0)
NRows = self.mfitParamTableWidget[mkey].rowCount()
for col in range(NCols):
pkey=self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
if col!=0:
for row in range(NRows-1, curRow,-1):
key='__%s_%s_%03d'%(mkey, pkey,row)
nkey = '__%s_%s_%03d' % (mkey,pkey,row-1)
if key in self.fit.fit_params.keys():
val,vary,min,max,expr,bs = self.mfitParamData[mkey][row][col],self.fit.fit_params[nkey].vary, \
self.fit.fit_params[nkey].min,self.fit.fit_params[nkey].max, \
self.fit.fit_params[nkey].expr,self.fit.fit_params[nkey].brute_step
self.fit.fit_params[key].set(value=val,vary=vary,min=min,max=max,expr=expr,brute_step=bs)
else:
val,vary,min,max,expr,bs=self.mfitParamData[mkey][row][col],self.fit.fit_params[nkey].vary,self.fit.fit_params[nkey].min, \
self.fit.fit_params[nkey].max,self.fit.fit_params[nkey].expr, \
self.fit.fit_params[nkey].brute_step
self.fit.fit_params.add(key,value=val,vary=vary,min=min,max=max,expr=expr,brute_step=bs)
item=self.mfitParamTableWidget[mkey].item(row,col)
item.setText(self.format%val)
item.setFlags(
Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable)
if self.fit.fit_params[key].vary > 0:
item.setCheckState(Qt.Checked)
else:
item.setCheckState(Qt.Unchecked)
item.setToolTip((key+' = '+self.format+' \u00B1 '+self.format) % \
(self.fit.fit_params[key].value, 0.0))
# This is to make the newly inserted row checkable
item = self.mfitParamTableWidget[mkey].item(curRow, col)
item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable)
key = '__%s_%s_%03d' % (mkey, pkey, curRow)
item.setText(self.format%self.fit.fit_params[key].value)
item.setToolTip((key + ' = ' + self.format + ' \u00B1 ' + self.format) % \
(self.fit.fit_params[key].value, 0.0))
if self.fit.fit_params[key].vary>0:
item.setCheckState(Qt.Checked)
else:
item.setCheckState(Qt.Unchecked)
else:
item = self.mfitParamTableWidget[mkey].item(curRow, col)
item.setFlags(Qt.ItemIsEnabled | Qt.ItemIsEditable | Qt.ItemIsSelectable)
self.fit.params['__mpar__'][mkey][pkey].insert(curRow, self.mfitParamData[mkey][curRow][col])
self.update_mfit_parameters_new()
self.update_plot()
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
# self.remove_mpar_button.setEnabled(True)
self.mfitParamTabWidget.setCurrentIndex(cur_index)
else:
QMessageBox.warning(self,'Warning','Please select a row at which you would like to add a set of parameters',QMessageBox.Ok)
self.mfitParamTableWidget[mkey].cellChanged.connect(self.mfitParamChanged_new)
def remove_uncoupled_mpar(self):
mkey = self.mfitParamTabWidget.tabText(self.mfitParamTabWidget.currentIndex())
selrows=list(set([item.row() for item in self.mfitParamTableWidget[mkey].selectedItems()]))
num=self.mfitParamTableWidget[mkey].rowCount()-len(selrows)
if num<self.mpar_N[mkey]:
            QMessageBox.warning(self, 'Selection error',
                                'The minimum number of rows required for this function to work is %d. '
                                'You can only remove %d rows' % (self.mpar_N[mkey], num), QMessageBox.Ok)
return
# if self.mfitParamTableWidget[mkey].rowCount()-1 in selrows:
# QMessageBox.warning(self, 'Selection error',
# 'Cannot remove the last row. Please select the rows other than the last row', QMessageBox.Ok)
# return
try:
self.mfitParamTableWidget[mkey].cellChanged.disconnect()
except:
pass
if selrows!=[]:
selrows.sort(reverse=True)
for row in selrows:
maxrow=self.mfitParamTableWidget[mkey].rowCount()
for trow in range(row,maxrow):
for col in range(self.mfitParamTableWidget[mkey].columnCount()):
pkey=self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
if trow<maxrow-1:
key1='__%s_%s_%03d'%(mkey,pkey,trow)
key2='__%s_%s_%03d'%(mkey,pkey,trow+1)
self.fit.params['__mpar__'][mkey][pkey][trow] = copy.copy(self.fit.params['__mpar__'][mkey][pkey][trow + 1])
if col!=0:
self.fit.fit_params[key1]=copy.copy(self.fit.fit_params[key2])
del self.fit.fit_params[key2]
else:
key1='__%s_%s_%03d'%(mkey,pkey,trow)
# if col!=0:
del self.fit.params['__mpar__'][mkey][pkey][trow]
# del self.fit.fit_params[key1]
self.mfitParamTableWidget[mkey].removeRow(row)
self.mfitParamData[mkey]=np.delete(self.mfitParamData[mkey],row,axis=0)
#updating the tooltips after removal of rows
for col in range(1,self.mfitParamTableWidget[mkey].columnCount()):
pkey = self.mfitParamTableWidget[mkey].horizontalHeaderItem(col).text()
for row in range(self.mfitParamTableWidget[mkey].rowCount()):
item=self.mfitParamTableWidget[mkey].item(row, col)
key = '__%s_%s_%03d' % (mkey, pkey, row)
item.setToolTip((key + ' = ' + self.format + ' \u00B1 ' + self.format) % \
(self.fit.fit_params[key].value, 0.0))
else:
QMessageBox.warning(self,'Nothing selected','No item is selected for removal',QMessageBox.Ok)
self.mfitParamTableWidget[mkey].cellChanged.connect(self.mfitParamChanged_new)
self.fit.func.output_params={'scaler_parameters': {}}
self.update_plot()
if self.mfitParamTableWidget[mkey].rowCount()==self.mpar_N[mkey]:
self.remove_mpar_button.setDisabled(True)
self.errorAvailable = False
self.reuse_sampler = False
self.calcConfInterButton.setDisabled(True)
def saveGenParameters(self,bfname=None):
# if len(self.genParamListWidget.selectedItems())==1:
if bfname is None:
bfname = QFileDialog.getSaveFileName(self, 'Provide the prefix of the generated files',self.curDir)[0]
if bfname!='':
bfname=os.path.splitext(bfname)[0]
else:
return
selParams=self.genParamListWidget.selectedItems()
for params in selParams:
text=params.text()
parname,var=text.split(' : ')
fname=bfname+'_'+parname+'.txt'
# if fname!='':
# if fname[-4:]!='.txt':
# fname=fname+'.txt'
header='Generated output file on %s\n'%time.asctime()
header += 'Category=%s\n' % self.curr_category
header += 'Function=%s\n' % self.funcListWidget.currentItem().text()
added_par=[]
for i in range(self.fixedParamTableWidget.rowCount()):
par, val = self.fixedParamTableWidget.item(i, 0).text(), self.fixedParamTableWidget.item(i, 1).text()
if 'meta' in self.fit.params['output_params'][parname]:
if par in self.fit.params['output_params'][parname]['meta']:
header += '%s=%s\n' % (par, str(self.fit.params['output_params'][parname]['meta'][par]))
added_par.append(par)
else:
header+='%s=%s\n'%(par,val)
if 'meta' in self.fit.params['output_params'][parname]:
for metakey in self.fit.params['output_params'][parname]['meta'].keys():
if metakey not in added_par:
header+='%s=%s\n'%(metakey,str(self.fit.params['output_params'][parname]['meta'][metakey]))
for i in range(self.sfitParamTableWidget.rowCount()):
par,val=self.sfitParamTableWidget.item(i,0).text(),self.sfitParamTableWidget.item(i,1).text()
header+='%s=%s\n'%(par,val)
for k in range(self.mfitParamTabWidget.count()):
mkey=self.mfitParamTabWidget.tabText(k)
for i in range(self.mfitParamTableWidget[mkey].rowCount()):
vartxt=self.mfitParamTableWidget[mkey].item(i,0).text()
for j in range(1,self.mfitParamTableWidget[mkey].columnCount()):
header+='%s_%s=%s\n'%(vartxt,self.mfitParamTableWidget[mkey].horizontalHeaderItem(j).text(),
self.mfitParamTableWidget[mkey].item(i,j).text())
if 'names' in self.fit.params['output_params'][parname]:
header += "col_names=%s\n" % str(self.fit.params['output_params'][parname]['names'])
else:
header += "col_names=%s\n" % var
header=header.encode("ascii","ignore")
header=header.decode()
if var=="['x', 'y', 'meta']" or var == "['x', 'y']":
header+='x\ty\n'
res=np.vstack((self.fit.params['output_params'][parname]['x'], self.fit.params['output_params'][parname]['y'])).T
                np.savetxt(fname,res,header=header,comments='#')
import xlrd
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import BayesianRidge
from sklearn.preprocessing import PolynomialFeatures
class pKa:
def __init__(self, path_to_data='./training_data.xlsx'):
'''
Load data from path (default: './training_data.xlsx') and specify variables that are being
used throughout the procedure. Display the underlying database.
'''
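        # Note: the spreadsheet is expected to provide at least a 'pKa_theo' column,
        # which is read into self.x below.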
        self.data = pd.read_excel(path_to_data)
display(self.data)
        self.x = np.array(self.data['pKa_theo'])
# python 3
# functions.py
# <NAME>
# Last update: 12/16/2020
"""
This is a script accompanying main.py for building machine learning models.
Note that this script only contains functions and should not be run independently.
"""
def load_file_preprocessed(filename, mode=['sequence', 'shape'], structural_ref_df=None):
"""This function imports the file downloaded from Dr. Voight's github."""
import numpy as np
import pandas as pd
df_import = pd.read_csv(filename, sep='\t', header=2)
# Make the predictor, note that this is different for shape/nucleotide:
if mode == 'sequence':
temp = [list(item)[0:3]+list(item)[4:7] for item in list(df_import['trans'])]
predictor = pd.DataFrame(temp)
predictor.columns = ['3L', '2L', '1L', '1R', '2R', '3R']
elif mode == 'shape':
temp1 = [item.split(',')[0] for item in list(df_import['trans'])]
temp2 = [item.split(',')[1] for item in list(df_import['trans'])]
df_val_major = structural_ref_df.loc[temp1, :]
df_val_alter = structural_ref_df.loc[temp2, :]
predictor = pd.concat([df_val_major.reset_index(drop=True), df_val_alter.reset_index(drop=True)], axis=1, ignore_index=True)
colnames = list(df_val_major.columns) + [str(item) + '_r' for item in list(df_val_alter.columns)]
predictor.columns = colnames
# Make the effector
effector_train, effector_test = np.array(df_import['train_rate']), np.array(df_import['test_rate'])
# Make the mutation class (A>C, A>G, CpG C>A, ...) index
index_class = np.zeros(shape=(len(df_import), ), dtype=int)
g_label = np.array([item[4] for item in list(df_import['trans'])])
for select in range(1, 6):
sclass = ['A,G', 'A,T', 'C,A', 'C,G', 'C,T'][select-1]
index_class[df_import['class'] == sclass] = select
for select in range(6, 9):
sclass = ['C,A', 'C,G', 'C,T'][select-6]
index_class[np.all([df_import['class'] == sclass, g_label == 'G'], axis=0)] = select
return predictor, effector_train, effector_test, index_class
def make_2dshape_neighbor(shape_df, labels):
"""A custom polynomial transform function that transforms 1D shape features into 2D (neighboring
interactions only)"""
import numpy as np
import pandas as pd
from copy import deepcopy
temp_posit = [item.split('_')[1] for item in labels]
temp_df = np.array(deepcopy(shape_df))
temp_labels = deepcopy(labels)
for i in range(np.shape(shape_df)[1]):
for j in range(i, np.shape(shape_df)[1]):
posit1 = temp_posit[i]
posit2 = temp_posit[j]
if posit1 == 'L':
if posit2 in ['L', 'CL', 'C']:
append_arr = np.array(shape_df.iloc[:,i] * shape_df.iloc[:,j]).reshape(-1, 1)
temp_df = np.concatenate((temp_df, append_arr), axis=1)
temp_labels = np.append(temp_labels, temp_labels[i]+'*'+temp_labels[j])
elif posit1 == 'CL':
if posit2 in ['CL', 'C', 'CR']:
append_arr = np.array(shape_df.iloc[:,i] * shape_df.iloc[:,j]).reshape(-1, 1)
temp_df = np.concatenate((temp_df, append_arr), axis=1)
temp_labels = np.append(temp_labels, temp_labels[i]+'*'+temp_labels[j])
elif posit1 == 'C':
if posit2 in ['C', 'CR', 'R']:
append_arr = np.array(shape_df.iloc[:,i] * shape_df.iloc[:,j]).reshape(-1, 1)
temp_df = np.concatenate((temp_df, append_arr), axis=1)
temp_labels = np.append(temp_labels, temp_labels[i]+'*'+temp_labels[j])
elif posit1 == 'CR':
if posit2 in ['CR', 'R']:
append_arr = np.array(shape_df.iloc[:,i] * shape_df.iloc[:,j]).reshape(-1, 1)
temp_df = np.concatenate((temp_df, append_arr), axis=1)
temp_labels = np.append(temp_labels, temp_labels[i]+'*'+temp_labels[j])
elif posit1 == 'R':
if posit2 in ['R']:
append_arr = np.array(shape_df.iloc[:,i] * shape_df.iloc[:,j]).reshape(-1, 1)
temp_df = np.concatenate((temp_df, append_arr), axis=1)
temp_labels = np.append(temp_labels, temp_labels[i]+'*'+temp_labels[j])
df_strucval_neighbor = pd.DataFrame(temp_df)
df_strucval_neighbor.columns = temp_labels
return df_strucval_neighbor
def make_4dshape(predictor_1d, predictor_raw, degree=4, neighbors_only=False):
"""This function makes a 2~4D polynoimal transformed version of the nucleotide predictor."""
import numpy as np
from copy import deepcopy
pred_wide = np.shape(predictor_1d)[1]
predictor_out = deepcopy(predictor_1d)
Labels_ref = np.array(['C', 'G', 'T', 'C', 'G', 'T', 'C', 'G', 'T', 'C', 'G', 'T', 'C', 'G', 'T', 'C', 'G', 'T'], dtype=object)
    labels_out = np.array([])
import os
import numpy as np
import xml.etree.ElementTree as ET
from deg2str import *
#from astropy import coordinates as coord
from astropy.coordinates import SkyCoord
from astropy import units as u
from simbad import *
from datetime import *
from PIL import Image
from query_wsa_fits import *
from query_pso_fits import *
from query_des_fits import *
from jdcal import *
import pdb
import glob
import astropy.io.fits as pyfits
#import astropy.io.fits as aplpy
stop=pdb.set_trace
def finder(source_name,allwise=False,rejallwise=False,tmass=False,rejtmass=False,PSO=True,UKIDSS=True,VHS=True,DES=True,UHS=True,keepfiles=False,allcolor='#FFFF00',rejcolor='b',tm_color='r',plot=False,savepdf=True,secondary='',addtext='',addtext2='',skipdownloads=False,circle_radius=0.0025,size=1.667,override_directory=None,primarypos_label=None,secondarypos_label=None,title=None,filename=None,buffer=False,gnirsacq=False,DSS=True,TMASSIM=True,WISE=True,circle_alpha=.8,labels=True,pos_list_gray_ra=None,pos_list_gray_dec=None,pos_list_gray_sizes=None,pos_list_gray_pmra=None,pos_list_gray_pmdec=None,gray_label=None,pos3=None,pos3_label=None,pos4=None,pos4_label=None,pos5=None,pos5_label=None,closefigs=None):
# Set $FINDER_PATH in your bash_profile if you would like to control where the finder charts are output
# size: arcmin
# allwise: overplot AllWISE catalog positions
# rejallwise = overplot AllWISE reject positions
# tmass = overplot 2MASS psc positions
# keepfiles = keep fits, tbl, and xml files
# allcolor = color of allwise symbols
# rejcolor = color of allwise reject symbols
# tm_color = color of tmass symbols
# plot = show plot (otherwise, finder is just made)
# savepdf = save a pdf of the finder
    # closefigs = True or False to indicate whether to close the figures at the end. Default: False if savepdf=True, True otherwise.
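    # A minimal usage sketch (the target name below is hypothetical):
    #   finder('2MASS J00000000+0000000', size=2.0, plot=False, savepdf=True)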
#Use buffer if needed
if buffer:
import matplotlib
matplotlib.use('Agg')
plot=False
#Import those after matplotlib is explicitly set as Agg in case Python is not installed as a Mac OS X framework
import pylab
import aplpy
#Adjust the default behavior of closefigs
if closefigs == None:
if savepdf is False:
closefigs = True
else:
closefigs = False
#Verify whether a working directory is set in the bash profile
main_dir = None
if override_directory:
main_dir = override_directory
else:
proc = subprocess.Popen(["echo $FINDER_PATH"], stdout=subprocess.PIPE,shell=True)
(out, err) = proc.communicate()
        if out != b'\n':
main_dir = out.decode().split('\n')[0]
if main_dir:
initial_dir = os.getcwd()
if not os.path.exists(main_dir):
os.makedirs(main_dir)
os.chdir(main_dir)
#List of colors
color_blue = '#377eb8'#RGB=[55,126,184]
color_red = '#e41a1c'#RGB=[228,26,28]
color_purple = '#b27bba'#RGB=[178,123,186]
color_green = '#4daf4a'#RGB=[77,175,74]
color_orange = '#ff7f00'#RGB=[255,127,0]
color_pink = '#f4d7d7'#RGB=[244,215,215]
col_yellow = '#ffde02'#RGB=[255,222,2]
nxplot = 5
nyplot = 3
fig_xsize = 11
fig_ysize = 8.5
if not labels:
nxplot -= 2
t1 = datetime.now()
ra,de = simbad(source_name)
ra2 = None
de2 = None
if secondary:
ra2,de2 = simbad(secondary)
ra3 = None
de3 = None
if pos3:
ra3,de3 = simbad(pos3)
ra4 = None
de4 = None
if pos4:
ra4,de4 = simbad(pos4)
ra5 = None
de5 = None
if pos5:
ra5,de5 = simbad(pos5)
if filename is None:
filename = source_name
#Download xml file from IRSA
xmlfile = "source.xml"
if skipdownloads is not True:
print("Getting xml file...")
cmd = "wget -O "+xmlfile+" 'http://irsa.ipac.caltech.edu/applications/finderchart/servlet/api?locstr="+str(ra)+"+"+str(de)+"&subsetsize="+str(size)+"' "
os.system(cmd)
# parse xml file
print("Parsing xml file...")
tree = ET.parse(xmlfile)
root = tree.getroot()
images = []
for image in root.iter('image'):
for child in image.getchildren():
if child.tag == 'surveyname':
surveyname = child.text
if child.tag == 'band':
band = child.text
if child.tag == 'obsdate':
obsdate = child.text
if child.tag == 'fitsurl':
fitsurl = child.text
if not DSS and (surveyname == 'DSS' or surveyname == 'DSS1' or surveyname == 'DSS2'):
continue
if not TMASSIM and surveyname == '2MASS':
continue
if not WISE and (surveyname == 'WISE' or surveyname == 'WISE (AllWISE)'):
continue
images.append([surveyname,band,obsdate,fitsurl])
if skipdownloads is not True:
if DSS:
print("Downloading DSS data...")
for i in range(len(images)):
if images[i][1] == 'DSS1 Blue':
cmd1 = "wget -O DSS1_Blue.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'DSS1 Red':
cmd1 = "wget -O DSS1_Red.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'DSS2 Blue':
cmd1 = "wget -O DSS2_Blue.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'DSS2 Red':
cmd1 = "wget -O DSS2_Red.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'DSS2 IR':
cmd1 = "wget -O DSS2_IR.fits '"+images[i][3]+"'"
os.system(cmd1)
if TMASSIM:
print("Downloading 2MASS data...")
for i in range(len(images)):
if images[i][1] == 'J':
cmd1 = "wget -O 2MASS_J.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'H':
cmd1 = "wget -O 2MASS_H.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'K':
cmd1 = "wget -O 2MASS_K.fits '"+images[i][3]+"'"
os.system(cmd1)
if WISE:
print("Downloading WISE data")
for i in range(len(images)):
if images[i][1] == 'w1':
cmd1 = "wget -O AllWISE_w1.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'w2':
cmd1 = "wget -O AllWISE_w2.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'w3':
cmd1 = "wget -O AllWISE_w3.fits '"+images[i][3]+"'"
os.system(cmd1)
if images[i][1] == 'w4':
cmd1 = "wget -O AllWISE_w4.fits '"+images[i][3]+"'"
os.system(cmd1)
if UKIDSS:
print("Downloading UKIDSS data")
#Remove previous data
os.system("rm *_UKIDSS_TMP.fits.gz")
query_wsa_fits(ra,de,size=size,output_file='UKIDSS_TMP.fits.gz',filter='all',catalog='UKIDSS')
if VHS:
print("Downloading VHS data")
#Remove previous data
os.system("rm *_VHS_TMP.fits.gz")
query_wsa_fits(ra,de,size=size,output_file='VHS_TMP.fits.gz',filter='all',catalog='VHS')
if PSO:
print("Downloading Pan-Starrs data")
#Remove previous data
os.system("rm *_PSO_TMP.fits*")
query_pso_fits(ra,de,size=size,output_file='PSO_TMP.fits')
if DES:
print("Downloading DES DR1 data")
#Remove previous data
os.system("rm *_DES_TMP.fits*")
query_des_fits(ra,de,size=size,output_file='DES_TMP.fits')
if UHS:
print("Downloading UHS DR1 data")
#Remove previous data
os.system("rm *_UHS_TMP.fits*")
query_wsa_fits(ra,de,size=size,output_file='UHS_TMP.fits.gz',filter='all',catalog='UHS')
#If no UKIDSS data could be downloaded, turn off the UKIDSS option
if len(glob.glob('*_UKIDSS_TMP.fits*')) == 0:
UKIDSS = False
#Check if UKIDSS_J is there. If it is then skip UHS
if len(glob.glob('J_UKIDSS_TMP.fits*')) != 0:
UHS = False
#If no VHS data could be downloaded, turn off the VHS option
if len(glob.glob('*_VHS_TMP.fits*')) == 0:
VHS = False
#If no PSO data could be downloaded, turn off the PSO option
if len(glob.glob('*_PSO_TMP.fits*')) == 0:
PSO = False
if len(glob.glob('*_DES_TMP.fits*')) == 0:
DES = False
if len(glob.glob('*_UHS_TMP.fits*')) == 0:
UHS = False
if PSO or DES:
nxplot = np.maximum(nxplot,5)
if UKIDSS or UHS:
nxplot = np.maximum(nxplot,4)
#Determine the amount of additional rows needed
vertical_spacing = 0
ukidss_spacing = 0
vhs_spacing = 0
tmass_spacing = 0
allwise_spacing = 1
dss_negspacing = 0
if PSO or DES:
vertical_spacing += 1
ukidss_spacing += 1
vhs_spacing += 1
tmass_spacing += 1
allwise_spacing += 1
if UKIDSS or UHS:
vertical_spacing += 1
vhs_spacing += 1
tmass_spacing += 1
allwise_spacing += 1
if VHS:
vertical_spacing += 1
tmass_spacing += 1
allwise_spacing += 1
if not DSS:
vertical_spacing -= 1
#dss_negspacing -= 1
ukidss_spacing -= 1
vhs_spacing -= 1
tmass_spacing -= 1
allwise_spacing -= 1
if not WISE:
vertical_spacing -= 1
#Adapt window size
nyplot += vertical_spacing
    fig_ysize *= np.sqrt(13.0/8.5)
from blackbox_selectinf.usecase.DTL import DropTheLoser
from blackbox_selectinf.learning.learning import (learn_select_prob, get_weight, get_CI)
import DTL_vae
import numpy as np
import argparse
import pickle
from regreg.smooth.glm import glm
from selectinf.algorithms import lasso
from scipy.stats import norm
import matplotlib.pyplot as plt
import torch
from selectinf.distributions.discrete_family import discrete_family
from argparse import Namespace
parser = argparse.ArgumentParser(description='DTL')
parser.add_argument('--idx', type=int, default=0)
parser.add_argument('--selection', type=str, default='mean')
parser.add_argument('--uc', type=float, default=2)
parser.add_argument('--basis_type', type=str, default='naive')
parser.add_argument('--indep', action='store_true', default=False)
parser.add_argument('--K', type=int, default=50)
parser.add_argument('--n', type=int, default=1000)
parser.add_argument('--m', type=int, default=500)
parser.add_argument('--n_b', type=int, default=1000)
parser.add_argument('--m_b', type=int, default=500)
parser.add_argument('--nrep', type=int, default=1)
parser.add_argument('--savemodel', action='store_true', default=False)
parser.add_argument('--modelname', type=str, default='model_')
parser.add_argument('--epochs', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--ntrain', type=int, default=5000)
parser.add_argument('--logname', type=str, default='log_')
parser.add_argument('--loadmodel', action='store_true', default=False)
parser.add_argument('--use_vae', action='store_true', default=False)
parser.add_argument('--nonnull', action='store_true', default=False)
args = parser.parse_args()
def main():
seed = args.idx
n = args.n
m = args.m
n_b = args.n_b
m_b = args.m_b
K = args.K
uc = args.uc
selection = args.selection
ntrain = args.ntrain
mu_list = np.zeros(K)
if args.nonnull:
mu_list[:25] = .1
logs = [dict() for x in range(args.nrep)]
for j in range(args.idx, args.idx + args.nrep):
print("Starting simulation", j)
logs[j - args.idx]['seed'] = j
np.random.seed(j)
X = np.zeros([K, n])
for k in range(K):
X[k, :] = np.random.randn(n) + mu_list[k]
X_bar = np.mean(X, 1)
if selection == 'mean':
max_rest = np.sort(X_bar)[-2]
win_idx = np.argmax(X_bar)
elif selection == "UC":
UC = X_bar + uc * np.std(X, axis=1, ddof=1)
win_idx = np.argmax(UC)
max_rest = np.sort(UC)[-2]
else:
raise AssertionError("invalid selection")
# Stage 2
X_2 = np.random.randn(m) + mu_list[win_idx]
DTL_class = DropTheLoser(X, X_2)
basis_type = args.basis_type
Z_data = DTL_class.basis(X, X_2, basis_type)
theta_data = DTL_class.theta_hat
print("Generate initial data")
training_data = DTL_class.gen_train_data(ntrain=ntrain, n_b=n_b, m_b=m_b, basis_type=args.basis_type, remove_D0=args.indep)
Z_train = training_data['Z_train']
W_train = training_data['W_train']
gamma = training_data['gamma']
print(np.mean(W_train))
if args.use_vae and np.mean(W_train) <= .1:
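            # If fewer than ~10% of the bootstrap draws reproduce the selection event, a VAE is
            # trained on the positive draws and its decoder is used to synthesize additional
            # positive training examples before fitting the selection-probability network.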
print("Start generating more positive data")
pos_ind = W_train == 1
Z_pos = torch.tensor(Z_train[pos_ind, :], dtype=torch.float)
input_dim = Z_pos.shape[1]
bottleneck_dim = 10
vae_model = DTL_vae.VAE(input_dim, bottleneck_dim)
vae_path = "DTL_VAE_seed_{}_n_{}_K_{}_m_{}.pt".format(seed, n, K, m)
output_dim = n * K + m
decoder = DTL_vae.Decoder(input_dim, output_dim)
decoder_path = "DTL_decoder_seed_{}_n_{}_K_{}_m_{}.pt".format(seed, n, K, m)
try:
vae_model.load_state_dict(torch.load(vae_path))
decoder.load_state_dict(torch.load(decoder_path))
except:
print("no model found, start training")
DTL_vae.train_networks(n, K, bottleneck_dim, Z_pos, vae_path, decoder_path, output_dim, print_every=100, dec_epochs=2)
vae_model.load_state_dict(torch.load(vae_path))
decoder.load_state_dict(torch.load(decoder_path))
n_vae = ntrain
Z_vae = vae_model.decode(torch.randn(n_vae, bottleneck_dim))
X_vae = decoder(Z_vae).detach().numpy()
X_vae_1 = X_vae[:, :n * K].reshape(-1, K, n)
X_vae_2 = X_vae[:, n * K:].reshape(-1, m)
Z_train_vae = np.zeros([n_vae, K + 1])
W_train_vae = np.zeros(n_vae)
print("Start generating data using VAE+decoder")
for i in range(n_vae):
X_1_b = X_vae_1[i, :, :]
X_2_b = X_vae_2[i, :]
X_bar_b = np.mean(X_1_b, 1)
if np.argmax(X_bar_b) == win_idx:
W_train_vae[i] = 1
Z_train_vae[i, :] = DTL_class.basis(X_1_b, X_2_b, basis_type=basis_type)
Z_train = np.concatenate([Z_train, Z_train_vae])
W_train = np.concatenate([W_train, W_train_vae])
print(Z_train.shape)
# train
print("Start learning selection probability")
net, flag, pr_data = learn_select_prob(Z_train, W_train, Z_data=torch.tensor(Z_data, dtype=torch.float),
num_epochs=args.epochs, batch_size=args.batch_size, verbose=True)
print('pr_data', pr_data)
logs[j - args.idx]['pr_data'] = pr_data
logs[j - args.idx]['flag'] = flag
if args.indep:
gamma_D0 = training_data['gamma_D0']
Z_data = Z_data - gamma_D0 @ DTL_class.D_0.reshape(1, )
N_0 = Z_data - gamma @ theta_data.reshape(1, )
target_var = 1 / (n + m)
target_sd = np.sqrt(target_var)
gamma_list = np.linspace(-20 / np.sqrt(n_b + m_b), 20 / np.sqrt(n_b + m_b), 101)
target_theta = theta_data + gamma_list
target_theta = target_theta.reshape(1, 101)
weight_val = get_weight(net, target_theta, N_0, gamma)
interval_nn, pvalue_nn = get_CI(target_theta, weight_val, target_var, theta_data, return_pvalue=True)
logs[j - args.idx]['interval_nn'] = interval_nn
if interval_nn[0] <= mu_list[DTL_class.win_idx] <= interval_nn[1]:
logs[j - args.idx]['covered_nn'] = 1
else:
logs[j - args.idx]['covered_nn'] = 0
logs[j - args.idx]['width_nn'] = interval_nn[1] - interval_nn[0]
logs[j - args.idx]['pvalue_nn'] = pvalue_nn
print("pvalue", pvalue_nn)
##################################################
# check learning
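        # The loop below repeatedly bootstraps the K arms (pooling the winner's two stages),
        # keeps only resamples in which the same arm is selected again, and recomputes the
        # learned selective p-value, so the empirical rejection rate at 0.05 can be compared
        # against the nominal level.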
count = 0
nb = 50
X_pooled = np.concatenate([X[win_idx], X_2])
pval = []
for ell in range(int(nb / np.mean(W_train))):
X_b = np.zeros([K, n_b])
for k in range(K):
if k != win_idx:
X_b[k, :] = X[k, np.random.choice(n, n_b, replace=True)]
if k == win_idx:
X_b[k, :] = X_pooled[np.random.choice(n + m, n_b, replace=True)]
if selection == 'mean':
idx = np.argmax(np.mean(X_b, 1))
else:
idx = np.argmax(np.mean(X_b, 1) + uc * np.std(X_b, axis=1, ddof=1))
if idx != win_idx:
continue
else:
count += 1
X_2_b = X_pooled[np.random.choice(n + m, m_b, replace=True)]
d_M = np.mean(np.concatenate([X_b[win_idx], X_2_b]))
observed_target = d_M
target_theta_0 = d_M + gamma_list
target_theta_0 = target_theta_0.reshape(1, 101)
target_val = target_theta_0
weight_val = get_weight(net, target_theta_0, N_0, gamma)
weight_val_2 = weight_val * norm.pdf((target_val - observed_target) / target_sd)
exp_family = discrete_family(target_val.reshape(-1), weight_val_2.reshape(-1))
hypothesis = theta_data
pivot = exp_family.cdf((hypothesis - observed_target) / target_var, x=observed_target)
pivot = 2 * min(pivot, 1 - pivot)
pval.append(pivot)
if count == nb:
break
pval = np.array(pval)
logs[j - args.idx]['pval'] = pval
logs[j - args.idx]['false_rej'] = sum(pval <= 0.05) / len(pval)
print(pval)
print("reject:", sum(pval <= 0.05) / len(pval))
##################################################
# true interval
var_0 = 1 / n - 1 / (n + m)
observed_target = theta_data
gamma_list = np.linspace(-20 / np.sqrt(n_b + m_b), 20 / np.sqrt(n_b + m_b), 101)
target_val = gamma_list + theta_data
prob_gamma_true = []
for gamma_t in gamma_list:
if selection == "mean":
                prob_gamma_true.append(norm.sf((np.sort(X_bar)
import sys, os, numpy as np, pandas as pd
from ardnmf.ardnmf import ARDNMF, PRIORS, EXP_PRIOR
from scipy.stats import iqr
from tqdm.notebook import tqdm
from collections import defaultdict
# Adapted from https://github.com/cmsc828p-f18/Reproducing-Kim2016
def extract_signatures_Kim2016(sbs96_df):
# Initialize ARDNMF hyperparameters
n_init = 50 # default: 50 (following Kim, et al. paper)
a = 10 # default: 10 (following SignatureAnalyzer v1.1 source code)
beta = 1 # default: 1 (KL-divergence)
tolerance = 1e-7 # default: 1e-7 (following Kim, et al. paper)
max_iter = 100000 # default: 100000 (following SignatureAnalyzer v1.1 source code)
prior = EXP_PRIOR # default: exponential (following Kim, et al. paper)
tau = 0.001 # default: 0.001 (following SignatureAnalyzer v1.1 source code)
n_permutations = 25000 # default: 25000
random_seed = 1 # default: 1
# Split hypermutators, which are defined to be samples where:
# N_SNV > median(N_SNV) + 1.5*IQR (interquartile range)
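    # Worked example with hypothetical counts: for row sums [100, 120, 1000] the median is 120
    # and the IQR is 450, so the threshold is 120 + 1.5*450 = 795; the 1000-mutation sample is
    # split into two rows of 500 each, and the check is repeated until no row exceeds the threshold.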
def split_hypermutators(X):
# Repeat the hypermutator splitting until it's over
sample_indices = list(range(X.shape[0]))
hypermuts = True
X_star = np.array(X)
while hypermuts:
# Row sums give N_SNV, then use median + 1.5 IQR to identify
# hypermutated indices
n_snvs = X_star.sum(axis=1)
hypermut_thresh = np.median(n_snvs) + 1.5*iqr(n_snvs)
hypermut_idx = set(np.where(n_snvs > hypermut_thresh)[0])
hypermuts = len(hypermut_idx) > 0
# Split the rows at hypermutated indices in two
rows = []
new_sample_idx = []
for i, row in enumerate(X_star):
if i in hypermut_idx:
rows.append(row/2.)
rows.append(row/2.)
new_sample_idx.append(sample_indices[i])
new_sample_idx.append(sample_indices[i])
else:
rows.append(row)
new_sample_idx.append(sample_indices[i])
# Create new X_star
X_star = np.array(rows)
sample_indices = new_sample_idx
return X_star, sample_indices
X = sbs96_df.values
samples = sbs96_df.index
N = len(samples)
categories = list(sbs96_df.columns)
cat_index = dict(zip(categories, range(len(categories))))
# Create mutation count matrix and split "hypermutators"
X_star, sample_indices = split_hypermutators(X)
    np.random.seed(random_seed)
# xyz Dec 2017
from __future__ import print_function
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
#from plyfile import (PlyData, PlyElement, make2d, PlyParseError, PlyProperty)
import numpy as np
import h5py
import glob
import time
import multiprocessing as mp
import itertools
from block_data_prep_util import Normed_H5f
ROOT_DIR = os.path.dirname(BASE_DIR)
DATA_DIR = os.path.join(ROOT_DIR,'data')
DATASET_DIR={}
DATASET_DIR['scannet'] = os.path.join(DATA_DIR,'scannet_data')
DATASET_DIR['stanford_indoor3d'] = os.path.join(DATA_DIR,'stanford_indoor3d')
matterport3D_h5f_dir = os.path.join(DATA_DIR,'Matterport3D_H5F/all_merged_nf5')
DATASET_DIR['matterport3d'] = matterport3D_h5f_dir
#-------------------------------------------------------------------------------
# provider for training and testing
#------------------------------------------------------------------------------
class Net_Provider():
'''
(1) provide data for training
(2) load file list to list of Norm_H5f[]
dataset_name: 'stanford_indoor3d' 'scannet'
all_filename_glob: stride_1_step_2_test_small_4096_normed/*.nh5
    eval_fnglob_or_rate: file name glob string or fraction of files used for evaluation, e.g. 'scan1*.nh5' or 0.2
    num_point_block: if a block's point count differs from this, the block is randomly resampled to this size
feed_data_elements: sub list of ['xyz_1norm','xyz_midnorm','nxnynz','color_1norm','intensity_1norm']
feed_label_elements: sub list of ['label_category','label_instance','label_material']
'''
# input normalized h5f files
# normed_h5f['data']: [blocks*block_num_point*num_channel],like [1000*4096*9]
    # one batch would contain several (batch_size) blocks; this will be set outside the
    # provider with train_start_idx and test_start_idx
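    # A minimal usage sketch (paths, globs and element names below are assumptions, not fixed values):
    #   provider = Net_Provider(dataset_name='scannet',
    #                           all_filename_glob=['stride_1_step_2'],
    #                           eval_fnglob_or_rate=0.2,
    #                           only_evaluate=False,
    #                           num_point_block=4096,
    #                           feed_data_elements=['xyz_midnorm', 'color_1norm'],
    #                           feed_label_elements=['label_category'])
    #   data, label, sample_weights = provider.get_train_batch(0, batch_size)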
def __init__(self,dataset_name,all_filename_glob,eval_fnglob_or_rate,\
only_evaluate,num_point_block=None,feed_data_elements=['xyz_midnorm'],feed_label_elements=['label_category'],\
train_num_block_rate=1,eval_num_block_rate=1 ):
self.dataset_name = dataset_name
self.feed_data_elements = feed_data_elements
self.feed_label_elements = feed_label_elements
self.num_point_block = num_point_block
all_file_list = self.get_all_file_name_list(dataset_name,all_filename_glob)
train_file_list,eval_file_list = self.split_train_eval_file_list\
(all_file_list,eval_fnglob_or_rate)
if only_evaluate:
open_type = 'a' # need to write pred labels
else:
open_type = 'r'
self.train_file_N = train_file_N = len(train_file_list)
eval_file_N = len(eval_file_list)
self.g_file_N = train_file_N + eval_file_N
self.normed_h5f_file_list = normed_h5f_file_list = train_file_list + eval_file_list
#-----------------------------------------------------------------------
# open each file as a Normed_H5f class instance
self.norm_h5f_L = []
# self.g_block_idxs: within the whole train/test dataset (several files)
# record the start/end row idx of each file to help search data from all files
# [ [start_global_row_idxs,end_global__idxs] ]
# [[ 0, 38], [ 38, 90],[ 90, 150],...[259, 303],[303, 361],[361, 387]]
# self.train_num_blocks: 303
# self.eval_num_blocks: 84
# self.eval_global_start_idx: 303
self.g_block_idxs = np.zeros((self.g_file_N,2),np.int32)
self.eval_global_start_idx = None
for i,fn in enumerate(normed_h5f_file_list):
assert(os.path.exists(fn))
h5f = h5py.File(fn,open_type)
norm_h5f = Normed_H5f(h5f,fn)
self.norm_h5f_L.append( norm_h5f )
self.g_block_idxs[i,1] = self.g_block_idxs[i,0] + norm_h5f.data_set.shape[0]
if i<self.g_file_N-1:
self.g_block_idxs[i+1,0] = self.g_block_idxs[i,1]
self.eval_global_start_idx = self.g_block_idxs[train_file_N,0]
if train_file_N > 0:
self.train_num_blocks = self.g_block_idxs[train_file_N-1,1] # = self.eval_global_start_idx
else: self.train_num_blocks = 0
self.eval_num_blocks = self.g_block_idxs[-1,1] - self.train_num_blocks
self.num_classes = self.norm_h5f_L[0].num_classes
self.label_ele_idxs = self.norm_h5f_L[0].label_ele_idxs
self.label_eles = self.norm_h5f_L[0].label_set_elements
self.update_sample_loss_weight()
self.update_train_eval_shuffled_idx()
#-----------------------------------------------------------------------
# use only part of the data to test code:
if train_num_block_rate!=1 or eval_num_block_rate!=1:
self.get_data_label_shape()
print('whole train data shape: %s'%(str(self.train_data_shape)))
print('whole eval data shape: %s'%(str(self.eval_data_shape)))
# train: use the front part
self.train_num_blocks = int( self.train_num_blocks * train_num_block_rate )
if not only_evaluate:
self.train_num_blocks = max(self.train_num_blocks,2)
new_eval_num_blocks = int( max(2,self.eval_num_blocks * eval_num_block_rate) )
# eval:use the back part, so train_file_list and eval_file_list can be
# the same
self.eval_global_start_idx += self.eval_num_blocks - new_eval_num_blocks
self.eval_num_blocks = new_eval_num_blocks
self.get_data_label_shape()
self.update_data_summary()
#self.test_tmp()
def update_data_summary(self):
self.data_summary_str = '%s \nfeed_data_elements:%s \nfeed_label_elements:%s \n'%(self.dataset_name,self.feed_data_elements,self.feed_label_elements)
self.data_summary_str += 'train data shape: %s \ntest data shape: %s \n'%(
str(self.train_data_shape),str(self.eval_data_shape))
# self.data_summary_str += 'train labels histogram: %s \n'%( np.array_str(np.transpose(self.train_labels_hist_1norm) ))
# self.data_summary_str += 'test labels histogram: %s \n'%( np.array_str(np.transpose(self.test_labels_hist_1norm) ))
self.data_summary_str += 'labels histogram: %s \n'%( np.array_str(np.transpose(self.labels_hist_1norm[:,0]) ))
#print(self.data_summary_str)
def get_all_file_name_list(self,dataset_name,all_filename_globs):
all_file_list = []
fn_globs = []
for all_filename_glob in all_filename_globs:
fn_glob = os.path.join(DATASET_DIR[dataset_name],all_filename_glob+'*.nh5')
all_file_list += glob.glob( fn_glob )
fn_globs.append(fn_glob)
if len(all_file_list)== 0:
print('no file in:')
print(fn_globs)
return all_file_list
def split_train_eval_file_list(self,all_file_list,eval_fnglob_or_rate=None):
if eval_fnglob_or_rate == None:
if self.dataset_name=='stanford_indoor3d':
eval_fnglob_or_rate = 'Area_6'
if self.dataset_name=='scannet':
eval_fnglob_or_rate = 0.2
if type(eval_fnglob_or_rate)==str:
# split by name
train_file_list = []
eval_file_list = []
for fn in all_file_list:
if fn.find(eval_fnglob_or_rate) > 0:
eval_file_list.append(fn)
else:
train_file_list.append(fn)
elif type(eval_fnglob_or_rate) == float:
# split by number
n = len(all_file_list)
m = int(n*(1-eval_fnglob_or_rate))
train_file_list = all_file_list[0:m]
eval_file_list = all_file_list[m:n]
log_str = '\ntrain file list (n=%d) = \n%s\n\n'%(len(train_file_list),train_file_list[-2:])
log_str += 'eval file list (n=%d) = \n%s\n\n'%(len(eval_file_list),eval_file_list[-2:])
print( log_str )
return train_file_list,eval_file_list
def get_data_label_shape(self):
data_batches,label_batches,_ = self.get_train_batch(0,1)
self.train_data_shape = list(data_batches.shape)
self.train_data_shape[0] = self.train_num_blocks
self.num_channels = self.train_data_shape[2]
self.eval_data_shape = list(data_batches.shape)
self.eval_data_shape[0] = self.eval_num_blocks
self.num_label_eles = label_batches.shape[2]
def test_tmp(self):
s = 0
e = 1
train_data,train_label = self.get_train_batch(s,e)
eval_data,eval_label = self.get_eval_batch(s,e)
print('train:\n',train_data[0,0,:])
print('eval:\n',eval_data[0,0,:])
print('err=\n',train_data[0,0,:]-eval_data[0,0,:])
def __exit__(self):
print('exit Net_Provider')
        for norm_h5f in self.norm_h5f_L:
norm_h5f.h5f.close()
def global_idx_to_local(self,g_start_idx,g_end_idx):
assert(g_start_idx>=0 and g_start_idx<=self.g_block_idxs[-1,1])
assert(g_end_idx>=0 and g_end_idx<=self.g_block_idxs[-1,1])
for i in range(self.g_file_N):
if g_start_idx >= self.g_block_idxs[i,0] and g_start_idx < self.g_block_idxs[i,1]:
start_file_idx = i
local_start_idx = g_start_idx - self.g_block_idxs[i,0]
for j in range(i,self.g_file_N):
if g_end_idx > self.g_block_idxs[j,0] and g_end_idx <= self.g_block_idxs[j,1]:
end_file_idx = j
local_end_idx = g_end_idx - self.g_block_idxs[j,0]
return start_file_idx,end_file_idx,local_start_idx,local_end_idx
def set_pred_label_batch(self,pred_label,g_start_idx,g_end_idx):
start_file_idx,end_file_idx,local_start_idx,local_end_idx = \
self.global_idx_to_local(g_start_idx,g_end_idx)
pred_start_idx = 0
for f_idx in range(start_file_idx,end_file_idx+1):
if f_idx == start_file_idx:
start = local_start_idx
else:
start = 0
if f_idx == end_file_idx:
end = local_end_idx
else:
                end = self.norm_h5f_L[f_idx].labels_set.shape[0]
n = end-start
self.norm_h5f_L[f_idx].set_dset_value('pred_label',\
pred_label[pred_start_idx:pred_start_idx+n,:],start,end)
pred_start_idx += n
self.norm_h5f_L[f_idx].h5f.flush()
def get_global_batch(self,g_start_idx,g_end_idx):
start_file_idx,end_file_idx,local_start_idx,local_end_idx = \
self.global_idx_to_local(g_start_idx,g_end_idx)
#t0 = time.time()
data_ls = []
label_ls = []
center_mask = []
for f_idx in range(start_file_idx,end_file_idx+1):
if f_idx == start_file_idx:
start = local_start_idx
else:
start = 0
if f_idx == end_file_idx:
end = local_end_idx
else:
end = self.norm_h5f_L[f_idx].labels_set.shape[0]
data_i,feed_data_elements_idxs = self.norm_h5f_L[f_idx].get_normed_data(start,end,self.feed_data_elements)
label_i = self.norm_h5f_L[f_idx].get_label_eles(start,end,self.feed_label_elements)
data_ls.append(data_i)
label_ls.append(label_i)
if 'xyz_midnorm' in self.feed_data_elements:
xyz_midnorm_i = data_i[:,:,feed_data_elements_idxs['xyz_midnorm']]
else:
xyz_midnorm_i,_ = self.norm_h5f_L[f_idx].get_normed_data(start,end,['xyz_midnorm'])
center_mask_i = self.get_center_mask(xyz_midnorm_i)
center_mask.append(center_mask_i)
data_batches = np.concatenate(data_ls,0)
label_batches = np.concatenate(label_ls,0)
center_mask = np.concatenate(center_mask,0)
data_batches,label_batches = self.sample(data_batches,label_batches,self.num_point_block)
num_label_eles = self.labels_weights.shape[1]
center_mask = np.expand_dims(center_mask,axis=-1)
center_mask = np.tile(center_mask,(1,1,num_label_eles))
sample_weights = []
for k in range(num_label_eles):
sample_weights_k = np.take(self.labels_weights[:,k],label_batches[:,:,k])
sample_weights.append( np.expand_dims(sample_weights_k,axis=-1) )
sample_weights = np.concatenate(sample_weights,axis=-1)
sample_weights *= center_mask
# print('\nin global')
# print('file_start = ',start_file_idx)
# print('file_end = ',end_file_idx)
# print('local_start = ',local_start_idx)
# print('local end = ',local_end_idx)
# #print('data = \n',data_batches[0,:])
#t_block = (time.time()-t0)/(g_end_idx-g_start_idx)
#print('get_global_batch t_block:%f ms'%(1000.0*t_block))
return data_batches,label_batches,sample_weights
def get_center_mask(self,xyz_midnorm,edge_rate=0.12):
# true for center, false for edge
# edge_rate: distance to center of the block_step
block_step = self.norm_h5f_L[0].h5f.attrs['block_step']
center_rate = np.abs(xyz_midnorm / block_step) # -0.5 ~ 0.5
        center_mask = (center_rate[:,:,0] < (0.5-edge_rate)) * ( center_rate[:,:,1] < (0.5-edge_rate) )
#print('center n rate= %f'%(np.sum(center_mask).astype(float)/xyz_midnorm.shape[0]/xyz_midnorm.shape[1]))
return center_mask
def get_shuffled_global_batch(self,g_shuffled_idx_ls):
data_batches = []
label_batches = []
sample_weights = []
for idx in g_shuffled_idx_ls:
data_i,label_i,smw_i = self.get_global_batch(idx,idx+1)
data_batches.append(data_i)
label_batches.append(label_i)
sample_weights.append(smw_i)
data_batches = np.concatenate(data_batches,axis=0)
label_batches = np.concatenate(label_batches,axis=0)
sample_weights = np.concatenate(sample_weights,axis=0)
return data_batches,label_batches,sample_weights
def update_train_eval_shuffled_idx(self):
self.train_shuffled_idx = np.arange(self.train_num_blocks)
np.random.shuffle(self.train_shuffled_idx)
self.eval_shuffled_idx = np.arange(self.eval_num_blocks)
np.random.shuffle(self.eval_shuffled_idx)
def get_train_batch(self,train_start_batch_idx,train_end_batch_idx):
assert(train_start_batch_idx>=0 and train_start_batch_idx<=self.train_num_blocks)
assert(train_end_batch_idx>=0 and train_end_batch_idx<=self.train_num_blocks)
# all train files are before eval files
IsShuffleIdx = False
if IsShuffleIdx:
g_shuffled_batch_idx = self.train_shuffled_idx[range(train_start_batch_idx,train_end_batch_idx)]
return self.get_shuffled_global_batch(g_shuffled_batch_idx)
else:
return self.get_global_batch(train_start_batch_idx,train_end_batch_idx)
def get_eval_batch(self,eval_start_batch_idx,eval_end_batch_idx):
assert(eval_start_batch_idx>=0 and eval_start_batch_idx<=self.eval_num_blocks)
assert(eval_end_batch_idx>=0 and eval_end_batch_idx<=self.eval_num_blocks)
eval_start_batch_idx += self.eval_global_start_idx
eval_end_batch_idx += self.eval_global_start_idx
IsShuffleIdx = False
if IsShuffleIdx:
g_shuffled_batch_idx = self.eval_shuffled_idx[range(eval_start_batch_idx,eval_end_batch_idx)]
return self.get_shuffled_global_batch(g_shuffled_batch_idx)
else:
return self.get_global_batch(eval_start_batch_idx,eval_end_batch_idx)
def gen_gt_pred_objs(self,visu_fn_glob='The glob for file to be visualized',obj_dump_dir=None):
for k,norm_h5f in enumerate(self.norm_h5f_L):
if norm_h5f.file_name.find(visu_fn_glob) > 0:
norm_h5f.gen_gt_pred_obj( obj_dump_dir )
def sample(self,data_batches,label_batches,num_point_block):
NUM_POINT_IN = data_batches.shape[1]
if num_point_block == None:
num_point_block = NUM_POINT_IN
if NUM_POINT_IN != num_point_block:
sample_choice = GLOBAL_PARA.sample(NUM_POINT_IN,num_point_block,'random')
data_batches = data_batches[:,sample_choice,...]
label_batches = label_batches[:,sample_choice]
return data_batches,label_batches
def update_sample_loss_weight(self):
        # the larger a label's sample count, the smaller its loss weight
# get all the labels
train_labels_hist_1norm = []
test_labels_hist_1norm = []
labels_hist_1norm = []
labels_weights = []
for label_name in self.norm_h5f_L[0].label_set_elements:
if label_name in self.feed_label_elements:
label_hist = np.zeros(self.num_classes).astype(np.int64)
train_label_hist = np.zeros(self.num_classes).astype(np.int64)
test_label_hist = np.zeros(self.num_classes).astype(np.int64)
for k,norme_h5f in enumerate(self.norm_h5f_L):
label_hist_k = norme_h5f.labels_set.attrs[label_name+'_hist']
label_hist += label_hist_k
if k < self.train_file_N:
train_label_hist += label_hist_k
else:
test_label_hist += label_hist_k
train_labels_hist_1norm.append( np.expand_dims( train_label_hist / np.sum(train_label_hist).astype(float),axis=-1) )
test_labels_hist_1norm.append( np.expand_dims(test_label_hist / np.sum(test_label_hist).astype(float),axis=-1) )
                cur_labels_hist_1norm = np.expand_dims(label_hist / np.sum(label_hist).astype(float), axis=-1)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 7 13:49:32 2016
@author: pchambers
"""
import airconics.AirCONICStools as act
import numpy as np
from OCC.Core.gp import gp_Pnt, gp_Dir, gp_Vec
import pytest
def test_coslin():
abscissa = act.coslin(0.5, 8, 8)[0]
ans = np.array([0.0,
0.01253604390908819,
0.04951556604879043,
0.1090842587659851,
0.1882550990706332,
0.2830581304412209,
0.3887395330218428,
0.49999999999999994,
0.5625,
0.625,
0.6875,
0.75,
0.8125,
0.875,
0.9375,
1.0])
assert(np.all(np.abs(abscissa - ans) < 1e-10))
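# Note (added for clarity, based on the reference values above): coslin is expected to return
# an abscissa that is cosine-spaced from 0 up to the transition point (0.5 here) and then
# linearly spaced from there to 1.0; the argument names used elsewhere are assumptions.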
def test_Objects_Extents():
box = act.BRepPrimAPI_MakeBox(1, 1, 1).Shape()
X = np.array(act.ObjectsExtents(box))
    expected = np.array([0, 0, 0, 1, 1, 1])
    assert(np.all(np.abs(X - expected) < 1e-10))
"""
"""
import os
import random
import argparse
import math
from copy import deepcopy
from functools import reduce
import logging
from typing import Union, Optional, Any, List, Tuple, Dict, NoReturn
from numbers import Real
import numpy as np
import pandas as pd
from scipy.io import loadmat, savemat
import multiprocessing as mp
from easydict import EasyDict as ED
from utils import CPSC_STATS, get_optimal_covering
from cfg import BaseCfg, PreprocCfg, FeatureCfg
from signal_processing.ecg_preproc import parallel_preprocess_signal
from signal_processing.ecg_features import compute_ecg_features
__all__ = [
"CPSC2020Reader",
]
class CPSC2020Reader(object):
"""
The 3rd China Physiological Signal Challenge 2020:
Searching for Premature Ventricular Contraction (PVC) and Supraventricular Premature Beat (SPB) from Long-term ECGs
    ABOUT CPSC2020:
    ---------------
    1. training data consists of 10 single-lead ECG recordings collected from arrhythmia patients, each recording lasting about 24 hours
2. data and annotations are stored in v5 .mat files
3. A02, A03, A08 are patient with atrial fibrillation
4. sampling frequency = 400 Hz
5. Detailed information:
-------------------------------------------------------------------------
rec ?AF Length(h) # N beats # V beats # S beats # Total beats
A01 No 25.89 109,062 0 24 109,086
A02 Yes 22.83 98,936 4,554 0 103,490
A03 Yes 24.70 137,249 382 0 137,631
A04 No 24.51 77,812 19,024 3,466 100,302
A05 No 23.57 94,614 1 25 94,640
A06 No 24.59 77,621 0 6 77,627
A07 No 23.11 73,325 15,150 3,481 91,956
A08 Yes 25.46 115,518 2,793 0 118,311
A09 No 25.84 88,229 2 1,462 89,693
A10 No 23.64 72,821 169 9,071 82,061
6. challenging factors for accurate detection of SPB and PVC:
amplitude variation; morphological variation; noise
NOTE:
-----
1. the records can roughly be classified into 4 groups:
N: A01, A03, A05, A06
V: A02, A08
S: A09, A10
VS: A04, A07
2. as premature beats and atrial fibrillation can co-exists
(via the following code, and data from CINC2020),
the situation becomes more complicated.
>>> from utils.scoring_aux_data import dx_cooccurrence_all
>>> dx_cooccurrence_all.loc["AF", ["PAC","PVC","SVPB","VPB"]]
... PAC 20
... PVC 19
... SVPB 4
... VPB 20
... Name: AF, dtype: int64
this could also be seen from this dataset, via the following code as an example:
>>> from data_reader import CPSC2020Reader as CR
>>> db_dir = '/media/cfs/wenhao71/data/CPSC2020/TrainingSet/'
>>> dr = CR(db_dir)
>>> rec = dr.all_records[1]
>>> dr.plot(rec, sampfrom=0, sampto=4000, ticks_granularity=2)
ISSUES:
-------
1. currently, using `xqrs` as qrs detector,
       a lot more (by more than 1000) rpeaks would be detected for A02, A07, A08,
       which might be caused by motion artefacts (or AF?);
       a lot fewer (by more than 1000) rpeaks would be detected for A04.
numeric details are as follows:
----------------------------------------------
rec ?AF # beats by xqrs # Total beats
A01 No 109502 109,086
A02 Yes 119562 103,490
A03 Yes 135912 137,631
A04 No 92746 100,302
A05 No 94674 94,640
A06 No 77955 77,627
A07 No 98390 91,956
A08 Yes 126908 118,311
A09 No 89972 89,693
A10 No 83509 82,061
2. A04 has duplicate 'PVC_indices' (13534856,27147621,35141190 all appear twice):
before correction of `load_ann`:
>>> from collections import Counter
>>> db_dir = "/mnt/wenhao71/data/CPSC2020/TrainingSet/"
>>> data_gen = CPSC2020Reader(db_dir=db_dir,working_dir=db_dir)
>>> rec = 4
>>> ann = data_gen.load_ann(rec)
>>> Counter(ann['PVC_indices']).most_common()[:4]
would produce [(13534856, 2), (27147621, 2), (35141190, 2), (848, 1)]
3. when extracting morphological features using augmented rpeaks for A04,
`RuntimeWarning: invalid value encountered in double_scalars` would raise
for `R_value = (R_value - y_min) / (y_max - y_min)` and
for `y_values[n] = (y_values[n] - y_min) / (y_max - y_min)`.
this is caused by the 13882273-th sample, which is contained in 'PVC_indices',
however, whether it is a PVC beat, or just motion artefact, is in doubt!
TODO:
-----
1. use SNR to filter out too noisy segments?
2. for ML, consider more features
Usage:
------
1. ecg arrhythmia (PVC, SPB) detection
References:
-----------
[1] http://www.icbeb.org/CPSC2020.html
[2] https://github.com/PIA-Group/BioSPPy
"""
def __init__(self, db_dir:str, working_dir:Optional[str]=None, verbose:int=1, **kwargs):
""" finished, to be improved,
Parameters:
-----------
db_dir: str,
directory where the database is stored
working_dir: str, optional,
working directory, to store intermediate files and log file
        verbose: int, default 1,
"""
self.db_dir = db_dir
self.working_dir = working_dir or os.getcwd()
self.verbose = verbose
self.fs = 400
self.spacing = 1000/self.fs
self.rec_ext = '.mat'
self.ann_ext = '.mat'
self.nb_records = 10
self.all_records = ["A{0:02d}".format(i) for i in range(1,1+self.nb_records)]
self.all_annotations = ["R{0:02d}".format(i) for i in range(1,1+self.nb_records)]
self.all_references = self.all_annotations
self.rec_dir = os.path.join(self.db_dir, "data")
self.ann_dir = os.path.join(self.db_dir, "ref")
self.data_dir = self.rec_dir
self.ref_dir = self.ann_dir
self.subgroups = ED({
"N": ["A01", "A03", "A05", "A06",],
"V": ["A02", "A08"],
"S": ["A09", "A10"],
"VS": ["A04", "A07"],
})
self.df_stats = CPSC_STATS
self.palette = {"spb": "green", "pvc": "red",}
# a dict mapping the string annotations ('N', 'S', 'V') to digits (0, 1, 2)
self.class_map = kwargs.get("class_map", BaseCfg.class_map)
# NOTE:
# the ordering of `self.allowed_preproc` and `self.allowed_features`
# should be in accordance with
# corresponding items in `PreprocCfg` and `FeatureCfg`
self.allowed_preproc = ['baseline', 'bandpass',]
self.preprocess_dir = os.path.join(self.db_dir, "preprocessed")
os.makedirs(self.preprocess_dir, exist_ok=True)
self.rpeaks_dir = os.path.join(self.db_dir, "rpeaks")
os.makedirs(self.rpeaks_dir, exist_ok=True)
self.allowed_features = ['wavelet', 'rr', 'morph',]
self.feature_dir = os.path.join(self.db_dir, "features")
os.makedirs(self.feature_dir, exist_ok=True)
self.beat_ann_dir = os.path.join(self.db_dir, "beat_ann")
os.makedirs(self.beat_ann_dir, exist_ok=True)
# TODO: add logger
def load_data(self, rec:Union[int,str], units:str='mV', sampfrom:Optional[int]=None, sampto:Optional[int]=None, keep_dim:bool=True, preproc:Optional[List[str]]=None, **kwargs) -> np.ndarray:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
units: str, default 'mV',
units of the output signal, can also be 'μV', with an alias of 'uV'
sampfrom: int, optional,
start index of the data to be loaded
sampto: int, optional,
end index of the data to be loaded
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
preproc: list of str,
type of preprocesses performed to the original raw data,
should be sublist of `self.allowed_preproc`,
if empty, the original raw data will be loaded
Returns:
--------
data: ndarray,
the ecg data
"""
preproc = self._normalize_preprocess_names(preproc, False)
rec_name = self._get_rec_name(rec)
if preproc:
rec_name = f"{rec_name}-{self._get_rec_suffix(preproc)}"
rec_fp = os.path.join(self.preprocess_dir, f"{rec_name}{self.rec_ext}")
else:
rec_fp = os.path.join(self.data_dir, f"{rec_name}{self.rec_ext}")
data = loadmat(rec_fp)['ecg']
if units.lower() in ['uv', 'μv']:
data = (1000 * data).astype(int)
sf, st = (sampfrom or 0), (sampto or len(data))
data = data[sf:st]
if not keep_dim:
data = data.flatten()
return data
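    # Example (sketch, not part of the original class; the path is hypothetical):
    #   data_gen = CPSC2020Reader(db_dir="/path/to/TrainingSet/")
    #   sig = data_gen.load_data("A01", preproc=["baseline", "bandpass"], keep_dim=False)
    # loading with a non-empty `preproc` assumes the corresponding preprocessed file
    # has already been produced by `preprocess_data`.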
def preprocess_data(self, rec:Union[int,str], preproc:List[str]) -> NoReturn:
""" finished, checked,
preprocesses the ecg data in advance for further use
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
preproc: list of str,
type of preprocesses to perform,
should be sublist of `self.allowed_preproc`
"""
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = self._get_rec_name(rec)
save_fp = ED()
save_fp.data = os.path.join(self.preprocess_dir, f"{rec_name}-{self._get_rec_suffix(preproc)}{self.rec_ext}")
save_fp.rpeaks = os.path.join(self.rpeaks_dir, f"{rec_name}-{self._get_rec_suffix(preproc)}{self.rec_ext}")
config = deepcopy(PreprocCfg)
config.preproc = preproc
pps = parallel_preprocess_signal(self.load_data(rec, keep_dim=False), fs=self.fs, config=config)
pps['rpeaks'] = pps['rpeaks'][np.where( (pps['rpeaks']>=config.beat_winL) & (pps['rpeaks']<len(pps['filtered_ecg'])-config.beat_winR) )[0]]
# save mat, keep in accordance with original mat files
savemat(save_fp.data, {'ecg': np.atleast_2d(pps['filtered_ecg']).T}, format='5')
savemat(save_fp.rpeaks, {'rpeaks': np.atleast_2d(pps['rpeaks']).T}, format='5')
def compute_features(self, rec:Union[int,str], features:List[str], preproc:List[str], augment:bool=True, save:bool=True) -> np.ndarray:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
features: list of str,
list of feature types to compute,
            should be sublist of `self.allowed_features`
preproc: list of str,
type of preprocesses to perform, should be sublist of `self.allowed_preproc`
augment: bool, default False,
rpeaks used for extracting features is augmented using the annotations or not
save: bool, default True,
whether or not save the features to the working directory
Returns:
--------
feature_mat: ndarray,
the computed features, of shape (m,n), where
m = the number of beats (the number of rpeaks)
n = the dimension of the features
NOTE: for deep learning models, this function is not necessary
"""
features = self._normalize_feature_names(features, True)
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = self._get_rec_name(rec)
rec_name = f"{rec_name}-{self._get_rec_suffix(preproc+features)}"
if augment:
rec_name = rec_name + "-augment"
try:
print("try loading precomputed filtered signal and precomputed rpeaks...")
data = self.load_data(rec, preproc=preproc, keep_dim=False)
rpeaks = self.load_rpeaks(rec, preproc=preproc, augment=augment, keep_dim=False)
print("precomputed filtered signal and precomputed rpeaks loaded successfully")
except:
print("no precomputed data exist")
self.preprocess_data(rec, preproc=preproc)
data = self.load_data(rec, preproc=preproc, keep_dim=False)
rpeaks = self.load_rpeaks(rec, preproc=preproc, augment=augment, keep_dim=False)
config = deepcopy(FeatureCfg)
config.features = features
feature_mat = compute_ecg_features(data, rpeaks, config=config)
if save:
save_fp = os.path.join(self.feature_dir, f"{rec_name}{self.rec_ext}")
savemat(save_fp, {'features': feature_mat}, format='5')
return feature_mat
def load_rpeaks(self, rec:Union[int,str], sampfrom:Optional[int]=None, sampto:Optional[int]=None, keep_dim:bool=True, preproc:Optional[List[str]]=None, augment:bool=False) -> np.ndarray:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
sampfrom: int, optional,
start index of the data to be loaded
sampto: int, optional,
end index of the data to be loaded
keep_dim: bool, default True,
whether or not to flatten the data of shape (n,1)
preproc: list of str, optional
preprocesses performed when detecting the rpeaks,
should be sublist of `self.allowed_preproc`
augment: bool, default False,
rpeaks detected by algorithm is augmented using the annotations or not
Returns:
--------
rpeaks: ndarray,
the indices of rpeaks
"""
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = self._get_rec_name(rec)
rec_name = f"{rec_name}-{self._get_rec_suffix(preproc)}"
if augment:
rec_name = rec_name + "-augment"
rpeaks_fp = os.path.join(self.beat_ann_dir, f"{rec_name}{self.rec_ext}")
else:
rpeaks_fp = os.path.join(self.rpeaks_dir, f"{rec_name}{self.rec_ext}")
rpeaks = loadmat(rpeaks_fp)['rpeaks'].flatten().astype(int)
sf, st = (sampfrom or 0), (sampto or np.inf)
rpeaks = rpeaks[np.where( (rpeaks>=sf) & (rpeaks<st) )[0]]
if keep_dim:
rpeaks = np.atleast_2d(rpeaks).T
return rpeaks
def load_features(self, rec:Union[int,str], features:List[str], preproc:Optional[List[str]], augment:bool=True, force_recompute:bool=False) -> np.ndarray:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
features: list of str,
list of feature types computed,
            should be sublist of `self.allowed_features`
preproc: list of str,
type of preprocesses performed before extracting features,
should be sublist of `self.allowed_preproc`
augment: bool, default True,
rpeaks used in extracting features is augmented using the annotations or not
force_recompute: bool, default False,
force recompute, regardless of the existing precomputed feature files
Returns:
--------
feature_mat: ndarray,
the computed features, of shape (m,n), where
m = the number of beats (the number of rpeaks)
n = the dimension of the features
NOTE: for deep learning models, this function is not necessary
"""
features = self._normalize_feature_names(features, True)
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = self._get_rec_name(rec)
rec_name = f"{rec_name}-{self._get_rec_suffix(preproc+features)}"
if augment:
rec_name = rec_name + "-augment"
feature_fp = os.path.join(self.feature_dir, f"{rec_name}{self.rec_ext}")
if os.path.isfile(feature_fp) and not force_recompute:
print("try loading precomputed features...")
feature_mat = loadmat(feature_fp)['features']
print("precomputed features loaded successfully")
else:
print("recompute features")
feature_mat = self.compute_features(
rec, features, preproc, augment, save=True
)
return feature_mat
def load_ann(self, rec:Union[int,str], sampfrom:Optional[int]=None, sampto:Optional[int]=None) -> Dict[str, np.ndarray]:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
sampfrom: int, optional,
start index of the data to be loaded
sampto: int, optional,
end index of the data to be loaded
Returns:
--------
ann: dict,
with items (ndarray) "SPB_indices" and "PVC_indices",
which record the indices of SPBs and PVCs
"""
ann_name = self._get_ann_name(rec)
ann_fp = os.path.join(self.ann_dir, ann_name + self.ann_ext)
ann = loadmat(ann_fp)['ref']
sf, st = (sampfrom or 0), (sampto or np.inf)
spb_indices = ann['S_ref'][0,0].flatten().astype(int)
# drop duplicates
spb_indices = np.array(sorted(list(set(spb_indices))), dtype=int)
spb_indices = spb_indices[np.where( (spb_indices>=sf) & (spb_indices<st) )[0]]
pvc_indices = ann['V_ref'][0,0].flatten().astype(int)
# drop duplicates
pvc_indices = np.array(sorted(list(set(pvc_indices))), dtype=int)
pvc_indices = pvc_indices[np.where( (pvc_indices>=sf) & (pvc_indices<st) )[0]]
ann = {
"SPB_indices": spb_indices,
"PVC_indices": pvc_indices,
}
return ann
def load_beat_ann(self, rec:Union[int,str], sampfrom:Optional[int]=None, sampto:Optional[int]=None, preproc:Optional[List[str]]=None, augment:bool=True, return_aux_data:bool=False, force_recompute:bool=False) -> Union[np.ndarray, Dict[str,np.ndarray]]:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
sampfrom: int, optional,
start index of the data to be loaded
sampto: int, optional,
end index of the data to be loaded
preproc: list of str,
type of preprocesses performed before detecting rpeaks,
should be sublist of `self.allowed_preproc`
augment: bool, default True,
rpeaks detected by algorithm is augmented using the annotations or not
return_aux_data: bool, default False,
whether or not return auxiliary data, including
- the augmented rpeaks
- the beat_ann mapped to int annotations via `self.class_map`
force_recompute: bool, default False,
force recompute, regardless of the existing precomputed feature files
Returns:
--------
beat_ann: ndarray, or dict,
annotation (one of 'N', 'S', 'V') for each beat,
or together with auxiliary data as a dict
"""
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = f"{self._get_rec_name(rec)}-{self._get_rec_suffix(preproc)}"
if augment:
rec_name = rec_name + "-augment"
fp = os.path.join(self.beat_ann_dir, f"{rec_name}{self.ann_ext}")
if not force_recompute and os.path.isfile(fp):
print("try loading precomputed beat_ann...")
beat_ann = loadmat(fp)
for k in beat_ann.keys():
if not k.startswith("__"):
beat_ann[k] = beat_ann[k].flatten()
if not return_aux_data:
beat_ann = beat_ann["beat_ann"]
print("precomputed beat_ann loaded successfully")
else:
print("recompute beat_ann")
rpeaks = self.load_rpeaks(
rec,
sampfrom=sampfrom, sampto=sampto,
keep_dim=False,
preproc=preproc,
augment=False,
)
ann = self.load_ann(rec, sampfrom, sampto)
beat_ann = self._ann_to_beat_ann(
rec=rec,
rpeaks=rpeaks,
ann=ann,
preproc=preproc,
bias_thr=BaseCfg.beat_ann_bias_thr,
augment=augment,
return_aux_data=return_aux_data,
save=True
)
return beat_ann
def _ann_to_beat_ann(self, rec:Union[int,str], rpeaks:np.ndarray, ann:Dict[str, np.ndarray], preproc:List[str], bias_thr:Real, augment:bool=True, return_aux_data:bool=False, save:bool=False) -> Union[np.ndarray, Dict[str,np.ndarray]]:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
rpeaks: ndarray,
rpeaks for forming beats
ann: dict,
with items (ndarray) "SPB_indices" and "PVC_indices",
which record the indices of SPBs and PVCs
preproc: list of str,
type of preprocesses performed before detecting rpeaks,
should be sublist of `self.allowed_preproc`
bias_thr: real number,
tolerance for using annotations (PVC, SPB indices provided by the dataset),
to label the type of beats given by `rpeaks`
augment: bool, default True,
`rpeaks` is augmented using the annotations or not
return_aux_data: bool, default False,
whether or not return auxiliary data, including
- the augmented rpeaks
- the beat_ann mapped to int annotations via `self.class_map`
save: bool, default False,
save the outcome beat annotations (along with 'augmented' rpeaks) to file or not
Returns:
--------
beat_ann: ndarray, or dict,
annotation (one of 'N', 'S', 'V') for each beat,
or together with auxiliary data as a dict
NOTE:
-----
the 'rpeaks' and 'beat_ann_int' saved in the .mat file is of shape (1,n), rather than (n,)
"""
one_hour = self.fs*3600
split_indices = [0]
for i in range(1, int(rpeaks[-1]+bias_thr)//one_hour):
split_indices.append(len(np.where(rpeaks<i*one_hour)[0])+1)
if len(split_indices) == 1 or split_indices[-1] < len(rpeaks): # tail
split_indices.append(len(rpeaks))
epoch_params = []
for idx in range(len(split_indices)-1):
p = {}
p['rpeaks'] = rpeaks[split_indices[idx]:split_indices[idx+1]]
p['ann'] = {
k: v[np.where( (v>=p['rpeaks'][0]-bias_thr-1) & (v<p['rpeaks'][-1]+bias_thr+1) )[0]] for k, v in ann.items()
}
# if idx == 0:
# p['prev_r'] = -1
# else:
# p['prev_r'] = rpeaks[split_indices[idx]-1]
# if idx == len(split_indices)-2:
# p['next_r'] = np.inf
# else:
# p['next_r'] = rpeaks[split_indices[idx+1]]
epoch_params.append(p)
if augment:
epoch_func = _ann_to_beat_ann_epoch_v3
else:
epoch_func = _ann_to_beat_ann_epoch_v1
cpu_num = max(1, mp.cpu_count()-3)
with mp.Pool(processes=cpu_num) as pool:
result = pool.starmap(
func=epoch_func,
iterable=[
(
item['rpeaks'],
item['ann'],
bias_thr,
# item['prev_r'],
# item['next_r'],
)\
for item in epoch_params
],
)
ann_matched = {
k: np.concatenate([item['ann_matched'][k] for item in result]) \
for k in ann.keys()
}
ann_not_matched = {
k: [a for a in v if a not in ann_matched[k]] for k, v in ann.items()
}
# print(f"rec = {rec}, ann_not_matched = {ann_not_matched}")
beat_ann = np.concatenate([item['beat_ann'] for item in result]).astype('<U1')
augmented_rpeaks = np.concatenate((rpeaks, np.array(ann_not_matched['SPB_indices']), np.array(ann_not_matched['PVC_indices'])))
beat_ann = np.concatenate((beat_ann, np.array(['S' for _ in ann_not_matched['SPB_indices']], dtype='<U1'), np.array(['V' for _ in ann_not_matched['PVC_indices']], dtype='<U1')))
sorted_indices = np.argsort(augmented_rpeaks)
augmented_rpeaks = augmented_rpeaks[sorted_indices].astype(int)
beat_ann = beat_ann[sorted_indices].astype('<U1')
# NOTE: features will only be extracted at 'valid' rpeaks
raw_sig = self.load_data(rec, keep_dim=False, preproc=None)
valid_indices = np.where( (augmented_rpeaks>=BaseCfg.beat_winL) & (augmented_rpeaks<len(raw_sig)-BaseCfg.beat_winR) )[0]
augmented_rpeaks = augmented_rpeaks[valid_indices]
beat_ann = beat_ann[valid_indices]
# list_addition = lambda a,b: a+b
# beat_ann = reduce(list_addition, result)
# beat_ann = ["N" for _ in range(len(rpeaks))]
# for idx, r in enumerate(rpeaks):
# if any([-beat_winL <= r-p < beat_winR for p in ann['SPB_indices']]):
# beat_ann[idx] = 'S'
# elif any([-beat_winL <= r-p < beat_winR for p in ann['PVC_indices']]):
# beat_ann[idx] = 'V'
preproc = self._normalize_preprocess_names(preproc, True)
rec_name = f"{self._get_rec_name(rec)}-{self._get_rec_suffix(preproc)}"
if augment:
rec_name = rec_name + "-augment"
fp = os.path.join(self.beat_ann_dir, f"{rec_name}{self.ann_ext}")
to_save_mdict = {
"rpeaks": augmented_rpeaks.astype(int),
"beat_ann": beat_ann,
"beat_ann_int": np.vectorize(lambda a:self.class_map[a])(beat_ann)
}
savemat(fp, to_save_mdict, format='5')
if return_aux_data:
beat_ann = to_save_mdict
return beat_ann
def _get_ann_name(self, rec:Union[int,str]) -> str:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns:
--------
ann_name: str,
filename of the annotation file
"""
if isinstance(rec, int):
assert rec in range(1, self.nb_records+1), "rec should be in range(1,{})".format(self.nb_records+1)
ann_name = self.all_annotations[rec-1]
elif isinstance(rec, str):
assert rec in self.all_annotations+self.all_records, "rec should be one of {} or one of {}".format(self.all_records, self.all_annotations)
ann_name = rec.replace("A", "R")
return ann_name
def _get_rec_name(self, rec:Union[int,str]) -> str:
""" finished, checked,
Parameters:
-----------
rec: int or str,
number of the record, NOTE that rec_no starts from 1,
or the record name
Returns:
--------
rec_name: str,
filename of the record
"""
if isinstance(rec, int):
assert rec in range(1, self.nb_records+1), "rec should be in range(1,{})".format(self.nb_records+1)
rec_name = self.all_records[rec-1]
elif isinstance(rec, str):
assert rec in self.all_records, "rec should be one of {}".format(self.all_records)
rec_name = rec
return rec_name
def _get_rec_suffix(self, operations:List[str]) -> str:
""" finished, checked,
Parameters:
-----------
operations: list of str,
names of operations to perform (or has performed),
should be sublist of `self.allowed_preproc` or `self.allowed_features`
Returns:
--------
suffix: str,
suffix of the filename of the preprocessed ecg signal, or the features
"""
suffix = '-'.join(sorted([item.lower() for item in operations]))
return suffix
def _normalize_feature_names(self, features:List[str], ensure_nonempty:bool) -> List[str]:
""" finished, checked,
to transform all features into lower case,
and keep them in a specific ordering
Parameters:
-----------
features: list of str,
list of feature types,
            should be sublist of `self.allowed_features`
ensure_nonempty: bool,
if True, when the passed `features` is empty,
`self.allowed_features` will be returned
Returns:
--------
_f: list of str,
'normalized' list of feature types
"""
_f = [item.lower() for item in features] if features else []
if ensure_nonempty:
_f = _f or self.allowed_features
# ensure ordering
_f = [item for item in self.allowed_features if item in _f]
# assert features and all([item in self.allowed_features for item in features])
return _f
def _normalize_preprocess_names(self, preproc:List[str], ensure_nonempty:bool) -> List[str]:
"""
to transform all preproc into lower case,
and keep them in a specific ordering
Parameters:
-----------
preproc: list of str,
list of preprocesses types,
            should be sublist of `self.allowed_preproc`
ensure_nonempty: bool,
if True, when the passed `preproc` is empty,
`self.allowed_preproc` will be returned
Returns:
--------
_p: list of str,
'normalized' list of preprocess types
"""
_p = [item.lower() for item in preproc] if preproc else []
if ensure_nonempty:
_p = _p or self.allowed_preproc
# ensure ordering
_p = [item for item in self.allowed_preproc if item in _p]
# assert all([item in self.allowed_preproc for item in _p])
return _p
def train_test_split_rec(self, test_rec_num:int=2) -> Dict[str, List[str]]:
""" finished, checked,
split the records into train set and test set
Parameters:
-----------
test_rec_num: int,
number of records for the test set
Returns:
--------
split_res: dict,
with items `train`, `test`, both being list of record names
"""
if test_rec_num == 1:
test_records = random.sample(self.subgroups.VS, 1)
elif test_rec_num == 2:
test_records = random.sample(self.subgroups.VS, 1) + random.sample(self.subgroups.N, 1)
elif test_rec_num == 3:
test_records = random.sample(self.subgroups.VS, 1) + random.sample(self.subgroups.N, 2)
elif test_rec_num == 4:
test_records = []
for k in self.subgroups.keys():
test_records += random.sample(self.subgroups[k], 1)
else:
raise ValueError("test data ratio too high")
train_records = [r for r in self.all_records if r not in test_records]
split_res = ED({
"train": train_records,
"test": test_records,
})
return split_res
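    # Example (sketch): a 2-record split always draws one record from the "VS" subgroup and
    # one from the "N" subgroup, e.g. something like
    #   {"train": ["A01", "A02", ...], "test": ["A04", "A03"]}
    # (the exact records vary because the choice is random).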
def train_test_split_data(self, test_rec_num:int, features:List[str], preproc:Optional[List[str]], augment:bool=True, int_labels:bool=True) -> Tuple[np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray,np.ndarray]:
""" finished, checked,
split the data (and the annotations) into train set and test set
Parameters:
-----------
test_rec_num: int,
number of records for the test set
features: list of str,
list of feature types used for producing the training data,
            should be sublist of `self.allowed_features`
preproc: list of str,
list of preprocesses types performed on the raw data,
            should be sublist of `self.allowed_preproc`
augment: bool, default True,
features are computed using augmented rpeaks or not
int_labels: bool, default True,
use the 'beat_ann_int', which is mapped into int via `class_map`
Returns:
--------
x_train, y_train, y_indices_train, x_test, y_test, y_indices_test: ndarray,
"""
features = self._normalize_feature_names(features, True)
preproc = self._normalize_preprocess_names(preproc, True)
split_rec = self.train_test_split_rec(test_rec_num)
x = ED({"train": | np.array([],dtype=float) | numpy.array |
from __future__ import division, print_function
import numpy as np
from .core import kcore_bd, kcore_bu
from .distance import reachdist
from bct.utils import invert
def betweenness_bin(G):
'''
Node betweenness centrality is the fraction of all shortest paths in
the network that contain a given node. Nodes with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
A : NxN np.ndarray
binary directed/undirected connection matrix

    Returns
    -------
    BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
G = np.array(G, dtype=float) # force G to have float type so it can be
# compared to float np.inf
n = len(G) # number of nodes
I = np.eye(n) # identity matrix
d = 1 # path length
NPd = G.copy() # number of paths of length |d|
NSPd = G.copy() # number of shortest paths of length |d|
NSP = G.copy() # number of shortest paths of any length
L = G.copy() # length of shortest paths
NSP[np.where(I)] = 1
L[np.where(I)] = 1
# calculate NSP and L
while np.any(NSPd):
d += 1
NPd = np.dot(NPd, G)
NSPd = NPd * (L == 0)
NSP += NSPd
L = L + d * (NSPd != 0)
L[L == 0] = np.inf # L for disconnected vertices is inf
L[np.where(I)] = 0
NSP[NSP == 0] = 1 # NSP for disconnected vertices is 1
DP = np.zeros((n, n)) # vertex on vertex dependency
diam = d - 1
# calculate DP
for d in range(diam, 1, -1):
DPd1 = np.dot(((L == d) * (1 + DP) / NSP), G.T) * \
((L == (d - 1)) * NSP)
DP += DPd1
return np.sum(DP, axis=0)
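# Example usage (illustrative sketch only, not part of the original module): for a 3-node
# path graph the middle node is the only intermediary, so its betweenness dominates.
#   A = np.array([[0, 1, 0],
#                 [1, 0, 1],
#                 [0, 1, 0]], dtype=float)
#   bc = betweenness_bin(A)   # bc[1] > bc[0] == bc[2] == 0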
def betweenness_wei(G):
'''
Node betweenness centrality is the fraction of all shortest paths in
the network that contain a given node. Nodes with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
L : NxN np.ndarray
directed/undirected weighted connection matrix
Returns
-------
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
The input matrix must be a connection-length matrix, typically
obtained via a mapping from weight to length. For instance, in a
weighted correlation network higher correlations are more naturally
interpreted as shorter distances and the input matrix should
consequently be some inverse of the connectivity matrix.
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
for u in range(n):
D = np.tile(np.inf, (n,))
D[u] = 0 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
S = np.ones((n,), dtype=bool) # distance permanence
P = np.zeros((n, n)) # predecessors
        Q = np.zeros((n,), dtype=int)  # integer dtype so Q can be used as an index array below
q = n - 1 # order of non-increasing distance
G1 = G.copy()
V = [u]
while True:
S[V] = 0 # distance u->V is now permanent
G1[:, V] = 0 # no in-edges as already shortest
for v in V:
Q[q] = v
q -= 1
W, = np.where(G1[v, :]) # neighbors of v
for w in W:
Duw = D[v] + G1[v, w] # path length to be tested
if Duw < D[w]: # if new u->w shorter than old
D[w] = Duw
NP[w] = NP[v] # NP(u->w) = NP of new path
P[w, :] = 0
P[w, v] = 1 # v is the only predecessor
elif Duw == D[w]: # if new u->w equal to old
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is also predecessor
if D[S].size == 0:
break # all nodes were reached
if np.isinf(np.min(D[S])): # some nodes cannot be reached
Q[:q + 1], = np.where(np.isinf(D)) # these are first in line
break
V, = np.where(D == np.min(D[S]))
DP = np.zeros((n,))
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DP[v] += (1 + DP[w]) * NP[v] / NP[w]
return BC
def diversity_coef_sign(W, ci):
'''
The Shannon-entropy based diversity coefficient measures the diversity
of intermodular connections of individual nodes and ranges from 0 to 1.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
ci : Nx1 np.ndarray
community affiliation vector
Returns
-------
Hpos : Nx1 np.ndarray
diversity coefficient based on positive connections
Hneg : Nx1 np.ndarray
diversity coefficient based on negative connections
'''
n = len(W) # number of nodes
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.max(ci) # number of modules
def entropy(w_):
S = np.sum(w_, axis=1) # strength
Snm = np.zeros((n, m)) # node-to-module degree
for i in range(m):
Snm[:, i] = np.sum(w_[:, ci == i + 1], axis=1)
pnm = Snm / (np.tile(S, (m, 1)).T)
pnm[np.isnan(pnm)] = 0
pnm[np.logical_not(pnm)] = 1
return -np.sum(pnm * np.log(pnm), axis=1) / np.log(m)
#explicitly ignore compiler warning for division by zero
with np.errstate(invalid='ignore'):
Hpos = entropy(W * (W > 0))
Hneg = entropy(-W * (W < 0))
return Hpos, Hneg
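# Example usage (illustrative sketch only): a small signed matrix with two modules.
#   W = np.array([[0.0, 0.5, -0.2],
#                 [0.5, 0.0, 0.3],
#                 [-0.2, 0.3, 0.0]])
#   ci = np.array([1, 1, 2])
#   Hpos, Hneg = diversity_coef_sign(W, ci)   # entropy-based diversity in [0, 1] per node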
def edge_betweenness_bin(G):
'''
Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
A : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
EBC = np.zeros((n, n)) # edge betweenness
for u in range(n):
D = np.zeros((n,))
D[u] = 1 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
P = np.zeros((n, n)) # predecessors
        Q = np.zeros((n,), dtype=int)  # integer dtype so Q can be used as an index array below
q = n - 1 # order of non-increasing distance
Gu = G.copy()
V = np.array([u])
while V.size:
Gu[:, V] = 0 # remove remaining in-edges
for v in V:
Q[q] = v
q -= 1
W, = np.where(Gu[v, :]) # neighbors of V
for w in W:
if D[w]:
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is a predecessor
else:
D[w] = 1
NP[w] = NP[v] # NP(u->v) = NP of new path
P[w, v] = 1 # v is a predecessor
V, = np.where(np.any(Gu[V, :], axis=0))
if np.any(np.logical_not(D)): # if some vertices unreachable
Q[:q], = np.where(np.logical_not(D)) # ...these are first in line
DP = np.zeros((n,)) # dependency
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DPvw = (1 + DP[w]) * NP[v] / NP[w]
DP[v] += DPvw
EBC[v, w] += DPvw
return EBC, BC
def edge_betweenness_wei(G):
'''
Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
L : NxN np.ndarray
directed/undirected weighted connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
nodal betweenness centrality vector
Notes
-----
The input matrix must be a connection-length matrix, typically
obtained via a mapping from weight to length. For instance, in a
weighted correlation network higher correlations are more naturally
interpreted as shorter distances and the input matrix should
consequently be some inverse of the connectivity matrix.
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
EBC = np.zeros((n, n)) # edge betweenness
for u in range(n):
D = np.tile(np.inf, n)
D[u] = 0 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
S = np.ones((n,), dtype=bool) # distance permanence
P = np.zeros((n, n)) # predecessors
        Q = np.zeros((n,), dtype=int)  # integer dtype so Q can be used as an index array below
q = n - 1 # order of non-increasing distance
G1 = G.copy()
V = [u]
while True:
S[V] = 0 # distance u->V is now permanent
G1[:, V] = 0 # no in-edges as already shortest
for v in V:
Q[q] = v
q -= 1
W, = np.where(G1[v, :]) # neighbors of v
for w in W:
Duw = D[v] + G1[v, w] # path length to be tested
if Duw < D[w]: # if new u->w shorter than old
D[w] = Duw
NP[w] = NP[v] # NP(u->w) = NP of new path
P[w, :] = 0
P[w, v] = 1 # v is the only predecessor
elif Duw == D[w]: # if new u->w equal to old
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is also a predecessor
if D[S].size == 0:
break # all nodes reached, or
if np.isinf(np.min(D[S])): # some cannot be reached
Q[:q], = np.where(np.isinf(D)) # these are first in line
break
V, = np.where(D == np.min(D[S]))
DP = np.zeros((n,)) # dependency
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DPvw = (1 + DP[w]) * NP[v] / NP[w]
DP[v] += DPvw
EBC[v, w] += DPvw
return EBC, BC
def eigenvector_centrality_und(CIJ):
'''
    Eigenvector centrality is a self-referential measure of centrality:
nodes have high eigenvector centrality if they connect to other nodes
that have high eigenvector centrality. The eigenvector centrality of
node i is equivalent to the ith element in the eigenvector
corresponding to the largest eigenvalue of the adjacency matrix.
Parameters
----------
CIJ : NxN np.ndarray
binary/weighted undirected adjacency matrix

    Returns
    -------
    v : Nx1 np.ndarray
eigenvector associated with the largest eigenvalue of the matrix
'''
from scipy import linalg
n = len(CIJ)
vals, vecs = linalg.eig(CIJ)
i = np.argmax(vals)
return np.abs(vecs[:, i])
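# Example usage (illustrative sketch only): in a 3-node path graph the middle node has the
# highest eigenvector centrality because it is attached to both other nodes.
#   CIJ = np.array([[0., 1., 0.],
#                   [1., 0., 1.],
#                   [0., 1., 0.]])
#   v = eigenvector_centrality_und(CIJ)   # v[1] is the largest entry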
def erange(CIJ):
'''
Shortcuts are central edges which significantly reduce the
characteristic path length in the network.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
Erange : NxN np.ndarray
range for each edge, i.e. the length of the shortest path from i to j
for edge c(i,j) after the edge has been removed from the graph
eta : float
average range for the entire graph
Eshort : NxN np.ndarray
entries are ones for shortcut edges
fs : float
fractions of shortcuts in the graph
Follows the treatment of 'shortcuts' by <NAME>
'''
N = len(CIJ)
K = np.size(np.where(CIJ)[1])
Erange = np.zeros((N, N))
i, j = np.where(CIJ)
for c in range(len(i)):
CIJcut = CIJ.copy()
CIJcut[i[c], j[c]] = 0
R, D = reachdist(CIJcut)
Erange[i[c], j[c]] = D[i[c], j[c]]
# average range (ignore Inf)
eta = (np.sum(Erange[np.logical_and(Erange > 0, Erange < np.inf)]) /
len(Erange[np.logical_and(Erange > 0, Erange < np.inf)]))
# Original entries of D are ones, thus entries of Erange
# must be two or greater.
# If Erange(i,j) > 2, then the edge is a shortcut.
# 'fshort' is the fraction of shortcuts over the entire graph.
Eshort = Erange > 2
    fs = np.sum(Eshort) / K  # fraction of edges that are shortcuts (len(np.where(...)) counted array dimensions, not edges)
return Erange, eta, Eshort, fs
def flow_coef_bd(CIJ):
'''
Computes the flow coefficient for each node and averaged over the
network, as described in Honey et al. (2007) PNAS. The flow coefficient
is similar to betweenness centrality, but works on a local
neighborhood. It is mathematically related to the clustering
coefficient (cc) at each node as, fc+cc <= 1.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
fc : Nx1 np.ndarray
flow coefficient for each node
FC : float
average flow coefficient over the network
    total_flo : Nx1 np.ndarray
        number of paths that "flow" across each central node
'''
N = len(CIJ)
fc = np.zeros((N,))
total_flo = np.zeros((N,))
max_flo = np.zeros((N,))
# loop over nodes
for v in range(N):
# find neighbors - note: both incoming and outgoing connections
nb, = np.where(CIJ[v, :] + CIJ[:, v].T)
fc[v] = 0
if np.where(nb)[0].size:
CIJflo = -CIJ[np.ix_(nb, nb)]
for i in range(len(nb)):
for j in range(len(nb)):
if CIJ[nb[i], v] and CIJ[v, nb[j]]:
CIJflo[i, j] += 1
total_flo[v] = np.sum(
(CIJflo == 1) * np.logical_not(np.eye(len(nb))))
max_flo[v] = len(nb) * len(nb) - len(nb)
fc[v] = total_flo[v] / max_flo[v]
fc[np.isnan(fc)] = 0
FC = np.mean(fc)
return fc, FC, total_flo
def gateway_coef_sign(W, ci, centrality_type='degree'):
'''
The gateway coefficient is a variant of participation coefficient.
It is weighted by how critical the connections are to intermodular
connectivity (e.g. if a node is the only connection between its
module and another module, it will have a higher gateway coefficient,
unlike participation coefficient).
Parameters
----------
W : NxN np.ndarray
undirected signed connection matrix
ci : Nx1 np.ndarray
community affiliation vector
centrality_type : enum
'degree' - uses the weighted degree (i.e, node strength)
'betweenness' - uses the betweenness centrality
Returns
-------
Gpos : Nx1 np.ndarray
gateway coefficient for positive weights
Gneg : Nx1 np.ndarray
gateway coefficient for negative weights
Reference:
<NAME>, <NAME>, Eur Phys J B (2014) 87:1-10
'''
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W)
np.fill_diagonal(W, 0)
def gcoef(W):
#strength
s = np.sum(W, axis=1)
#neighbor community affiliation
Gc = np.inner((W != 0), np.diag(ci))
#community specific neighbors
Sc2 = np.zeros((n,))
#extra modular weighting
ksm = np.zeros((n,))
        #intra modular weighting
centm = np.zeros((n,))
if centrality_type == 'degree':
cent = s.copy()
elif centrality_type == 'betweenness':
cent = betweenness_wei(invert(W))
nr_modules = int(np.max(ci))
for i in range(1, nr_modules+1):
ks = np.sum(W * (Gc == i), axis=1)
            #print(np.sum(ks))  # debugging leftover, kept commented out like the prints below
Sc2 += ks ** 2
for j in range(1, nr_modules+1):
#calculate extramodular weights
ksm[ci == j] += ks[ci == j] / np.sum(ks[ci == j])
#calculate intramodular weights
centm[ci == i] = np.sum(cent[ci == i])
#print(Gc)
#print(centm)
#print(ksm)
#print(ks)
centm = centm / max(centm)
#calculate total weights
gs = (1 - ksm * centm) ** 2
Gw = 1 - Sc2 * gs / s ** 2
Gw[np.where(np.isnan(Gw))] = 0
Gw[np.where(np.logical_not(Gw))] = 0
return Gw
G_pos = gcoef(W * (W > 0))
G_neg = gcoef(-W * (W < 0))
return G_pos, G_neg
def kcoreness_centrality_bd(CIJ):
'''
The k-core is the largest subgraph comprising nodes of degree at least
k. The coreness of a node is k if the node belongs to the k-core but
not to the (k+1)-core. This function computes k-coreness of all nodes
for a given binary directed connection matrix.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
coreness : Nx1 np.ndarray
node coreness
kn : int
size of k-core
'''
N = len(CIJ)
coreness = np.zeros((N,))
kn = np.zeros((N,))
for k in range(N):
CIJkcore, kn[k] = kcore_bd(CIJ, k)
ss = np.sum(CIJkcore, axis=0) > 0
coreness[ss] = k
return coreness, kn
def kcoreness_centrality_bu(CIJ):
'''
The k-core is the largest subgraph comprising nodes of degree at least
k. The coreness of a node is k if the node belongs to the k-core but
not to the (k+1)-core. This function computes the coreness of all nodes
for a given binary undirected connection matrix.
Parameters
----------
CIJ : NxN np.ndarray
binary undirected connection matrix
Returns
-------
coreness : Nx1 np.ndarray
node coreness
kn : int
size of k-core
'''
N = len(CIJ)
# determine if the network is undirected -- if not, compute coreness
# on the corresponding undirected network
CIJund = CIJ + CIJ.T
if np.any(CIJund > 1):
CIJ = np.array(CIJund > 0, dtype=float)
coreness = np.zeros((N,))
kn = np.zeros((N,))
for k in range(N):
CIJkcore, kn[k] = kcore_bu(CIJ, k)
ss = np.sum(CIJkcore, axis=0) > 0
coreness[ss] = k
return coreness, kn
def module_degree_zscore(W, ci, flag=0):
'''
The within-module degree z-score is a within-module version of degree
centrality.
Parameters
----------
W : NxN np.narray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.array_like
community affiliation vector
flag : int
Graph type. 0: undirected graph (default)
1: directed graph in degree
2: directed graph out degree
3: directed graph in and out degree
Returns
-------
Z : Nx1 np.ndarray
within-module degree Z-score
'''
_, ci = np.unique(ci, return_inverse=True)
ci += 1
if flag == 2:
W = W.copy()
W = W.T
elif flag == 3:
W = W.copy()
W = W + W.T
n = len(W)
Z = np.zeros((n,)) # number of vertices
for i in range(1, int(np.max(ci) + 1)):
Koi = np.sum(W[np.ix_(ci == i, ci == i)], axis=1)
Z[np.where(ci == i)] = (Koi - np.mean(Koi)) / np.std(Koi)
Z[np.where(np.isnan(Z))] = 0
return Z
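# Example usage (illustrative sketch only): z-scores are computed within each module, so a
# node that is unusually well connected inside its own community gets a large positive value.
#   W = np.array([[0, 1, 1, 0, 0, 0],
#                 [1, 0, 0, 0, 0, 0],
#                 [1, 0, 0, 1, 0, 0],
#                 [0, 0, 1, 0, 1, 1],
#                 [0, 0, 0, 1, 0, 0],
#                 [0, 0, 0, 1, 0, 0]], dtype=float)
#   ci = np.array([1, 1, 1, 2, 2, 2])
#   Z = module_degree_zscore(W, ci)   # nodes 0 and 3 get the largest within-module z-scores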
def pagerank_centrality(A, d, falff=None):
'''
The PageRank centrality is a variant of eigenvector centrality. This
function computes the PageRank centrality of each vertex in a graph.
Formally, PageRank is defined as the stationary distribution achieved
by instantiating a Markov chain on a graph. The PageRank centrality of
a given vertex, then, is proportional to the number of steps (or amount
of time) spent at that vertex as a result of such a process.
The PageRank index gets modified by the addition of a damping factor,
d. In terms of a Markov chain, the damping factor specifies the
fraction of the time that a random walker will transition to one of its
current state's neighbors. The remaining fraction of the time the
walker is restarted at a random vertex. A common value for the damping
factor is d = 0.85.
Parameters
----------
A : NxN np.narray
adjacency matrix
d : float
damping factor (see description)
falff : Nx1 np.ndarray | None
Initial page rank probability, non-negative values. Default value is
None. If not specified, a naive bayesian prior is used.
Returns
-------
r : Nx1 np.ndarray
vectors of page rankings
Notes
-----
Note: The algorithm will work well for smaller matrices (number of
nodes around 1000 or less)
'''
from scipy import linalg
N = len(A)
if falff is None:
norm_falff = np.ones((N,)) / N
else:
norm_falff = falff / np.sum(falff)
deg = np.sum(A, axis=0)
deg[deg == 0] = 1
D1 = np.diag(1 / deg)
B = np.eye(N) - d * np.dot(A, D1)
b = (1 - d) * norm_falff
r = linalg.solve(B, b)
r /= np.sum(r)
return r
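# Example usage (illustrative sketch only): the customary damping factor is d = 0.85 and the
# returned ranking sums to 1 over all nodes.
#   A = np.array([[0., 1., 1.],
#                 [1., 0., 0.],
#                 [1., 0., 0.]])
#   r = pagerank_centrality(A, d=0.85)   # r.sum() == 1, node 0 ranks highest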
def participation_coef(W, ci, degree='undirected'):
'''
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
'''
if degree == 'in':
W = W.T
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W) # number of vertices
    Ko = np.sum(W, axis=1)  # (out-)degree / strength of each vertex
"""
Tests available cost function classes in FitBenchmarking.
"""
from unittest import TestCase
import numpy as np
from fitbenchmarking.cost_func.cost_func_factory import create_cost_func
from fitbenchmarking.cost_func.hellinger_nlls_cost_func import \
HellingerNLLSCostFunc
from fitbenchmarking.cost_func.nlls_cost_func import NLLSCostFunc
from fitbenchmarking.cost_func.poisson_cost_func import (PoissonCostFunc,
_safe_a_log_b)
from fitbenchmarking.cost_func.weighted_nlls_cost_func import \
WeightedNLLSCostFunc
from fitbenchmarking.hessian.analytic_hessian import Analytic
from fitbenchmarking.jacobian.scipy_jacobian import Scipy
from fitbenchmarking.parsing.fitting_problem import FittingProblem
from fitbenchmarking.utils import exceptions
from fitbenchmarking.utils.options import Options
# pylint: disable=attribute-defined-outside-init
def fun(x, p):
"""
Analytic function evaluation
"""
return (x*p**2)**2
def jac(x, p):
"""
Analytic Jacobian evaluation
"""
return np.column_stack((4*x**2*p[0]**3,
4*x**2*p[0]**3))
def hes(x, p):
"""
Analytic Hessian evaluation
"""
return np.array([[12*x**2*p[0]**2, 12*x**2*p[0]**2],
[12*x**2*p[0]**2, 12*x**2*p[0]**2], ])
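# Note (added for clarity): the analytic `jac` and `hes` above deliberately duplicate the
# single derivative term 4*x**2*p[0]**3 (resp. 12*x**2*p[0]**2) into identical columns and
# entries; the expected Jacobians/Hessians in the tests below rely on that symmetry.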
class TestNLLSCostFunc(TestCase):
"""
Class to test the NLLSCostFunc class
"""
def setUp(self):
"""
Setting up nonlinear least squares cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = NLLSCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11])
self.y_val = np.array([6, 10, 20])
def test_eval_r_raise_error(self):
"""
Test that eval_r raises and error
"""
self.assertRaises(exceptions.CostFuncError,
self.cost_function.eval_r,
params=[1, 2, 3],
x=[2],
y=[3, 4])
def test_eval_r_correct_evaluation(self):
"""
Test that eval_r is running the correct function
"""
eval_result = self.cost_function.eval_r(x=self.x_val,
y=self.y_val,
params=[5])
self.assertTrue(all(eval_result == np.array([0, -3, 4])))
def test_eval_cost(self):
"""
Test that eval_cost is correct
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val)
self.assertEqual(eval_result, 25)
def test_validate_algorithm_type_error(self):
"""
Test that validate_algorithm_type raises an error
for incompatible options
"""
self.cost_function.invalid_algorithm_types = ['ls']
algorithm_check = {'ls': ['ls-min']}
minimizer = 'ls-min'
self.assertRaises(exceptions.IncompatibleMinimizerError,
self.cost_function.validate_algorithm_type,
algorithm_check=algorithm_check,
minimizer=minimizer)
def test_validate_algorithm_type_correct(self):
"""
Test that validate_algorithm_type does not raise
an error for compaitble options
"""
self.cost_function.invalid_algorithm_types = []
algorithm_check = {'ls': ['ls-min']}
minimizer = 'ls-min'
self.cost_function.validate_algorithm_type(algorithm_check, minimizer)
def test_jac_res(self):
"""
Test that jac_res works for the NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[-1.0], [-1.0], [-1.0]])
self.assertTrue(np.allclose(J, expected))
def test_jac_cost(self):
"""
Test that jac_cost works for the NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
jac_cost = self.cost_function.jac_cost(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([-2.0])
self.assertTrue(np.allclose(jac_cost, expected))
def test_hes_res(self):
"""
Test that hes_res works for the NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val)
expected = np.array([[[-300, -19200, -36300],
[-300, -19200, -36300]],
[[-300, -19200, -36300],
[-300, -19200, -36300]]])
self.assertTrue(np.allclose(H, expected))
def test_hes_cost(self):
"""
Test that hes_cost works for the NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
hes_cost = self.cost_function.hes_cost(params=[0.01],
x=self.x_val,
y=self.y_val)
expected = np.array([[-7.35838895, -7.35838895],
[-7.35838895, -7.35838895]])
self.assertTrue(np.allclose(hes_cost, expected))
class TestWeightedNLLSCostFunc(TestCase):
"""
Class to test the WeightedNLLSCostFunc class
"""
def setUp(self):
"""
Setting up weighted nonlinear least squares cost function tests
"""
self.options = Options()
fitting_problem = FittingProblem(self.options)
self.cost_function = WeightedNLLSCostFunc(fitting_problem)
fitting_problem.function = lambda x, p1: x + p1
self.x_val = np.array([1, 8, 11])
self.y_val = np.array([6, 10, 20])
self.e_val = np.array([2, 4, 1])
def test_eval_r_raise_error(self):
"""
Test that eval_r raises and error
"""
self.assertRaises(exceptions.CostFuncError,
self.cost_function.eval_r,
params=[1, 2, 3],
x=[2],
y=[3, 4, 5],
e=[23, 4])
def test_eval_r_correct_evaluation(self):
"""
Test that eval_r is running the correct function
"""
eval_result = self.cost_function.eval_r(x=self.x_val,
y=self.y_val,
e=self.e_val,
params=[5])
self.assertTrue(all(eval_result == np.array([0, -0.75, 4])))
def test_eval_cost(self):
"""
Test that eval_cost is correct
"""
eval_result = self.cost_function.eval_cost(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
self.assertEqual(eval_result, 16.5625)
def test_jac_res(self):
"""
Test that jac_res works for the Weighted NLLs cost function
"""
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
J = self.cost_function.jac_res(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
expected = np.array([[-0.5], [-0.25], [-1.0]])
self.assertTrue(np.allclose(J, expected))
def test_hes_res(self):
"""
Test that hes_res works for the Weighted NLLs cost function
"""
self.cost_function.problem.function = fun
self.cost_function.problem.jacobian = jac
self.cost_function.problem.hessian = hes
jacobian = Scipy(self.cost_function.problem)
jacobian.method = "2-point"
self.cost_function.jacobian = jacobian
hessian = Analytic(self.cost_function.problem,
self.cost_function.jacobian)
self.cost_function.hessian = hessian
H, _ = self.cost_function.hes_res(params=[5],
x=self.x_val,
y=self.y_val,
e=self.e_val)
expected = np.array([[[-150, -4800, -36300],
[-150, -4800, -36300]],
[[-150, -4800, -36300],
[-150, -4800, -36300]]])
        self.assertTrue(np.allclose(H, expected))
from itertools import combinations
import numpy as np
from scipy import optimize
import scipy
import itertools
from numerik import lrpd, rref, ref, gauss_elimination
np.set_printoptions(linewidth=200)
# REF:
# MYERS, <NAME>.; MYERS, <NAME>.
# Numerical solution of chemical equilibria with simultaneous reactions.
# The Journal of chemical physics, 1986, 84. Jg., Nr. 10, S. 5787-5795.
p = 0.101325 # MPa
temp = 1478. # °K
t0_ref = 298.15 # K
r = 8.314 # J/(mol K)
namen = ['CO2', 'SO2', 'H2O', 'S2', 'CO', 'COS', 'CS2', 'H2S', 'H2']
elemente = ['C', 'O', 'S', 'H']
c = len(namen)
e = len(elemente)
atom_m = np.array([
[1, 0, 0, 0, 1, 1, 1, 0, 0],
[2, 2, 1, 0, 1, 1, 0, 0, 0],
[0, 1, 0, 2, 0, 1, 2, 1, 0],
[0, 0, 2, 0, 0, 0, 0, 2, 2]
])
ne = np.array([
113.15,
0.0,
0.0,
18.73,
-81.25,
0,
0,
0,
89.1,
]) # mol
g_t = r * temp * np.array([
-32.2528,
-20.4331,
-13.4872,
0.,
-19.7219,
-18.0753,
-1.9250,
-1.4522,
0.
]) # J/mol
rho = np.linalg.matrix_rank(atom_m)
print('(1) Eingangsdaten')
print('Namen:' + str(namen))
print('Elemente: ' + str(elemente))
print('g_t^\circ: ' + str(g_t))
print('n_e: ' + str(ne))
print('(2) Atomische Matrix \n A = \n' + str(atom_m))
print('E = ' + str(e) + '; C = ' + str(c))
print('rho = Rang(A) = ' + str(rho))
print('R = (C-rho) = ' + str(c - rho))
print('rho >= E ? : ' + str(rho >= e))
_, r_atom_m, _, _, _ = lrpd(atom_m)
rref_atom = rref(r_atom_m)
print('')
print('Reduzierte Stufenform(A): \n rref(A) = \n' + str(rref_atom))
b = rref_atom[:e, c - rho - 1:]
stoech_m = np.concatenate([
-b.T, np.eye(c - rho, dtype=float)
], axis=1)
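# The equilibrium constants of the (C - rho) independent reactions follow from
# ln K_j = -sum_i nu_ji * g_i^0 / (R T), i.e. K = exp(-N g^0 / (R T)) with N the
# stoichiometric matrix assembled above.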
k_t = np.exp(-stoech_m.dot(g_t / (r * temp)))
print('Stöchiometriche Matrix ((C-rho) X C): \n N = \n' + str(stoech_m))
print('A N^T = 0')
print(atom_m.dot(stoech_m.T))
print('Kj = ' + str(k_t))
print('sum(stoech_m)')
print(np.sum(stoech_m[:, :(c - rho) - 1]))
nach_g_sortieren = np.argsort(g_t)
atom_m = atom_m[:, nach_g_sortieren]
rho = np.linalg.matrix_rank(atom_m)
_, r_atom_m, _, _, _ = lrpd(atom_m)
rref_atom = rref(r_atom_m)
b = rref_atom[:e, c - rho - 1:]
stoech_m = np.concatenate([
-b.T, np.eye(c - rho, dtype=float)
], axis=1)
k_t = np.exp(-stoech_m.dot(g_t / (r * temp)))
print('Namen, nach g0 sortiert:')
print([namen[i] for i in nach_g_sortieren])
print('A, nach g0 sortiert:')
print(atom_m)
print('rho:')
print(rho)
print('Kj')
print(k_t)
print('sum(stoech_m)')
print(np.sum(stoech_m[:, :(c - rho) - 1]))
print('Atomischer Vektor m:')
print(np.sum(atom_m * (ne[nach_g_sortieren]), axis=1))
print('(4) Schlüsselkomponente')
print('Mögliche Gruppen mit Rang>=rho:')
for comb in combinations(range(9), 4):
mat = atom_m[:, comb]
rank = np.linalg.matrix_rank(mat)
if rank >= rho:
print('Rang: ' + str(rank))
        print(np.array(namen))
# -*- coding: utf-8 -*-
#
#Created on Fri Apr 21 11:13:09 2017
#
#author: <NAME>
#
from joblib import Parallel,delayed
from abc import abstractmethod
import numpy as np
import scipy.linalg
from scipy.special import gammaln,iv
import math
import warnings
import h5py
import time
def _full_covariance_matrix(points,mean,weight,resp,reg_covar):
"""
Compute the correspondong covariance matrix
"""
_,dim = points.shape
diff = points - mean
diff_weighted = diff * resp
cov = 1/weight * np.dot(diff_weighted.T,diff)
cov.flat[::dim + 1] += reg_covar
return cov
def _full_covariance_matrices(points,means,weights,resp,reg_covar,n_jobs=1):
"""
Compute the full covariance matrices
"""
nb_points,dim = points.shape
n_components,_ = means.shape
covariance = np.asarray(Parallel(n_jobs=n_jobs,backend='threading')(
delayed(_full_covariance_matrix)(points,means[i],weights[i],resp[:,i:i+1],
reg_covar) for i in range(n_components)))
return covariance
def _spherical_covariance_matrices(points,means,weights,assignements,reg_covar,n_jobs=1):
"""
Compute the coefficients for the spherical covariances matrices
"""
n_points,dim = points.shape
n_components,_ = means.shape
covariance = np.zeros(n_components)
for i in range(n_components):
assignements_i = assignements[:,i:i+1]
points_centered = points - means[i]
points_centered_weighted = points_centered * assignements_i
product = points_centered * points_centered_weighted
covariance[i] = np.sum(product)/weights[i]
covariance[i] += reg_covar
return covariance / dim
def _compute_precisions_chol(cov,covariance_type):
    if covariance_type == 'full':
n_components, n_features, _ = cov.shape
precisions_chol = np.empty((n_components, n_features, n_features))
for k, covariance in enumerate(cov):
try:
cov_chol = scipy.linalg.cholesky(covariance, lower=True)
except scipy.linalg.LinAlgError:
raise ValueError(str(k) + "-th covariance matrix non positive definite")
precisions_chol[k] = scipy.linalg.solve_triangular(cov_chol,
np.eye(n_features),
lower=True, check_finite=False).T
# might save precisions_chol[k] on disk
return precisions_chol
def _log_normal_matrix_core(points,mu,prec_chol):
y = np.dot(points,prec_chol) - np.dot(mu,prec_chol)
return np.sum(np.square(y),axis=1)
def _log_normal_matrix(points,means,cov,covariance_type,n_jobs=1):
"""
    This method computes the log probability density of a Gaussian for each point and each component. Each row
    corresponds to a point from points.
    :param points: an array of points (n_points,dim)
    :param means: an array of k points which are the means of the clusters (n_components,dim)
    :param cov: an array of k arrays which are the covariance matrices (n_components,dim,dim)
    :return: an array containing the log probability densities (n_points,n_components)
"""
n_points,dim = points.shape
n_components,_ = means.shape
if covariance_type == "full":
precisions_chol = _compute_precisions_chol(cov, covariance_type)
# Marvin : np.log(np.linalg.det(X)) has been changed to np.linalg.slogdet(X)
# as it is numerically more stable !
sign, log_det_chol = np.linalg.slogdet(precisions_chol)
log_det_chol = sign * log_det_chol
# may need to read precisions_chol[k] from disk
log_prob = Parallel(n_jobs=n_jobs, backend='threading')(
delayed(_log_normal_matrix_core)(points, means[k], precisions_chol[k]) for k in range(n_components))
log_prob = np.asarray(log_prob).T
elif covariance_type == "spherical":
precisions_chol = np.sqrt(np.reciprocal(cov))
log_det_chol = dim * np.log(precisions_chol)
log_prob = np.empty((n_points,n_components))
for k, (mu, prec_chol) in enumerate(zip(means,precisions_chol)):
y = prec_chol * (points - mu)
log_prob[:,k] = np.sum(np.square(y), axis=1)
return -.5 * (dim * np.log(2*np.pi) + log_prob) + log_det_chol
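# --- Illustrative sketch (not part of the original module): evaluating the
# log-density matrix for two spherical components; the numbers are arbitrary.
def _example_log_normal_matrix():
    rng = np.random.RandomState(1)
    points = rng.randn(5, 2)
    means = np.array([[0.0, 0.0], [3.0, 3.0]])
    cov = np.array([1.0, 0.5])          # spherical case: one variance per component
    log_pdf = _log_normal_matrix(points, means, cov, "spherical")
    return log_pdf.shape                # (5, 2): one log-density per point and component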
def _log_vMF_matrix(points,means,K,n_jobs=1):
"""
    This method computes the log probability density of a von Mises-Fisher law for each point and each component. Each row
    corresponds to a point from points.
    :param points: an array of points (n_points,dim)
    :param means: an array of k points which are the means of the clusters (n_components,dim)
    :param K: the concentration parameter(s) of the von Mises-Fisher distributions
    :return: an array containing the log probability densities (n_points,n_components)
"""
n_points,dim = points.shape
n_components,_ = means.shape
dim = float(dim)
log_prob = K * np.dot(points,means.T)
    # Regularisation to avoid infinite terms
bessel_term = iv(dim*0.5-1,K)
    idx = np.where(bessel_term==np.inf)
import random
import numpy as np
import torch
import torch.nn.functional as F
from torch.optim import Adam
from rl import use_gpu
from rl.memory.memory import PrioritizedReplayBuffer, ReplayBuffer
from rl.models.dqn import DQN
from rl.policies.eps_greedy import EpsGreedy
class Agent:
def act(self, obs, t):
pass
def optimize(self, batch_size):
pass
class DqnAgent(Agent):
def __init__(self, state_dim, action_dim, lr=1e-4, l2_reg=1e-3, hidden_layers=None, activation=F.relu, gamma=1.0,
double=True, duel=True, loss_fct=F.mse_loss, mem_size=10000, mem_type='per', **eps_params):
if hidden_layers is None:
hidden_layers = [32, 32]
self.state_dim = state_dim
self.action_dim = action_dim
self.gamma = gamma
self.double = double
self.loss_fct = loss_fct
self.mem_type = mem_type
        if self.mem_type == 'per':
self.memory = PrioritizedReplayBuffer(capacity=mem_size)
else:
self.memory = ReplayBuffer(capacity=mem_size)
self.dqn = DQN(self.state_dim, self.action_dim, hidden_layers, activation=activation, duel=duel)
self.dqn_target = DQN(self.state_dim, self.action_dim, hidden_layers, activation=activation, duel=duel)
self.dqn_target.load_state_dict(self.dqn.state_dict())
self.dqn_target.eval()
self.epsilon_greedy = EpsGreedy(**eps_params)
self.optimizer = Adam(self.dqn.parameters(), lr=lr, weight_decay=l2_reg)
def act(self, obs, t):
if random.random() < self.epsilon_greedy.value(t):
return random.randint(0, self.action_dim - 1)
obs_t = torch.from_numpy(obs).float()
if use_gpu():
obs_t = obs_t.cuda()
q = self.dqn.forward(obs_t)
max_q, action = q.max(0)
return action.data.cpu().numpy()
def optimize(self, batch_size):
batch, idxs, is_weights = self.memory.sample(batch_size)
states = torch.from_numpy(np.array(batch["states"], dtype=np.float32))
actions = torch.from_numpy(np.array(batch["actions"], dtype=np.int64))
rewards = torch.from_numpy(np.array(batch["rewards"], dtype=np.float32))
        next_states = torch.from_numpy(np.array(batch["next_states"], dtype=np.float32))
import numpy as np
import copy
import warnings
from astropy.tests.helper import pytest
from numpy.random import poisson, standard_cauchy
from scipy.signal.ltisys import TransferFunction
from stingray import Lightcurve
from stingray.events import EventList
from stingray import Multitaper, Powerspectrum
np.random.seed(1)
class TestMultitaper(object):
@classmethod
def setup_class(cls):
tstart = 0.0
tend = 1.484
dt = 0.0001
time = np.arange(tstart + 0.5*dt, tend + 0.5*dt, dt)
mean_count_rate = 100.0
mean_counts = mean_count_rate * dt
poisson_counts = np.random.poisson(mean_counts, size=time.shape[0])
cls.lc = Lightcurve(time, counts=poisson_counts, dt=dt,
gti=[[tstart, tend]])
mean = 0
standard_deviation = 0.1
gauss_counts = \
np.random.normal(mean, standard_deviation, size=time.shape[0])
cls.lc_gauss = Lightcurve(time, counts=gauss_counts, dt=dt,
gti=[[tstart, tend]], err_dist='gauss')
def test_lc_keyword_deprecation(self):
mtp1 = Multitaper(self.lc)
with pytest.warns(DeprecationWarning) as record:
mtp2 = Multitaper(lc=self.lc)
assert np.any(['lc keyword' in r.message.args[0]
for r in record])
assert np.allclose(mtp1.power, mtp2.power)
assert np.allclose(mtp1.freq, mtp2.freq)
def test_make_empty_multitaper(self):
mtp = Multitaper()
assert mtp.norm == 'frac'
assert mtp.freq is None
assert mtp.power is None
assert mtp.multitaper_norm_power is None
assert mtp.eigvals is None
assert mtp.power_err is None
assert mtp.df is None
assert mtp.m == 1
assert mtp.nphots is None
assert mtp.jk_var_deg_freedom is None
@pytest.mark.parametrize("lightcurve", ['lc', 'lc_gauss'])
def test_make_multitaper_from_lightcurve(self, lightcurve):
mtp = Multitaper(getattr(self, lightcurve))
assert mtp.norm == "frac"
assert mtp.fullspec is False
assert mtp.meancounts == getattr(self, lightcurve).meancounts
assert mtp.nphots == np.float64(np.sum(getattr(self, lightcurve).counts))
assert mtp.err_dist == getattr(self, lightcurve).err_dist
assert mtp.dt == getattr(self, lightcurve).dt
assert mtp.n == getattr(self, lightcurve).time.shape[0]
assert mtp.df == 1.0 / getattr(self, lightcurve).tseg
assert mtp.m == 1
assert mtp.freq is not None
assert mtp.multitaper_norm_power is not None
assert mtp.power is not None
assert mtp.power_err is not None
assert mtp.jk_var_deg_freedom is not None
def test_init_with_norm_not_str(self):
with pytest.raises(TypeError):
mpt = Multitaper(norm=1)
def test_init_with_lightcurve(self):
assert Multitaper(self.lc)
def test_init_without_lightcurve(self):
with pytest.raises(TypeError):
assert Multitaper(self.lc.counts)
def test_init_with_nonsense_data(self):
nonsense_data = [None for i in range(100)]
with pytest.raises(TypeError):
assert Multitaper(nonsense_data)
def test_init_with_nonsense_norm(self):
nonsense_norm = "bla"
with pytest.raises(ValueError):
assert Multitaper(self.lc, norm=nonsense_norm)
def test_init_with_wrong_norm_type(self):
nonsense_norm = 1.0
with pytest.raises(TypeError):
assert Multitaper(self.lc, norm=nonsense_norm)
@pytest.mark.parametrize('low_bias', [False, True])
def test_make_multitaper_adaptive_and_low_bias(self, low_bias):
mtp = Multitaper(self.lc, low_bias=low_bias, adaptive=True)
if low_bias:
assert np.min(mtp.eigvals) >= 0.9
assert mtp.jk_var_deg_freedom is not None
assert mtp.freq is not None
assert mtp.multitaper_norm_power is not None
@pytest.mark.parametrize('lightcurve', ['lc', 'lc_gauss'])
def test_make_multitaper_var(self, lightcurve):
if getattr(self, lightcurve).err_dist == "poisson":
mtp = Multitaper(getattr(self, lightcurve))
assert mtp.err_dist == "poisson"
assert mtp.var == getattr(self, lightcurve).meancounts
else:
with pytest.warns(UserWarning) as record:
mtp = Multitaper(getattr(self, lightcurve))
assert mtp.err_dist == "gauss"
assert mtp.var == \
np.mean(getattr(self, lightcurve).counts_err) ** 2
assert np.any(["not poisson" in r.message.args[0]
for r in record])
@pytest.mark.parametrize('lombscargle', [False, True])
def test_fourier_multitaper_with_invalid_NW(self, lombscargle):
with pytest.raises(ValueError):
mtp = Multitaper(self.lc, NW=0.1, lombscargle=lombscargle)
@pytest.mark.parametrize("adaptive, jackknife",
[(a, j) for a in (True, False) for j in (True, False)])
def test_fourier_multitaper_with_adaptive_jackknife_combos(self, adaptive, jackknife):
mtp = Multitaper(self.lc, adaptive=adaptive, jackknife=jackknife)
assert mtp.multitaper_norm_power is not None
assert mtp.jk_var_deg_freedom is not None
def test_fractional_rms_in_frac_norm_is_consistent(self):
"""
Copied from test_powerspectrum.py
"""
time = np.arange(0, 100, 1) + 0.5
poisson_counts = np.random.poisson(100.0,
size=time.shape[0])
lc = Lightcurve(time, counts=poisson_counts, dt=1,
gti=[[0, 100]])
mtp = Multitaper(lc, norm="leahy")
rms_mtp_l, rms_err_l = mtp.compute_rms(min_freq=mtp.freq[1],
max_freq=mtp.freq[-1],
white_noise_offset=0)
mtp = Multitaper(lc, norm="frac")
rms_mtp, rms_err = mtp.compute_rms(min_freq=mtp.freq[1],
max_freq=mtp.freq[-1],
white_noise_offset=0)
assert np.allclose(rms_mtp, rms_mtp_l, atol=0.01)
        assert np.allclose(rms_err, rms_err_l, atol=0.01)
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
# from celluloid import Camera
from skmultiflow.core import BaseSKMObject, ClassifierMixin
from sklearn.cluster import KMeans
class Minas(BaseSKMObject, ClassifierMixin):
def __init__(self,
kini=3,
cluster_algorithm='kmeans',
random_state=0,
min_short_mem_trigger=10,
min_examples_cluster=10,
threshold_strategy=1,
threshold_factor=1.1,
window_size=100,
update_summary=False,
animation=False):
super().__init__()
self.kini = kini
self.random_state = random_state
accepted_algos = ['kmeans'] # TODO: define list of accepted algos
        if cluster_algorithm not in accepted_algos:
            raise ValueError('Available algorithms: {}'.format(', '.join(accepted_algos)))
        self.cluster_algorithm = cluster_algorithm
self.microclusters = [] # list of microclusters
self.before_offline_phase = True
self.short_mem = []
self.sleep_mem = []
self.min_short_mem_trigger = min_short_mem_trigger
self.min_examples_cluster = min_examples_cluster
self.threshold_strategy = threshold_strategy
self.threshold_factor = threshold_factor
self.window_size = window_size
self.update_summary = update_summary
self.animation = animation
self.sample_counter = 0 # to be used with window_size
if self.animation:
# TODO use Camera
# self.fig = plt.figure()
# self.camera = Camera(self.fig)
self.animation_frame_num = 0
def fit(self, X, y, classes=None, sample_weight=None):
"""fit means fitting in the OFFLINE phase"""
self.microclusters = self.offline(X, y)
self.before_offline_phase = False
return self
def partial_fit(self, X, y, classes=None, sample_weight=None):
self.sample_counter += 1
if self.before_offline_phase:
self.fit(X, y)
else:
y_preds, cluster_preds = self.predict(X, ret_cluster=True)
timestamp = self.sample_counter
# TODO: remove this ugly loop too
for point_x, y_pred, cluster in zip(X, y_preds, cluster_preds):
if y_pred != -1: # the model can explain point_x
cluster.update_cluster(point_x, y_pred, timestamp, self.update_summary)
else: # the model cannot explain point_x
self.short_mem.append(ShortMemInstance(point_x, timestamp))
if len(self.short_mem) >= self.min_short_mem_trigger:
self.novelty_detect()
if self.animation:
self.plot_clusters()
# forgetting mechanism
if self.sample_counter % self.window_size == 0:
self.trigger_forget()
return self
def predict(self, X, ret_cluster=False):
"""X is an array"""
# TODO: remove this ugly loop
pred_labels = []
pred_clusters = []
for point in X:
# find closest centroid
closest_cluster = min(self.microclusters,
key=lambda cl: cl.distance_to_centroid(point))
if closest_cluster.encompasses(point): # classify in this cluster
pred_labels.append(closest_cluster.label)
pred_clusters.append(closest_cluster)
else: # classify as unknown
pred_labels.append(-1)
pred_clusters.append(None)
if ret_cluster:
return np.asarray(pred_labels), pred_clusters
else:
return np.asarray(pred_labels)
def predict_proba(self, X):
# TODO
pass
def offline(self, X_train, y_train):
microclusters = []
# in offline phase, consider all instances arriving at the same time in the microclusters:
timestamp = len(X_train)
if self.cluster_algorithm == 'kmeans':
for y_class in np.unique(y_train):
# subset with instances from each class
X_class = X_train[y_train == y_class]
class_cluster_clf = KMeans(n_clusters=self.kini,
random_state=self.random_state)
class_cluster_clf.fit(X_class)
for class_cluster in np.unique(class_cluster_clf.labels_):
# get instances in cluster
cluster_instances = X_class[class_cluster_clf.labels_ == class_cluster]
microclusters.append(
MicroCluster(y_class, cluster_instances, timestamp)
)
return microclusters
def novelty_detect(self):
possible_clusters = []
X = np.array([instance.point for instance in self.short_mem])
if self.cluster_algorithm == 'kmeans':
cluster_clf = KMeans(n_clusters=self.kini,
random_state=self.random_state)
cluster_clf.fit(X)
for cluster_label in np.unique(cluster_clf.labels_):
cluster_instances = X[cluster_clf.labels_ == cluster_label]
possible_clusters.append(
MicroCluster(-1, cluster_instances, self.sample_counter))
for cluster in possible_clusters:
if cluster.is_cohesive(self.microclusters) and cluster.is_representative(self.min_examples_cluster):
closest_cluster = cluster.find_closest_cluster(self.microclusters)
closest_distance = cluster.distance_to_centroid(closest_cluster.centroid)
threshold = self.best_threshold(cluster, closest_cluster,
self.threshold_strategy, self.threshold_factor)
# TODO make these ifs elifs cleaner
if closest_distance < threshold: # the new microcluster is an extension
cluster.label = closest_cluster.label
elif self.sleep_mem: # look in the sleep memory, if not empty
closest_cluster = cluster.find_closest_cluster(self.sleep_mem)
closest_distance = cluster.distance_to_centroid(closest_cluster.centroid)
if closest_distance < threshold: # check again: the new microcluster is an extension
cluster.label = closest_cluster.label
# awake old cluster
self.sleep_mem.remove(closest_cluster)
closest_cluster.timestamp = self.sample_counter
self.microclusters.append(closest_cluster)
else: # the new microcluster is a novelty pattern
cluster.label = max([cluster.label for cluster in self.microclusters]) + 1
else: # the new microcluster is a novelty pattern
cluster.label = max([cluster.label for cluster in self.microclusters]) + 1
# add the new cluster to the model
self.microclusters.append(cluster)
# remove these examples from short term memory
for instance in cluster.instances:
self.short_mem.remove(instance)
def best_threshold(self, new_cluster, closest_cluster, strategy, factor):
def run_strategy_1():
factor_1 = factor
# factor_1 = 5 # good for artificial, separated data sets
return factor_1 * np.std(closest_cluster.distance_to_centroid(closest_cluster.instances))
if strategy == 1:
return run_strategy_1()
else:
# factor_2 = factor_3 = 1.2 # good for artificial, separated data sets
factor_2 = factor_3 = factor
clusters_same_class = self.get_clusters_in_class(closest_cluster.label)
if len(clusters_same_class) == 1:
return run_strategy_1()
else:
class_centroids = np.array([cluster.centroid for cluster in clusters_same_class])
distances = closest_cluster.distance_to_centroid(class_centroids)
if strategy == 2:
return factor_2 * np.max(distances)
elif strategy == 3:
return factor_3 * np.mean(distances)
def get_clusters_in_class(self, label):
return [cluster for cluster in self.microclusters if cluster.label == label]
def trigger_forget(self):
for cluster in self.microclusters:
if cluster.timestamp < self.sample_counter - self.window_size:
self.sleep_mem.append(cluster)
self.microclusters.remove(cluster)
for instance in self.short_mem:
if instance.timestamp < self.sample_counter - self.window_size:
self.short_mem.remove(instance)
def plot_clusters(self):
"""Simplistic plotting, assumes elements in cluster have two dimensions"""
points = pd.DataFrame(columns=['x', 'y', 'pred_label'])
cluster_info = pd.DataFrame(columns=['label', 'centroid', 'radius'])
for cluster in self.microclusters:
cluster_info = cluster_info.append(pd.Series({'label': cluster.label,
'centroid': cluster.centroid,
'radius': cluster.radius}),
ignore_index=True)
# add points from cluster
for point in cluster.instances:
points = points.append(pd.Series({'x': point[0],
'y': point[1],
'pred_label': cluster.label}),
ignore_index=True)
# add points from short term memory
for mem_instance in self.short_mem:
points = points.append(pd.Series({'x': mem_instance.point[0],
'y': mem_instance.point[1],
'pred_label': -1}),
ignore_index=True)
color_names = ['k', 'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple',
'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
assert len(cluster_info.label.unique()) <= len(color_names) # limited to these colors for now
# colormap to be indexed by label id
colormap = pd.DataFrame({'name': color_names}, index=range(-1, len(color_names) - 1))
mapped_label_colors = colormap.loc[points['pred_label']].values[:, 0]
plt.scatter(points['x'], points['y'], c=mapped_label_colors, s=10, alpha=0.3)
plt.gca().set_aspect('equal', adjustable='box') # equal scale for both axes
circles = []
for label, centroid, radius in cluster_info.values:
circles.append(plt.Circle((centroid[0], centroid[1]), radius,
color=colormap.loc[label].values[0], alpha=0.1))
for circle in circles:
plt.gcf().gca().add_artist(circle)
# self.camera.snap()
import os
if not os.path.exists('animation'):
os.makedirs('animation')
plt.savefig(f'animation/clusters_{self.animation_frame_num:05}.png', dpi=300)
plt.close()
self.animation_frame_num += 1
def plot_animation(self):
pass
# TODO
# animation = self.camera.animate()
# animation.save('animation.mp4')
def confusion_matrix(self, X_test=None, y_test=None):
"""Creates a confusion matrix.
It must be run on a fitted classifier that has already seen the examples in the test set.
Parameters
----------
X_test : numpy.ndarray
The set of data samples to predict the class labels for.
y_test : numpy.ndarray
The set of class labels for the data samples.
Returns
-------
pandas.core.frame.DataFrame
"""
# Init confusion matrix
y_test_classes = np.unique(y_test) # rows
        detected_classes = np.unique([cluster.label for cluster in self.microclusters])
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# This is an EXUDYN example
#
# Details: Test suite for Rotation Vector Update formulas
# Refs.: <NAME>., <NAME>.: Time integration of rigid bodies modelled with three rotation parameters, Multibody System Dynamics (2020)
#
# Author: <NAME>
# Date: 2020-06-02
#
# Copyright:This file is part of Exudyn. Exudyn is free software. You can redistribute it and/or modify it under the terms of the Exudyn license. See 'LICENSE.txt' for more details.
#
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
from timeIntegrationOfRotationVectorFormulas import *
from numpy import linalg as LA
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.close("all")
globalErrorBound = 1e-14
###############################################################################
#
# Test compose rotation vector
def TestComposeRotationVector():
#
#pi = 3.141592653589793
pi = np.pi
#+++++++++++++++++++++++++++++++++ TEST 1 +++++++++++++++++++++++++++++++++
v0 = np.array([1.0, 2.0, 3.0])
Omega = np.array([0.1, 0.2, 0.3])
# matlab results:
vMatlab = np.array([1.100000000000000, 2.200000000000000, 3.300000000000000])
nMAtlab = np.array([0.267261241912424, 0.534522483824849, 0.801783725737273])
phiMatlab = 4.115823125451335
# python results
vPython = ComposeRotationVectors(v0,Omega)
nPython = ComputeRotationAxisFromRotationVector(vPython)
    phiPython = LA.norm(vPython)
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for OOD evaluation.
References:
[1]: Lee, Kimin, et al. "A simple unified framework for detecting
out-of-distribution samples and adversarial attacks." Advances in neural
information processing systems 31 (2018).
https://arxiv.org/abs/1807.03888
"""
import jax
import numpy as np
import scipy
import sklearn.metrics
SUPPORTED_OOD_METHODS = ('msp', 'entropy', 'maha', 'rmaha')
# TODO(dusenberrymw): Move it to robustness metrics.
def compute_ood_metrics(targets,
predictions,
tpr_thres=0.95,
targets_threshold=None):
"""Computes Area Under the ROC and PR curves and FPRN.
ROC - Receiver Operating Characteristic
PR - Precision and Recall
FPRN - False positive rate at which true positive rate is N.
Args:
targets: np.ndarray of targets, either 0 or 1, or continuous values.
predictions: np.ndarray of predictions, any value.
tpr_thres: float, threshold for true positive rate.
targets_threshold: float, if target values are continuous values, this
threshold binarizes them.
Returns:
A dictionary with AUC-ROC, AUC-PR, and FPRN scores.
"""
if targets_threshold is not None:
targets = np.array(targets)
targets = np.where(targets < targets_threshold,
np.zeros_like(targets, dtype=np.int32),
np.ones_like(targets, dtype=np.int32))
fpr, tpr, _ = sklearn.metrics.roc_curve(targets, predictions)
fprn = fpr[np.argmax(tpr >= tpr_thres)]
return {
'auroc': sklearn.metrics.roc_auc_score(targets, predictions),
'auprc': sklearn.metrics.average_precision_score(targets, predictions),
'fprn': fprn,
}
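# Illustrative sketch (not part of the original module): toy OOD scores where
# in-distribution examples carry label 0, OOD examples carry label 1, and a
# higher score means "more OOD". With this perfect separation the AUROC and
# AUPRC are 1.0 and the FPR@95TPR is 0.0.
def _example_compute_ood_metrics():
  targets = np.array([0, 0, 0, 1, 1, 1])
  scores = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
  return compute_ood_metrics(targets, scores)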
class OODMetric:
"""OOD metric class that stores scores and OOD labels."""
def __init__(self, dataset_name, method_name):
if method_name not in SUPPORTED_OOD_METHODS:
raise NotImplementedError(
'Only %s are supported for OOD evaluation! Got metric_name=%s!' %
(','.join(SUPPORTED_OOD_METHODS), method_name))
self.datatset_name = dataset_name
self.method_name = method_name
self.metric_name = f'{dataset_name}_{method_name}'
self.scores = []
self.labels = []
def update(self, scores, labels):
self.scores += list(scores)
self.labels += list(labels)
def reset_states(self):
self.scores = []
self.labels = []
def get_scores_and_labels(self):
return self.scores, self.labels
def get_metric_name(self):
return self.metric_name
def compute_ood_scores(self, scores):
"""Compute OOD scores.
Compute OOD scores that indicate uncertainty.
Args:
scores: A dict that contains scores for computing OOD scores. A full dict
can contain probs, Mahalanobis distance, and Relative Mahalanobis
distance. The scores should be of the size [batch_size, num_classes]
Returns:
OOD scores: OOD scores that indicate uncertainty. Should be of the size
[batch_size, ]
Raises:
KeyError: An error occurred when the corresponding scores needed for
computing OOD scores are not found in the scores dict.
"""
ood_scores = None
if self.method_name == 'msp':
if 'probs' in scores:
ood_scores = 1 - np.max(scores['probs'], axis=-1)
else:
raise KeyError(
('The variable probs is needed for computing MSP OOD score. ',
'But it is not found in the dict.'))
elif self.method_name == 'entropy':
if 'entropy' in scores:
ood_scores = scores['entropy']
else:
raise KeyError(
'The variable entropy is needed for computing Entropy OOD score.',
'But it is not found in the dict.')
elif self.method_name == 'maha':
if 'dists' in scores:
ood_scores = np.min(scores['dists'], axis=-1)
else:
raise KeyError(
('The variable dists is needed for computing Mahalanobis distance ',
'OOD score. But it is not found in the dict.'))
elif self.method_name == 'rmaha':
if 'dists' in scores and 'dists_background' in scores:
ood_scores = np.min(
scores['dists'], axis=-1) - scores['dists_background'].reshape(-1)
else:
raise KeyError((
'The variable dists and dists_background are needed for computing ',
'Mahalanobis distance OOD score. But it is not found in the dict.'))
return ood_scores
def compute_metrics(self, tpr_thres=0.95, targets_threshold=None):
return compute_ood_metrics(
self.labels,
self.scores,
tpr_thres=tpr_thres,
targets_threshold=targets_threshold)
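# Illustrative sketch (not part of the original module): using the metric
# container with MSP scores. The probabilities and OOD labels are made up.
def _example_ood_metric_container():
  metric = OODMetric(dataset_name='cifar100', method_name='msp')
  probs = np.array([[0.9, 0.1], [0.8, 0.2], [0.55, 0.45], [0.5, 0.5]])
  ood_scores = metric.compute_ood_scores({'probs': probs})  # 1 - max prob per example
  metric.update(ood_scores, [0, 0, 1, 1])  # label 1 marks an OOD example
  return metric.compute_metrics()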
def compute_mean_and_cov(embeds, labels):
"""Computes class-specific means and shared covariance matrix of given embedding.
The computation follows Eq (1) in [1].
Args:
embeds: An np.array of size [n_train_sample, n_dim], where n_train_sample is
the sample size of training set, n_dim is the dimension of the embedding.
labels: An np.array of size [n_train_sample, ]
Returns:
mean_list: A list of len n_class, and the i-th element is an np.array of
      size [n_dim, ] corresponding to the mean of the fitted Gaussian distribution
      for the i-th class.
    cov: The shared covariance matrix of the size [n_dim, n_dim].
"""
n_dim = embeds.shape[1]
n_class = int(np.max(labels)) + 1
mean_list = []
cov = np.zeros((n_dim, n_dim))
for class_id in range(n_class):
data = embeds[labels == class_id]
data_mean = np.mean(data, axis=0)
cov += np.dot((data - data_mean).T, (data - data_mean))
mean_list.append(data_mean)
cov = cov / len(labels)
return mean_list, cov
def compute_mahalanobis_distance(embeds, mean_list, cov, epsilon=1e-20):
"""Computes Mahalanobis distance between the input to the fitted Guassians.
The computation follows Eq.(2) in [1].
Args:
embeds: An np.array of size [n_test_sample, n_dim], where n_test_sample is
the sample size of the test set, n_dim is the size of the embeddings.
mean_list: A list of len n_class, and the i-th element is an np.array of
      size [n_dim, ] corresponding to the mean of the fitted Gaussian
      distribution for the i-th class.
    cov: The shared covariance matrix of the size [n_dim, n_dim].
epsilon: The small value added to the diagonal of the covariance matrix to
avoid singularity.
Returns:
out: An np.array of size [n_test_sample, n_class] where the [i, j] element
corresponds to the Mahalanobis distance between i-th sample to the j-th
class Guassian.
"""
n_sample = embeds.shape[0]
n_class = len(mean_list)
v = cov + np.eye(cov.shape[0], dtype=int) * epsilon # avoid singularity
vi = np.linalg.inv(v)
means = np.array(mean_list)
out = np.zeros((n_sample, n_class))
for i in range(n_sample):
x = embeds[i]
out[i, :] = np.diag(np.dot(np.dot((x - means), vi), (x - means).T))
return out
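# Illustrative sketch (not part of the original module): fit class-conditional
# Gaussians on toy 2-D embeddings and score one query point, following
# Eq. (1)-(2) of [1]. The embeddings, labels and query point are made up.
def _example_mahalanobis():
  embeds = np.array([[0., 0.], [1., 0.], [0., 1.],
                     [5., 5.], [6., 5.], [5., 6.]])
  labels = np.array([0, 0, 0, 1, 1, 1])
  mean_list, cov = compute_mean_and_cov(embeds, labels)
  dists = compute_mahalanobis_distance(np.array([[0.2, 0.2]]), mean_list, cov)
  return dists  # shape [1, 2]; the distance to class 0 is much smaller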
def load_ood_datasets(
dataset,
ood_datasets,
ood_split,
pp_eval,
ood_methods,
train_split,
data_dir,
get_data_fn,
):
"""Load datasets for OOD evaluation.
The datasets should include in-distribution test dataset, OOD test dataset,
and in-distribution training dataset if Mahalanobis distance based method is
applied.
Args:
dataset: The name of in-distribution dataset.
ood_datasets: A list of OOD dataset names.
ood_split: The split of the OOD dataset.
pp_eval: The pre-processing method applied to the input data.
ood_methods: The OOD methods used for evaluation. Can be choose from 'msp',
'maha', 'rmaha'.
train_split: The split of the training in-distribution dataset.
data_dir: The data directory.
get_data_fn: A function for generates a tuple of (data iterator, num_steps)
given a dataset name or builder, split, preprocessing function, and
optional data_dir.
Returns:
ood_ds: A dictionary with dataset label as the key and dataset iterator as
the value.
ood_ds_names: A list of dataset labels.
"""
ood_ds = {}
ood_ds_names = []
if isinstance(ood_split, str):
ood_ds.update({'ind': get_data_fn(dataset, ood_split, pp_eval, data_dir)})
ood_ds_names.append('ind')
for ood_dataset in ood_datasets:
ood_ds_name = 'ood_' + ood_dataset
ood_ds.update({
ood_ds_name: get_data_fn(ood_dataset, ood_split, pp_eval, data_dir),
})
ood_ds_names.append(ood_ds_name)
else:
raise NotImplementedError(
'Only string type of ood_split is supported for OOD evaluation! Got ood_split=%s!'
% str(ood_split))
if 'maha' in ood_methods or 'rmaha' in ood_methods:
# Adding training set for fitting class conditional Gaussian for
# Mahalanoabis distance based method
if isinstance(train_split, str):
ood_ds.update(
{'train_maha': get_data_fn(dataset, train_split, pp_eval, data_dir)})
ood_ds_names.insert(0, 'train_maha')
else:
raise NotImplementedError(
'Only string type of train_split is supported for OOD evaluation! Got train_split=%s!'
% str(train_split))
return ood_ds, ood_ds_names
def eval_ood_metrics(ood_ds, ood_ds_names, ood_methods, evaluation_fn,
opt_repl):
"""Evaluate the model for OOD detection and record metrics."""
# MSP stands for maximum softmax probability, max(softmax(logits)).
# MSP can be used as confidence score.
# Maha stands for Mahalanobis distance between the test input and
# fitted class conditional Gaussian distributions based on the
# embeddings. Mahalanobis distance can be used as uncertainty score
# or in other words, negative Mahalanobis distance can be used as
# confidence score.
  # RMaha stands for Relative Mahalanobis distance (Ren et al. 2021)
# https://arxiv.org/abs/2106.09022
ood_metrics = {}
for ood_ds_name in ood_ds_names:
if 'ood' in ood_ds_name:
ood_metrics[ood_ds_name] = [
OODMetric(ood_ds_name, ood_method) for ood_method in ood_methods
]
output = {}
  # Mean and cov of class conditional Gaussian in Mahalanobis distance.
  # Mean_background and cov_background for the unified Gaussian model
# regardless of class labels for computing Relative Mahalanobis distance
mean_list, cov = None, None
mean_list_background, cov_background = None, None
for ood_ds_name in ood_ds_names:
# The dataset train_maha must come before ind and ood
    # because the train_maha will be used to estimate the class conditional
# mean and shared covariance.
val_iter, val_steps = ood_ds[ood_ds_name]
ncorrect, loss, nseen = 0, 0, 0
pre_logits_list, labels_list = [], []
for _, batch in zip(range(val_steps), val_iter):
batch_scores = {}
batch_ncorrect, batch_losses, batch_n, batch_metric_args = evaluation_fn(
opt_repl.target, batch['image'], batch['labels'], batch['mask'])
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
# Here we parse batch_metric_args to compute OOD metrics.
logits, labels, pre_logits, masks = batch_metric_args
masks_bool = np.array(masks[0], dtype=bool)
if not np.any(masks_bool):
continue # No valid examples in this batch.
if ood_ds_name == 'train_maha':
# For Mahalanobis distance, we need to first fit class conditional
# Gaussian using training data.
pre_logits_list.append(np.array(pre_logits[0])[masks_bool])
labels_list.append(np.array(labels[0])[masks_bool])
else:
# Computes Mahalanobis distance.
if mean_list is not None and cov is not None:
          dists = compute_mahalanobis_distance(
              np.array(pre_logits[0]), mean_list, cov)
'''
Author: <NAME>
Date Created: 180511
Objective: create node to get relative cone positions and absolute car
position data, then return where cones are in absolute coordinates.
kjgnote: first, will simulate random cone locations. will set overall loop
time of program to 1hz, in order to go slowly.
step 1: generate a constant output of cone locations with some noise.
later on, want to do them out of order as well...?
kjgnote about orientation: all cones lie in x-y plane. y is
forward, x is right, and z is up (out of page). coordinates are to always
be given in x-y-z values.
kjgnote: have buckled and will definitely use np arrays everywhere, not just
within functions. forget the other stuff...
kjgnote: currently, two schools of thought on how to figure out if a cone has
already been seen:
1. lookup method. get a cone in global coordinates, then check euclidean
distance to nearest cone. if within a certain threshold, ignore / add to an
average. if outside threshold, add to list.
2. some sort of kalman filter. take known positions of cones. when car moves,
estimate where they should be. take a measurement, then check again.
'''
# initializations ==============================================================
import time
from random import random
import matplotlib.pyplot as plt
def addnoise(arr,k=1):
''' add mu=0 centered gauss noise to numpy array given, simulate
real-world noise.
arr = numpy array, 2D
k = scale factor. default is 1
'''
import numpy as np
r=arr.shape[0]
c=arr.shape[1]
return arr+(np.random.rand(r,c)-0.5)*k
def cs_transform(oldcoord,pose):
''' Objective: take old coordinate value(s), return new within new 2D
coordinate system. will be primarily used for transforming cone local
coordinates to global coordinates. global coordinate system is relative
to starting position of car, assuming LOAM is used from lidar data
oldcoord = numpy array of 2D (x,y) cone data, allows 1 or more cones
pose = numpy array of (x,y,angle) data. angle is in radians
return: newcoord = numpy array of (x,y) global cone data
general steps:
1. receive a single cone coordinate and pose
2. put into matrices
3. transform
4. return new value as tuple
'''
import numpy as np
# create x_old col vector
# receiving 2D cone data, col-dominant x/y values
ones=np.ones((len(oldcoord),1))
x1=np.column_stack((oldcoord,ones)).transpose()
angle=(-1.0)*pose[2] # radians. -1 necessary because referring to old cs
s=np.sin(angle)
c=np.cos(angle)
tx=pose[0]
ty=pose[1]
T = np.array([[c,s,tx],[-s,c,ty],[0,0,1]])
# print T
x2=np.matmul(T,x1)
# print x2
# return (float(x2[0]),float(x2[1]))
return x2[0:2].transpose()
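# Illustrative sketch (not part of the original script): a cone 2 m straight
# ahead of a car sitting at (3, 4) and rotated 90 deg should land at (1, 4)
# in global coordinates. The numbers are made up for the example.
def _example_cs_transform():
    import numpy as np
    cone_local = np.array([[0.0, 2.0]])            # y is forward in the local frame
    pose = np.array([3.0, 4.0, np.radians(90)])    # (x, y, heading in radians)
    return cs_transform(cone_local, pose)          # approximately [[1., 4.]]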
# at this point, have transformed all cones into global coordinates
# now, want to start identifying whether or not a set of cones have already
# been seen or not... although this is probably something already covered
# in kalman filters... ?
'''
at this point, want to simulate the car standing still, but having noise while
looking at some cones. will use function to simulate this and see if data
becomes more accurate over time.
'''
# debugging region =============================================================
class ConeList(object):
''' class to handle cone list, maintenance, etc. will use numpy to speed up
    operations and provide compatibility with other code. cone data is
assumed to always be in global coordinates. local cone data requires the
car's global pose.
desired functions:
evaluate global cone values
evaluate local cone values
get ConeList
reset conelist (?)
'''
def __init__(self,threshold):
        ''' initialize data about object. will assume that object is created when
nothing has been identified yet, and cones are evaluated later.
self._cones = main cone list. 2D numpy array, col dominant
self.thresh = acceptable level of noise that is ignored. used for
differentiating between what is a new cone and old cone in the
lookup strategy / method.
'''
import numpy as np
self.np = np
self._cones='init'
self.thresh = threshold
        # threshold defines how much noise is allowed for differentiating between a new cone and an already-seen cone
def getlist(self):
if(type(self._cones) == type('')):
# no data has been yet given return zero list
return self.np.array([])
else:
return self._cones
def localToGlobal(self,oldcoord,pose):
''' Objective: take old coordinate value(s), return new within new 2D
coordinate system. will be primarily used for transforming cone local
coordinates to global coordinates. global coordinate system is relative
to starting position of car, assuming LOAM is used from lidar data
NOTE: in this context, 2D means numpy array has 2 non-empty dimensions,
wherever this is stated.
oldcoord = numpy array of 2D (x,y) cone data, allows 1 or more cones
pose = numpy array of (x,y,angle) data. angle is in radians
(return) = (x,y) global cone data. 2D numpy array, col dominant
general steps:
1. receive a single cone coordinate and pose
2. put into matrices
3. transform
4. return new value as tuple
'''
# create x_old col vector
# receiving 2D cone data, col-dominant x/y values
ones=self.np.ones((len(oldcoord),1))
x1=self.np.column_stack((oldcoord,ones)).transpose()
angle=(-1.0)*pose[2] # radians. -1 necessary because referring to old cs
s=self.np.sin(angle)
c=self.np.cos(angle)
tx=pose[0]
ty=pose[1]
T = self.np.array([[c,s,tx],[-s,c,ty],[0,0,1]])
# print T
x2=self.np.matmul(T,x1)
# print x2
# return (float(x2[0]),float(x2[1]))
return x2[0:2].transpose()
def eval_globalcones(self,newconesGlobal):
''' will follow lookup strategy here. given a set of global cone data,
need to evaluate whether the cones have already been detected.
newconesGlobal = (x,y) global cone data, 2D numpy array, col dominant
(return) = how many cones have been accepted into array, scalar integer
general steps, following lookup method:
1. take each iNEWcone
2. compare xy location with known jOLDcone list
        3. if euclidean distance to every known jOLDcone is above the threshold, accept
        iNEWcone into the array and add to the counter; otherwise treat it as already seen and reject it
4. when complete, return number of cones accepted.
basic steps:
D = zeros(len(K),len(U))
for ik in len(K):
for ju in len(U):
D[ik,ju] = 2norm(K[ik,:],U[ju,:])
for i in len(D[0,:]):
if(min(D[:,j]) > threshold):
K=np.row_stack(K,U[j,:])
'''
# initial run of object, meaning it has no data yet. add here
if(type(self._cones)==type('')):
self._cones = self.np.matmul(self.np.identity(len(newconesGlobal)),newconesGlobal)
# print 'would return here value of',len(newconesGlobal)
return len(newconesGlobal)
# end initial run section
U = newconesGlobal # make easier to refer to new cone array
K = self._cones # make easier to refer to old cone array
l_k = K.shape[0] # ease of use
l_u = U.shape[0] # ease of use
D = self.np.zeros((l_k,l_u)) # initialize. requires tuple
norm = self.np.linalg.norm
# import ipdb; ipdb.set_trace()
# this nested forloop should be improved if possible
for ik in range(len(K)):
for ju in range(len(U)):
D[ik,ju] = norm(K[ik,:] - U[ju,:])
# at this point, have list of cone locations
addedCounter=0
for jcol in range(D.shape[1]): # for all columns in D
if( min(D[:,jcol]) > self.thresh ):
# distance from other cones is greater than allowable range. add to stack
addedCounter=addedCounter+1
self._cones = self.np.row_stack((self._cones,U[jcol,:])) # can be improved
# print 'able to add this many new items',addedCounter
return addedCounter
def eval_localcones(self,newconesLocal,carPose):
''' given a set of local cone data, need to evaluate whether the cones
are part of global set. this method combines two other methods:
convert new cone data to global coordinates, then run
eval_globalcones and return result here.
newconesLocal = numpy 2D array of (x,y) local cone data, column dominant
carPose = numpy 2D array of (x,y,angle) car data
(return) = how many cones have been accepted into array, scalar integer
'''
newconesGlobal = self.localToGlobal(newconesLocal,carPose)
return self.eval_globalcones(newconesGlobal)
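# Illustrative sketch (not part of the original script): feed the same cone
# twice with a small offset; the second evaluation falls inside the threshold
# and is rejected as an already-seen cone. The coordinates are made up.
def _example_conelist():
    import numpy as np
    cl = ConeList(threshold=0.5)
    pose = np.array([0.0, 0.0, 0.0])
    added_first = cl.eval_localcones(np.array([[1.0, 2.0]]), pose)    # 1 new cone accepted
    added_again = cl.eval_localcones(np.array([[1.1, 2.05]]), pose)   # 0, within threshold
    return added_first, added_again, cl.getlist()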
# debugging region end =========================================================
# at this point, have an initial method in order to find all cones. next,
# build here the listener for the odometry data.
# KJGNOTE: will instead develop listener in separate file, then integrate
# component testing ============================================================
import numpy as lala
conesLocal=lala.array([
[-2,1],[-2,3],[-2,5],[-2,7],[-2,9],[-2,11],
[2,1],[2,3],[2,5],[2,7],[2,9],[2,11] ])
carPose = lala.array([3,4,lala.radians(0)])
carGlobal = carPose[0:2]
carLocal = lala.array([0,0])
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import leibniz as lbnz
from leibniz.core3d.vec3 import box
from leibniz.core3d.gridsys.regular3 import RegularGrid
class TestFrame(unittest.TestCase):
def setUp(self):
lbnz.bind(RegularGrid(
basis='lng,lat,alt',
W=51, L=51, H=51,
east=119.0, west=114.0,
north=42.3, south=37.3,
upper=16000.0, lower=0.0
))
lbnz.use('thetaphir')
lbnz.use('xyz')
lbnz.use('x,y,z')
def tearDown(self):
lbnz.clear()
def test_basis_ortho(self):
phx, phy, phz = lbnz.thetaphir.phi
phx = phx.cpu().numpy()
phy = phy.cpu().numpy()
phz = phz.cpu().numpy()
        self.assertAlmostEqual(1, np.max(phx * phx + phy * phy + phz * phz))
import numpy as np
import pandas as pd
import copy
import re
from collections import defaultdict
from matplotlib import pyplot as plt
import sklearn
from sklearn.cluster import AgglomerativeClustering, DBSCAN, OPTICS
from sklearn.metrics.pairwise import euclidean_distances
from eval_functions import strdistance, _iou_score
from utils import flatten
def merge_strset(strings):
sets = [set(s.split(" ")) for s in strings]
unionset = set().union(*sets)
return " ".join(list(unionset))
class TaggedString():
def __init__(self, string, tag=None):
self.string = string
self.tag = tag
self.numdims = 1
def intersects(self, other):
s1 = set(self.string.split(" "))
s2 = set(other.string.split(" "))
return len(s1.intersection(s2)) > 0
def __getitem__(self, item):
return self.string[item]
def __lt__(self, other):
return self.string < other.string
def __eq__(self, other):
if self.intersects(other):
setstring = merge_strset([self.string, other.string])
self.string = setstring
other.string = setstring
return True
else:
return False
def __hash__(self):
return 1
def __repr__(self):
return self.string
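# Illustrative sketch (not part of the original module): TaggedString equality
# has a side effect -- overlapping token sets are merged in place on both
# operands, which is what lets set()/sorted() collapse them downstream.
def _example_tagged_string():
    a = TaggedString("the quick fox")
    b = TaggedString("quick brown fox")
    merged = (a == b)        # True: the token sets overlap
    return merged, a.string  # both strings now hold the union of the tokens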
class VectorRange():
def __init__(self, start_vector, end_vector, tag=None):
assert len(start_vector) == len(end_vector)
self.numdims = len(start_vector)
for i in range(len(start_vector)):
if start_vector[i] > end_vector[i]:
print(F"ERROR!!! start {start_vector} greater than end {end_vector}")
tmp = start_vector[i]
start_vector[i] = end_vector[i]
end_vector[i] = tmp
self.start_vector = start_vector
self.end_vector = end_vector
self.tag = tag
def intersects(self, other):
for i in range(self.numdims):
if self.start_vector[i] > other.end_vector[i] or self.end_vector[i] < other.start_vector[i]:
return False
return True
def centroid(self):
return (np.array(self.start_vector) + np.array(self.end_vector)) / 2
def __getitem__(self, item):
return self.start_vector[item]
def __lt__(self, other):
return self.start_vector[0] < other.start_vector[0]
def __eq__(self, other):
assert self.numdims == other.numdims
if self.intersects(other):
for i in range(self.numdims):
self.start_vector[i] = min(self.start_vector[i], other.start_vector[i])
self.end_vector[i] = max(self.end_vector[i], other.end_vector[i])
other.start_vector[i] = self.start_vector[i]
other.end_vector[i] = self.end_vector[i]
return True
else:
return False
def __hash__(self):
return 1
def __repr__(self):
return str((list(self.start_vector), list(self.end_vector)))
class SeqRange(VectorRange):
def __init__(self, startend, tag=None):
super().__init__([startend[0]], [startend[1]], tag)
self.startend = startend
def __getitem__(self, item):
return self.startend[item]
def vr_from_string(string):
''' string like "([425.0, 893.0], [458.0, 941.0])" '''
start, end = re.findall('\[.*?\]', string)
start = list(map(float, start[1:-1].split(",")))
end = list(map(float, end[1:-1].split(",")))
return VectorRange(start, end)
def unionize_vectorrange_sequence(vectorranges, **kwargs):
vectorranges = copy.deepcopy(vectorranges)
for dim in range(vectorranges[0].numdims):
sortedvectorranges = sorted(vectorranges, key=lambda x:x[dim])
vectorranges = sorted(list(set(sortedvectorranges)))
return vectorranges
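# Illustrative sketch (not part of the original module): two overlapping boxes
# are absorbed into one union box, while the disjoint third box survives.
# The coordinates are made up.
def _example_unionize_vectorranges():
    boxes = [VectorRange([0.0, 0.0], [2.0, 2.0]),
             VectorRange([1.0, 1.0], [3.0, 3.0]),
             VectorRange([10.0, 10.0], [11.0, 11.0])]
    return unionize_vectorrange_sequence(boxes)   # two VectorRanges remain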
def create_oracle_decomp_fn(gold_dict):
def oracle_decomp(minilabels, item_id, dist_fn=None, plot_fn=None, **kwargs):
if len(minilabels) < 2:
return [np.array([0])]
gold_labels = gold_dict.get(item_id)
# lat_obj_minilabels = {}
# for minilabel in minilabels:
# lat_obj = np.argmin([dist_fn(gold, minilabel) for gold in gold_labels])
# lat_obj_minilabels.setdefault(lat_obj, []).append(minilabel)
minilabel_clusters = [np.argmin([dist_fn([gold], [minilabel]) for gold in gold_labels]) for minilabel in minilabels]
clusterdict = defaultdict(list)
for i, cluster in enumerate(minilabel_clusters):
clusterdict[cluster].append(i)
result = [np.array(indices) for indices in clusterdict.values()]
return result
return oracle_decomp
def cluster_decomp(minilabels, dist_fn=None, n_clusters=None, plot_fn=None, **kwargs):
'''
Return a list of length #clusters, where each element is a ndarray holding indices
of minilabels corresponding to that cluster
'''
if len(minilabels) < 2:
return [np.array([0])]
# return minilabels if dist_fn is None else [np.array([0])]
n_clusters = min(n_clusters, len(minilabels)) if n_clusters is not None else None
if dist_fn is None:
centroids = [vr.centroid() for vr in minilabels]
dists = euclidean_distances(centroids)
mean_dist = np.std(dists)
clustering = AgglomerativeClustering(n_clusters=n_clusters,
distance_threshold=mean_dist if n_clusters is None else None)
clustering.fit(centroids)
else:
dists = np.array([[dist_fn([a], [b]) for a in minilabels] for b in minilabels])
mean_dist = np.mean(np.median(dists, axis=0))
clustering = AgglomerativeClustering(n_clusters=n_clusters,
distance_threshold=mean_dist if n_clusters is None else None,
affinity="precomputed",
linkage="average") # single
clustering.fit(dists)
minilabel_clusters = clustering.labels_
clusterdict = defaultdict(list)
for i, cluster in enumerate(minilabel_clusters):
clusterdict[cluster].append(i)
result = [np.array(indices) for indices in clusterdict.values()]
if plot_fn is not None:
colors = ["r", "b", "g", "y", "m", "c", "k"]
for i, vr in enumerate(minilabels):
plot_fn(vr, color=colors[minilabel_clusters[i] % len(colors)], alpha=0.5, text=minilabel_clusters[i])
plt.gca().invert_yaxis()
plt.show()
return result
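# Illustrative sketch (not part of the original module): three boxes, two of
# which nearly coincide, grouped into two clusters by their centroids.
# The coordinates are made up; cluster ordering may vary.
def _example_cluster_decomp():
    boxes = [VectorRange([0.0, 0.0], [1.0, 1.0]),
             VectorRange([0.2, 0.1], [1.1, 1.2]),
             VectorRange([5.0, 5.0], [6.0, 6.0])]
    return cluster_decomp(boxes, n_clusters=2)    # e.g. [array([0, 1]), array([2])]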
def fragment_by_overlaps(experiment, use_oracle=False, decomp_fn=unionize_vectorrange_sequence, dist_fn=None):
return _fragment_by_overlaps(experiment.annodf,
experiment.uid_colname,
experiment.item_colname,
experiment.label_colname,
decomp_fn,
dist_fn,
experiment.golddict if use_oracle else None)
def _fragment_by_overlaps(annodf, uid_colname, item_colname, label_colname, decomp_fn, dist_fn=None, oracle_golddict=None):
resultdfs = []
for item_id in annodf[item_colname].unique():
idf = annodf[annodf[item_colname] == item_id]
vectorranges = [vr for annotation in idf[label_colname].values for vr in annotation]
if oracle_golddict is not None:
or_dist_fn = dist_center
use_strings = False
if hasattr(vectorranges[0], "string"):
or_dist_fn = lambda x,y: strdistance(x.string, y.string)
use_strings = True
regions = []
try:
gold_vrs = [vr for vr in oracle_golddict.get(item_id)]
orbuckets = {}
for vr in vectorranges:
vr_orbucket = np.argmin([or_dist_fn(gold_vr, vr) for gold_vr in gold_vrs])
orbuckets.setdefault(vr_orbucket, []).append(vr)
for orbucket in orbuckets.values():
if use_strings:
setstring = merge_strset([vr.string for vr in orbucket])
regions.append(TaggedString(setstring))
else:
minstart = np.min([vr.start_vector for vr in orbucket], axis=0)
maxend = np.max([vr.end_vector for vr in orbucket], axis=0)
regions.append(VectorRange(minstart, maxend))
except Exception as e:
print(e)
pass
else:
regions = decomp_fn(vectorranges, dist_fn=dist_fn)
origItemID = []
newItemID = []
newItemVR = []
uid = []
label = []
gold = []
for region in regions:
for i, row in idf.iterrows():
origItemID.append(item_id)
newItemID.append(F"{item_id}-{region}")
newItemVR.append(region)
uid.append(row[uid_colname])
label.append([vr for vr in row[label_colname] if region.intersects(vr)])
gold.append(None)
resultdfs.append(pd.DataFrame({"origItemID":origItemID, "newItemID":newItemID, "newItemVR":newItemVR, uid_colname:uid, label_colname:label, "gold":gold}))
return pd.concat(resultdfs)
def decomposition(experiment, decomp_fn, plot_fn=None):
resultdfs = []
annodf = experiment.annodf
uid_colname = experiment.uid_colname
item_colname = experiment.item_colname
label_colname = experiment.label_colname
dist_fn = experiment.distance_fn
resultdfs = []
for item_id in annodf[item_colname].unique():
idf = annodf[annodf[item_colname] == item_id]
uids = []
labels = []
num_latent = []
for _, row in idf[[uid_colname, label_colname]].iterrows():
uid = row[uid_colname]
uid_labels = row[label_colname]
num_latent.append(len(uid_labels))
for label in uid_labels:
uids.append(uid)
labels.append(label)
est_num_latent = np.max(num_latent)
# est_num_latent = int(np.ceil(np.max(num_latent) + 2 * np.std(num_latent)))
# print(item_id, ">>>", len(experiment.golddict.get(item_id)), est_num_latent)
# est_num_latent = int(np.ceil(np.median(sorted(num_latent)[1:-1])) if len(num_latent) > 2 else np.max(num_latent))
origItemID = []
newItemID = []
newItemVR = []
region_label_indices_list = decomp_fn(labels, dist_fn=dist_fn, n_clusters=est_num_latent, plot_fn=plot_fn, item_id=item_id)
uid_set = set(uids)
for region_i, region_label_indices in enumerate(region_label_indices_list):
region_uids = list(np.array(uids)[region_label_indices])
region_labels = [[l] for l in np.array(labels)[region_label_indices]]
remaining_uids = list(uid_set - set(region_uids))
remaining_labels = [[]] * len(remaining_uids)
region_uids += remaining_uids
region_labels += remaining_labels
dfdict = {
uid_colname: region_uids,
label_colname: region_labels,
}
df = pd.DataFrame(dfdict)
df = df.groupby(uid_colname).agg(flatten).reset_index()
df["newItemID"] = F"{item_id}-{region_i}"
df["origItemID"] = item_id
resultdfs.append(df)
return pd.concat(resultdfs)
def dist_center(vr1, vr2):
    vr1c = (np.array(vr1.start_vector) + np.array(vr1.end_vector)) / 2
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 3 15:55:04 2019
@author: bruce
"""
import pandas as pd
import numpy as np
from scipy import fftpack
from scipy import signal
import matplotlib.pyplot as plt
import os
# set saving path
path_result_freq = "/home/bruce/Dropbox/Project/5.Result/5.Result_Nov/2.freq_domain/"
def correlation_matrix(corr_mx, cm_title):
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(corr_mx, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
#plt.title('cross correlation of test and retest')
ylabels = ['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels = ['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# Add colorbar, make sure to specify tick locations to match desired ticklabels
#fig.colorbar(cax, ticks=[.75,.8,.85,.90,.95,1])
plt.show()
def correlation_matrix_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
# need to convert it into a array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='binary')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
# fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
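# Illustrative sketch (not part of the original script): the plotting helpers
# above expect a 22 x 22 test/retest correlation matrix; random values stand in
# here just to show the expected input shape.
def example_correlation_matrix_01():
    demo = pd.DataFrame(np.random.rand(22, 22))
    correlation_matrix_01(demo, 'demo: random 22x22 matrix')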
def correlation_matrix_min_01_comb(corr_mx1 ,corr_mx2, cm_title1, cm_title2):
# find the minimum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first
# otherwise it is not working
temp = np.asarray(corr_mx1)
output1 = (temp == temp.min(axis=1)[:,None]) # along rows
temp = np.asarray(corr_mx2)
output2 = (temp == temp.min(axis=1)[:,None]) # along rows
fig, (ax1, ax2) = plt.subplots(1, 2)
# figure 1
im1 = ax1.matshow(output1, cmap='binary')
#fig.colorbar(im1, ax1)
ax1.grid(False)
ax1.set_title(cm_title1)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
# figure 2
im2 = ax2.matshow(output2, cmap='binary')
#fig.colorbar(im2, ax2)
ax2.grid(False)
ax2.set_title(cm_title2)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax2.set_xticks(np.arange(len(xlabels)))
ax2.set_yticks(np.arange(len(ylabels)))
ax2.set_xticklabels(xlabels,fontsize=6)
ax2.set_yticklabels(ylabels,fontsize=6)
plt.show()
def correlation_matrix_tt_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first
# otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
#cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
#cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
xlabels=['T1','T2','T3','T4','T6','T7','T8','T9', 'T11', 'T12', 'T13', 'T14',
'T15', 'T16', 'T17', 'T18', 'T19', 'T20', 'T21', 'T22', 'T23', 'T25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels, fontsize=6)
ax1.set_yticklabels(ylabels, fontsize=6)
plt.show()
def correlation_matrix_rr_01(corr_mx, cm_title):
# find the maximum in each row
# input corr_mx is a dataframe
    # need to convert it into an array first
    # otherwise it is not working
temp = np.asarray(corr_mx)
output = (temp == temp.max(axis=1)[:,None]) # along rows
fig = plt.figure()
ax1 = fig.add_subplot(111)
# cmap = cm.get_cmap('jet', 30)
cax = ax1.matshow(output, cmap='gray')
# cax = ax1.imshow(df.corr(), interpolation="nearest", cmap=cmap)
fig.colorbar(cax)
ax1.grid(False)
plt.title(cm_title)
ylabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
xlabels=['R1','R2','R3','R4','R6','R7','R8','R9', 'R11', 'R12', 'R13', 'R14',
'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R25']
ax1.set_xticks(np.arange(len(xlabels)))
ax1.set_yticks(np.arange(len(ylabels)))
ax1.set_xticklabels(xlabels,fontsize=6)
ax1.set_yticklabels(ylabels,fontsize=6)
plt.show()
# eg: plot_mag_db(df_as_85_vsc, 1, "Subject")
def fig_mag_db(signal_in, subject_number = 'subject_number', title = 'title', filename = 'filename'):
plt.figure()
plt.subplot(2,1,1)
plt.plot(signal_in.iloc[2*(subject_number-1), :48030], '-')
plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030], '-')
plt.ylabel('magnitude')
plt.legend(('Retest', 'Test'), loc='upper right')
plt.title(title)
# plt.subplot(2,1,2)
# plt.plot(signal_in.iloc[2*(subject_number-1), :48030].apply(f_dB), '-')
# plt.plot(signal_in.iloc[2*(subject_number-1)+1, :48030].apply(f_dB), '-')
# plt.xlabel('Frequency(Hz)')
# plt.ylabel('dB')
# plt.xlim(0,10000)
# plt.legend(('Retest', 'Test'), loc='lower right')
plt.show()
plt.savefig(filename)
# plot time domain signal in one figure
def fig_time_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 100, 0.09765625)
plt.plot(x_label, signal_in.iloc[2*i, :1024], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :1024], '-')
plt.ylabel(sub_title[i])
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Time (ms)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
# plot frequency domain signal in one figure
def fig_mag_in_1(signal_in, title = 'title'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1300)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
if i < 20:
plt.xticks([])
else:
plt.xlabel('Frequency(Hz)')
plt.suptitle(title) # add a centered title to the figure
plt.show()
def fig_test_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def fig_retest_in_1(signal_in_1, signal_in_2, title = 'title', path = 'path', filename = 'filename'):
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
plt.subplot(11,2,i+1)
x_label = np.arange(0, 4803, 0.1)
plt.plot(x_label, signal_in_1.iloc[2*i+1, :48030], '-')
plt.plot(x_label, signal_in_2.iloc[2*i+1, :48030], '-')
plt.ylabel(sub_title[i])
plt.xlim(0,1000)
plt.legend(('no window', 'window'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
plt.savefig(os.path.join(path, filename), dpi=300)
def distance_mx(sig_in):
# freq_range -> from 0 to ???
freq_range = 13000
matrix_temp = np.zeros((22, 22))
matrix_temp_square = np.zeros((22, 22))
for i in range(22):
for j in range(22):
temp = np.asarray(sig_in.iloc[2*i, 0:freq_range] - sig_in.iloc[2*j+1, 0:freq_range])
temp_sum = 0
temp_square_sum = 0
for k in range(freq_range):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp[k])
temp_square_sum = temp_square_sum + (abs(temp[k]))**2
matrix_temp[i][j] = temp_sum
matrix_temp_square[i][j] = temp_square_sum
output_1 = pd.DataFrame(matrix_temp)
output_2 = pd.DataFrame(matrix_temp_square)
    # output_1 sums the element-wise magnitudes, eg. x1+jy1 -> sqrt(x1**2 + y1**2)
    # output_2 sums the squared magnitudes (squared Euclidean distance), eg. x1+jy1 -> x1**2 + y1**2
return output_1, output_2
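# Note (vectorised sketch of one (i, j) entry, assuming the same interleaved
# test/retest row layout as above):
# diff = np.asarray(sig_in.iloc[2*i, 0:freq_range] - sig_in.iloc[2*j+1, 0:freq_range])
# np.sum(np.abs(diff))       # -> matrix_temp[i][j]        (sum of magnitudes)
# np.sum(np.abs(diff)**2)    # -> matrix_temp_square[i][j] (sum of squared magnitudes)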
def complex_coherence_mx(input_signal):
# compute the magnitude squared coherence based on signal.coherence
# then create the matrix with values
# higher value -> better coherence value
sig_in = input_signal.copy()
matrix_temp = np.zeros((22, 22))
for i in range(22):
for j in range(22):
            # temp_sum accumulates the coherence between test row 2*i and retest row 2*j+1
temp_sum = 0
sig_in_1 = np.array(sig_in.iloc[2*i, :])
sig_in_2 = np.array(sig_in.iloc[2*j+1, :])
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=96)
            # zero out coherence values lower than 0.1
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < 0.1:
temp_Cxy[l] = 0
# delete finish
# test
'''
if i ==0 and j == 0:
plt.figure()
plt.semilogy(f, temp_Cxy)
plt.title("test in complex_coherence_mx")
plt.show()
'''
# test finish
for k in range(len(temp_Cxy)):
#test_t3 = (abs(temp_series[k]))**2
#print(test_t3)
temp_sum = temp_sum + abs(temp_Cxy[k])
matrix_temp[i][j] = temp_sum
output_3 = pd.DataFrame(matrix_temp)
return output_3
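# eg (hypothetical DataFrame `df_pairs` whose rows alternate test/retest recordings,
# numeric samples only): coherence_mx = complex_coherence_mx(df_pairs)
# Note on nperseg: at fs = 9606 Hz, nperseg = 96 samples corresponds to ~10 ms
# Welch segments in signal.coherence; longer segments give finer frequency bins
# at the cost of fewer averages.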
def fig_coherence_in_1(signal_in, threshold_Cxy = None, title = 'title', title2 = 'title2'):
# threshold_Cxy is used for setting minimum value
Cxy_sum = pd.DataFrame()
plt.figure()
sub_title = ['1', '2', '3', '4', '6', '7', '8', '9', '11', '12',\
'13', '14', '15', '16', '17', '18', '19', '20', '21',\
'22', '23', '25']
for i in range(22):
sig_in_1 = signal_in.iloc[i, :]
sig_in_2 = signal_in.iloc[i+22, :]
# signal 9606Hz length 106.6ms window length 10ms -> nperseg=96
# no zero padding
# f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs=9606, nperseg=128)
# with zero padding
f, temp_Cxy = signal.coherence(sig_in_1, sig_in_2, fs = 9606, nperseg=512, nfft=19210)
# print("shape of temp_Cxy is")
# print (temp_Cxy.shape)
        # zero out coherence values below threshold_Cxy (if given)
if (threshold_Cxy != None):
for l in range(len(temp_Cxy)):
if temp_Cxy[l] < threshold_Cxy:
temp_Cxy[l] = 0
# delete finish
Cxy_sum = Cxy_sum.append(pd.DataFrame(np.reshape(temp_Cxy, (1,9606))), ignore_index=True)
plt.subplot(11,2,i+1)
plt.plot(f, temp_Cxy)
plt.ylabel(sub_title[i])
plt.xlim(0,2000)
plt.legend(('Retest', 'Test'), loc='upper right', fontsize='xx-small')
plt.suptitle(title) # add a centered title to the figure
plt.show()
    # plot average of 22 subjects
plt.figure()
plt.subplot(1,1,1)
Cxy_avg = Cxy_sum.mean(axis=0)
plt.plot(f, Cxy_avg)
plt.title('average of 22 subjects based on '+ title2)
plt.xlim(0,2000)
plt.show()
#################################
f_dB = lambda x : 20 * np.log10(np.abs(x))
# import the pkl file
# for linux
df_FFR=pd.read_pickle('/home/bruce/Dropbox/Project/4.Code for Linux/df_FFR.pkl')
# for mac
# df_FFR=pd.read_pickle('/Users/bruce/Dropbox/Project/4.Code for Linux/df_FFR.pkl')
# remove DC offset
df_FFR_detrend = pd.DataFrame()
for i in range(1408):
# combine next two rows later
df_FFR_detrend_data_t = pd.DataFrame(signal.detrend(df_FFR.iloc[i: i+1, 0:1024], type='constant').reshape(1,1024))
df_FFR_label_t = pd.DataFrame(df_FFR.iloc[i, 1024:1031].values.reshape(1,7))
df_FFR_detrend = df_FFR_detrend.append(pd.concat([df_FFR_detrend_data_t, df_FFR_label_t], axis=1, ignore_index=True))
# set the title of columns
df_FFR_detrend.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
df_FFR_detrend = df_FFR_detrend.reset_index(drop=True)
df_FFR = df_FFR_detrend
# Time domain
# Define window function
win_kaiser = signal.kaiser(1024, beta=14)
win_hamming = signal.hamming(1024)
# average the df_FFR
df_FFR_avg = pd.DataFrame()
df_FFR_avg_win = pd.DataFrame()
# average test1 and test2
for i in range(704):
# combine next two rows later
df_FFR_avg_t = pd.DataFrame(df_FFR.iloc[2*i: 2*i+2, 0:1024].mean(axis=0).values.reshape(1,1024)) # average those two rows
# implement the window function
df_FFR_avg_t_win = pd.DataFrame((df_FFR_avg_t.iloc[0,:] * win_hamming).values.reshape(1,1024))
df_FFR_label = pd.DataFrame(df_FFR.iloc[2*i, 1024:1031].values.reshape(1,7))
df_FFR_avg = df_FFR_avg.append(pd.concat([df_FFR_avg_t, df_FFR_label], axis=1, ignore_index=True))
df_FFR_avg_win = df_FFR_avg_win.append(pd.concat([df_FFR_avg_t_win, df_FFR_label], axis=1, ignore_index=True))
# set the title of columns
df_FFR_avg.columns = np.append(np.arange(1024), ["Subject", "Sex", "Condition", "Vowel", "Sound Level", "Num", "EFR/FFR"])
# -*- coding: utf-8 -*-
from scipy.integrate import solve_ivp
import matplotlib
"""in case it's not working uncomment this: matplotlib.use('TkAgg') """
import matplotlib.pyplot as plt
import numpy as np
from numpy.linalg import inv
from matplotlib import colors as mcolors
import paras_dorsoventral as dors
import paras_rostrocaudal as ros
import testround_difftest_set as r #for sparse matrix stuff
#import testround_difftest_backup as r
import stencil_import as tubemodel
import os
import plot_saved_v
d=10.0
dx=20
dt=0.1#10
maxtime = 10000 #TIME IN SECONDS!!!
"""
CHECKLIST
maxtime
dx according to model (10 ori, 20 0.5, 40 0.25 etc)
stencil_import paths
stencils in folder
plotsaved path
Wnt0, Shh0
delta_Wnt, delta_Shh
plotting colourmax here and in plotsaved
how often save?
spheresize according to model
"""
xlen =tubemodel.xmax
ylen =tubemodel.ymax
zlen = tubemodel.zmax
print(xlen,ylen,zlen)
spheresize = r.spheresize
D_Wnt = 150.7
D_Shh = 133.4
delta_Wnt = 0.04
delta_Shh = 0.1
Wnt0 = tubemodel.Wnt0
Shh0 = tubemodel.Shh0
#import the stencils for tubemodel, WNTsecretion and SHHsecretion points
stenc = tubemodel.stenc
WNTstenc= tubemodel.Wstenc
SHHstenc= tubemodel.Sstenc
#plotting colourmax
rosmax = tubemodel.Wnt0#5#0.0
dorsmax = tubemodel.Shh0#5#0.0
unknownbase=5.0
class Grid:
def __init__(self,xdim,ydim,zdim, Name, seeds,Alpha,Baselevel):
self.grid = np.zeros((xdim,ydim,zdim))
self.totalsites = np.sum(stenc.grid)
self.name = Name
self.xlen=xdim
self.ylen=ydim
self.zlen=zdim
self.baselevel=Baselevel
self.plantrandomseed(seeds)
self.alpha=Alpha
if Name =="Wnt":
self.Amatr = A_Wnt
self.b = b_Wnt
self.delta = delta_Wnt
print("deltawnt:",self.delta)
if Name =="Shh":
self.Amatr = A_Shh
self.b = b_Shh
self.delta = delta_Shh
def show(self,ax):
plotgrid(self,ax)
def plantseed(self,coordinates):
for xyz in coordinates:
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[y][x][z] = self.baselevel
def artificialseed(self,coordinates,level):
for i in range(len(coordinates)):
xyz = coordinates[i]
x= xyz[0]
y = xyz[1]
z=xyz[2]
self.grid[x][y][z] = level[i]*self.baselevel
def plantrandomseed(self, seeds):
n = seeds
M = self.totalsites
coords = np.transpose(np.where(stenc.grid))
for c in coords:
randomnr = np.random.uniform()
if randomnr < n/M:
self.grid[c[0]][c[1]][c[2]] = self.baselevel#*np.random.uniform()
n-=1
M-=1
def diffusion(self,n):
for i in range(n):
deltaU,b = laplacian(self,self.Amatr,self.b)
old = self.grid
self.grid =old + dt*self.alpha*(deltaU +b)
def degradation(self,n):
for i in range(n):
old = self.grid
#print("degrmax",np.max(self.delta * self.grid *dt))
self.grid = old - self.delta * old *dt
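# Sketch of the explicit (forward Euler) updates implemented by Grid.diffusion and
# Grid.degradation, written out for a single scalar value (illustrative only):
# u_new = u_old + dt * alpha * (laplacian_u + b)   # diffusion step, dt and alpha as above
# u_new = u_old - delta * u_old * dt               # first-order degradation step
# e.g. with u_old = 1.0, delta = 0.04 and dt = 0.1 one degradation step gives
# u_new = 1.0 - 0.04 * 1.0 * 0.1 = 0.996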
def rostrocaudal_reaction(rate,FB,MB,HB,Wnt):
for i in range(rate):
fb= (FB.grid).copy()
mb= (MB.grid).copy()
hb= (HB.grid).copy()
gsk3= (GSK3.grid).copy() # Wnt modulates gsk3
wnt= (Wnt.grid).copy()
u = (U.grid).copy()
FB.grid = fb + dt*( ros.c1*(gsk3**ros.n1)/(1+ ros.c1*(gsk3**ros.n1)+ ros.c2*(mb**ros.n2)+ ros.c3*(hb**ros.n3)) -ros.d1*fb )
MB.grid = mb + dt*(ros.c4*(mb**ros.n4)/(1+ ros.c4*(mb**ros.n4)+ ros.c5*(fb**ros.n5)+ ros.c6*(hb**ros.n6)+ ros.c7*(gsk3**ros.n7)) -ros.d2*mb)
HB.grid = hb + dt*( ros.c8*(hb**ros.n8)/(1 + ros.c8*(hb**ros.n8) + ros.c9*(fb**ros.n9) + ros.c10*(mb**ros.n10)+ ros.c11*(gsk3**ros.n11)) -ros.d3*hb )
GSK3.grid = gsk3 + dt*(ros.c12*(gsk3**ros.n12)/(1 + ros.c12*(gsk3**ros.n12)+ ros.c13*(u**ros.n13) ) -ros.d4*gsk3 )
U.grid = u + dt*((ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15))/( 1+ ros.c14*(wnt**ros.n14) + ros.c15*(u**ros.n15) + ros.c16*(u**ros.n16)) - ros.d5*u)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
FB.grid[c[0]][c[1]][c[2]] = 0
MB.grid[c[0]][c[1]][c[2]] = 0
HB.grid[c[0]][c[1]][c[2]] = 0
GSK3.grid[c[0]][c[1]][c[2]] = 0
def dorsoventral_reaction(rate,P,O,N,G,S,W):
for i in range(rate):
p= (P.grid).copy()
o= (O.grid).copy()
n= (N.grid).copy()
g= (G.grid).copy()
s= (S.grid).copy()
w= (W.grid).copy()
P.grid = p + dt*( dors.alpha / (1.0 + (n/dors.NcritP)**dors.h1 + (o/dors.OcritP)**dors.h2 ) - dors.k1*p )
O.grid = o + dt*(( (dors.beta*g) / (1.0+g) ) * ( 1.0/(1.0+(n/dors.NcritO)**dors.h3) ) - dors.k2*o)
N.grid = n + dt*( (dors.gamma*g/(1.0+g)) * (1.0/(1.0+ (o/dors.OcritN)**dors.h4 + (p/dors.PcritN)**dors.h5 )) - dors.k3*n)
G.grid = g + dt*(((dors.delta*s)/(1.0+s)) * (1.0/(1.0+ (w/dors.WcritG)**dors.h6 )) - dors.k4*g)
antistenc = np.ones_like(stenc.grid) - stenc.grid
for c in np.transpose(np.where(antistenc)):
P.grid[c[0]][c[1]][c[2]] = 0
O.grid[c[0]][c[1]][c[2]] = 0
N.grid[c[0]][c[1]][c[2]] = 0
G.grid[c[0]][c[1]][c[2]] = 0
def alldiffuse(rate,Wnt,Shh):
for i in range(rate):
Wnt.diffusion(1)
Shh.diffusion(1)
def alldegrade(rate,Wnt,Shh):
for i in range(rate):
Wnt.degradation(1)
Shh.degradation(1)
def plotgrid(grid,ax,r=0.47,g=0.0,b=1.0):
if np.all(grid.grid ==0):
return
print("minmax",np.min(grid.grid),np.max(grid.grid))
    # the alpha channel scales with the local concentration relative to the plotting maximum (unknownbase)
    colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/unknownbase], keep_alpha=True) for z in x] for x in y] for y in grid.grid])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(grid.grid))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=grid.baselevel,depthshade=False,s=spheresize )
def plotarray(array,ax,maximum,r=0.47,g=0.0,b=1.0):
if np.all(array ==0):
return
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,z/maximum ], keep_alpha=True) for z in x] for x in y] for y in array])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(array))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=maximum,depthshade=False,s=spheresize )
def plotarray_fixed_alpha(array,ax,maximum,alpha=0.3,r=0.47,g=0.0,b=1.0):
if np.all(array ==0):
return
colorgrid=np.asarray([[[matplotlib.colors.to_hex([ r, g, b,alpha ], keep_alpha=True) for z in x] for x in y] for y in array])
fc = (colorgrid).flatten()
gridindices = np.where(np.ones_like(array))
ax.scatter(gridindices[0],gridindices[1],gridindices[2],marker = 'o',c=fc,linewidth=0,vmin=0,vmax=maximum,depthshade=False,s=spheresize )
def secretion(rate,Wnt,Shh):
for i in range(rate):
Shh.artificialseed(SHHstenc.secretion_coords,SHHstenc.secretion_levels)
Wnt.artificialseed(WNTstenc.secretion_coords,WNTstenc.secretion_levels)
def run(maxt, savedirectory, save=True):
for ax in [axWnt,axShh,axRos,axDors]:
ax.clear()
axRos.set_title("Rostrocaudal network (Max)")
axDors.set_title("Dorsoventral network (Balaskas)")
axWnt.set_title("Wnt")
axShh.set_title("Shh ")
if save == True:
sd=savedirectory
wntdir = sd + '/Wnt'
shhdir = sd + '/Shh'
rostrodir = sd + '/rostro'
dorsodir = sd + '/dorso'
os.mkdir(wntdir)
os.mkdir(shhdir)
os.mkdir(rostrodir)
os.mkdir(dorsodir)
os.mkdir(wntdir + '/pictures')
os.mkdir(shhdir + '/pictures')
os.mkdir(rostrodir + '/pictures')
os.mkdir(dorsodir + '/pictures')
else:
print('NOT SAVING')
steps = int((maxt/dt +dt))
print("steps:",steps)
for step in range(steps):
if save == True:
if step in np.arange(0,3000,200) or step in np.arange(0,120000,20000) or step in np.arange(0,10000,1000): #step %1000 == 0 or step# and time % 100 == 0) or (save == True and time in np.arange(0,16,1)):
time = step*dt
save_networks(savedirectory,time,FB,MB,HB,P,O,N,G,Wnt,Shh)
print("Saved time %f"% time)
print("step",step,"/",steps)
dorsoventral_reaction(1,P,O,N,G,Shh,Wnt)
rostrocaudal_reaction(1,FB,MB,HB,Wnt)
alldiffuse(1,Wnt,Shh)
secretion(1,Wnt,Shh)
alldegrade(1,Wnt,Shh)
def sparsedot(A,v):
"""Dot product for sparse matrices"""
w=np.zeros(len(v))
for ija in A:
i=ija[0]
j=ija[1]
a=ija[2]
w[i] += v[j]*a
return w
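# eg (toy triplet list, not a model matrix): sparsedot expects rows of
# (row index i, column index j, value a), i.e. COO-style entries.
# A = [(0, 0, 2.0), (0, 1, 1.0), (1, 1, 3.0)]   # represents [[2., 1.], [0., 3.]]
# v = np.array([1.0, 2.0])
# sparsedot(A, v)                               # -> array([4., 6.])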
def laplacian(gridname,Amatr,b):
v,c = r.grid_to_vector(stenc)
c1,c2,c3 = np.transpose(c)
u=(gridname.grid)[c1,c2,c3]
if len(Amatr) == len(Amatr[0]):
newu= np.dot(Amatr,u)
else:
newu= sparsedot(Amatr,u)
L = r.vector_to_grid(newu,gridname,c)
L[:,:,:] = L[:,:,:]/dx**2
b = r.vector_to_grid(b,gridname,c)
b = b*gridname.baselevel/dx**2
return L,b
def compare(matrices):
dimy = len(matrices[0])
dimx = len(matrices[0][0])
dimz = len(matrices[0][0][0])
show= np.zeros_like(matrices)
for i in range(dimy):
for j in range(dimx):
for k in range(dimz):
comparevalues =[m[i][j][k] for m in matrices]
gene = np.argmax(comparevalues)
                show[gene][i][j][k] = np.max(comparevalues)
    return show
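# eg (illustrative, two 1x1x1 concentration "grids"): compare() is a
# winner-take-all across gene grids, keeping only the maximum value per voxel:
# m1 = np.array([[[0.2]]]); m2 = np.array([[[0.7]]])
# compare([m1, m2])   # -> array([[[[0.]]], [[[0.7]]]]) : gene 2 (index 1) wins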
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Applies necessary calibration to the cubes and corrects NACO biases
@author: lewis, iain
"""
__author__ = '<NAME>, <NAME>'
__all__ = ['raw_dataset', 'find_nearest', 'find_filtered_max']
import pdb
import numpy as np
import pyprind
import os
import random
import matplotlib as mpl
mpl.use('Agg') #show option for plot is unavailable with this option, set specifically to save plots on m3
from matplotlib import pyplot as plt
from numpy import isclose
from vip_hci.fits import open_fits, write_fits
from vip_hci.preproc import frame_crop, cube_crop_frames, frame_shift,\
cube_subtract_sky_pca, cube_correct_nan, cube_fix_badpix_isolated,cube_fix_badpix_clump,\
cube_recenter_2dfit
from vip_hci.var import frame_center, get_annulus_segments, frame_filter_lowpass,\
mask_circle, dist, fit_2dgaussian, frame_filter_highpass, get_circle, get_square
from vip_hci.metrics import detection, normalize_psf
from vip_hci.conf import time_ini, time_fin, timing
from hciplot import plot_frames
from skimage.feature import register_translation
from photutils import CircularAperture, aperture_photometry
from astropy.stats import sigma_clipped_stats
from scipy.optimize import minimize
def find_shadow_list(self, file_list, threshold = 0, verbose = True, debug = False, plot = None):
"""
In coro NACO data there is a lyot stop causing a shadow on the detector
this method will return the radius and central position of the circular shadow
"""
cube = open_fits(self.inpath + file_list[0],verbose=debug)
nz, ny, nx = cube.shape
median_frame = np.median(cube, axis = 0)
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
ycom,xcom = np.unravel_index(np.argmax(median_frame), median_frame.shape) #location of AGPM
if debug:
write_fits(self.outpath + 'shadow_median_frame', median_frame,verbose=debug)
shadow = np.where(median_frame >threshold, 1, 0) #lyot shadow
#create similar shadow centred at the origin
area = sum(sum(shadow))
r = np.sqrt(area/np.pi)
tmp = np.zeros([ny,nx])
tmp = mask_circle(tmp,radius = r, fillwith = 1)
tmp = frame_shift(tmp, ycom - ny/2 ,xcom - nx/2, imlib='opencv') # no vip_fft because the image isn't square
#measure translation
shift_yx, _, _ = register_translation(tmp, shadow,
upsample_factor= 100)
#express as a coordinate
y, x = shift_yx
cy = np.round(ycom-y)
cx = np.round(xcom-x)
if debug:
pdb.set_trace()
if verbose:
print('The centre of the shadow is','cy = ',cy,'cx = ',cx)
if plot == 'show':
plot_frames((median_frame, shadow, tmp),vmax=(np.percentile(median_frame,99.9),1,1),
vmin=(np.percentile(median_frame,0.1),0,0),label=('Median frame','Shadow',''),title='Shadow')
if plot == 'save':
plot_frames((median_frame, shadow, tmp), vmax=(np.percentile(median_frame,99.9),1,1),
vmin=(np.percentile(median_frame,0.1),0,0),label=('Median frame','Shadow',''),title='Shadow',
dpi=300, save = self.outpath + 'shadow_fit.pdf')
return cy, cx, r
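# eg (hypothetical call, mirroring how dark_subtract() uses it further below):
#   _, _, self.shadow_r = find_shadow_list(self, sci_list, verbose=verbose, plot=plot)
# only the fitted radius is kept there, since the AGPM position itself is
# found separately with find_AGPM()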
def find_filtered_max(path, verbose = True, debug = False):
"""
This method will find the location of the max after low pass filtering.
    It gives a rough approximation of the star's location, reliable in unsaturated frames where the star dominates.
Need to supply the path to the cube.
"""
cube = open_fits(path, verbose = debug)
#nz, ny, nx = cube.shape
#cy,cx = frame_center(cube, verbose = verbose) #find central pixel coordinates
# then the position will be that plus the relative shift in y and x
#rel_shift_x = rel_AGPM_pos_xy[0] # 6.5 is pixels from frame center to AGPM in y in an example data set, thus providing the relative shift
#rel_shift_y = rel_AGPM_pos_xy[1] # 50.5 is pixels from frame center to AGPM in x in an example data set, thus providing the relative shift
#y_tmp = cy + rel_shift_y
#x_tmp = cx + rel_shift_x
median_frame = np.median(cube, axis = 0)
# define a square of 100 x 100 with the center being the approximate AGPM/star position
#median_frame,cornery,cornerx = get_square(median_frame, size = size, y = y_tmp, x = x_tmp, position = True, verbose = True)
# apply low pass filter
#filter for the brightest source
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
#obtain location of the bright source
ycom,xcom = np.unravel_index(np.argmax(median_frame), median_frame.shape)
if verbose:
print('The location of the star is','ycom =',ycom,'xcom =', xcom)
if debug:
        pdb.set_trace()
return [ycom, xcom]
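# eg (illustrative path): star_y, star_x = find_filtered_max('/path/to/unsat_cube.fits')
# returns the [y, x] pixel position of the brightest low-pass-filtered source;
# intended for unsaturated, non-coronagraphic frames where the star dominates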
def find_AGPM(path, rel_AGPM_pos_xy = (50.5, 6.5), size = 101, verbose = True, debug = False):
"""
added by Iain to prevent dust grains being picked up as the AGPM
This method will find the location of the AGPM or star (even when sky frames are mixed with science frames), by
using the known relative distance of the AGPM from the frame center in all VLT/NaCO datasets. It then creates a
subset square image around the expected location and applies a low pass filter + max search method and returns
the (y,x) location of the AGPM/star
Parameters
----------
path : str
Path to cube
rel_AGPM_pos_xy : tuple, float
relative location of the AGPM from the frame center in pixels, should be left unchanged. This is used to
calculate how many pixels in x and y the AGPM is from the center and can be applied to almost all datasets
with VLT/NaCO as the AGPM is always in the same approximate position
size : int
pixel dimensions of the square to sample for the AGPM/star (ie size = 100 is 100 x 100 pixels)
verbose : bool
If True extra messages are shown.
debug : bool, False by default
Enters pdb once the location has been found
Returns
----------
[ycom, xcom] : location of AGPM or star
"""
cube = open_fits(path,verbose = debug) # opens first sci/sky cube
cy,cx = frame_center(cube, verbose = verbose) #find central pixel coordinates
# then the position will be that plus the relative shift in y and x
    rel_shift_x = rel_AGPM_pos_xy[0] # 50.5 is pixels from frame center to AGPM in x in an example data set, thus providing the relative shift
    rel_shift_y = rel_AGPM_pos_xy[1] # 6.5 is pixels from frame center to AGPM in y in an example data set, thus providing the relative shift
#the center of the square to apply the low pass filter to - is the approximate position of the AGPM/star based on previous observations
y_tmp = cy + rel_shift_y
x_tmp = cx + rel_shift_x
median_frame = cube[-1]
# define a square of 100 x 100 with the center being the approximate AGPM/star position
median_frame,cornery,cornerx = get_square(median_frame, size = size, y = y_tmp, x = x_tmp, position = True, verbose = True)
# apply low pass filter
median_frame = frame_filter_lowpass(median_frame, median_size = 7, mode = 'median')
median_frame = frame_filter_lowpass(median_frame, mode = 'gauss',fwhm_size = 5)
# find coordinates of max flux in the square
ycom_tmp, xcom_tmp = np.unravel_index(np.argmax(median_frame), median_frame.shape)
# AGPM/star is the bottom-left corner coordinates plus the location of the max in the square
ycom = cornery+ycom_tmp
xcom = cornerx+xcom_tmp
if verbose:
print('The location of the AGPM/star is','ycom =',ycom,'xcom =', xcom)
if debug:
pdb.set_trace()
return [ycom, xcom]
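# eg (as used later in dark_subtract, path illustrative):
#   agpm_pos = find_AGPM(self.inpath + sci_list[0], verbose=verbose, debug=debug)
# returns [y, x]; the search is restricted to a `size` x `size` box around the
# expected AGPM offset, which keeps dust grains from being picked up as the AGPM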
def find_nearest(array, value, output='index', constraint=None):
"""
Function to find the index, and optionally the value, of an array's closest element to a certain value.
Possible outputs: 'index','value','both'
Possible constraints: 'ceil', 'floor', None ("ceil" will return the closest element with a value greater than 'value', "floor" the opposite)
"""
if type(array) is np.ndarray:
pass
elif type(array) is list:
array = np.array(array)
else:
raise ValueError("Input type for array should be np.ndarray or list.")
idx = (np.abs(array-value)).argmin()
    if constraint == 'ceil' and array[idx]-value < 0:
        idx+=1
    elif constraint == 'floor' and value-array[idx] < 0:
idx-=1
if output=='index': return idx
elif output=='value': return array[idx]
else: return array[idx], idx
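# eg (toy array): behaviour of find_nearest for the three output modes
# arr = [0.0, 1.5, 3.0, 4.5]
# find_nearest(arr, 3.2)                  # -> 2 (index of 3.0)
# find_nearest(arr, 3.2, output='value')  # -> 3.0
# find_nearest(arr, 3.2, output='both')   # -> (3.0, 2)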
class raw_dataset:
"""
In order to successfully run the pipeline you must run the methods in following order:
1. dark_subtraction()
2. flat_field_correction()
3. correct_nan()
4. correct_bad_pixels()
5. first_frames_removal()
6. get_stellar_psf()
7. subtract_sky()
This will prevent any undefined variables.
"""
def __init__(self, inpath, outpath, dataset_dict,final_sz = None, coro = True):
self.inpath = inpath
self.outpath = outpath
self.final_sz = final_sz
self.coro = coro
sci_list = []
# get the common size (crop size)
with open(self.inpath+"sci_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list.append(line.split('\n')[0])
nx = open_fits(self.inpath + sci_list[0],verbose = False).shape[2]
self.com_sz = np.array([int(nx - 1)])
write_fits(self.outpath + 'common_sz', self.com_sz, verbose = False)
#the size of the shadow in NACO data should be constant.
#will differ for NACO data where the coronagraph has been adjusted
        self.shadow_r = 280 # shouldn't change for NaCO data
sci_list_mjd = [] # observation time of each sci cube
sky_list_mjd = [] # observation time of each sky cube
with open(self.inpath+"sci_list_mjd.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list_mjd.append(float(line.split('\n')[0]))
with open(self.inpath+"sky_list_mjd.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sky_list_mjd.append(float(line.split('\n')[0]))
self.sci_list_mjd = sci_list_mjd
self.sky_list_mjd = sky_list_mjd
self.dataset_dict = dataset_dict
self.fast_reduction = dataset_dict['fast_reduction']
def get_final_sz(self, final_sz = None, verbose = True, debug = False):
"""
Update the cropping size as you wish
debug: enters Python debugger after finding the size
"""
if final_sz is None:
final_sz_ori = min(2*self.agpm_pos[0]-1,2*self.agpm_pos[1]-1,2*\
(self.com_sz-self.agpm_pos[0])-1,2*\
(self.com_sz-self.agpm_pos[1])-1, int(2*self.shadow_r))
else:
final_sz_ori = min(2*self.agpm_pos[0]-1,2*self.agpm_pos[1]-1,\
2*(self.com_sz-self.agpm_pos[0])-1,\
2*(self.com_sz-self.agpm_pos[1])-1,\
int(2*self.shadow_r), final_sz)
if final_sz_ori%2 == 0:
final_sz_ori -= 1
final_sz = int(final_sz_ori) # iain: added int() around final_sz_ori as cropping requires an integer
if verbose:
print('the final crop size is ', final_sz)
if debug:
pdb.set_trace()
return final_sz
def dark_subtract(self, bad_quadrant = [3], method = 'pca', npc_dark = 1, verbose = True, debug = False, plot = None, NACO = True):
"""
Dark subtraction of science, sky and flats using principal component analysis or median subtraction.
Unsaturated frames are always median dark subtracted.
All frames are also cropped to a common size.
Parameters:
***********
        bad_quadrant : list, optional
            list of bad quadrants to ignore. Quadrants are numbered
                2 | 1
                3 | 4
            Default = [3] (the inherently bad NaCO quadrant)
method : str, default = 'pca'
'pca' for dark subtraction via principal component analysis
'median' for median subtraction of dark
npc_dark : int, optional
number of principal components subtracted during dark subtraction. Default = 1 (most variance in the PCA library)
plot options : 'save' 'show' or None
Whether to show plot or save it, or do nothing
"""
self.com_sz = int(open_fits(self.outpath + 'common_sz',verbose=debug)[0])
crop = 0
if NACO:
mask_std = np.zeros([self.com_sz,self.com_sz])
cy,cx = frame_center(mask_std)
# exclude the negative dot if the frame includes it
if self.com_sz <=733:
mask_std[int(cy)-23:int(cy)+23,:] = 1
else:
crop = int((self.com_sz-733)/2)
mask_std[int(cy) - 23:int(cy) + 23, :-crop] = 1
write_fits(self.outpath + 'mask_std.fits',mask_std,verbose=debug)
sci_list = []
with open(self.inpath +"sci_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_list.append(line.split('\n')[0])
sky_list = []
with open(self.inpath +"sky_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sky_list.append(line.split('\n')[0])
unsat_list = []
with open(self.inpath +"unsat_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
unsat_list.append(line.split('\n')[0])
unsat_dark_list = []
with open(self.inpath +"unsat_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
unsat_dark_list.append(line.split('\n')[0])
flat_list = []
with open(self.inpath +"flat_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
flat_list.append(line.split('\n')[0])
flat_dark_list = []
with open(self.inpath +"flat_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
flat_dark_list.append(line.split('\n')[0])
sci_dark_list = []
with open(self.inpath +"sci_dark_list.txt", "r") as f:
tmp = f.readlines()
for line in tmp:
sci_dark_list.append(line.split('\n')[0])
if not os.path.isfile(self.inpath + sci_list[-1]):
raise NameError('Missing .fits. Double check the contents of the input path')
self.com_sz = int(open_fits(self.outpath + 'common_sz',verbose=debug)[0])
pixel_scale = self.dataset_dict['pixel_scale']
tmp = np.zeros([len(flat_dark_list), self.com_sz, self.com_sz])
master_all_darks = []
#cropping the flat dark cubes to com_sz
for fd, fd_name in enumerate(flat_dark_list):
tmp_tmp = open_fits(self.inpath+fd_name, header=False, verbose=debug)
tmp[fd] = frame_crop(tmp_tmp, self.com_sz, force = True , verbose= debug)
print(tmp[fd].shape)
master_all_darks.append(tmp[fd])
write_fits(self.outpath+'flat_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Flat dark cubes have been cropped and saved')
tmp = np.zeros([len(sci_dark_list), self.com_sz, self.com_sz])
#cropping the SCI dark cubes to com_sz
for sd, sd_name in enumerate(sci_dark_list):
tmp_tmp = open_fits(self.inpath+sd_name, header=False, verbose=debug)
n_dim = tmp_tmp.ndim
if sd == 0:
if n_dim == 2:
tmp = np.array([frame_crop(tmp_tmp, self.com_sz,
force = True, verbose=debug)])
master_all_darks.append(tmp)
print(tmp.shape)
else:
tmp = cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug)
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
else:
if n_dim == 2:
tmp = np.append(tmp,[frame_crop(tmp_tmp, self.com_sz, force = True, verbose=debug)],axis=0)
master_all_darks.append(tmp)
print(tmp.shape)
else:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug),axis=0)
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
write_fits(self.outpath + 'sci_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Sci dark cubes have been cropped and saved')
tmp = np.zeros([len(unsat_dark_list), self.com_sz, self.com_sz])
#cropping of UNSAT dark frames to the common size or less
#will only add to the master dark cube if it is the same size as the SKY and SCI darks
for sd, sd_name in enumerate(unsat_dark_list):
tmp_tmp = open_fits(self.inpath+sd_name, header=False, verbose=debug)
n_dim = tmp_tmp.ndim
if sd == 0:
if n_dim ==2:
ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.array([frame_crop(tmp_tmp, nx - 1, force = True, verbose = debug)])
print(tmp.shape)
else:
if nx>self.com_sz:
tmp = np.array([frame_crop(tmp_tmp, self.com_sz, force = True, verbose = debug)])
else:
tmp = np.array([tmp_tmp])
master_all_darks.append(tmp)
print(tmp.shape)
else:
nz, ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = cube_crop_frames(tmp_tmp, nx-1, force = True, verbose=debug)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug)
else:
tmp = tmp_tmp
master_all_darks.append(np.median(tmp[-nz:],axis=0))
print(tmp[-1].shape)
else:
if n_dim == 2:
ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.append(tmp,[frame_crop(tmp_tmp, nx-1, force = True, verbose=debug)],axis=0)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = np.append(tmp,[frame_crop(tmp_tmp, self.com_sz, force = True, verbose=debug)],axis=0)
else:
tmp = np.append(tmp,[tmp_tmp])
master_all_darks.append(tmp[-1])
print(tmp[-1].shape)
else:
nz, ny, nx = tmp_tmp.shape
if nx < self.com_sz:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, nx - 1, force = True, verbose=debug),axis=0)
print(tmp[-1].shape)
else:
if nx > self.com_sz:
tmp = np.append(tmp,cube_crop_frames(tmp_tmp, self.com_sz, force = True, verbose=debug),axis=0)
else:
tmp = np.append(tmp,tmp_tmp)
master_all_darks.append(np.median(tmp[-nz:],axis=0))
print(tmp[-1].shape)
write_fits(self.outpath+'unsat_dark_cube.fits', tmp, verbose=debug)
if verbose:
print('Unsat dark cubes have been cropped and saved')
if verbose:
print('Total of {} median dark frames. Saving dark cube to fits file...'.format(len(master_all_darks)))
#convert master all darks to numpy array here
master_all_darks = np.array(master_all_darks)
write_fits(self.outpath + "master_all_darks.fits", master_all_darks,verbose=debug)
#defining the mask for the sky/sci pca dark subtraction
_, _, self.shadow_r = find_shadow_list(self, sci_list,verbose=verbose, debug=debug,plot=plot)
if self.coro:
self.agpm_pos = find_AGPM(self.inpath + sci_list[0],verbose=verbose,debug=debug)
else:
raise ValueError('Pipeline does not handle non-coronagraphic data here yet')
mask_AGPM_com = np.ones([self.com_sz,self.com_sz])
cy,cx = frame_center(mask_AGPM_com)
inner_rad = 3/pixel_scale
outer_rad = self.shadow_r*0.8
if NACO:
mask_sci = np.zeros([self.com_sz,self.com_sz])
mask_sci[int(cy)-23:int(cy)+23,int(cx-outer_rad):int(cx+outer_rad)] = 1
write_fits(self.outpath + 'mask_sci.fits', mask_sci, verbose=debug)
# create mask for sci and sky
mask_AGPM_com = get_annulus_segments(mask_AGPM_com, inner_rad, outer_rad - inner_rad, mode='mask')[0]
mask_AGPM_com = frame_shift(mask_AGPM_com, self.agpm_pos[0]-cy, self.agpm_pos[1]-cx, border_mode='constant',
imlib='opencv')
#create mask for flats
mask_AGPM_flat = np.ones([self.com_sz,self.com_sz])
if verbose:
print('The masks for SCI, SKY and FLAT have been defined')
# will exclude a quadrant if specified by looping over the list of bad quadrants and filling the mask with zeros
if len(bad_quadrant) > 0 :
for quadrant in bad_quadrant:
if quadrant == 1:
mask_AGPM_com[int(cy)+1:,int(cx)+1:] = 0
mask_AGPM_flat[int(cy)+1:,int(cx)+1:] = 0
#mask_std[int(cy)+1:,int(cx)+1:] = 0
#mask_sci[int(cy)+1:,int(cx)+1:] = 0
if quadrant == 2:
mask_AGPM_com[int(cy)+1:,:int(cx)+1] = 0
mask_AGPM_flat[int(cy)+1:,:int(cx)+1] = 0
#mask_std[int(cy)+1:,:int(cx)+1] = 0
#mask_sci[int(cy)+1:,:int(cx)+1] = 0
if quadrant == 3:
mask_AGPM_com[:int(cy)+1,:int(cx)+1] = 0
mask_AGPM_flat[:int(cy)+1,:int(cx)+1] = 0
#mask_std[:int(cy)+1,:int(cx)+1] = 0
#mask_sci[:int(cy)+1,:int(cx)+1] = 0
if quadrant == 4:
mask_AGPM_com[:int(cy)+1,int(cx)+1:] = 0
mask_AGPM_flat[:int(cy)+1,int(cx)+1:] = 0
#mask_std[:int(cy)+1,int(cx)+1:] = 0
#mask_sci[:int(cy)+1,:int(cx)+1] = 0
# save the mask for checking/testing
write_fits(self.outpath + 'mask_AGPM_com.fits',mask_AGPM_com, verbose = debug)
write_fits(self.outpath + 'mask_AGPM_flat.fits',mask_AGPM_flat, verbose = debug)
write_fits(self.outpath + 'mask_std.fits', mask_std, verbose=debug)
write_fits(self.outpath + 'mask_sci.fits', mask_sci, verbose=debug)
if verbose:
print('Masks have been saved as fits file')
if method == 'median':
# median dark subtraction of SCI cubes
tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # consider the median within the mask
for sc, fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
if verbose:
print('Dark has been median subtracted from SCI cubes')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # sci before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # sci after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sci', 'Sci Median Dark Subtracted',
'Pixel Mask'), title='Sci Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sci', 'Sci Median Dark Subtracted',
'Pixel Mask'), title='Sci Median Dark Subtraction',
dpi=300, save=self.outpath + 'SCI_median_dark_subtract.pdf')
# median dark subtract of sky cubes
tmp_tmp_tmp = open_fits(self.outpath + 'sci_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)])
for sc, fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_' + fits_name, tmp_tmp)
if verbose:
print('Dark has been median subtracted from SKY cubes')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # sky before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # sky after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sky', 'Sky Median Dark Subtracted',
'Pixel Mask'), title='Sky Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_com), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Sky', 'Sky Median Dark Subtracted',
'Pixel Mask'), title='Sky Median Dark Subtraction',
dpi=300, save=self.outpath + 'SKY_median_dark_subtract.pdf')
# median dark subtract of flat cubes
tmp_tmp = np.zeros([len(flat_list), self.com_sz, self.com_sz])
tmp_tmp_tmp = open_fits(self.outpath + 'flat_dark_cube.fits',verbose=debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis=0)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_flat)])
for sc, fits_name in enumerate(flat_list):
tmp = open_fits(self.inpath + fits_name, header=False, verbose=debug)
if tmp.ndim == 2:
tmp = frame_crop(tmp, self.com_sz, force=True, verbose=debug)
else:
tmp = cube_crop_frames(tmp, self.com_sz, force=True, verbose=debug)
tmp_tmp[sc] = tmp - tmp_tmp_tmp_median
write_fits(self.outpath + '1_crop_flat_cube.fits', tmp_tmp,verbose=debug)
if verbose:
print('Dark has been median subtracted from FLAT frames')
if plot:
tmp_tmp_med = np.median(tmp, axis=0) # flat cube before subtraction
tmp_tmp_med_after = np.median(tmp_tmp, axis=0) # flat cube after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Flat', 'Flat Median Dark Subtracted',
'Pixel Mask'), title='Flat Median Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_med_after, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_med_after,99.9), 1), vmin=(np.percentile(tmp_tmp_med,0.1),
np.percentile(tmp_tmp_med_after,0.1), 0), label=('Raw Flat', 'Flat Median Dark Subtracted',
'Pixel Mask'), title='Flat Median Dark Subtraction',
dpi=300, save=self.outpath + 'FLAT_median_dark_subtract.pdf')
#original code ####################
# #now begin the dark subtraction using PCA
# npc_dark=1 #The ideal number of components to consider in PCA
#
# #coordinate system for pca subtraction
# mesh = np.arange(0,self.com_sz,1)
# xv,yv = np.meshgrid(mesh,mesh)
#
# tmp_tmp = np.zeros([len(flat_list),self.com_sz,self.com_sz])
# tmp_tmp_tmp = open_fits(self.outpath+'flat_dark_cube.fits')
# tmp_tmp_tmp_median = np.median(tmp_tmp_tmp, axis = 0)
# #consider the difference in the medium of the frames without the lower left quadrant.
# tmp_tmp_tmp_median = tmp_tmp_tmp_median[np.where(np.logical_or(xv > cx, yv > cy))] # all but the bad quadrant in the bottom left
# diff = np.zeros([len(flat_list)])
# for fl, flat_name in enumerate(flat_list):
# tmp = open_fits(raw_path+flat_name, header=False, verbose=debug)
# #PCA works best if the flux is roughly on the same scale hence the difference is subtracted before PCA and added after.
# tmp_tmp[fl] = frame_crop(tmp, self.com_sz, force = True ,verbose=debug)
# tmp_tmp_tmp_tmp = tmp_tmp[fl]
# diff[fl] = np.median(tmp_tmp_tmp_median)-np.median(tmp_tmp_tmp_tmp[np.where(np.logical_or(xv > cx, yv > cy))])
# tmp_tmp[fl]+=diff[fl]
# if debug:
# print('difference w.r.t dark = ', diff)
# tmp_tmp_pca = cube_subtract_sky_pca(tmp_tmp, tmp_tmp_tmp,
# mask_AGPM_flat, ref_cube=None, ncomp=npc_dark)
# if debug:
# write_fits(self.outpath+'1_crop_flat_cube_diff.fits', tmp_tmp_pca)
# for fl, flat_name in enumerate(flat_list):
# tmp_tmp_pca[fl] = tmp_tmp_pca[fl]-diff[fl]
# write_fits(self.outpath+'1_crop_flat_cube.fits', tmp_tmp_pca)
# if verbose:
# print('Dark has been subtracted from FLAT cubes')
# end original code ###################
#vals version of above
# npc_dark=1
# tmp_tmp = np.zeros([len(flat_list),self.com_sz,self.com_sz])
# tmp_tmp_tmp = open_fits(self.outpath+'flat_dark_cube.fits')
# npc_flat = tmp_tmp_tmp.shape[0] #not used?
# diff = np.zeros([len(flat_list)])
# for fl, flat_name in enumerate(flat_list):
# tmp = open_fits(raw_path+flat_name, header=False, verbose=False)
# tmp_tmp[fl] = frame_crop(tmp, self.com_sz, force = True, verbose=False)# added force = True
# write_fits(self.outpath+"TMP_flat_test_Val.fits",tmp_tmp[fl])
# #diff[fl] = np.median(tmp_tmp_tmp)-np.median(tmp_tmp[fl])
# #tmp_tmp[fl]+=diff[fl]
# tmp_tmp[fl] = tmp_tmp[fl] - bias
# print(diff)
# tmp_tmp_pca = cube_subtract_sky_pca(tmp_tmp, tmp_tmp_tmp - bias, mask_AGPM_flat, ref_cube=None, ncomp=npc_dark)
# for fl, flat_name in enumerate(flat_list):
# tmp_tmp_pca[fl] = tmp_tmp_pca[fl]-diff[fl]
# write_fits(self.outpath+'1_crop_flat_cube.fits', tmp_tmp_pca)
# if verbose:
# print('Dark has been subtracted from FLAT cubes')
###############
########### new Val code
# create cube combining all darks
# master_all_darks = []
# #ntot_dark = len(sci_dark_list) + len(flat_dark_list) #+ len(unsat_dark_list)
# #master_all_darks = np.zeros([ntot_dark, self.com_sz, self.com_sz])
# tmp = open_fits(self.outpath + 'flat_dark_cube.fits', verbose = verbose)
#
# # add each frame to the list
# for frame in tmp:
# master_all_darks.append(frame)
#
# for idx,fname in enumerate(sci_dark_list):
# tmp = open_fits(self.inpath + fname, verbose=verbose)
# master_all_darks.append(tmp[-1])
#
# #tmp = open_fits(self.outpath + 'sci_dark_cube.fits', verbose = verbose) # changed from master_sci_dark_cube.fits to sci_dark_cube.fits
#
# #for frame in tmp:
# # master_all_darks.append(frame)
#
# if len(unsat_dark_list) > 0:
# for idx,fname in enumerate(unsat_dark_list):
# tmp = open_fits(self.inpath + fname, verbose=verbose)
# master_all_darks.append(tmp[-1])
# #tmp = open_fits(self.outpath + 'unsat_dark_cube.fits', verbose = verbose)
# #for frame in tmp:
# #master_all_darks.append(frame)
#
# #master_all_darks[:len(flat_dark_list)] = tmp.copy()
# #master_all_darks[len(flat_dark_list):] = tmp.copy()
if method == 'pca':
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose = debug) # the cube of all darks - PCA works better with a larger library of DARKs
tmp_tmp = np.zeros([len(flat_list), self.com_sz, self.com_sz])
diff = np.zeros([len(flat_list)])
bar = pyprind.ProgBar(len(flat_list), stream=1, title='Finding difference between DARKS and FLATS')
for fl, flat_name in enumerate(flat_list):
tmp = open_fits(self.inpath+flat_name, header=False, verbose=False)
tmp_tmp[fl] = frame_crop(tmp, self.com_sz, force=True, verbose=False) # added force = True
diff[fl] = np.median(tmp_tmp_tmp)-np.median(tmp_tmp[fl]) # median of pixels in all darks - median of all pixels in flat frame
tmp_tmp[fl]+=diff[fl] # subtracting median of flat from the flat and adding the median of the dark
bar.update()
#write_fits(self.outpath + 'TMP_cropped_flat.fits', tmp_tmp, verbose=verbose) # to check if the flats are aligned with the darks
#test_diff = np.linspace(np.average(diff),5000,50)
def _get_test_diff_flat(guess,verbose=False):
#tmp_tmp_pca = np.zeros([self.com_sz,self.com_sz])
#stddev = []
# loop over values around the median of diff to scale the frames accurately
#for idx,td in enumerate(test_diff):
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp+guess, tmp_tmp_tmp,
mask_AGPM_flat, ref_cube=None, ncomp=npc_dark),axis=0)
tmp_tmp_pca-= np.median(diff)+guess # subtract the negative median of diff values and subtract test diff (aka add it back)
subframe = tmp_tmp_pca[np.where(mask_std)] # where mask_std is an optional argument
#subframe = tmp_tmp_pca[int(cy)-23:int(cy)+23,:-17] # square around center that includes the bad lines in NaCO data
#if idx ==0:
subframe = subframe.reshape((-1,self.com_sz-crop))
#stddev.append(np.std(subframe)) # save the stddev around this bad area
stddev = np.std(subframe)
write_fits(self.outpath + 'dark_flat_subframe.fits', subframe, verbose=debug)
#if verbose:
print('Guess = {}'.format(guess))
print('Stddev = {}'.format(stddev))
# for fl, flat_name in enumerate(flat_list):
# tmp_tmp_pca[fl] = tmp_tmp_pca[fl]-diff[fl]
#return test_diff[np.argmin[stddev]] # value of test_diff corresponding to lowest stddev
return stddev
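            # Illustrative sketch (not executed by the pipeline): the call to
            # scipy.optimize.minimize below treats the scalar offset as a 1-D free
            # parameter and lets Nelder-Mead minimise the residual noise, e.g.
            #   f = lambda x: (x[0] - 3.0) ** 2   # stand-in objective for _get_test_diff_flat
            #   res = minimize(f, x0=0.0, method='Nelder-Mead', tol=2e-4)
            #   res.x[0]  # ~3.0, the optimal offset; res.nit gives the iteration count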
# step_size1 = 50
# step_size2 = 10
# n_test1 = 50
# n_test2 = 50
# lower_diff = guess - (n_test1 * step_size1) / 2
# upper_diff = guess + (n_test1 * step_size1) / 2
#test_diff = np.arange(lower_diff, upper_diff, n_test1) - guess
# print('lower_diff:', lower_diff)
# print('upper_diff:', upper_diff)
# print('test_diff:', test_diff)
# chisquare = function that computes stddev, p = test_diff
#solu = minimize(chisquare, p, args=(cube, angs, etc.), method='Nelder-Mead', options=options)
if verbose:
print('FLATS difference w.r.t. DARKS:', diff)
print('Calculating optimal PCA dark subtraction for FLATS...')
guess = 0
solu = minimize(_get_test_diff_flat,x0=guess,args = (debug),method='Nelder-Mead',tol = 2e-4,options = {'maxiter':100, 'disp':verbose})
# guess = solu.x
# print('best diff:',guess)
# # lower_diff = guess - (n_test2 * step_size2) / 2
# # upper_diff = guess + (n_test2 * step_size2) / 2
# #
# # test_diff = np.arange(lower_diff, upper_diff, n_test2) - guess
# # print('lower_diff:', lower_diff)
# # print('upper_diff:', upper_diff)
# # print('test_diff:', test_diff)
#
# solu = minimize(_get_test_diff_flat, x0=test_diff, args=(), method='Nelder-Mead',
# options={'maxiter': 1})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
if verbose:
print('Best difference (value) to add to FLATS is {} found in {} iterations'.format(best_test_diff,solu.nit))
# cond = True
# max_it = 3 # maximum iterations
# counter = 0
# while cond and counter<max_it:
# index,best_diff = _get_test_diff_flat(self,first_guess = np.median(diff), n_test = n_test1,lower_limit = 0.1*np.median(diff),upper_limit = 2)
# if index !=0 and index !=n_test1-1:
# cond = False
# else:
# first_guess =
# counter +=1
# if counter==max_it:
# print('##### Reached maximum iterations for finding test diff! #####')
# _,_ = _get_test_diff_flat(self, first_guess=best_diff, n_test=n_test2, lower_limit=0.8, upper_limit=1.2,plot=plot)
#write_fits(self.outpath + '1_crop_flat_cube_test_diff.fits', tmp_tmp_pca + td, verbose=debug)
# if verbose:
# print('stddev:', np.round(stddev, 3))
# print('Lowest standard dev is {} at frame {} with constant {}'.format(np.round(np.min(stddev), 2),
# np.round(np.argmin(stddev), 2) + 1,
# test_diff[np.argmin(stddev)]))
tmp_tmp_pca = cube_subtract_sky_pca(tmp_tmp + best_test_diff, tmp_tmp_tmp,
mask_AGPM_flat, ref_cube=None, ncomp=npc_dark)
bar = pyprind.ProgBar(len(flat_list), stream=1, title='Correcting FLATS via PCA dark subtraction')
for fl, flat_name in enumerate(flat_list):
tmp_tmp_pca[fl] = tmp_tmp_pca[fl] - diff[fl] - best_test_diff # add back the constant
bar.update()
write_fits(self.outpath + '1_crop_flat_cube.fits', tmp_tmp_pca, verbose=debug)
if plot:
tmp_tmp_med = np.median(tmp_tmp, axis=0) # flat before subtraction
tmp_tmp_pca = np.median(tmp_tmp_pca, axis=0) # flat after dark subtract
if plot == 'show':
plot_frames((tmp_tmp_med, tmp_tmp_pca, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_pca,99.9), 1),
vmin=(np.percentile(tmp_tmp_med,0.1), np.percentile(tmp_tmp_pca,0.1), 0),
title='Flat PCA Dark Subtraction')
if plot == 'save':
plot_frames((tmp_tmp_med, tmp_tmp_pca, mask_AGPM_flat), vmax=(np.percentile(tmp_tmp_med,99.9),
np.percentile(tmp_tmp_pca,99.9), 1),
vmin=(np.percentile(tmp_tmp_med,0.1), np.percentile(tmp_tmp_pca,0.1), 0),
title='Flat PCA Dark Subtraction', dpi=300, save=self.outpath + 'FLAT_PCA_dark_subtract.pdf')
if verbose:
print('Flats have been dark corrected')
# ### ORIGINAL PCA CODE
#PCA dark subtraction of SCI cubes
#tmp_tmp_tmp = open_fits(self.outpath+'sci_dark_cube.fits')
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose =debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp,axis = 0) # median frame of all darks
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # integer median of all the pixels within the mask
tmp_tmp = np.zeros([len(sci_list), self.com_sz, self.com_sz])
diff = np.zeros([len(sci_list)])
bar = pyprind.ProgBar(len(sci_list), stream=1, title='Finding difference between DARKS and SCI cubes. This may take some time.')
for sc, fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug) # open science
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug) # crop science to common size
#PCA works best when the considering the difference
tmp_median = np.median(tmp,axis = 0) # make median frame from all frames in cube
#tmp_median = tmp_median[np.where(mask_AGPM_com)]
diff[sc] = tmp_tmp_tmp_median - np.median(tmp_median) # median pixel value of all darks minus median pixel value of sci cube
tmp_tmp[sc] = tmp_median + diff[sc]
# if sc==0 or sc==middle_idx or sc==len(sci_list)-1:
# tmp_tmp[counter] = tmp_median + diff[sc]
# counter = counter + 1
if debug:
print('difference w.r.t dark =', diff[sc])
bar.update()
write_fits(self.outpath + 'dark_sci_diff.fits',diff,verbose=debug)
write_fits(self.outpath + 'sci_plus_diff.fits',tmp_tmp,verbose=debug)
# with open(self.outpath + "dark_sci_diff.txt", "w") as f:
# for diff_sci in diff:
# f.write(str(diff_sci) + '\n')
if verbose:
print('SCI difference w.r.t. DARKS has been saved to fits file.')
print('SCI difference w.r.t. DARKS:', diff)
#lower_diff = 0.8*np.median(diff)
#upper_diff = 1.2*np.median(diff)
#test_diff = np.arange(abs(lower_diff),abs(upper_diff),50) - abs(np.median(diff)) # make a range of values in increments of 50 from 0.9 to 1.1 times the median
#print('test diff:',test_diff)
#tmp_tmp_pca = np.zeros([len(test_diff),self.com_sz,self.com_sz])
#best_idx = []
def _get_test_diff_sci(guess, verbose=False):
# tmp_tmp_pca = np.zeros([self.com_sz,self.com_sz])
# stddev = []
# loop over values around the median of diff to scale the frames accurately
# for idx,td in enumerate(test_diff):
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp + guess, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark), axis=0)
tmp_tmp_pca -= np.median(diff) + guess # subtract the negative median of diff values and subtract test diff (aka add it back)
subframe = tmp_tmp_pca[np.where(mask_sci)]
# subframe = tmp_tmp_pca[int(cy)-23:int(cy)+23,:-17] # square around center that includes the bad lines in NaCO data
# if idx ==0:
# stddev.append(np.std(subframe)) # save the stddev around this bad area
stddev = np.std(subframe)
if verbose:
print('Guess = {}'.format(guess))
print('Standard deviation = {}'.format(stddev))
subframe = subframe.reshape(46,-1) # hard coded 46 because the subframe size is hardcoded to center pixel +-23
write_fits(self.outpath + 'dark_sci_subframe.fits', subframe, verbose=debug)
# for fl, flat_name in enumerate(flat_list):
# tmp_tmp_pca[fl] = tmp_tmp_pca[fl]-diff[fl]
# return test_diff[np.argmin[stddev]] # value of test_diff corresponding to lowest stddev
return stddev
#test_sci_list = [sci_list[i] for i in [0,middle_idx,-1]]
#bar = pyprind.ProgBar(len(sci_list), stream=1, title='Testing diff for science cubes')
guess = 0
#best_diff = []
#for sc in [0,middle_idx,-1]:
if verbose:
print('Calculating optimal PCA dark subtraction for SCI cubes. This may take some time.')
solu = minimize(_get_test_diff_sci, x0=guess, args=(verbose), method='Nelder-Mead',tol = 2e-4,options = {'maxiter':100, 'disp':verbose})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
#best_diff.append(best_test_diff)
if verbose:
print('Best difference (value) to add to SCI cubes is {} found in {} iterations'.format(best_test_diff,solu.nit))
#stddev = [] # to refresh the list after each loop
#tmp = open_fits(self.inpath+sci_list[sc], header=False, verbose=debug)
#tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug)
#for idx,td in enumerate(test_diff):
#tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp[sc]+guess, tmp_tmp_tmp,mask_AGPM_com, ref_cube=None, ncomp=npc_dark),axis=0)
#tmp_tmp_pca-= np.median(diff)+td
#subframe = tmp_tmp_pca[np.where(mask_std)]
#subframe = tmp_tmp_pca[idx,int(cy)-23:int(cy)+23,:] # square around center that includes that bad lines
#stddev.append(np.std(subframe))
#best_idx.append(np.argmin(stddev))
#print('Best index of test diff: {} of constant: {}'.format(np.argmin(stddev),test_diff[np.argmin(stddev)]))
#bar.update()
#if sc == 0:
# write_fits(self.outpath+'1_crop_sci_cube_test_diff.fits', tmp_tmp_pca + td, verbose = debug)
# sci_list_mjd = np.array(self.sci_list_mjd) # convert list to numpy array
# xp = sci_list_mjd[np.array([0,middle_idx,-1])] # only get first, middle, last
# #fp = test_diff[np.array(best_idx)]
# fp = best_diff
# opt_diff = np.interp(x = sci_list_mjd, xp = xp, fp = fp, left=None, right=None, period=None) # optimal diff for each sci cube
if verbose:
print('Optimal constant to apply to each science cube: {}'.format(best_test_diff))
bar = pyprind.ProgBar(len(sci_list), stream=1, title='Correcting SCI cubes via PCA dark subtraction')
for sc,fits_name in enumerate(sci_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug)
tmp_tmp_pca = cube_subtract_sky_pca(tmp +diff[sc] +best_test_diff, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark)
tmp_tmp_pca = tmp_tmp_pca - diff[sc] - best_test_diff # add back the constant
write_fits(self.outpath+'1_crop_'+fits_name, tmp_tmp_pca, verbose = debug)
bar.update()
if verbose:
print('Dark has been subtracted from SCI cubes')
if plot:
tmp = np.median(tmp, axis = 0)
tmp_tmp_pca = np.median(tmp_tmp_pca,axis = 0)
if plot == 'show':
plot_frames((tmp, tmp_tmp_pca, mask_AGPM_com), vmax=(np.percentile(tmp, 99.9),
np.percentile(tmp_tmp_pca, 99.9), 1),
vmin=(np.percentile(tmp, 0.1), np.percentile(tmp_tmp_pca, 0.1), 0),
label=('Raw Science', 'Science PCA Dark Subtracted', 'Pixel Mask'),
title='Science PCA Dark Subtraction')
if plot == 'save':
plot_frames((tmp, tmp_tmp_pca, mask_AGPM_com), vmax=(np.percentile(tmp, 99.9),
np.percentile(tmp_tmp_pca, 99.9), 1),
vmin=(np.percentile(tmp, 0.1), np.percentile(tmp_tmp_pca, 0.1), 0),
label=('Raw Science', 'Science PCA Dark Subtracted', 'Pixel Mask'),
title='Science PCA Dark Subtraction',
dpi=300,save = self.outpath + 'SCI_PCA_dark_subtract.pdf')
# dark subtraction of SKY cubes (PCA-based, same approach as for the SCI cubes above)
tmp_tmp_tmp = open_fits(self.outpath + 'master_all_darks.fits', verbose = debug)
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp,axis = 0) # median frame of all darks
tmp_tmp_tmp_median = np.median(tmp_tmp_tmp_median[np.where(mask_AGPM_com)]) # median of the dark pixels within the mask
tmp_tmp = np.zeros([len(sky_list), self.com_sz, self.com_sz])
cy,cx = frame_center(tmp_tmp)
diff = np.zeros([len(sky_list)])
bar = pyprind.ProgBar(len(sky_list), stream=1, title='Finding difference between darks and sky cubes')
for sc, fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug) # open sky
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug) # crop sky to common size
# PCA works best when considering the difference (sky frames shifted towards the dark level)
tmp_median = np.median(tmp,axis = 0) # make median frame from all frames in cube
#tmp_median = tmp_median[np.where(mask_AGPM_com)]
diff[sc] = tmp_tmp_tmp_median - np.median(tmp_median) # median pixel value of all darks minus median pixel value of sky cube
tmp_tmp[sc] = tmp_median + diff[sc]
if debug:
print('difference w.r.t dark =', diff[sc])
bar.update()
write_fits(self.outpath + 'dark_sci_diff.fits', diff, verbose=debug)
if verbose:
print('SKY difference w.r.t. DARKS has been saved to fits file.')
print('SKY difference w.r.t. DARKS:', diff)
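# As for the SCI cubes above, find a single scalar offset that, when added to the scaled
# sky frames before PCA dark subtraction, minimises the residual standard deviation inside
# the mask_sci region; the offset is removed again after the subtraction.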
def _get_test_diff_sky(guess, verbose=False):
# tmp_tmp_pca = np.zeros([self.com_sz,self.com_sz])
# stddev = []
# loop over values around the median of diff to scale the frames accurately
# for idx,td in enumerate(test_diff):
tmp_tmp_pca = np.median(cube_subtract_sky_pca(tmp_tmp + guess, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark), axis=0)
tmp_tmp_pca -= np.median(diff) + guess # remove the median offset and the trial constant (undo what was added before PCA)
subframe = tmp_tmp_pca[np.where(mask_sci)]
# subframe = tmp_tmp_pca[int(cy)-23:int(cy)+23,:-17] # square around center that includes the bad lines in NaCO data
# if idx ==0:
# stddev.append(np.std(subframe)) # save the stddev around this bad area
stddev = np.std(subframe)
if verbose:
print('Guess = {}'.format(guess))
print('Standard deviation = {}'.format(stddev))
subframe = subframe.reshape(46,-1) # hard-coded 46 rows: mask_sci is assumed to span the central +/-23 pixel rows
write_fits(self.outpath + 'dark_sky_subframe.fits', subframe, verbose=debug)
# for fl, flat_name in enumerate(flat_list):
# tmp_tmp_pca[fl] = tmp_tmp_pca[fl]-diff[fl]
# return test_diff[np.argmin[stddev]] # value of test_diff corresponding to lowest stddev
return stddev
guess = 0
if verbose:
print('Calculating optimal PCA dark subtraction for SKY cubes. This may take some time.')
solu = minimize(_get_test_diff_sky, x0=guess, args=(verbose,), method='Nelder-Mead', tol=2e-4, options={'maxiter': 100, 'disp': verbose})
best_test_diff = solu.x # x is the solution (ndarray)
best_test_diff = best_test_diff[0] # take out of array
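# best_test_diff is the scalar offset (in counts) returned by the Nelder-Mead fit; it is
# added to each sky cube before the PCA dark subtraction and subtracted again afterwards.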
if verbose:
print('Optimal constant to apply to each sky cube: {}'.format(best_test_diff))
bar = pyprind.ProgBar(len(sky_list), stream=1, title='Correcting SKY cubes via PCA dark subtraction')
for sc,fits_name in enumerate(sky_list):
tmp = open_fits(self.inpath+fits_name, header=False, verbose=debug)
tmp = cube_crop_frames(tmp, self.com_sz, force = True, verbose=debug)
tmp_tmp_pca = cube_subtract_sky_pca(tmp +diff[sc] +best_test_diff, tmp_tmp_tmp,
mask_AGPM_com, ref_cube=None, ncomp=npc_dark)
tmp_tmp_pca = tmp_tmp_pca - diff[sc] - best_test_diff # remove the constants that were added before PCA
write_fits(self.outpath+'1_crop_'+fits_name, tmp_tmp_pca, verbose = debug)
if verbose:
print('Dark has been subtracted from SKY cubes')
if plot:
tmp = np.median(tmp, axis = 0)
tmp_tmp_pca = np.median(tmp_tmp_pca,axis = 0)
if plot == 'show':
plot_frames((tmp,tmp_tmp_pca,mask_AGPM_com), vmax = (np.percentile(tmp,99.9),
np.percentile(tmp_tmp_pca,99.9),1), vmin = (np.percentile(tmp,0.1), np.percentile(tmp_tmp_pca,0.1), 0))
import numpy as np
import scipy.optimize
from modellingPractical import *
# Follow the setup from the vegetation modelling practical
def drivers(year=2001,lat=50.,lon=0.0,ndays=365\
,tau=0.2,parScale=0.5,tempScale=35.):
# get sun information
s = sunInfo(julianOffset='%4d/1/1'%year)
# loop over day and month at latitude 50.0
# NB we use the dates 1-366 of January here to avoid month issues
# be careful here ... only use the minutes field if hours set to every hour
# else dt isnt constant
s.sun(0., np.array([0.]))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/5/15
@Author : AnNing
"""
from __future__ import print_function
import os
import sys
import numpy as np
from initialize import load_yaml_file
from load import ReadAhiL1
TEST = True
def ndsi(in_file_l1, in_file_geo, in_file_cloud):
# -------------------------------------------------------------------------
# SolarZenith_MAX : MAXIMUM SOLAR ZENITH ANGLE, *1.0 DEGREE
# solar_zenith_max = None
# -------------------------------------------------------------------------
# Date and Time
# i_year = None
# i_month = None
# i_day = None
# i_minute = None
# n_year = None
# n_month = None
# n_day = None
# n_hour = None
# n_minute = None
# n_second = None
# -------------------------------------------------------------------------
# out data
# r4_rt = np.array([])
# r4_info = np.array([])
# i2_cm = np.array([])
# r4_test = np.array([])
# -------------------------------------------------------------------------
# swath sum
# i_swath_valid = None
# i_sum_valid = None
# -------------------------------------------------------------------------
# dim_x = None
# dim_y = None
# dim_z = None
# -------------------------------------------------------------------------
# r_lats = None # LATITUDE
# r_lons = None # LONGITUDE
# a_satz = None # SATELLITE ZENITH ANGLE
# a_sata = None # SATELLITE AZIMUTH
# a_sunz = None # SOLAR ZENITH ANGLE
# a_suna = None # SOLAR AZIMUTH
# r_dems = None # DEM MASK
# i_mask = None # LANDCOVER MASK
# i_cm = None # Cloud MASK
# -------------------------------------------------------------------------
# cossl = None # SOLAR-ZENITH-ANGLE-COSINE
# glint = None # SUN GLINT
# lsm = None # Mask For Water & Land
# i_avalible = None # Mask For Data to be used
# -------------------------------------------------------------------------
# ref_01 = None # 0.645 um : Ref, NDVI
# ref_02 = None # 0.865 um : Ref, NDVI
# ref_03 = None # 0.470 um : Ref, NDVI
# ref_04 = None # 0.555 um : Ref, NDVI
# ref_05 = None # 1.640 um : Ref, NDVI
# ref_06 = None # 1.640 um : Ref, NDSI
# ref_07 = None # 2.130 um : Ref, NDSI
# ref_19 = None # 0.940 um : Ref, Vapour
# ref_26 = None # 1.375 um : Ref, Cirrus
# tbb_20 = None # 3.750 um : TBB, Temperature
# tbb_31 = None # 11.030 um : TBB, Temperature
# tbb_32 = None # 12.020 um : TBB, Temperature
# -------------------------------------------------------------------------
# ndvis = None # R2-R1/R2+R1: R0.86,R0.65
# ndsi_6 = None # R4-R6/R4+R6: R0.55,R1.64
# ndsi_7 = None # R4-R7/R4+R7: R0.55,R2.13
#
# dr_16 = None # R1-R6: R0.86,R1.64
# dr_17 = None # R1-0.5*R7: R0.86,R2.13
#
# dt_01 = None # T20-T31: T3.75-T11.0
# dt_02 = None # T20-T32: T3.75-T12.0
# dt_12 = None # T31-T32: T11.0-T12.0
#
# rr_21 = None # R2/R1: R0.86,R0.65
# rr_46 = None # R4/R6: R0.55,R1.64
# rr_47 = None # R4/R7: R0.55,R2.13
#
# dt_34 = None # T20-T23: T3.75-T4.05
# dt_81 = None # T29-T31: T8.55-T11.0
# dt_38 = None # T20-T29: T3.75-T8.55
# -------------------------------------------------------------------------
# Used for Masking Over-Estimation for snow by monthly snow pack lines.
# LookUpTable For Monthly CHN-SnowPackLine (ZhengZJ, 2006)
# Line: Longitude from 65.0 to 145.0 (Step is 0.1 deg.)
# Column: Month from Jan to Dec (Step is month)
# Value: Latitude (Unit is deg.)
# r_mon_snow_line = np.array([]) # Monthly CHN-SnowPackLine
# Used for judging low or water cloud by BT difference.
# LookUpTable For T11-T12 (Saunders and Kriebel, 1988)
# Line: T11 from 250.0K to 310.0K (Step is 1.0K)
# Column: Secant-SZA from 1.00 to 2.50 (Step is 0.01)
# Value: T11-T12 (Unit is K)
# delta_bt_lut = np.array([]) # LookUpTable for BT11-BT12
# Used for judging snow in forest by NDSI and NDVI.
# LookUpTable For Snow in Forest , by NDVI-NDSI (Klein et al., 1998)
# Line: NDVI from 0.010 to 1.000 (Step is 0.01)
# Column: NDSI from 0.01000 to 1.00000 (Step is 0.00001)
# Value: NDSI (Unit is null)
# y_ndsi_x_ndvi = np.array([]) # LookUpTable for NDSI-NDVI
# !!!!! Four Variables below should be USED TOGETHER.
# !! R138R164LUT,R164T11_LUT,R164R138LUT,T11mT12R164LUT
# !! LookUpTable For FreshSnow&WaterIceCloud (ZhengZJ, 2006)
# !! (1)Line-R164T11_LUT: T11 from 225.0 to 280.0 (Step is 0.1K)
# !! Column--R164T11_LUT: R164 from 0.00000 to 0.24000 (No Step)
# !! (2)Line-T11mT12R164LUT: R164 from 0.100 to 0.250 (Step is 0.001)
# !! Column-T11mT12R164LUT: T11mT12 from -40 to 130 (No Step)
# !! (3)Line-R138R164LUT: R164 from 0.010 to 0.260 (Step is 0.001)
# !! Column-R138R164LUT: R138 from 0.0020 to 0.3000 (No Step)
# !! (4)Line-R164R138LUT: R138 from 0.000 to 0.550 (Step is 0.001)
# !! Column-R164R138LUT: R164 from 0.1500 to 0.3000 (No Step)
# y_r164_x_t11 = np.array([]) # LookUpTable For R164T11
# y_t11_m_t12_x_r164 = np.array([]) # LookUpTable For T11mT12R164
# y_r138_x_r164 = np.array([]) # LookUpTable For R138R164
# y_r164_x_r138 = np.array([]) # LookUpTable For R164R138
# -------------------------------------------------------------------------
# Used for Reference of 11um Minimum Brightness Temperature.
# ref_bt11um = None
# ref_bt11um_slope_n = None
# ref_bt11um_slope_s = None
# ref_bt11um_offset_n = None
# ref_bt11um_offset_s = None
# a_low_t_lat = None # Referential Latitude for BT11 LowThreshold
# a_low_bt11 = None # Referential Temp for BT11 LowThreshold
# delta_t_low = None # Referential Temporal Delta-Temp for BT11_Low
# b_hai_t_lat = None # Referential Latitude for BT11 HaiThreshold
# b_hai_bt11 = None # Referential Temp for BT11 HaiThreshold
# delta_t_hai = None # Referential Temporal Delta-Temp for BT11_Hai
#
# a_low_bt11_n = None
# a_low_bt11_s = None
# b_hai_bt11_n = None
# b_hai_bt11_s = None
# -------------------------------------------------------------------------
# Used for Calculate and Store Xun number from 1 to 36 in a year.
# f_xun_n = None
# f_xun_s = None
# i2_xun_num = None
# -------------------------------------------------------------------------
# i_step = np.array([]) # TEST-STEP
# i_mark = np.array([]) # SNOW MAP
# !!!! VALUE = 255 : Fill Data--no Data expected For pixel
# !!!! VALUE = 254 : Saturated MODIS sensor detector
# !!!! VALUE = 240 : NATIONAL OR PROVINCIAL BOUNDARIES
# !!!! VALUE = 200 : Snow
# !!!! VALUE = 100 : Snow-Covered Lake Ice
# !!!! VALUE = 50 : Cloud Obscured
# !!!! VALUE = 39 : Ocean
# !!!! VALUE = 37 : Inland Water
# !!!! VALUE = 25 : Land--no snow detected
# !!!! VALUE = 11 : Darkness, terminator or polar
# !!!! VALUE = 1 : No Decision
# !!!! VALUE = 0 : Sensor Data Missing
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
print('Program : Make SNC')
# -------------------------------------------------------------------------
path = os.path.abspath(os.path.dirname(__file__))
name_list_swath_snc = os.path.join(path, 'ndsi_cfg.yaml')
print('Config file : {}'.format(name_list_swath_snc))
a = load_yaml_file(name_list_swath_snc)
solar_zenith_max = float(a['SolarZenith_MAX'])
inn_put_para_path = a['InnPut_ParaPath']
inn_put_root_l01 = a['InnPut_Root_L01']
inn_put_root_l02 = a['InnPut_Root_L02']
inn_put_root_l03 = a['InnPut_Root_L03']
# inn_put_root_l11 = a['InnPut_Root_L11']
# inn_put_root_l12 = a['InnPut_Root_L12']
# inn_put_root_l13 = a['InnPut_Root_L13']
# inn_put_root_l14 = a['InnPut_Root_L14']
inn_put_file_l01 = os.path.join(path, inn_put_para_path, inn_put_root_l01)
inn_put_file_l02 = os.path.join(path, inn_put_para_path, inn_put_root_l02)
inn_put_file_l03 = os.path.join(path, inn_put_para_path, inn_put_root_l03)
# inn_put_file_l11 = os.path.join(path, inn_put_para_path, inn_put_root_l11)
# inn_put_file_l12 = os.path.join(path, inn_put_para_path, inn_put_root_l12)
# inn_put_file_l13 = os.path.join(path, inn_put_para_path, inn_put_root_l13)
# inn_put_file_l14 = os.path.join(path, inn_put_para_path, inn_put_root_l14)
delta_bt_lut = np.loadtxt(inn_put_file_l01, skiprows=1)
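# delta_bt_lut layout (see the LUT description above): rows are T11 from 250.0 K to 310.0 K
# in 1.0 K steps, columns are secant(SZA) from 1.00 to 2.50, values are the expected
# BT11-BT12 difference in K.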
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import seaborn as sns
sns.set_color_codes()
import pandas as pd
import numpy as np
import os
from matplotlib.ticker import FuncFormatter
base_dir = './large_network_larger'
color_cycle = sns.color_palette()
COLORS = {'ma2c': color_cycle[0], 'ia2c': color_cycle[1], 'iqll': color_cycle[2],
'iqld': color_cycle[3], 'greedy':color_cycle[4]}
TRAIN_STEP = 1e6
window = 100
def plot_train_curve(scenario='large_grid', date='oct07'):
cur_dir = base_dir + ('/eval_%s/%s/train_data' % (date, scenario))
names = ['ma2c', 'ia2c', 'iqll']
labels = ['MA2C', 'IA2C', 'IQL-LR']
# names = ['ma2c', 'ia2c', 'iqld', 'iqll']
# labels = ['MA2C', 'IA2C', 'IQL-DNN', 'IQL-LR']
dfs = {}
for file in os.listdir(cur_dir):
name = file.split('_')[0]
print(file + ', ' + name)
if (name in names) and (name != 'greedy'):
df = pd.read_csv(cur_dir + '/' + file)
dfs[name] = df[df.test_id == -1]
plt.figure(figsize=(9, 6))
ymin = []
ymax = []
for i, name in enumerate(names):
if name == 'greedy':
plt.axhline(y=-972.28, color=COLORS[name], linewidth=3, label=labels[i])
else:
df = dfs[name]
x_mean = df.avg_reward.rolling(window).mean().values
x_std = df.avg_reward.rolling(window).std().values
plt.plot(df.step.values, x_mean, color=COLORS[name], linewidth=3, label=labels[i])
ymin.append(np.nanmin(x_mean - 0.5 * x_std))
ymax.append(np.nanmax(x_mean + 0.5 * x_std))
plt.fill_between(df.step.values, x_mean - x_std, x_mean + x_std, facecolor=COLORS[name], edgecolor='none',
alpha=0.1)
ymin = min(ymin)
ymax = max(ymax)
plt.xlim([0, TRAIN_STEP])
if scenario == 'large_grid':
plt.ylim([-1600, -400])
else:
plt.ylim([-225, -100])
def millions(x, pos):
return '%1.1fM' % (x * 1e-6)
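# e.g. millions(2.5e6, None) -> '2.5M'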
formatter = FuncFormatter(millions)
plt.gca().xaxis.set_major_formatter(formatter)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.xlabel('Training step', fontsize=18)
plt.ylabel('Average episode reward', fontsize=18)
plt.legend(loc='upper left', fontsize=18)
plt.tight_layout()
plt.savefig(plot_dir + ('/%s_train.pdf' % scenario))
plt.close()
episode_sec = 3600
def fixed_agg(xs, window, agg):
xs = np.reshape(xs, (-1, window))
if agg == 'sum':
return np.sum(xs, axis=1)
elif agg == 'mean':
return np.mean(xs, axis=1)
elif agg == 'median':
return np.median(xs, axis=1)
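# e.g. fixed_agg(np.arange(6), window=3, agg='sum') -> array([ 3, 12])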
def varied_agg(xs, ts, window, agg):
t_bin = window
x_bins = []
cur_x = []
xs = list(xs) + [0]
ts = list(ts) + [episode_sec + 1]
i = 0
while i < len(xs):
x = xs[i]
t = ts[i]
if t <= t_bin:
cur_x.append(x)
i += 1
else:
if not len(cur_x):
x_bins.append(0)
else:
if agg == 'sum':
x_stat = np.sum(np.array(cur_x))
elif agg == 'mean':
x_stat = np.mean(np.array(cur_x))
elif agg == 'median':
x_stat = np.median(np.array(cur_x))
x_bins.append(x_stat)
t_bin += window
cur_x = []
return np.array(x_bins)
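# varied_agg bins measurements with irregular time stamps (e.g. trip arrival times) into
# fixed windows of `window` seconds, aggregating each bin with sum/mean/median; empty bins
# contribute 0.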
def plot_series(df, name, tab, label, color, window=None, agg='sum', reward=False):
episodes = list(df.episode.unique())
num_episode = len(episodes)
num_time = episode_sec
print(label, name)
# always use avg over episodes
if tab != 'trip':
res = df.loc[df.episode == episodes[0], name].values
for episode in episodes[1:]:
res += df.loc[df.episode == episode, name].values
res = res / num_episode
print('mean: %.2f' % np.mean(res))
print('std: %.2f' % np.std(res))
print('min: %.2f' % np.min(res))
print('max: %.2f' % np.max(res))
else:
res = []
for episode in episodes:
res += list(df.loc[df.episode == episode, name].values)
print('mean: %d' % np.mean(res))
print('max: %d' % np.max(res))
if reward:
num_time = 720
if window and (agg != 'mv'):
num_time = num_time // window
x = np.zeros((num_episode, num_time))
for i, episode in enumerate(episodes):
t_col = 'arrival_sec' if tab == 'trip' else 'time_sec'
cur_df = df[df.episode == episode].sort_values(t_col)
if window and (agg == 'mv'):
cur_x = cur_df[name].rolling(window, min_periods=1).mean().values
else:
cur_x = cur_df[name].values
if window and (agg != 'mv'):
if tab == 'trip':
cur_x = varied_agg(cur_x, df[df.episode == episode].arrival_sec.values, window, agg)
else:
cur_x = fixed_agg(cur_x, window, agg)
# print(cur_x.shape)
x[i] = cur_x
if num_episode > 1:
x_mean = np.mean(x, axis=0)
x_std = np.std(x, axis=0)
else:
x_mean = x[0]
x_std = np.zeros(num_time)
if (not window) or (agg == 'mv'):
t = np.arange(1, episode_sec + 1)
if reward:
t = np.arange(5, episode_sec + 1, 5)
import importlib.resources
import numpy as np
from hexrd import constants
from hexrd import symmetry, symbols
from hexrd.spacegroup import Allowed_HKLs
from hexrd.ipfcolor import sphere_sector, colorspace
from hexrd.valunits import valWUnit
import hexrd.resources
import warnings
import h5py
from pathlib import Path
from scipy.interpolate import interp1d
import time
eps = constants.sqrt_epsf
class unitcell:
'''
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 10/09/2018 SS 1.0 original
@DATE: 10/15/2018 SS 1.1 added space group handling
>> @DETAILS: this is the unitcell class
'''
# initialize the unitcell class
# need lattice parameters and space group data from HDF5 file
def __init__(self, lp, sgnum,
atomtypes, charge,
atominfo,
U, dmin, beamenergy,
sgsetting=0):
self._tstart = time.time()
self.pref = 0.4178214
self.atom_type = atomtypes
self.chargestates = charge
self.atom_pos = atominfo
self._dmin = dmin
self.lparms = lp
self.U = U
'''
initialize interpolation from table for anomalous scattering
'''
self.InitializeInterpTable()
'''
sets x-ray energy
calculate wavelength
also calculates anomalous form factors for xray scattering
'''
self.voltage = beamenergy * 1000.0
'''
calculate symmetry
'''
self.sgsetting = sgsetting
self.sgnum = sgnum
self._tstop = time.time()
self.tinit = self._tstop - self._tstart
def GetPgLg(self):
'''
simple subroutine to get point and laue groups
to maintain consistency for planedata initialization
in the materials class
'''
for k in list(_pgDict.keys()):
if self.sgnum in k:
pglg = _pgDict[k]
self._pointGroup = pglg[0]
self._laueGroup = pglg[1]
self._supergroup = pglg[2]
self._supergroup_laue = pglg[3]
def CalcWavelength(self):
# wavelength in nm
self.wavelength = constants.cPlanck * \
constants.cLight / \
constants.cCharge / \
self.voltage
self.wavelength *= 1e9
self.CalcAnomalous()
def calcBetaij(self):
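# convert the anisotropic displacement parameters U_ij to beta_ij via
# beta_ij = 2*pi^2 * a_i* a_j* U_ij, where a_i* are the reciprocal cell lengths
# (their pairwise products are stored in self._aij by calcmatrices)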
self.betaij = np.zeros([self.atom_ntype, 3, 3])
for i in range(self.U.shape[0]):
U = self.U[i, :]
self.betaij[i, :, :] = np.array([[U[0], U[3], U[4]],
[U[3], U[1], U[5]],
[U[4], U[5], U[2]]])
self.betaij[i, :, :] *= 2. * np.pi**2 * self._aij
def calcmatrices(self):
a = self.a
b = self.b
c = self.c
alpha = np.radians(self.alpha)
beta = np.radians(self.beta)
gamma = np.radians(self.gamma)
ca = np.cos(alpha)
cb = np.cos(beta)
cg = np.cos(gamma)
sa = np.sin(alpha)
sb = np.sin(beta)
sg = np.sin(gamma)
tg = np.tan(gamma)
'''
direct metric tensor
'''
self._dmt = np.array([[a**2, a*b*cg, a*c*cb],
[a*b*cg, b**2, b*c*ca],
[a*c*cb, b*c*ca, c**2]])
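# unit-cell volume: V = sqrt(det(g)), with g the direct metric tensor defined above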
self._vol = np.sqrt(np.linalg.det(self.dmt))
if(self.vol < 1e-5):
warnings.warn('unitcell volume is suspiciously small')
'''
reciprocal metric tensor
'''
self._rmt = np.linalg.inv(self.dmt)
'''
direct structure matrix
'''
self._dsm = np.array([[a, b*cg, c*cb],
[0., b*sg, -c*(cb*cg - ca)/sg],
[0., 0., self.vol/(a*b*sg)]])
self._dsm[np.abs(self._dsm) < eps] = 0.
'''
reciprocal structure matrix
'''
self._rsm = np.array([[1./a, 0., 0.],
[-1./(a*tg), 1./(b*sg), 0.],
[b*c*(cg*ca - cb)/(self.vol*sg),
a*c*(cb*cg - ca)/(self.vol*sg),
a*b*sg/self.vol]])
self._rsm[np.abs(self._rsm) < eps] = 0.
ast = self.CalcLength([1, 0, 0], 'r')
bst = self.CalcLength([0, 1, 0], 'r')
cst = self.CalcLength([0, 0, 1], 'r')
self._aij = np.array([[ast**2, ast*bst, ast*cst],
[bst*ast, bst**2, bst*cst],
[cst*ast, cst*bst, cst**2]])
''' transform between any crystal space to any other space.
choices are 'd' (direct), 'r' (reciprocal) and 'c' (cartesian)'''
def TransSpace(self, v_in, inspace, outspace):
if(inspace == 'd'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.dmt)
elif(outspace == 'c'):
v_out = np.dot(self.dsm, v_in)
else:
raise ValueError(
'inspace in ''d'' but outspace can''t be identified')
elif(inspace == 'r'):
if(outspace == 'd'):
v_out = np.dot(v_in, self.rmt)
elif(outspace == 'c'):
v_out = np.dot(self.rsm, v_in)
else:
raise ValueError(
'inspace in ''r'' but outspace can''t be identified')
elif(inspace == 'c'):
if(outspace == 'r'):
v_out = np.dot(v_in, self.rsm)
elif(outspace == 'd'):
v_out = np.dot(v_in, self.dsm)
else:
raise ValueError(
'inspace in ''c'' but outspace can''t be identified')
else:
raise ValueError('incorrect inspace argument')
return v_out
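# Example usage (hypothetical unitcell instance `uc`): convert a direct-space lattice vector
# to cartesian coordinates, e.g. v_c = uc.TransSpace(np.array([1., 0., 0.]), 'd', 'c')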
''' calculate dot product of two vectors in any space 'd' 'r' or 'c' '''
def CalcDot(self, u, v, space):
if(space == 'd'):
dot = np.dot(u, np.dot(self.dmt, v))
elif(space == 'r'):
dot = np.dot(u, np.dot(self.rmt, v))
elif(space == 'c'):
dot = np.dot(u, v)
else:
raise ValueError('space is unidentified')
return dot
''' calculate the length (norm) of a vector in any space 'd' 'r' or 'c' '''
def CalcLength(self, u, space):
if(space == 'd'):
vlen = np.sqrt(np.dot(u, np.dot(self.dmt, u)))
elif(space == 'r'):
vlen = np.sqrt(np.dot(u, np.dot(self.rmt, u)))
elif(space == 'c'):
vlen = np.linalg.norm(u)
else:
raise ValueError('incorrect space argument')
return vlen
''' normalize vector in any space 'd' 'r' or 'c' '''
def NormVec(self, u, space):
ulen = self.CalcLength(u, space)
return u/ulen
''' calculate angle between two vectors in any space'''
def CalcAngle(self, u, v, space):
ulen = self.CalcLength(u, space)
vlen = self.CalcLength(v, space)
dot = self.CalcDot(u, v, space)/ulen/vlen
angle = np.arccos(dot)
return angle
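# e.g. CalcAngle([1, 0, 0], [0, 1, 0], 'c') returns pi/2 for orthonormal cartesian axes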
''' calculate cross product between two vectors in any space.
cross product of two vectors in direct space is a vector in
reciprocal space
cross product of two vectors in reciprocal space is a vector in
direct space
the outspace specifies if a conversion needs to be made
@NOTE: vol_divide is the switch (True/False) which either turns division
by the volume of the unit cell on or off.'''
def CalcCross(self, p, q, inspace, outspace, vol_divide=False):
iv = 0
if(vol_divide):
vol = self.vol
else:
vol = 1.0
pxq = np.array([p[1]*q[2]-p[2]*q[1],
p[2]*q[0]-p[0]*q[2],
p[0]*q[1]-p[1]*q[0]])
if(inspace == 'd'):
'''
cross product vector is in reciprocal space
and can be converted to direct or cartesian space
'''
pxq *= vol
if(outspace == 'r'):
pass
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'r', 'd')
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'r', 'c')
else:
raise ValueError(
'inspace is ''d'' but outspace is unidentified')
elif(inspace == 'r'):
'''
cross product vector is in direct space and
can be converted to any other space
'''
pxq /= vol
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'd', 'r')
elif(outspace == 'd'):
pass
elif(outspace == 'c'):
pxq = self.TransSpace(pxq, 'd', 'c')
else:
raise ValueError(
'inspace is ''r'' but outspace is unidentified')
elif(inspace == 'c'):
'''
cross product is already in cartesian space so no
volume factor is involved. can be converted to any
other space too
'''
if(outspace == 'r'):
pxq = self.TransSpace(pxq, 'c', 'r')
elif(outspace == 'd'):
pxq = self.TransSpace(pxq, 'c', 'd')
elif(outspace == 'c'):
pass
else:
raise ValueError(
'inspace is ''c'' but outspace is unidentified')
else:
raise ValueError('inspace is unidentified')
return pxq
def GenerateRecipPGSym(self):
self.SYM_PG_r = self.SYM_PG_d[0, :, :]
self.SYM_PG_r = np.broadcast_to(self.SYM_PG_r, [1, 3, 3])
self.SYM_PG_r_laue = self.SYM_PG_d[0, :, :]
self.SYM_PG_r_laue = np.broadcast_to(self.SYM_PG_r_laue, [1, 3, 3])
for i in range(1, self.npgsym):
g = self.SYM_PG_d[i, :, :]
g = np.dot(self.dmt, np.dot(g, self.rmt))
g = np.round(np.broadcast_to(g, [1, 3, 3]))
self.SYM_PG_r = np.concatenate((self.SYM_PG_r, g))
for i in range(1, self.SYM_PG_d_laue.shape[0]):
g = self.SYM_PG_d_laue[i, :, :]
g = np.dot(self.dmt, np.dot(g, self.rmt))
g = np.round(np.broadcast_to(g, [1, 3, 3]))
self.SYM_PG_r_laue = np.concatenate((self.SYM_PG_r_laue, g))
self.SYM_PG_r = self.SYM_PG_r.astype(np.int32)
self.SYM_PG_r_laue = self.SYM_PG_r_laue.astype(np.int32)
def GenerateCartesianPGSym(self):
'''
use the direct point group symmetries to generate the
symmetry operations in the cartesian frame. this is used
to reduce directions to the standard stereographic triangle
'''
self.SYM_PG_c = []
self.SYM_PG_c_laue = []
for sop in self.SYM_PG_d:
self.SYM_PG_c.append(np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_c = np.array(self.SYM_PG_c)
self.SYM_PG_c[np.abs(self.SYM_PG_c) < eps] = 0.
if(self._pointGroup == self._laueGroup):
self.SYM_PG_c_laue = self.SYM_PG_c
else:
for sop in self.SYM_PG_d_laue:
self.SYM_PG_c_laue.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_c_laue = np.array(self.SYM_PG_c_laue)
self.SYM_PG_c_laue[np.abs(self.SYM_PG_c_laue) < eps] = 0.
'''
use the point group symmetry of the supergroup
to generate the equivalent operations in the
cartesian reference frame
SS 11/23/2020 added supergroup symmetry operations
SS 11/24/2020 fix monoclinic groups separately since
the supergroup for monoclinic is orthorhombic
'''
supergroup = self._supergroup
sym_supergroup = symmetry.GeneratePGSYM(supergroup)
supergroup_laue = self._supergroup_laue
sym_supergroup_laue = symmetry.GeneratePGSYM(supergroup_laue)
if((self.latticeType == 'monoclinic' or
self.latticeType == 'triclinic')):
'''
for monoclinic groups c2 and c2h, the supergroups are
orthorhombic, so no need to convert from direct to
cartesian as they are identical
'''
self.SYM_PG_supergroup = sym_supergroup
self.SYM_PG_supergroup_laue = sym_supergroup_laue
else:
self.SYM_PG_supergroup = []
self.SYM_PG_supergroup_laue = []
for sop in sym_supergroup:
self.SYM_PG_supergroup.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_supergroup = np.array(self.SYM_PG_supergroup)
self.SYM_PG_supergroup[np.abs(self.SYM_PG_supergroup) < eps] = 0.
for sop in sym_supergroup_laue:
self.SYM_PG_supergroup_laue.append(
np.dot(self.dsm, np.dot(sop, self.rsm.T)))
self.SYM_PG_supergroup_laue = np.array(self.SYM_PG_supergroup_laue)
self.SYM_PG_supergroup_laue[np.abs(
self.SYM_PG_supergroup_laue) < eps] = 0.
'''
the standard setting for the monoclinic system has the b-axis aligned
with the 2-fold axis. this needs to be accounted for when reduction to
the standard stereographic triangle is performed. the simplest way is to
rotate all symmetry elements by 90 degrees about the x-axis
the supergroups for the monoclinic groups are orthorhombic so they need
not be rotated as they have the c* axis already aligned with the z-axis
SS 12/10/2020
'''
if(self.latticeType == 'monoclinic'):
om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
for i, s in enumerate(self.SYM_PG_c):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_c[i, :, :] = ss
for i, s in enumerate(self.SYM_PG_c_laue):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_c_laue[i, :, :] = ss
'''
for the triclinic group c1, the supergroup is the monoclinic group m,
therefore we need to rotate the mirror to be perpendicular to the z-axis.
the same shouldn't be done for the group ci, since the supergroup is just the
triclinic group c1!!
SS 12/10/2020
'''
if(self._pointGroup == 'c1'):
om = np.array([[1., 0., 0.], [0., 0., 1.], [0., -1., 0.]])
for i, s in enumerate(self.SYM_PG_supergroup):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_supergroup[i, :, :] = ss
for i, s in enumerate(self.SYM_PG_supergroup_laue):
ss = np.dot(om, np.dot(s, om.T))
self.SYM_PG_supergroup_laue[i, :, :] = ss
def CalcOrbit(self, v, reduceToUC=True):
"""
@date 03/04/2021 SS 1.0 original
@details calculate the equivalent position for the
space group symmetry. this function will replace the
code in the CalcPositions subroutine.
@params v is the factional coordinates in direct space
reduceToUC reduces the position to the
fundamental fractional unit cell (0-1)
"""
asym_pos = []
n = 1
if v.shape[0] != 3:
raise RuntimeError("fractional coordinate is not 3-d")
r = v
# using Seitz (augmented rotation + translation matrix) notation
r = np.hstack((r, 1.))
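# r is augmented with a trailing 1 (homogeneous coordinate), presumably so that the 4x4
# space-group (rotation + translation) operators can be applied to it directly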
import pandas as pd
import numpy as np
import os
import math
import random
import pickle
import time
import feather
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import prismx as px
from prismx.utils import read_gmt, load_correlation, loadPrediction
from prismx.prediction import correlation_scores, loadPredictionsRange
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from sklearn.preprocessing import MinMaxScaler
from sklearn import mixture
from sklearn.metrics.cluster import homogeneity_score
from scipy import stats
gene_auc = pd.read_csv("test_data/gene_auc.tsv", sep="\t", index_col=0)
set_auc = pd.read_csv("test_data/set_auc.tsv", sep="\t", index_col=0)
diff = gene_auc.iloc[:,5] - gene_auc.iloc[:,0]
diff.sort_values(0,ascending=False).iloc[0:20]
diff = set_auc.iloc[:,5] - set_auc.iloc[:,0]
diff.sort_values(0,ascending=False).iloc[0:20]
nx = "GO_Biological_Process_2018"
set_auc.loc[nx,:]
gene_auc.loc[nx,:]
p1 = pd.read_feather("prediction_folder_300_umap/prediction_0.f").set_index("index")
correlationFolder = "correlation_folder_300"
predictionFolder = "prediction_folder_300_umap"
outfolder = "prismxresult"
clustn = 300
libs = px.list_libraries()
gmt_file = px.load_library(libs[111], overwrite=True)
outname = libs[111]
#px.predict_gmt("gobp_model_"+str(clustn)+".pkl", gmt_file, correlationFolder, predictionFolder, outfolder, libs[111], step_size=200, intersect=True, verbose=True)
gop = pd.read_feather("prismxresult/GO_Biological_Process_2018.f")
gop = gop.set_index("index")
geneAUC, setAUC = px.benchmarkGMTfast(gmt_file, correlationFolder, predictionFolder, outfolder+"/"+outname+".f", intersect=True, verbose=True)
diff_gene = geneAUC.iloc[:,1]-geneAUC.iloc[:,0]
diff_set = setAUC.iloc[:,1]-setAUC.iloc[:,0]
diff_set.sort_values(0)
dic, rdic, ugenes = px.read_gmt(gmt_file, background_genes=diff_gene.index)
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
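# e.g. intersection([1, 2, 3], [2, 3, 4]) -> [2, 3]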
kk = intersection(list(dic.keys()), diff_set.index)
ll1 = []
ll2 = []
for i in range(len(kk)):
#print(kk[i]+" - "+str(diff_set.loc[kk[i]])+" - "+str(len(dic[kk[i]])))
ll1.append(diff_set.loc[kk[i]])
ll2.append(len(dic[kk[i]]))
c1 = np.corrcoef(ll1,ll2)
import sys
import os
import numpy
import pandas
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import seaborn as sns
import copy
from IPython.display import display
import warnings
import re
import shutil
from matplotlib import gridspec
from .._toolboxPath import toolboxPath
from ..objects import MSDataset
from pyChemometrics.ChemometricsPCA import ChemometricsPCA
from ..plotting import plotTIC, histogram, plotLRTIC, jointplotRSDvCorrelation, plotRSDs, plotIonMap, plotBatchAndROCorrection, plotScores, plotLoadings, plotTargetedFeatureDistribution
from ._generateSampleReport import _generateSampleReport
from ..utilities import generateLRmask, rsd
from ..utilities._internal import _vcorrcoef
from ..utilities._internal import _copyBackingFiles as copyBackingFiles
from ..enumerations import AssayRole, SampleType
from ._generateBasicPCAReport import generateBasicPCAReport
from ..reports._finalReportPeakPantheR import _finalReportPeakPantheR
from ..utilities._filters import blankFilter
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from ..__init__ import __version__ as version
def _generateReportMS(dataset, reportType, withExclusions=False, withArtifactualFiltering=None, destinationPath=None,
msDataCorrected=None, pcaModel=None, batch_correction_window=11):
"""
Summarise different aspects of an MS dataset
Generate reports for ``feature summary``, ``correlation to dilution``, ``batch correction assessment``, ``batch correction summary``, ``feature selection``, ``final report``, ``final report abridged``, or ``final report targeted abridged``
* **'feature summary'** Generates feature summary report, plots figures including those for feature abundance, sample TIC and acquisition structure, correlation to dilution, RSD and an ion map.
* **'correlation to dilution'** Generates a more detailed report on correlation to dilution, broken down by batch subset with TIC, detector voltage, a summary, and heatmap indicating potential saturation or other issues.
* **'batch correction assessment'** Generates a report before batch correction showing TIC overall and intensity and batch correction fit for a subset of features, to aid specification of batch start and end points.
* **'batch correction summary'** Generates a report post batch correction with pertinent figures (TIC, RSD etc.) before and after.
* **'feature selection'** Generates a summary of the number of features passing feature selection (with current settings as defined in the SOP), and a heatmap showing how this number would be affected by changes to RSD and correlation to dilution thresholds.
* **'final report'** Generates a summary of the final dataset, lists sample numbers present, a selection of figures summarising dataset quality, and a final list of samples missing from acquisition.
* **'final report abridged'** Generates an abridged summary of the final dataset, lists sample numbers present, a selection of figures summarising dataset quality, and a final list of samples missing from acquisition.
* **'final report targeted abridged'** Generates an abridged summary of the final targeted (peakPantheR) dataset, lists sample numbers present, a selection of figures summarising dataset quality, feature distributions, and a final list of samples missing from acquisition.
:param MSDataset dataset: MSDataset to report on
:param str reportType: Type of report to generate, one of ``feature summary``, ``correlation to dilution``, ``batch correction``, ``feature selection``, ``final report``, ``final report abridged``, or ``final report targeted abridged``
:param bool withExclusions: If ``True``, only report on features and samples not masked by the sample and feature masks
:param None or bool withArtifactualFiltering: If ``None`` use the value from ``Attributes['artifactualFilter']``. If ``True`` apply artifactual filtering to the ``feature selection`` report and ``final report``
:param destinationPath: If ``None`` plot interactively, otherwise save report to the path specified
:type destinationPath: None or str
:param MSDataset msDataCorrected: Only if ``batch correction``, if msDataCorrected included will generate report post correction
:param PCAmodel pcaModel: Only if ``final report``, if PCAmodel object is available PCA scores plots coloured by sample type will be added to report
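Example (illustrative sketch only; the dataset object and destination path are assumed, not defined here)::

    >>> # msData = nPYc.MSDataset(...)  # hypothetical dataset built elsewhere
    >>> # _generateReportMS(msData, 'feature summary', destinationPath='reports/')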
"""
acceptableOptions = {'feature summary', 'correlation to dilution',
'batch correction assessment',
'batch correction summary', 'feature selection',
'final report', 'final report abridged',
'final report peakpanther'}
# Check inputs
if not isinstance(dataset, MSDataset):
raise TypeError('msData must be an instance of MSDataset')
if not isinstance(reportType, str) & (reportType.lower() in acceptableOptions):
raise ValueError('reportType must be one of: ' + str(acceptableOptions))
if not isinstance(withExclusions, bool):
raise TypeError('withExclusions must be a bool')
if withArtifactualFiltering is not None:
if not isinstance(withArtifactualFiltering, bool):
raise TypeError('withArtifactualFiltering must be a bool')
if withArtifactualFiltering is None:
withArtifactualFiltering = dataset.Attributes['featureFilters']['artifactualFilter']
# if self.Attributes['artifactualFilter'] is False, artifactual filtering can't/shouldn't be applied.
# However, if self.Attributes['artifactualFilter'] is True, the user can still choose not to apply it (withArtifactualFiltering=False).
if (withArtifactualFiltering is True) & (dataset.Attributes['featureFilters']['artifactualFilter'] is False):
warnings.warn("Warning: Attributes['featureFilters']['artifactualFilter'] set to \'False\', artifactual filtering cannot be applied.")
withArtifactualFiltering = False
if destinationPath is not None:
if not isinstance(destinationPath, str):
raise TypeError('destinationPath must be a string')
if msDataCorrected is not None:
if not isinstance(msDataCorrected, MSDataset):
raise TypeError('msDataCorrected must be an instance of nPYc.MSDataset')
if pcaModel is not None:
if not isinstance(pcaModel, ChemometricsPCA):
raise TypeError('pcaModel must be a ChemometricsPCA object')
sns.set_style("whitegrid")
# Create directory to save destinationPath
if destinationPath:
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if not os.path.exists(os.path.join(destinationPath, 'graphics')):
os.makedirs(os.path.join(destinationPath, 'graphics'))
# Apply sample/feature masks if exclusions to be applied
msData = copy.deepcopy(dataset)
if withExclusions:
msData.applyMasks()
if reportType.lower() == 'feature summary':
_featureReport(msData, destinationPath)
elif reportType.lower() == 'correlation to dilution':
_featureCorrelationToDilutionReport(msData, destinationPath)
elif reportType.lower() == 'feature selection':
_featureSelectionReport(msData, destinationPath)
elif reportType.lower() == 'batch correction assessment':
_batchCorrectionAssessmentReport(msData, destinationPath)
elif reportType.lower() == 'batch correction summary':
_batchCorrectionSummaryReport(msData, msDataCorrected, destinationPath)
elif (reportType.lower() == 'final report') or (reportType.lower() == 'final report abridged'):
_finalReport(msData, destinationPath, pcaModel, reportType=reportType)
elif (reportType.lower() == 'final report peakpanther'):
_finalReportPeakPantheR(msData, destinationPath=destinationPath)
def _finalReport(dataset, destinationPath=None, pcaModel=None, reportType='final report'):
"""
Generates a summary of the final dataset, lists sample numbers present, a selection of figures summarising dataset quality, and a final list of samples missing from acquisition.
"""
# Create save directory if required
if destinationPath is not None:
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if not os.path.exists(os.path.join(destinationPath, 'graphics')):
os.makedirs(os.path.join(destinationPath, 'graphics'))
graphicsPath = os.path.join(destinationPath, 'graphics', 'report_finalSummary')
if not os.path.exists(graphicsPath):
os.makedirs(graphicsPath)
else:
graphicsPath = None
saveAs = None
# TODO: change how this is done. If targeted assay, use compound name to label RSD plots
if (hasattr(dataset.featureMetadata, 'cpdName')):
featureName = 'cpdName'
featName=True
figureSize=(dataset.Attributes['figureSize'][0], dataset.Attributes['figureSize'][1] * (dataset.noFeatures / 35))
else:
featureName = 'Feature Name'
featName=False
figureSize=dataset.Attributes['figureSize']
# Define sample masks
SSmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudySample) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.Assay)
SPmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
ERmask = (dataset.sampleMetadata['SampleType'].values == SampleType.ExternalReference) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
LRmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.LinearityReference)
# Set up template item and save required info
item = dict()
item['Name'] = dataset.name
item['ReportType'] = 'feature summary'
item['Nfeatures'] = dataset.intensityData.shape[1]
item['Nsamples'] = dataset.intensityData.shape[0]
item['SScount'] = str(sum(SSmask))
item['SPcount'] = str(sum(SPmask))
item['ERcount'] = str(sum(ERmask))
item['LRcount'] = str(sum(LRmask))
item['corrMethod'] = dataset.Attributes['corrMethod']
figNo = 1
# Mean intensities of Study Pool samples (for future plotting segmented by intensity)
meanIntensitiesSP = numpy.log(numpy.nanmean(dataset.intensityData[SPmask, :], axis=0))
meanIntensitiesSP[numpy.mean(dataset.intensityData[SPmask, :], axis=0) == 0] = numpy.nan
meanIntensitiesSP[numpy.isinf(meanIntensitiesSP)] = numpy.nan
# Table 1: Sample summary
# Generate sample summary
sampleSummary = _generateSampleReport(dataset, withExclusions=True, destinationPath=None, returnOutput=True)
# Tidy table for final report format
sampleSummary['Acquired'].drop('Marked for Exclusion', inplace=True, axis=1)
if hasattr(sampleSummary['Acquired'], 'Already Excluded'):
sampleSummary['Acquired'].rename(columns={'Already Excluded': 'Excluded'}, inplace=True)
sampleSummary['isFinalReport'] = True
if 'StudySamples Exclusion Details' in sampleSummary:
sampleSummary['studySamplesExcluded'] = True
else:
sampleSummary['studySamplesExcluded'] = False
item['sampleSummary'] = sampleSummary
if not destinationPath:
print('Sample Summary')
print('\nTable 1: Summary of samples present')
display(sampleSummary['Acquired'])
print('\nDetails of any missing/excluded study samples given at the end of the report\n')
# Table 2: Feature Selection parameters
FeatureSelectionTable = pandas.DataFrame(
data=['yes', dataset.Attributes['corrMethod'], dataset.Attributes['corrThreshold']],
index=['Correlation to Dilution', 'Correlation to Dilution: Method', 'Correlation to Dilution: Threshold'],
columns=['Value Applied'])
if sum(dataset.corrExclusions) != dataset.noSamples:
temp = ', '.join(dataset.sampleMetadata.loc[dataset.corrExclusions == False, 'Sample File Name'].values)
FeatureSelectionTable = FeatureSelectionTable.append(
pandas.DataFrame(data=temp, index=['Correlation to Dilution: Sample Exclusions'], columns=['Value Applied']))
else:
FeatureSelectionTable = FeatureSelectionTable.append(
pandas.DataFrame(data=['none'], index=['Correlation To Dilution: Sample Exclusions'], columns=['Value Applied']))
FeatureSelectionTable = FeatureSelectionTable.append(
pandas.DataFrame(data=['yes', dataset.Attributes['filterParameters']['rsdThreshold'], 'yes'],
index=['Relative Standard Deviation (RSD)', 'RSD of SR Samples: Threshold',
'RSD of SS Samples > RSD of SR Samples'], columns=['Value Applied']))
if 'blankFilter' in dataset.Attributes:
if dataset.Attributes['featureFilters']['blankFilter'] == True:
FeatureSelectionTable = FeatureSelectionTable.append(
pandas.DataFrame(data=['yes'], index=['Blank Filtering'], columns=['Value Applied']))
if (dataset.Attributes['featureFilters']['artifactualFilter'] == True):
FeatureSelectionTable = FeatureSelectionTable.append(pandas.DataFrame(
data=['yes', dataset.Attributes['filterParameters']['deltaMzArtifactual'], dataset.Attributes['filterParameters']['overlapThresholdArtifactual'],
dataset.Attributes['filterParameters']['corrThresholdArtifactual']],
index=['Artifactual Filtering', 'Artifactual Filtering: Delta m/z',
'Artifactual Filtering: Overlap Threshold', 'Artifactual Filtering: Correlation Threshold'],
columns=['Value Applied']))
item['FeatureSelectionTable'] = FeatureSelectionTable
nBatchCollect = len((numpy.unique(dataset.sampleMetadata['Batch'].values[~numpy.isnan(dataset.sampleMetadata['Batch'].values)])).astype(int))
if nBatchCollect == 1:
item['batchesCollect'] = '1 batch'
else:
item['batchesCollect'] = str(nBatchCollect) + ' batches'
if hasattr(dataset, 'fit'):
nBatchCorrect = len((numpy.unique(dataset.sampleMetadata['Correction Batch'].values[~numpy.isnan(dataset.sampleMetadata['Correction Batch'].values)])).astype(int))
if nBatchCorrect == 1:
item['batchesCorrect'] = 'Run-order and batch correction applied (LOWESS regression fitted to SR samples in 1 batch)'
else:
item['batchesCorrect'] = 'Run-order and batch correction applied (LOWESS regression fitted to SR samples in ' + str(nBatchCorrect) + ' batches)'
else:
item['batchesCorrect'] = 'Run-order and batch correction not required'
if ('Acquired Time' in dataset.sampleMetadata.columns):
start = pandas.to_datetime(str(dataset.sampleMetadata['Acquired Time'].loc[dataset.sampleMetadata['Run Order'] == min(dataset.sampleMetadata['Run Order'][dataset.sampleMask])].values[0]))
end = pandas.to_datetime(str(dataset.sampleMetadata['Acquired Time'].loc[dataset.sampleMetadata['Run Order'] == max(dataset.sampleMetadata['Run Order'][dataset.sampleMask])].values[0]))
item['start'] = start.strftime('%d/%m/%y')
item['end'] = end.strftime('%d/%m/%y')
else:
item['start'] = 'unknown'
item['end'] = 'unknown'
if not destinationPath:
print('\nFeature Summary')
print('\nSamples acquired in ' + item['batchesCollect'] + ' between ' + item['start'] + ' and ' + item['end'])
print(item['batchesCorrect'])
print('\nTable 2: Features selected based on the following criteria:')
display(item['FeatureSelectionTable'])
# ONLY 'final report': plot TIC by batch and TIC
if (reportType.lower() == 'final report'):
if ('Acquired Time' in dataset.sampleMetadata.columns) and ('Run Order' in dataset.sampleMetadata.columns):
# Figure 1: Acquisition Structure, TIC by sample and batch
if destinationPath:
item['finalTICbatches'] = os.path.join(graphicsPath,
item['Name'] + '_finalTICbatches.' + dataset.Attributes[
'figureFormat'])
saveAs = item['finalTICbatches']
else:
print('Figure ' + str(figNo) + ': Acquisition Structure')
figNo = figNo + 1
plotTIC(dataset,
savePath=saveAs,
addBatchShading=True,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
# Figure 2: Final TIC
if destinationPath:
item['finalTIC'] = os.path.join(graphicsPath,
item['Name'] + '_finalTIC.' + dataset.Attributes['figureFormat'])
saveAs = item['finalTIC']
else:
print('Figure ' + str(figNo) + ': Total Ion Count (TIC) for all samples and all features in final dataset.')
figNo = figNo + 1
plotTIC(dataset,
addViolin=True,
title='',
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('Figure ' + str(figNo) + ': Acquisition Structure')
print('\x1b[31;1m Acquired Time/Run Order data not available to plot\n\033[0;0m')
print('Figure ' + str(figNo+1) + ': Total Ion Count (TIC) for all samples and all features in final dataset.')
print('\x1b[31;1m Acquired Time/Run Order data not available to plot\n\033[0;0m')
figNo = figNo+2
# Figure: Histogram of RSD in study pool samples
if destinationPath:
item['finalRsdHist'] = os.path.join(graphicsPath,item['Name'] + '_rsdSP.' + dataset.Attributes['figureFormat'])
saveAs = item['finalRsdHist']
else:
print('Figure ' + str(figNo) + ': Relative Standard Deviation (RSD) histogram for study reference samples and all features in final dataset, segmented by abundance percentiles.')
figNo = figNo+1
histogram(dataset.rsdSP,
xlabel='RSD',
histBins=dataset.Attributes['histBins'],
quantiles=dataset.Attributes['quantiles'],
inclusionVector=numpy.exp(meanIntensitiesSP),
logx=False,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
# Figure: Distribution of RSDs in SP and SS
if destinationPath:
item['finalRSDdistributionFigure'] = os.path.join(graphicsPath, item['Name'] + '_finalRSDdistributionFigure.' +
dataset.Attributes['figureFormat'])
saveAs = item['finalRSDdistributionFigure']
else:
print('Figure ' + str(figNo) + ': Relative Standard Deviation (RSD) distribution for all samples and all features in final dataset (by sample type)')
figNo = figNo+1
plotRSDs(dataset,
featureName=featureName,
ratio=False,
logx=True,
color='matchReport',
featName=featName,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=figureSize)
# Figure: Histogram of log mean abundance by sample type
if destinationPath:
item['finalFeatureIntensityHist'] = os.path.join(graphicsPath, item['Name'] + '_finalFeatureIntensityHist.' +
dataset.Attributes['figureFormat'])
saveAs = item['finalFeatureIntensityHist']
else:
print('Figure ' + str(figNo) + ': Feature intensity histogram for all samples and all features in final dataset (by sample type)')
figNo = figNo+1
_plotAbundanceBySampleType(dataset.intensityData, SSmask, SPmask, ERmask, saveAs, dataset)
# Figure: Ion map
if 'm/z' in dataset.featureMetadata.columns and 'Retention Time' in dataset.featureMetadata.columns:
if destinationPath:
item['finalIonMap'] = os.path.join(graphicsPath, item['Name'] + '_finalIonMap.' + dataset.Attributes['figureFormat'])
saveAs = item['finalIonMap']
else:
print('Figure ' + str(figNo) + ': Ion map of all features (coloured by log median intensity).')
figNo = figNo+1
plotIonMap(dataset,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('No Retention Time and m/z information, unable to plot the ion map.\n')
# ONLY 'final report targeted abridged' feature distributions (violin plots)
if (reportType.lower() == 'final report targeted abridged'):
figuresFeatureDistribution = OrderedDict()
# Plot distributions for each feature
temp = dict()
if destinationPath:
temp['FeatureConcentrationDistribution'] = os.path.join(graphicsPath, item['Name'] + '_FeatureConcentrationDistribution_')
saveAs = temp['FeatureConcentrationDistribution']
else:
print('Figure ' + str(figNo) + ': Relative concentration distributions, split by sample types')
figNo = figNo+1
figuresFeatureDistribution = plotTargetedFeatureDistribution(
dataset,
logx=False,
figures=figuresFeatureDistribution,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
for key in figuresFeatureDistribution:
if destinationPath and os.path.join(destinationPath, 'graphics') in str(figuresFeatureDistribution[key]):
figuresFeatureDistribution[key] = re.sub('.*graphics', 'graphics', figuresFeatureDistribution[key])
item['FeatureConcentrationDistribution'] = figuresFeatureDistribution
# ONLY 'final report' and ONLY if pcaModel available
if ((reportType.lower() == 'final report') and (pcaModel)):
if not 'Plot Sample Type' in dataset.sampleMetadata.columns:
dataset.sampleMetadata.loc[~SSmask & ~SPmask & ~ERmask, 'Plot Sample Type'] = 'Sample'
dataset.sampleMetadata.loc[SSmask, 'Plot Sample Type'] = 'Study Sample'
dataset.sampleMetadata.loc[SPmask, 'Plot Sample Type'] = 'Study Reference'
dataset.sampleMetadata.loc[ERmask, 'Plot Sample Type'] = 'Long-Term Reference'
if destinationPath:
pcaPath = destinationPath
else:
pcaPath = None
pcaModel = generateBasicPCAReport(pcaModel, dataset, figureCounter=figNo, destinationPath=pcaPath, fileNamePrefix='')
# Table 3: Summary of samples excluded
if not destinationPath:
if 'StudySamples Exclusion Details' in sampleSummary:
print('Missing/Excluded Study Samples')
print('\nTable 3: Details of missing/excluded study samples')
display(sampleSummary['StudySamples Exclusion Details'])
# Write HTML if saving
if destinationPath:
# Make paths for graphics local not absolute for use in the HTML.
for key in item:
if os.path.join(destinationPath, 'graphics') in str(item[key]):
item[key] = re.sub('.*graphics', 'graphics', item[key])
# Generate report
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(os.path.join(toolboxPath(), 'Templates')))
if reportType.lower() == 'final report':
template = env.get_template('MS_FinalSummaryReport.html')
elif reportType.lower() == 'final report abridged':
template = env.get_template('MS_FinalSummaryReport_Abridged.html')
elif reportType.lower() == 'final report targeted abridged':
template = env.get_template('MS_Targeted_FinalSummaryReport_Abridged.html')
filename = os.path.join(destinationPath, dataset.name + '_report_finalSummary.html')
f = open(filename,'w')
f.write(template.render(item=item,
attributes=dataset.Attributes,
version=version,
graphicsPath=graphicsPath,
pcaPlots=pcaModel))
f.close()
copyBackingFiles(toolboxPath(), os.path.join(destinationPath, 'graphics'))
return None
def _featureReport(dataset, destinationPath=None):
"""
Generates feature summary report, plots figures including those for feature abundance, sample TIC and acquisition structure, correlation to dilution, RSD and an ion map.
"""
if (hasattr(dataset.featureMetadata, 'cpdName')):
featureName = 'cpdName'
featName=True
figureSize=(dataset.Attributes['figureSize'][0], dataset.Attributes['figureSize'][1] * (dataset.noFeatures / 35))
else:
featureName = 'Feature Name'
featName=False
figureSize=dataset.Attributes['figureSize']
item = dict()
item['Name'] = dataset.name
item['ReportType'] = 'feature summary'
item['Nfeatures'] = dataset.intensityData.shape[1]
item['Nsamples'] = dataset.intensityData.shape[0]
# Define sample masks
SSmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudySample) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.Assay)
SPmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
ERmask = (dataset.sampleMetadata['SampleType'].values == SampleType.ExternalReference) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
try:
LRmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.LinearityReference)
item['LRcount'] = str(sum(LRmask))
except KeyError:
pass
# Set up template item and save required info
item['SScount'] = str(sum(SSmask))
item['SPcount'] = str(sum(SPmask))
item['ERcount'] = str(sum(ERmask))
item['corrMethod'] = dataset.Attributes['corrMethod']
##
# Report stats
##
if destinationPath is not None:
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if not os.path.exists(os.path.join(destinationPath, 'graphics')):
os.makedirs(os.path.join(destinationPath, 'graphics'))
graphicsPath = os.path.join(destinationPath, 'graphics', 'report_featureSummary')
if not os.path.exists(graphicsPath):
os.makedirs(graphicsPath)
else:
graphicsPath = None
saveAs = None
# Generate correlation to dilution for each batch subset - plot TIC and histogram of correlation to dilution
# Mean intensities of Study Pool samples (for future plotting segmented by intensity)
meanIntensitiesSP = numpy.log(numpy.nanmean(dataset.intensityData[SPmask, :], axis=0))
meanIntensitiesSP[numpy.mean(dataset.intensityData[SPmask, :], axis=0) == 0] = numpy.nan
meanIntensitiesSP[numpy.isinf(meanIntensitiesSP)] = numpy.nan
# Figure 1: Histogram of log mean abundance by sample type
if destinationPath:
item['FeatureIntensityFigure'] = os.path.join(graphicsPath,
item['Name'] + '_meanIntensityFeature.' + dataset.Attributes[
'figureFormat'])
saveAs = item['FeatureIntensityFigure']
else:
print('Figure 1: Feature intensity histogram for all samples and all features in dataset (by sample type).')
_plotAbundanceBySampleType(dataset.intensityData, SSmask, SPmask, ERmask, saveAs, dataset)
if ('Acquired Time' in dataset.sampleMetadata.columns) and ('Run Order' in dataset.sampleMetadata.columns):
# Figure 2: Sample intensity TIC and distribution by sample type
if destinationPath:
item['SampleIntensityFigure'] = os.path.join(graphicsPath, item['Name'] + '_meanIntensitySample.' + dataset.Attributes[
'figureFormat'])
saveAs = item['SampleIntensityFigure']
else:
print('Figure 2: Sample Total Ion Count (TIC) and distribution (coloured by sample type).')
# TIC all samples
plotTIC(dataset,
addViolin=True,
savePath=saveAs,
title='',
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
# Figure 3: Acquisition structure and detector voltage
if destinationPath:
item['AcquisitionStructureFigure'] = os.path.join(graphicsPath,
item['Name'] + '_acquisitionStructure.' + dataset.Attributes[
'figureFormat'])
saveAs = item['AcquisitionStructureFigure']
else:
print('Figure 3: Acquisition structure (coloured by detector voltage).')
# TIC all samples
plotTIC(dataset,
addViolin=False,
addBatchShading=True,
addLineAtGaps=True,
colourByDetectorVoltage=True,
savePath=saveAs,
title='',
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('Figure 2: Sample Total Ion Count (TIC) and distribution (coloured by sample type).')
print('\x1b[31;1m Acquired Time/Run Order data not available to plot\n\033[0;0m')
print('Figure 3: Acquisition structure (coloured by detector voltage).')
print('\x1b[31;1m Acquired Time/Run Order data not available to plot\n\033[0;0m')
# Correlation to dilution figures:
if sum(LRmask) != 0:
# Figure 4: Histogram of correlation to dilution by abundance percentiles
if destinationPath:
item['CorrelationByPercFigure'] = os.path.join(graphicsPath,
item['Name'] + '_correlationByPerc.' + dataset.Attributes[
'figureFormat'])
saveAs = item['CorrelationByPercFigure']
else:
print('Figure 4: Histogram of ' + item[
'corrMethod'] + ' correlation of features to serial dilution, segmented by percentile.')
histogram(dataset.correlationToDilution,
xlabel='Correlation to Dilution',
histBins=dataset.Attributes['histBins'],
quantiles=dataset.Attributes['quantiles'],
inclusionVector=numpy.exp(meanIntensitiesSP),
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
# Figure 5: TIC of linearity reference samples
if destinationPath:
item['TICinLRfigure'] = os.path.join(graphicsPath,
item['Name'] + '_TICinLR.' + dataset.Attributes['figureFormat'])
saveAs = item['TICinLRfigure']
else:
print('Figure 5: TIC of serial dilution (SRD) samples coloured by sample dilution.')
plotLRTIC(dataset,
sampleMask=LRmask,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('Figure 4: Histogram of ' + item[
'corrMethod'] + ' correlation of features to serial dilution, segmented by percentile.')
print('Unable to calculate (no serial dilution samples present in dataset).\n')
print('Figure 5: TIC of serial dilution (SRD) samples coloured by sample dilution')
print('Unable to calculate (no serial dilution samples present in dataset).\n')
# Figure 6: Histogram of RSD in SP samples by abundance percentiles
if destinationPath:
item['RsdByPercFigure'] = os.path.join(graphicsPath,
item['Name'] + '_rsdByPerc.' + dataset.Attributes['figureFormat'])
saveAs = item['RsdByPercFigure']
else:
print(
'Figure 6: Histogram of Residual Standard Deviation (RSD) in study reference (SR) samples, segmented by abundance percentiles.')
histogram(dataset.rsdSP,
xlabel='RSD',
histBins=dataset.Attributes['histBins'],
quantiles=dataset.Attributes['quantiles'],
inclusionVector=numpy.exp(meanIntensitiesSP),
logx=False,
xlim=(0, 100),
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
# Figure 7: Scatterplot of RSD vs correlation to dilution
if sum(LRmask) != 0:
if destinationPath:
item['RsdVsCorrelationFigure'] = os.path.join(graphicsPath,
item['Name'] + '_rsdVsCorrelation.' + dataset.Attributes[
'figureFormat'])
saveAs = item['RsdVsCorrelationFigure']
else:
print('Figure 7: Scatterplot of RSD vs correlation to dilution.')
jointplotRSDvCorrelation(dataset.rsdSP,
dataset.correlationToDilution,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('Figure 7: Scatterplot of RSD vs correlation to dilution.')
print('Unable to calculate (no serial dilution samples present in dataset).\n')
if 'Peak Width' in dataset.featureMetadata.columns:
# Figure 8: Histogram of chromatographic peak width
if destinationPath:
item['PeakWidthFigure'] = os.path.join(graphicsPath,
item['Name'] + '_peakWidth.' + dataset.Attributes['figureFormat'])
saveAs = item['PeakWidthFigure']
else:
print('Figure 8: Histogram of chromatographic peak width.')
histogram(dataset.featureMetadata['Peak Width'],
xlabel='Peak Width (minutes)',
histBins=dataset.Attributes['histBins'],
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('Figure 8: Histogram of chromatographic peak width.')
print('\x1b[31;1m Peak width data not available to plot\n\033[0;0m')
# Figure 9: Residual Standard Deviation (RSD) distribution for all samples and all features in dataset (by sample type)
if destinationPath:
item['RSDdistributionFigure'] = os.path.join(graphicsPath,
item['Name'] + '_RSDdistributionFigure.' + dataset.Attributes[
'figureFormat'])
saveAs = item['RSDdistributionFigure']
else:
print('Figure 9: RSD distribution for all samples and all features in dataset (by sample type).')
plotRSDs(dataset,
featureName=featureName,
ratio=False,
logx=True,
color='matchReport',
featName=featName,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=figureSize)
# Figure 10: Ion map
if 'm/z' in dataset.featureMetadata.columns and 'Retention Time' in dataset.featureMetadata.columns:
if destinationPath:
item['IonMap'] = os.path.join(graphicsPath, item['Name'] + '_ionMap.' + dataset.Attributes['figureFormat'])
saveAs = item['IonMap']
else:
print('Figure 10: Ion map of all features (coloured by log median intensity).')
plotIonMap(dataset,
savePath=saveAs,
figureFormat=dataset.Attributes['figureFormat'],
dpi=dataset.Attributes['dpi'],
figureSize=dataset.Attributes['figureSize'])
else:
if not destinationPath:
print('No Retention Time and m/z information, unable to plot the ion map.\n')
# Write HTML if saving
##
if destinationPath:
# Make paths for graphics local not absolute for use in the HTML.
for key in item:
if os.path.join(destinationPath, 'graphics') in str(item[key]):
item[key] = re.sub('.*graphics', 'graphics', item[key])
# Generate report
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader(os.path.join(toolboxPath(), 'Templates')))
template = env.get_template('MS_FeatureSummaryReport.html')
filename = os.path.join(destinationPath, dataset.name + '_report_featureSummary.html')
f = open(filename, 'w')
f.write(template.render(item=item,
attributes=dataset.Attributes,
version=version,
graphicsPath=graphicsPath))
f.close()
copyBackingFiles(toolboxPath(), os.path.join(destinationPath, 'graphics'))
return None
def _featureSelectionReport(dataset, destinationPath=None, withArtifactualFiltering=False):
"""
Report on feature quality
Generates a summary of the number of features passing feature selection (with current settings as defined in the SOP), and a heatmap showing how this number would be affected by changes to RSD and correlation to dilution thresholds.
"""
# Define sample masks
SSmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudySample) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.Assay)
SRmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.PrecisionReference)
SRDmask = (dataset.sampleMetadata['SampleType'].values == SampleType.StudyPool) & \
(dataset.sampleMetadata['AssayRole'].values == AssayRole.LinearityReference)
Blankmask = dataset.sampleMetadata['SampleType'] == SampleType.ProceduralBlank
# Define passmask as current featureMask
passMask = dataset.featureMask
# Set up path to save
if destinationPath is not None:
if not os.path.exists(destinationPath):
os.makedirs(destinationPath)
if not os.path.exists(os.path.join(destinationPath, 'graphics')):
os.makedirs(os.path.join(destinationPath, 'graphics'))
graphicsPath = os.path.join(destinationPath, 'graphics', 'report_featureSelectionSummary')
if not os.path.exists(graphicsPath):
os.makedirs(graphicsPath)
else:
graphicsPath = None
# Feature selection parameters and numbers passing
item = dict()
item['Name'] = dataset.name
item['Nfeatures'] = dataset.intensityData.shape[1]
# Correlation to dilution
item['corrMethod'] = dataset.Attributes['filterParameters']['corrMethod'] if dataset.Attributes['filterParameters']['corrMethod'] is not None else dataset.Attributes['corrMethod']
item['corrThreshold'] = dataset.Attributes['filterParameters']['corrThreshold'] if dataset.Attributes['filterParameters']['corrThreshold'] is not None else dataset.Attributes['corrThreshold']
if sum(dataset.corrExclusions) != dataset.noSamples:
item['corrExclusions'] = str(
dataset.sampleMetadata.loc[dataset.corrExclusions == False, 'Sample File Name'].values)
else:
item['corrExclusions'] = 'none'
if sum(SRDmask) > 0:
item['corrPassed'] = str(sum(dataset.correlationToDilution >= item['corrThreshold'])) + ' passed selection.'
passMask = numpy.logical_and(passMask, dataset.correlationToDilution >= item['corrThreshold'])
else:
item['corrPassed'] = 'Not applied (no SRD samples present).'
# RSD in SR samples, and RSD in SS samples > RSD in SR samples
item['rsdThreshold'] = dataset.Attributes['filterParameters']['rsdThreshold'] if dataset.Attributes['filterParameters']['rsdThreshold'] is not None else dataset.Attributes['rsdThreshold']
item['rsdSPvsSSvarianceRatio'] = dataset.Attributes['filterParameters']['varianceRatio'] if dataset.Attributes['filterParameters']['varianceRatio'] is not None else dataset.Attributes['varianceRatio']
rsdSS = rsd(dataset.intensityData[SSmask, :])
if sum(SRmask) > 0:
item['rsdPassed'] = str(sum(dataset.rsdSP <= item['rsdThreshold'])) + ' passed selection.'
item['rsdSPvsSSPassed'] = str(sum(dataset.rsdSP * item['rsdSPvsSSvarianceRatio'] <= rsdSS)) + ' passed selection.'
passMask = numpy.logical_and(passMask, dataset.rsdSP <= item['rsdThreshold'])
passMask = numpy.logical_and(passMask, dataset.rsdSP * item['rsdSPvsSSvarianceRatio'] <= rsdSS)
else:
item['rsdPassed'] = 'Not applied (no SR samples present).'
item['rsdSPvsSSPassed'] = 'Not applied (no SR samples present).'
# Blank mask
if (dataset.Attributes['featureFilters']['blankFilter'] is True) & (sum(Blankmask) >= 2):
item['BlankThreshold'] = dataset.Attributes['filterParameters']['blankThreshold'] if dataset.Attributes['filterParameters']['blankThreshold'] is not None else dataset.Attributes['blankThreshold']
blankMask = blankFilter(dataset, item['BlankThreshold'])
passMask = numpy.logical_and(passMask, blankMask)[0]
item['BlankPassed'] = sum(blankMask)
# Artifactual filtering
if withArtifactualFiltering:
passMask = dataset.artifactualFilter(featMask=passMask)
item['artifactualPassed'] = sum(passMask)
item['featuresPassed'] = sum(passMask)
# Heatmap of the number of features passing selection with different RSD and correlation to dilution thresholds
rsdVals = numpy.arange(5, 55, 5)
rVals = numpy.arange(0.5, 1.01, 0.05)
rValsRep = numpy.tile(numpy.arange(0.5, 1.01, 0.05), [1, len(rsdVals)])
rsdValsRep = numpy.reshape(numpy.tile(numpy.arange(5, 55, 5), [len(rVals), 1]), rValsRep.shape, order='F')
featureNos = numpy.zeros(rValsRep.shape, dtype=int)  # numpy.int was removed in NumPy 1.24; the builtin int is equivalent here
if withArtifactualFiltering:
# with blankThreshold in heatmap
if (dataset.Attributes['featureFilters']['blankFilter'] is True) & (sum(Blankmask) >= 2):
for rsdNo in range(rValsRep.shape[1]):
featureNos[0, rsdNo] = sum(dataset.artifactualFilter(featMask=(
(dataset.correlationToDilution >= rValsRep[0, rsdNo]) & (
dataset.rsdSP <= rsdValsRep[0, rsdNo]) & (
(dataset.rsdSP * item['rsdSPvsSSvarianceRatio']) <= rsdSS) & (
dataset.featureMask == True) & (blankMask == True))))
# without blankThreshold
else:
for rsdNo in range(rValsRep.shape[1]):
featureNos[0, rsdNo] = sum(dataset.artifactualFilter(featMask=(
(dataset.correlationToDilution >= rValsRep[0, rsdNo]) & (
dataset.rsdSP <= rsdValsRep[0, rsdNo]) & (
(dataset.rsdSP * item['rsdSPvsSSvarianceRatio']) <= rsdSS) & (
dataset.featureMask == True))))
else:
# with blankThreshold in heatmap
if (dataset.Attributes['featureFilters']['blankFilter'] is True) & (sum(Blankmask) >= 2):
for rsdNo in range(rValsRep.shape[1]):
featureNos[0, rsdNo] = sum(
(dataset.correlationToDilution >= rValsRep[0, rsdNo]) & (dataset.rsdSP <= rsdValsRep[0, rsdNo]) & (
(dataset.rsdSP * item['rsdSPvsSSvarianceRatio']) <= rsdSS) & (
dataset.featureMask == True) & (blankMask == True))
# without blankThreshold
else:
for rsdNo in range(rValsRep.shape[1]):
featureNos[0, rsdNo] = sum(
(dataset.correlationToDilution >= rValsRep[0, rsdNo]) & (dataset.rsdSP <= rsdValsRep[0, rsdNo]) & (
(dataset.rsdSP * item['rsdSPvsSSvarianceRatio']) <= rsdSS) & (
dataset.featureMask == True))
test = pandas.DataFrame(data=numpy.transpose(numpy.concatenate([rValsRep, rsdValsRep, featureNos])))
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns # noqa
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.utils.validation import check_is_fitted
class GaussianProcess(BaseEstimator):
"""
Fits a Gaussian Process regressor.
Parameters
----------
kernel : ml.Kernel
Kernel function used to compute the covariance matrix.
Attributes
----------
kernel : ml.Kernel
Kernel function used to compute covariance matrix.
Examples
--------
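A minimal usage sketch (the kernel object and array names here are
illustrative, not defined in this module):

>>> gp = GaussianProcess(kernel=my_kernel)  # my_kernel.transform(X1, X2) -> covariance
>>> gp.fit(Xtrain, ytrain)
>>> mu = gp.predict(Xtest)                  # posterior mean at the test points
>>> draws = gp.sample(Xtest, n_samples=5)   # posterior samples, shape (len(Xtest), 5)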
Note
----
Most of code is taken from the following tutorial:
https://katbailey.github.io/post/gaussian-processes-for-dummies/.
"""
def __init__(self, kernel):
self.kernel = kernel
self.Xtrain_ = None
self.ytrain_ = None
def fit(self, X, y):
"""
Computes the Xtrain variance and stores as attribute. Also stores
Xtrain and ytrain as attributes.
Parameters
----------
X : np.ndarray, shape (-1, n)
Input.
y : np.array, shape (n)
Targets
Returns
-------
None
Note
----
Note the K matrix is:
K_11 K_21
K_21 K_22
"""
self.Xtrain_ = X
self.ytrain_ = y
# Compute Xtrain/Xtrain elements of covariance matrix (Xtrain variance)
K_11 = self.kernel.transform(self.Xtrain_, self.Xtrain_)
self.L_11_ = np.linalg.cholesky(K_11
+ 0.00005*np.eye(len(self.Xtrain_)))
def predict(self, Xtest, n_samples=1):
"""
Returns predictions for input data by returning the posterior mean (at
the test points) of the joint distribution of the training data Xtrain
and the test data Xtest.
High-level Intuition
--------------------
* Goal is to learn a distribution over possible "functions" f(x) = y.
* Compute the "difference" between the Xtrain data and the Xtest data.
* Compute the Xtrain covariance "feature weights" cov_fw s.t.
XtrainCovMatrix • cov_fw = ytrain
* Compute post. mean by mult. cov_fw by the Xtrain/Xtest "difference":
mu = cov_fw • XtrainXtestCovDiff
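The same computation as a compact sketch (illustrative pseudo-code, using
the K-block naming from the Note below):

    cov_fw = solve(K_11, ytrain)   # XtrainCovMatrix . cov_fw = ytrain
    mu     = K_21 @ cov_fw         # posterior mean at the test points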
Parameters
----------
Xtest : np.array
Input data.
Returns
-------
np.array, length len(Xtest)
Predictions which are the posterior mean of the joint distribution
of the training data and the test data.
Note
----
Note the K matrix is:
K_11 K_21
K_21 K_22
"""
'''Compute the posterior mean at test points.'''
if not self._is_fitted():
raise NotFittedError()
mu, L_12 = self._compute_mean_and_non_diag_covariance(Xtest)
return mu
def sample(self, Xtest, n_samples=1, use_prior=False):
"""
Returns predictions for input data by returning samples from either
the prior or the posterior of the joint distribution of the training
data Xtrain and the test data Xtest.
If the model is not yet fitted or use_prior=True, then samples from
prior are returned. Otherwise, samples are taken from the posterior.
Parameters
----------
Xtest : np.array
Input data.
n_samples : int, default 1
Number of samples (predictions) to return.
use_prior : bool, default False
Whether or not to sample from the prior distribution. If True,
samples are drawn from the prior; otherwise the posterior is used.
Returns
-------
np.ndarray, shape (len(Xtest), n_samples)
Predictions which are samples drawn from the joint distribution of
the training data and the test data.
"""
ntest = len(Xtest)
# Compute Xtest covariance and its decomposition (sqroot)
K_22 = self.kernel.transform(Xtest, Xtest)
L_22 = np.linalg.cholesky(K_22 + 1e-15*np.eye(ntest))
if use_prior or not self._is_fitted():
# Sample n_samples sets of standard normals for our test points,
# then multiply them by the square root of the Xtest covariance.
f_prior = np.dot(L_22, np.random.normal(size=(ntest, n_samples)))
return f_prior
# Compute mean and non-diagonal (Xtrain/Xtest) elements of cov. matrix
mu, L_12 = self._compute_mean_and_non_diag_covariance(Xtest)
# Compute sqroot of entire covariance matrix
L = np.linalg.cholesky(K_22 + 1e-6*np.eye(ntest) - np.dot(L_12.T, L_12))
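# (Added sketch - the source file is truncated here. Assuming the posterior
# sampling step from the tutorial cited in the class docstring: shift and
# scale standard normal draws by the posterior mean and covariance root.)
f_post = mu.reshape(-1, 1) + np.dot(L, np.random.normal(size=(ntest, n_samples)))
return f_post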
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
from __future__ import division
from datetime import datetime
from sklearn import linear_model
import pandas as pd
import numpy as np
import scipy.stats as st
import statsmodels.distributions.empirical_distribution as edis
import seaborn as sns; sns.set(color_codes=True)
import matplotlib.pyplot as plt
#########################################################################
# The purpose of this script is to use historical temperature and streamflow data
# to calculate synthetic time series of daily flows at each of the stream gages
# used in the hydropower production models.
# Regression and vector-autoregressive errors are used to simulate total annual
# streamflows, and these are then paired with daily streamflow fractions tied
# to daily temperature dynamics
#########################################################################
# Import historical temperature data
df_temp = pd.read_excel('Synthetic_streamflows/hist_temps_1953_2007.xlsx')
his_temp_matrix = df_temp.values
# Import calendar
calender=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Calender',header= None)
calender=calender.values
julian=calender[:,2]
###############################
# Synthetic HDD CDD calculation
# Simulation data
sim_weather=pd.read_csv('Synthetic_weather/synthetic_weather_data.csv',header=0)
# Load temperature data only
cities = ['SALEM_T','EUGENE_T','SEATTLE_T','BOISE_T','PORTLAND_T','SPOKANE_T','FRESNO_T','LOS ANGELES_T','SAN DIEGO_T','SACRAMENTO_T','SAN JOSE_T','SAN FRANCISCO_T','TUCSON_T','PHOENIX_T','LAS VEGAS_T']
sim_temperature=sim_weather[cities]
# Convert temperatures to Fahrenheit
sim_temperature= (sim_temperature*(9/5))+32
sim_temperature=sim_temperature.values
num_cities = len(cities)
num_sim_days = len(sim_temperature)
HDD_sim = np.zeros((num_sim_days,num_cities))
CDD_sim = np.zeros((num_sim_days,num_cities))
# calculate daily records of heating (HDD) and cooling (CDD) degree days
for i in range(0,num_sim_days):
for j in range(0,num_cities):
HDD_sim[i,j] = np.max((0,65-sim_temperature[i,j]))
CDD_sim[i,j] = np.max((0,sim_temperature[i,j] - 65))
# calculate annual totals of heating and cooling degree days for each city
annual_HDD_sim=np.zeros((int(len(HDD_sim)/365),num_cities))
annual_CDD_sim=np.zeros((int(len(CDD_sim)/365),num_cities))
for i in range(0,int(len(HDD_sim)/365)):
for j in range(0,num_cities):
annual_HDD_sim[i,j]=np.sum(HDD_sim[0+(i*365):365+(i*365),j])
annual_CDD_sim[i,j]=np.sum(CDD_sim[0+(i*365):365+(i*365),j])
########################################################################
#Calculate HDD and CDD for historical temperature data
num_cities = len(cities)
num_days = len(his_temp_matrix)
# daily records
HDD = np.zeros((num_days,num_cities))
CDD = np.zeros((num_days,num_cities))
for i in range(0,num_days):
for j in range(0,num_cities):
HDD[i,j] = np.max((0,65-his_temp_matrix[i,j+1]))
CDD[i,j] = np.max((0,his_temp_matrix[i,j+1] - 65))
# annual sums
annual_HDD=np.zeros((int(len(HDD)/365),num_cities))
annual_CDD=np.zeros((int(len(CDD)/365),num_cities))
for i in range(0,int(len(HDD)/365)):
for j in range(0,num_cities):
annual_HDD[i,j]=np.sum(HDD[0+(i*365):365+(i*365),j])
annual_CDD[i,j]=np.sum(CDD[0+(i*365):365+(i*365),j])
###########################################################################################
#This section is used for calculating total hydro
# Load relevant streamflow data (1953-2007)
BPA_streamflow=pd.read_excel('Synthetic_streamflows/BPA_hist_streamflow.xlsx',sheet_name='Inflows',header=0)
Hoover_streamflow=pd.read_csv('Synthetic_streamflows/Hoover_hist_streamflow.csv',header=0)
CA_streamflow=pd.read_excel('Synthetic_streamflows/CA_hist_streamflow.xlsx',header=0)
Willamette_streamflow=pd.read_csv('Synthetic_streamflows/Willamette_hist_streamflow.csv',header=0)
# headings
name_Will=list(Willamette_streamflow.loc[:,'Albany':])
name_CA = list(CA_streamflow.loc[:,'ORO_fnf':])
name_BPA = list(BPA_streamflow.loc[:,'1M':])
# number of streamflow gages considered
num_BPA = len(name_BPA)
num_CA = len(name_CA)
num_Will = len(name_Will)
num_gages= num_BPA + num_CA + num_Will + 1
# Calculate historical totals for 1953-2007
years = range(1953,2008)
for y in years:
y_index = years.index(y)
BPA = BPA_streamflow.loc[BPA_streamflow['year'] ==y,'1M':]
CA = CA_streamflow.loc[CA_streamflow['year'] == y,'ORO_fnf':]
WB = Willamette_streamflow.loc[Willamette_streamflow['year'] == y,'Albany':]
HO = Hoover_streamflow.loc[Hoover_streamflow['year'] == y,'Discharge']
BPA_sums = np.reshape(np.sum(BPA,axis= 0).values,(1,num_BPA))
CA_sums = np.reshape(np.sum(CA,axis=0).values,(1,num_CA))
WB_sums = np.reshape(np.sum(WB,axis=0).values,(1,num_Will))
HO_sums = np.reshape(np.sum(HO,axis=0),(1,1))
# matrix of annual flows for each stream gage
joined = np.column_stack((BPA_sums,CA_sums,WB_sums,HO_sums))
if y_index < 1:
hist_totals = joined
else:
hist_totals = np.vstack((hist_totals,joined))
BPA_headers = np.reshape(list(BPA_streamflow.loc[:,'1M':]),(1,num_BPA))
CA_headers = np.reshape(list(CA_streamflow.loc[:,'ORO_fnf':]),(1,num_CA))
WB_headers = np.reshape(list(Willamette_streamflow.loc[:,'Albany':]),(1,num_Will))
HO_headers = np.reshape(['Hoover'],(1,1))
headers = np.column_stack((BPA_headers,CA_headers,WB_headers,HO_headers))
# annual streamflow totals for 1953-2007
df_hist_totals = pd.DataFrame(hist_totals)
df_hist_totals.columns = headers[0,:]
df_hist_totals.loc[38,'83L']=df_hist_totals.loc[36,'83L']
added_value=abs(np.min((df_hist_totals)))+5
log_hist_total=np.log(df_hist_totals+abs(added_value))
A=df_hist_totals.values
B=np.column_stack((A,annual_HDD,annual_CDD))
x,y=np.shape(B)
# data is the data matrix over all time steps. The dimension would be X*Y
# data 2 is required if calculating dissimilarity
# Step 1: Transform the data into empirical CDF
P=np.zeros((x,y))
for i in range(0,y):
ECDF=edis.ECDF(B[:,i])
P[:,i]=ECDF(B[:,i])
Y=2*(P-0.5)
new_cols = ['Name'] + ['type_' + str(i) for i in range(0,141)]
#remove constant zeros columns
need_to_remove=[1,17,22,24,27,32,34,36,37,38,44,107,108,109]
Y2=np.delete(Y,need_to_remove,axis=1)
Y[:,107]=1
mean=np.mean(Y,axis=0)
cov=np.cov(Y,rowvar=0)
runs=int(num_sim_days/365)*5
sim_years=int(num_sim_days/365)
N = np.random.multivariate_normal(mean,cov,runs)
import pytest
import numpy as np
from numpy.testing import assert_allclose
from astropy.time import Time, TimeDelta
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from astropy.timeseries.periodograms.lombscargle import LombScargle
ALL_METHODS = LombScargle.available_methods
ALL_METHODS_NO_AUTO = [method for method in ALL_METHODS if method != 'auto']
FAST_METHODS = [method for method in ALL_METHODS if 'fast' in method]
NTERMS_METHODS = [method for method in ALL_METHODS if 'chi2' in method]
NORMALIZATIONS = ['standard', 'psd', 'log', 'model']
@pytest.fixture
def data(N=100, period=1, theta=[10, 2, 3], dy=1, rseed=0):
"""Generate some data for testing"""
rng = np.random.default_rng(rseed)
t = 20 * period * rng.random(N)
omega = 2 * np.pi / period
y = theta[0] + theta[1] * np.sin(omega * t) + theta[2] * np.cos(omega * t)
dy = dy * (0.5 + rng.random(N))
y += dy * rng.standard_normal(N)
return t, y, dy
@pytest.mark.parametrize('minimum_frequency', [None, 1.0])
@pytest.mark.parametrize('maximum_frequency', [None, 5.0])
@pytest.mark.parametrize('nyquist_factor', [1, 10])
@pytest.mark.parametrize('samples_per_peak', [1, 5])
def test_autofrequency(data, minimum_frequency, maximum_frequency,
nyquist_factor, samples_per_peak):
t, y, dy = data
baseline = t.max() - t.min()
freq = LombScargle(t, y, dy).autofrequency(samples_per_peak,
nyquist_factor,
minimum_frequency,
maximum_frequency)
df = freq[1] - freq[0]
# Check sample spacing
assert_allclose(df, 1. / baseline / samples_per_peak)
# Check minimum frequency
if minimum_frequency is None:
assert_allclose(freq[0], 0.5 * df)
else:
assert_allclose(freq[0], minimum_frequency)
if maximum_frequency is None:
avg_nyquist = 0.5 * len(t) / baseline
assert_allclose(freq[-1], avg_nyquist * nyquist_factor, atol=0.5*df)
else:
assert_allclose(freq[-1], maximum_frequency, atol=0.5*df)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('with_units', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_all_methods(data, method, center_data, fit_mean,
errors, with_units, normalization):
if method == 'scipy' and (fit_mean or errors != 'none'):
return
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if with_units:
t = t * u.day
y = y * u.mag
dy = dy * u.mag
frequency = frequency / t.unit
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
kwds = {}
ls = LombScargle(t, y, dy, center_data=center_data, fit_mean=fit_mean,
normalization=normalization)
P_expected = ls.power(frequency)
# don't use the fft approximation here; we'll test this elsewhere
if method in FAST_METHODS:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
if with_units:
if normalization == 'psd' and errors == 'none':
assert P_method.unit == y.unit ** 2
else:
assert P_method.unit == u.dimensionless_unscaled
else:
assert not hasattr(P_method, 'unit')
assert_quantity_allclose(P_expected, P_method)
@pytest.mark.parametrize('method', ALL_METHODS_NO_AUTO)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('with_errors', [True, False])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_integer_inputs(data, method, center_data, fit_mean, with_errors,
normalization):
if method == 'scipy' and (fit_mean or with_errors):
return
t, y, dy = data
t = np.floor(100 * t)
t_int = t.astype(int)
y = np.floor(100 * y)
y_int = y.astype(int)
dy = np.floor(100 * dy)
dy_int = dy.astype('int32')
frequency = 1E-2 * (0.8 + 0.01 * np.arange(40))
if not with_errors:
dy = None
dy_int = None
kwds = dict(center_data=center_data,
fit_mean=fit_mean,
normalization=normalization)
P_float = LombScargle(t, y, dy, **kwds).power(frequency,method=method)
P_int = LombScargle(t_int, y_int, dy_int,
**kwds).power(frequency, method=method)
assert_allclose(P_float, P_int)
@pytest.mark.parametrize('method', NTERMS_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 2, 4])
@pytest.mark.parametrize('normalization', NORMALIZATIONS)
def test_nterms_methods(method, center_data, fit_mean, errors,
nterms, normalization, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
if errors == 'none':
dy = None
elif errors == 'partial':
dy = dy[0]
elif errors == 'full':
pass
else:
raise ValueError(f"Unrecognized error type: '{errors}'")
ls = LombScargle(t, y, dy, center_data=center_data,
fit_mean=fit_mean, nterms=nterms,
normalization=normalization)
if nterms == 0 and not fit_mean:
with pytest.raises(ValueError) as err:
ls.power(frequency, method=method)
assert 'nterms' in str(err.value) and 'bias' in str(err.value)
else:
P_expected = ls.power(frequency)
# don't use fast fft approximations here
kwds = {}
if 'fast' in method:
kwds['method_kwds'] = dict(use_fft=False)
P_method = ls.power(frequency, method=method, **kwds)
assert_allclose(P_expected, P_method, rtol=1E-7, atol=1E-25)
@pytest.mark.parametrize('method', FAST_METHODS)
@pytest.mark.parametrize('center_data', [True, False])
@pytest.mark.parametrize('fit_mean', [True, False])
@pytest.mark.parametrize('errors', ['none', 'partial', 'full'])
@pytest.mark.parametrize('nterms', [0, 1, 2])
def test_fast_approximations(method, center_data, fit_mean,
errors, nterms, data):
t, y, dy = data
frequency = 0.8 + 0.01 * np.arange(40)
"""
Definition of the fundamental class of functions.
"""
import copy as cp
import numpy as np
from .basis1010utils import compute_Qd1_block
from .basis1010utils import compute_Qd1_dtau_block
from .basis1010utils import compute_Qd3_block
from .basis1010utils import compute_Qd3_dtau_block
class cBasis1010(object):
dim_ = 6
def __init__(self, _params):
self.Dmat_ = np.array(
[[1, -1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0], [0, 0, -1, -1, 0, 0],
[0, 0, 1, -1, 0, 0], [0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0]],
dtype=float)  # np.float was removed in NumPy 1.24; the builtin float is equivalent
self.buff_ = np.ones((6, ))
self.buff_[5] = 1.0
self.dim_ = 6
self.params_ = cp.deepcopy(_params)
def derivMatrixOnWindow(self, _tau, _deg):
alpha = self.params_
k = np.sqrt(2) / 4.0 * np.power(alpha, 0.25)
# coding: utf-8
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
if __name__ == "__main__":
# # Problem 1 - Gaussian Process Modelling
# ## Data I/O
X_train = np.genfromtxt('hw3-data/gaussian_process/X_train.csv', delimiter=',')
X_test = np.genfromtxt('hw3-data/gaussian_process/X_test.csv', delimiter=',')
y_train = np.genfromtxt('hw3-data/gaussian_process/y_train.csv', delimiter=',')
y_test = np.genfromtxt('hw3-data/gaussian_process/y_test.csv', delimiter=',')
# ## Helper Functions
def calculateRMSE(y_pred, y_test):
n = y_pred.shape[0]
return np.linalg.norm(y_pred - y_test)/(n**0.5)
# ## Gaussian Process Regression
class GaussianProcessRegression():
def __init__(self):
pass
def standardize(self, y):
mean = np.mean(y)
std = np.std(y)
y = (y - mean)/std
self.mean = mean
self.std = std
return y
def calcKernel(self):
X = self.X
(n, d) = X.shape
K = np.zeros((n, n))
for i in range(n):
for j in range(i, n):
xi, xj = X[i, :].flatten(), X[j, :].flatten()
k = self.calcRadialDistance(xi, xj)
K[i, j] = k
K[j, i] = k
self.K = K
def transformOutput(self, y):
y = y*self.std + self.mean
return y
def calcRadialDistance(self, x1, x2):
return np.exp(-1*(np.linalg.norm(x1-x2)**2)/self.b)
def train(self, X, y):
self.X = X
self.y = self.standardize(y)
def setb(self, b):
self.b = b
self.calcKernel()
def predict(self, X_t, sig):
X = self.X
y = self.y
(n, d) = X.shape
(m, d) = X_t.shape
Kn = np.zeros((m, n))
for i in range(m):
for j in range(n):
Kn[i, j] = self.calcRadialDistance(X_t[i, :].flatten(), X[j, :].flatten())
Kn = Kn.reshape((m, n))
K = self.K
mu = Kn.dot(np.linalg.inv((sig)*np.identity(n) + K)).dot(y)
#cov = (sig**2) + 1 - Kn.dot(np.linalg.inv((sig**2)*np.identity(n) + K)).dot(Kn.T)
return self.transformOutput(mu)
GPR = GaussianProcessRegression()
GPR.train(X_train, y_train)
# ## RMSE vs. (b, $\sigma^2$)
b_tests = [5, 7, 9, 11, 13, 15]
sig_tests = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
results = np.zeros((len(b_tests), len(sig_tests)))
for i in range(len(b_tests)):
GPR.setb(b_tests[i])
for j in range(len(sig_tests)):
y_pred = GPR.predict(X_test, sig_tests[j])
results[i, j] = calculateRMSE(y_pred, y_test)
plt.figure(figsize=(20, 10))
sns.set_style('whitegrid')
sns.heatmap(results, annot=True, annot_kws={"size": 15}, fmt='.3f', xticklabels=sig_tests, yticklabels=b_tests)
plt.xlabel('sig_squared', fontsize=20)
plt.ylabel('b', fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.title('RMSE', fontsize=20)
plt.savefig('1b.png')
#plt.show()
# ## Prediction using only a single dimension - (car weight)
(n, d) = X_train.shape
X_test_f4 = X_train[:, 3].reshape(n, 1)
for i in range(d-1):
X_test_f4 = np.column_stack((X_test_f4, X_train[:, 3].reshape((n, 1))))
GPR.setb(5)
y_test_f4 = GPR.predict(X_test_f4, 2)
plt.figure(figsize=(20, 10))
plt.scatter(X_train[:, 3], y_test_f4, label="Predictions")
plt.scatter(X_train[:, 3], y_train, label="Training Data")
plt.xlabel("car_weight", fontsize=20)
plt.ylabel("Mileage", fontsize=20)
plt.legend(fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
#plt.show()
GPR_x4 = GaussianProcessRegression()
GPR_x4.train(X_train[:, 3].reshape(X_train.shape[0], 1), y_train)
GPR_x4.setb(5)
y_train_f4 = GPR_x4.predict(X_train[:, 3].reshape(X_train.shape[0], 1), 2)
plt.figure(figsize=(20, 10))
plt.scatter(X_train[:, 3], y_train_f4, label="Predictions")
plt.scatter(X_train[:, 3], y_train, label="Training Data")
plt.xlabel("car_weight", fontsize=20)
plt.ylabel("Mileage", fontsize=20)
plt.legend(fontsize=20)
plt.xticks(fontsize=15)
plt.yticks(fontsize=15)
plt.savefig('1d_new.png')
#plt.show()
# # Problem 2 - Boosting
# ## Data I/O
X_train = np.genfromtxt('hw3-data/boosting/X_train.csv', delimiter=',')
X_test = np.genfromtxt('hw3-data/boosting/X_test.csv', delimiter=',')
y_train = np.genfromtxt('hw3-data/boosting/y_train.csv', delimiter=',')
#!/usr/bin/env python
# coding: utf-8
# # Developing Quaternion and Space-time Number Tools for iPython3
# In this notebook, tools for working with quaternions for physics issues are developed. The class QH treat quaternions as Hamilton would have done: as a 4-vector over the real numbers.
#
# In physics, group theory plays a central role in the fundamental forces of Nature via the standard model. The gauge symmetry U(1), a unit circle in the complex plane, leads to electric charge conservation. The unit quaternions SU(2) are the symmetry needed for the weak force, which leads to beta decay. The group SU(3) is the symmetry of the strong force that keeps a nucleus together.
#
# The class Q8 was written in the hope that group theory would be written in first, not added as needed later. I call these "space-time numbers". The problem with such an approach is that one does not use the mathematical field of real numbers. Instead one relies on the set of positive reals. In some ways, this is like reverse engineering some basic computer science. Libraries written in C have a notion of a signed versus unsigned integer. The signed integer behaves like the familiar integers. The unsigned integer is like the positive integers. The difference between the two is whether there is a placeholder for the sign or not. All floats are signed. The modulo operations that work for unsigned integers do not work for floats.
#
# This set of tools is done 4x:
# 1. QH - Quaternions for Hamilton, can do symbolic manipulations
# 1. Q8 - Quaternions that are represented by 8 numbers
# 1. Q8a - Quaternions that are represented by 8 numbers that are numpy arrays
#
# Test-driven development was used. The same tests were used for QH, QHa, Q8, and Q8a. Any of these classes can be used to study quaternions in physics.
# In[1]:
import IPython
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import math
import numpy as np
import random
import sympy as sp
import os
import unittest
from copy import deepcopy
import pdb
from IPython.display import display
from os.path import basename
from glob import glob
get_ipython().run_line_magic('matplotlib', 'inline')
# Define the stretch factor $\gamma$ and the $\gamma \beta$ used in special relativity.
# In[2]:
def sr_gamma(beta_x=0, beta_y=0, beta_z=0):
"""The gamma used in special relativity using 3 velocites, some may be zero."""
return 1 / (1 - beta_x ** 2 - beta_y ** 2 - beta_z ** 2) ** (1/2)
def sr_gamma_betas(beta_x=0, beta_y=0, beta_z=0):
"""gamma and the three gamma * betas used in special relativity."""
g = sr_gamma(beta_x, beta_y, beta_z)
return [g, g * beta_x, g * beta_y, g * beta_z]
# ## Quaternions for Hamilton
# Define a class QH to manipulate quaternions as Hamilton would have done it so many years ago. The "qtype" is a little bit of text to leave a trail of breadcrumbs about how a particular quaternion was generated.
# In[3]:
class QH(object):
"""Quaternions as Hamilton would have defined them, on the manifold R^4."""
def __init__(self, values=None, qtype="Q", representation=""):
if values is None:
self.t, self.x, self.y, self.z = 0, 0, 0, 0
elif len(values) == 4:
self.t, self.x, self.y, self.z = values[0], values[1], values[2], values[3]
elif len(values) == 8:
self.t, self.x = values[0] - values[1], values[2] - values[3]
self.y, self.z = values[4] - values[5], values[6] - values[7]
self.representation = representation
if representation != "":
self.t, self.x, self.y, self.z = self.representation_2_txyz(representation)
self.qtype = qtype
def __str__(self, quiet=False):
"""Customize the output."""
qtype = self.qtype
if quiet:
qtype = ""
if self.representation == "":
string = "({t}, {x}, {y}, {z}) {qt}".format(
t=self.t, x=self.x, y=self.y, z=self.z, qt=qtype)
elif self.representation == "polar":
rep = self.txyz_2_representation("polar")
string = "({A} A, {thetaX} 𝜈x, {thetaY} 𝜈y, {thetaZ} 𝜈z) {qt}".format(
A=rep[0], thetaX=rep[1], thetaY=rep[2], thetaZ=rep[3], qt=qtype)
elif self.representation == "spherical":
rep = self.txyz_2_representation("spherical")
string = "({t} t, {R} R, {theta} θ, {phi} φ) {qt}".format(
t=rep[0], R=rep[1], theta=rep[2], phi=rep[3], qt=qtype)
return string
def print_state(self, label, spacer=False, quiet=True):
"""Utility for printing a quaternion."""
print(label)
print(self.__str__(quiet))
if spacer:
print("")
def is_symbolic(self):
"""Figures out if an expression has symbolic terms."""
symbolic = False
if hasattr(self.t, "free_symbols") or hasattr(self.x, "free_symbols") or hasattr(self.y, "free_symbols") or hasattr(self.z, "free_symbols"):
symbolic = True
return symbolic
def txyz_2_representation(self, representation):
"""Converts Cartesian txyz into an array of 4 values in a different representation."""
symbolic = self.is_symbolic()
if representation == "":
rep = [self.t, self.x, self.y, self.z]
elif representation == "polar":
amplitude = (self.t ** 2 + self.x ** 2 + self.y **2 + self.z **2) ** (1/2)
abs_v = self.abs_of_vector().t
if symbolic:
theta = sp.atan2(abs_v, self.t)
else:
theta = math.atan2(abs_v, self.t)
if abs_v == 0:
thetaX, thetaY, thetaZ = 0, 0, 0
else:
thetaX = theta * self.x / abs_v
thetaY = theta * self.y / abs_v
thetaZ = theta * self.z / abs_v
rep = [amplitude, thetaX, thetaY, thetaZ]
elif representation == "spherical":
t = self.t
R = (self.x ** 2 + self.y **2 + self.z **2) ** (1/2)
if R == 0:
theta = 0
else:
if symbolic:
theta = sp.acos(self.z / R)
else:
theta = math.acos(self.z / R)
if symbolic:
phi = sp.atan2(self.y, self.x)
else:
phi = math.atan2(self.y, self.x)
rep = [t, R, theta, phi]
else:
print("Oops, don't know representation: ", representation)
return rep
def representation_2_txyz(self, representation):
"""Convert from a representation to Cartesian txyz."""
symbolic = False
if hasattr(self.t, "free_symbols") or hasattr(self.x, "free_symbols") or hasattr(self.y, "free_symbols") or hasattr(self.z, "free_symbols"):
symbolic = True
if representation == "":
t, x, y, z = self.t, self.x, self.y, self.z
elif representation == "polar":
amplitude, thetaX, thetaY, thetaZ = self.t, self.x, self.y, self.z
theta = (thetaX ** 2 + thetaY ** 2 + thetaZ ** 2) ** (1/2)
if theta == 0:
t = self.t
x, y, z = 0, 0, 0
else:
if symbolic:
t = amplitude * sp.cos(theta)
x = self.x / theta * amplitude * sp.sin(theta)
y = self.y / theta * amplitude * sp.sin(theta)
z = self.z / theta * amplitude * sp.sin(theta)
else:
t = amplitude * math.cos(theta)
x = self.x / theta * amplitude * math.sin(theta)
y = self.y / theta * amplitude * math.sin(theta)
z = self.z / theta * amplitude * math.sin(theta)
elif representation == "spherical":
t, R, theta, phi = self.t, self.x, self.y, self.z
if symbolic:
x = R * sp.sin(theta) * sp.cos(phi)
y = R * sp.sin(theta) * sp.sin(phi)
z = R * sp.cos(theta)
else:
x = R * math.sin(theta) * math.cos(phi)
y = R * math.sin(theta) * math.sin(phi)
z = R * math.cos(theta)
else:
print("Oops, don't know representation: ", representation)
txyz = [t, x, y, z]
return txyz
def check_representations(self, q1):
"""If they are the same, report true. If not, kick out an exception. Don't add apples to oranges."""
if self.representation == q1.representation:
return True
else:
raise Exception("Oops, 2 quaternions have different representations: {}, {}".format(self.representation, q1.representation))
return False
def display_q(self, label = ""):
"""Display each terms in a pretty way."""
if label:
print(label)
display(self.t)
display(self.x)
display(self.y)
display(self.z)
return
def simple_q(self, label=""):
"""Simplify each term."""
if label:
print(label)
self.t = sp.simplify(self.t)
self.x = sp.simplify(self.x)
self.y = sp.simplify(self.y)
self.z = sp.simplify(self.z)
return self
def expand_q(self):
"""Expand each term."""
self.t = sp.expand(self.t)
self.x = sp.expand(self.x)
self.y = sp.expand(self.y)
self.z = sp.expand(self.z)
return self
def subs(self, symbol_value_dict):
"""Evaluates a quaternion using sympy values and a dictionary {t:1, x:2, etc}."""
t1 = self.t.subs(symbol_value_dict)
x1 = self.x.subs(symbol_value_dict)
y1 = self.y.subs(symbol_value_dict)
z1 = self.z.subs(symbol_value_dict)
q_txyz = QH([t1, x1, y1, z1], qtype=self.qtype, representation=self.representation)
return q_txyz
def scalar(self, qtype="scalar"):
"""Returns the scalar part of a quaternion."""
end_qtype = "scalar({})".format(self.qtype)
s = QH([self.t, 0, 0, 0], qtype=end_qtype, representation=self.representation)
return s
def vector(self, qtype="v"):
"""Returns the vector part of a quaternion."""
end_qtype = "vector({})".format(self.qtype)
v = QH([0, self.x, self.y, self.z], qtype=end_qtype, representation=self.representation)
return v
def xyz(self):
"""Returns the vector as an np.array."""
return np.array([self.x, self.y, self.z])
def q_0(self, qtype="0"):
"""Return a zero quaternion."""
q0 = QH([0, 0, 0, 0], qtype=qtype, representation=self.representation)
return q0
def q_1(self, n=1, qtype="1"):
"""Return a multiplicative identity quaternion."""
q1 = QH([n, 0, 0, 0], qtype=qtype, representation=self.representation)
return q1
def q_i(self, n=1, qtype="i"):
"""Return i."""
qi = QH([0, n, 0, 0], qtype=qtype, representation=self.representation)
return qi
def q_j(self, n=1, qtype="j"):
"""Return j."""
qj = QH([0, 0, n, 0], qtype=qtype, representation=self.representation)
return qj
def q_k(self, n=1, qtype="k"):
"""Return k."""
qk = QH([0, 0, 0, n], qtype=qtype, representation=self.representation)
return qk
def q_random(self, qtype="?"):
"""Return a random-valued quaternion."""
qr = QH([random.random(), random.random(), random.random(), random.random()], qtype=qtype)
return qr
def dupe(self, qtype=""):
"""Return a duplicate copy, good for testing since qtypes persist"""
du = QH([self.t, self.x, self.y, self.z], qtype=self.qtype, representation=self.representation)
return du
def equals(self, q1):
"""Tests if two quaternions are equal."""
self.check_representations(q1)
self_t, self_x, self_y, self_z = sp.expand(self.t), sp.expand(self.x), sp.expand(self.y), sp.expand(self.z)
q1_t, q1_x, q1_y, q1_z = sp.expand(q1.t), sp.expand(q1.x), sp.expand(q1.y), sp.expand(q1.z)
if math.isclose(self_t, q1_t) and math.isclose(self_x, q1_x) and math.isclose(self_y, q1_y) and math.isclose(self_z, q1_z):
return True
else:
return False
def conj(self, conj_type=0, qtype="*"):
"""Three types of conjugates."""
t, x, y, z = self.t, self.x, self.y, self.z
conj_q = QH()
if conj_type == 0:
conj_q.t = t
if x != 0:
conj_q.x = -1 * x
if y != 0:
conj_q.y = -1 * y
if z != 0:
conj_q.z = -1 * z
elif conj_type == 1:
if t != 0:
conj_q.t = -1 * t
conj_q.x = x
if y != 0:
conj_q.y = -1 * y
if z != 0:
conj_q.z = -1 * z
qtype += "1"
elif conj_type == 2:
if t != 0:
conj_q.t = -1 * t
if x != 0:
conj_q.x = -1 * x
conj_q.y = y
if z != 0:
conj_q.z = -1 * z
qtype += "2"
conj_q.qtype = self.qtype + qtype
conj_q.representation = self.representation
return conj_q
def conj_q(self, q1):
"""Given a quaternion with 0's or 1's, will do the standard conjugate, first conjugate
second conjugate, sign flip, or all combinations of the above."""
_conj = deepcopy(self)
if q1.t:
_conj = _conj.conj(conj_type=0)
if q1.x:
_conj = _conj.conj(conj_type=1)
if q1.y:
_conj = _conj.conj(conj_type=2)
if q1.z:
_conj = _conj.flip_signs()
return _conj
def flip_signs(self, qtype="-"):
"""Flip the signs of all terms."""
end_qtype = "-{}".format(self.qtype)
t, x, y, z = self.t, self.x, self.y, self.z
flip_q = QH(qtype=end_qtype, representation=self.representation)
if t != 0:
flip_q.t = -1 * t
if x != 0:
flip_q.x = -1 * x
if y != 0:
flip_q.y = -1 * y
if z != 0:
flip_q.z = -1 * z
return flip_q
def vahlen_conj(self, conj_type="-", qtype="vc"):
"""Three types of conjugates -'* done by Vahlen in 1901."""
t, x, y, z = self.t, self.x, self.y, self.z
conj_q = QH()
if conj_type == '-':
conj_q.t = t
if x != 0:
conj_q.x = -1 * x
if y != 0:
conj_q.y = -1 * y
if z != 0:
conj_q.z = -1 * z
qtype += "*-"
if conj_type == "'":
conj_q.t = t
if x != 0:
conj_q.x = -1 * x
if y != 0:
conj_q.y = -1 * y
conj_q.z = z
qtype += "*'"
if conj_type == '*':
conj_q.t = t
conj_q.x = x
conj_q.y = y
if z != 0:
conj_q.z = -1 * z
qtype += "*"
conj_q.qtype = self.qtype + qtype
conj_q.representation = self.representation
return conj_q
def _commuting_products(self, q1):
"""Returns a dictionary with the commuting products."""
s_t, s_x, s_y, s_z = self.t, self.x, self.y, self.z
q1_t, q1_x, q1_y, q1_z = q1.t, q1.x, q1.y, q1.z
products = {'tt': s_t * q1_t,
'xx+yy+zz': s_x * q1_x + s_y * q1_y + s_z * q1_z,
'tx+xt': s_t * q1_x + s_x * q1_t,
'ty+yt': s_t * q1_y + s_y * q1_t,
'tz+zt': s_t * q1_z + s_z * q1_t}
return products
def _anti_commuting_products(self, q1):
"""Returns a dictionary with the three anti-commuting products."""
s_x, s_y, s_z = self.x, self.y, self.z
q1_x, q1_y, q1_z = q1.x, q1.y, q1.z
products = {'yz-zy': s_y * q1_z - s_z * q1_y,
'zx-xz': s_z * q1_x - s_x * q1_z,
'xy-yx': s_x * q1_y - s_y * q1_x,
'zy-yz': - s_y * q1_z + s_z * q1_y,
'xz-zx': - s_z * q1_x + s_x * q1_z,
'yx-xy': - s_x * q1_y + s_y * q1_x
}
return products
def _all_products(self, q1):
"""Returns a dictionary with all possible products."""
products = self._commuting_products(q1)
products.update(self._anti_commuting_products(q1))
return products
def square(self, qtype="^2"):
"""Square a quaternion."""
end_qtype = "{}{}".format(self.qtype, qtype)
qxq = self._commuting_products(self)
sq_q = QH(qtype=end_qtype, representation=self.representation)
sq_q.t = qxq['tt'] - qxq['xx+yy+zz']
sq_q.x = qxq['tx+xt']
sq_q.y = qxq['ty+yt']
sq_q.z = qxq['tz+zt']
return sq_q
def norm_squared(self, qtype="|| ||^2"):
"""The norm_squared of a quaternion."""
end_qtype = "||{}||^2".format(self.qtype, qtype)
qxq = self._commuting_products(self)
n_q = QH(qtype=end_qtype, representation=self.representation)
n_q.t = qxq['tt'] + qxq['xx+yy+zz']
return n_q
def norm_squared_of_vector(self, qtype="|V( )|^2"):
"""The norm_squared of the vector of a quaternion."""
end_qtype = "|V({})|^2".format(self.qtype)
qxq = self._commuting_products(self)
nv_q = QH(qtype=end_qtype, representation=self.representation)
nv_q.t = qxq['xx+yy+zz']
return nv_q
def abs_of_q(self, qtype="||"):
"""The absolute value, the square root of the norm_squared."""
end_qtype = "|{}|".format(self.qtype)
a = self.norm_squared()
sqrt_t = a.t ** (1/2)
a.t = sqrt_t
a.qtype = end_qtype
a.representation = self.representation
return a
def normalize(self, n=1, qtype="U"):
"""Normalize a quaternion"""
end_qtype = "{}{}".format(self.qtype, qtype)
abs_q_inv = self.abs_of_q().inverse()
n_q = self.product(abs_q_inv).product(QH([n, 0, 0, 0]))
n_q.qtype = end_qtype
n_q.representation = self.representation
return n_q
def abs_of_vector(self, qtype="|V( )|"):
"""The absolute value of the vector, the square root of the norm_squared of the vector."""
end_qtype = "|V({})|".format(self.qtype)
av = self.norm_squared_of_vector(qtype=end_qtype)
sqrt_t = av.t ** (1/2)
av.t = sqrt_t
av.representation = self.representation
return av
def add(self, qh_1, qtype=""):
"""Form a add given 2 quaternions."""
self.check_representations(qh_1)
end_qtype = "{f}+{s}".format(f=self.qtype, s=qh_1.qtype)
t_1, x_1, y_1, z_1 = self.t, self.x, self.y, self.z
t_2, x_2, y_2, z_2 = qh_1.t, qh_1.x, qh_1.y, qh_1.z
add_q = QH(qtype=end_qtype, representation=self.representation)
add_q.t = t_1 + t_2
add_q.x = x_1 + x_2
add_q.y = y_1 + y_2
add_q.z = z_1 + z_2
return add_q
def dif(self, qh_1, qtype=""):
"""Form a add given 2 quaternions."""
self.check_representations(qh_1)
end_qtype = "{f}-{s}".format(f=self.qtype, s=qh_1.qtype)
t_1, x_1, y_1, z_1 = self.t, self.x, self.y, self.z
t_2, x_2, y_2, z_2 = qh_1.t, qh_1.x, qh_1.y, qh_1.z
dif_q = QH(qtype=end_qtype, representation=self.representation)
dif_q.t = t_1 - t_2
dif_q.x = x_1 - x_2
dif_q.y = y_1 - y_2
dif_q.z = z_1 - z_2
return dif_q
def product(self, q1, kind="", reverse=False, qtype=""):
"""Form a product given 2 quaternions. Kind can be '' aka standard, even, odd, or even_minus_odd.
Setting reverse=True is like changing the order."""
self.check_representations(q1)
commuting = self._commuting_products(q1)
q_even = QH()
q_even.t = commuting['tt'] - commuting['xx+yy+zz']
q_even.x = commuting['tx+xt']
q_even.y = commuting['ty+yt']
q_even.z = commuting['tz+zt']
anti_commuting = self._anti_commuting_products(q1)
q_odd = QH()
if reverse:
q_odd.x = anti_commuting['zy-yz']
q_odd.y = anti_commuting['xz-zx']
q_odd.z = anti_commuting['yx-xy']
else:
q_odd.x = anti_commuting['yz-zy']
q_odd.y = anti_commuting['zx-xz']
q_odd.z = anti_commuting['xy-yx']
if kind == "":
result = q_even.add(q_odd)
times_symbol = "x"
elif kind.lower() == "even":
result = q_even
times_symbol = "xE"
elif kind.lower() == "odd":
result = q_odd
times_symbol = "xO"
elif kind.lower() == "even_minus_odd":
result = q_even.dif(q_odd)
times_symbol = "xE-O"
else:
raise Exception("Four 'kind' values are known: '', 'even', 'odd', and 'even_minus_odd'.")
if reverse:
times_symbol = times_symbol.replace('x', 'xR')
if qtype:
result.qtype = qtype
else:
result.qtype = "{f}{ts}{s}".format(f=self.qtype, ts=times_symbol, s=q1.qtype)
result.representation = self.representation
return result
def Euclidean_product(self, q1, kind="", reverse=False, qtype=""):
"""Form a product p* q given 2 quaternions, not associative."""
self.check_representations(q1)
pq = QH(qtype, representation=self.representation)
pq = self.conj().product(q1, kind, reverse)
return pq
def inverse(self, qtype="^-1", additive=False):
"""The additive or multiplicative inverse of a quaternion."""
if additive:
end_qtype = "-{}".format(self.qtype, qtype)
q_inv = self.flip_signs()
q_inv.qtype = end_qtype
else:
end_qtype = "{}{}".format(self.qtype, qtype)
q_conj = self.conj()
q_norm_squared = self.norm_squared()
if (not self.is_symbolic()) and (q_norm_squared.t == 0):
return self.q_0()
q_norm_squared_inv = QH([1.0 / q_norm_squared.t, 0, 0, 0])
q_inv = q_conj.product(q_norm_squared_inv)
q_inv.qtype = end_qtype
q_inv.representation = self.representation
return q_inv
def divide_by(self, q1, qtype=""):
"""Divide one quaternion by another. The order matters unless one is using a norm_squared (real number)."""
self.check_representations(q1)
end_qtype = "{f}/{s}".format(f=self.qtype, s=q1.qtype)
q1_inv = q1.inverse()
q_div = self.product(q1.inverse())
q_div.qtype = end_qtype
q_div.representation = self.representation
return q_div
def triple_product(self, q1, q2):
"""Form a triple product given 3 quaternions."""
self.check_representations(q1)
self.check_representations(q2)
triple = self.product(q1).product(q2)
triple.representation = self.representation
return triple
# Quaternion rotation involves a triple product: u R 1/u
def rotate(self, u, qtype="rot"):
"""Do a rotation using a triple product: u R 1/u."""
end_qtype = "{}{}".format(self.qtype, qtype)
u_abs = u.abs_of_q()
u_normalized = u.divide_by(u_abs)
q_rot = u_normalized.triple_product(self, u_normalized.conj())
q_rot.qtype = end_qtype
q_rot.representation = self.representation
return q_rot
# A boost also uses triple products like a rotation, but more of them.
# This is not a well-known result, but does work.
# b -> b' = h b h* + 1/2 ((hhb)* -(h*h*b)*)
# where h is of the form (cosh(a), sinh(a)) OR (0, a, b, c)
def boost(self, h, qtype="boost"):
"""A boost or rotation or both."""
end_qtype = "{}{}".format(self.qtype, qtype)
boost = h
b_conj = boost.conj()
triple_1 = boost.triple_product(self, b_conj)
triple_2 = boost.triple_product(boost, self).conj()
triple_3 = b_conj.triple_product(b_conj, self).conj()
triple_23 = triple_2.dif(triple_3)
half_23 = triple_23.product(QH([0.5, 0, 0, 0]))
triple_123 = triple_1.add(half_23, qtype=end_qtype)
triple_123.qtype = end_qtype
triple_123.representation = self.representation
return triple_123
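# (Added sketch, not in the original notebook.) The special-relativity helpers at
# the top of this file already produce an h of the required (cosh, sinh) form,
# since gamma**2 - (gamma*beta)**2 = 1:
#   g, gbx, gby, gbz = sr_gamma_betas(0.5)
#   h = QH([g, gbx, gby, gbz], qtype="h")
#   boosted = QH([1.0, 2.0, 0, 0], qtype="event").boost(h)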
# g_shift is a function based on the space-times-time invariance proposal for gravity,
# which proposes that if one changes the distance from a gravitational source, then
# squares a measurement, the observers at two different hieghts agree to their
# space-times-time values, but not the intervals.
# g_form is the form of the function, either minimal or exponential
# Minimal is what is needed to pass all weak field tests of gravity
def g_shift(self, dimensionless_g, g_form="exp", qtype="g_shift"):
"""Shift an observation based on a dimensionless GM/c^2 dR."""
end_qtype = "{}{}".format(self.qtype, qtype)
if g_form == "exp":
g_factor = sp.exp(dimensionless_g)
elif g_form == "minimal":
g_factor = 1 + 2 * dimensionless_g + 2 * dimensionless_g ** 2
else:
print("g_form not defined, should be 'exp' or 'minimal': {}".format(g_form))
return self
g_q = QH(qtype=end_qtype)
g_q.t = self.t / g_factor
g_q.x = self.x * g_factor
g_q.y = self.y * g_factor
g_q.z = self.z * g_factor
g_q.qtype = end_qtype
g_q.representation = self.representation
return g_q
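# (Added sketch, not in the original notebook.) For a weak field the two g_forms
# agree to first order; e.g. with dimensionless_g = GM/(c**2 dR) of order 1e-9:
#   observer = QH([1.0, 1.0, 0, 0], qtype="obs")
#   shifted = observer.g_shift(1e-9, g_form="minimal")  # t shrinks, space terms grow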
def sin(self, qtype="sin"):
"""Take the sine of a quaternion, (sin(t) cosh(|R|), cos(t) sinh(|R|) R/|R|)"""
end_qtype = "sin({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.sin(self.t), 0, 0, 0], qtype=end_qtype)
sint = math.sin(self.t)
cost = math.cos(self.t)
sinhR = math.sinh(abs_v.t)
coshR = math.cosh(abs_v.t)
k = cost * sinhR / abs_v.t
q_out = QH()
q_out.t = sint * coshR
q_out.x = k * self.x
q_out.y = k * self.y
q_out.z = k * self.z
q_out.qtype = end_qtype
q_out.representation = self.representation
return q_out
def cos(self, qtype="cos"):
"""Take the cosine of a quaternion, (cos(t) cosh(|R|), sin(t) sinh(|R|) R/|R|)"""
end_qtype = "cos({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.cos(self.t), 0, 0, 0], qtype=end_qtype)
sint = math.sin(self.t)
cost = math.cos(self.t)
sinhR = math.sinh(abs_v.t)
coshR = math.cosh(abs_v.t)
k = -1 * sint * sinhR / abs_v.t
q_out = QH()
q_out.t = cost * coshR
q_out.x = k * self.x
q_out.y = k * self.y
q_out.z = k * self.z
q_out.qtype = end_qtype
q_out.representation = self.representation
return q_out
    def tan(self, qtype="tan"):
"""Take the tan of a quaternion, sin/cos"""
end_qtype = "tan({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.tan(self.t), 0, 0, 0], qtype=end_qtype)
sinq = self.sin()
cosq = self.cos()
q_out = sinq.divide_by(cosq)
q_out.qtype = end_qtype
q_out.representation = self.representation
return q_out
def sinh(self, qtype="sinh"):
"""Take the sinh of a quaternion, (sinh(t) cos(|R|), cosh(t) sin(|R|) R/|R|)"""
end_qtype = "sinh({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.sinh(self.t), 0, 0, 0], qtype=end_qtype)
sinht = math.sinh(self.t)
cosht = math.cosh(self.t)
sinR = math.sin(abs_v.t)
cosR = math.cos(abs_v.t)
k = cosht * sinR / abs_v.t
q_out = QH(qtype=end_qtype, representation=self.representation)
q_out.t = sinht * cosR
q_out.x = k * self.x
q_out.y = k * self.y
q_out.z = k * self.z
return q_out
    def cosh(self, qtype="cosh"):
"""Take the cosh of a quaternion, (cosh(t) cos(|R|), sinh(t) sin(|R|) R/|R|)"""
end_qtype = "cosh({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.cosh(self.t), 0, 0, 0], qtype=end_qtype)
sinht = math.sinh(self.t)
cosht = math.cosh(self.t)
sinR = math.sin(abs_v.t)
cosR = math.cos(abs_v.t)
k = sinht * sinR / abs_v.t
q_out = QH(qtype=end_qtype, representation=self.representation)
q_out.t = cosht * cosR
q_out.x = k * self.x
q_out.y = k * self.y
q_out.z = k * self.z
return q_out
def tanh(self, qtype="tanh"):
"""Take the tanh of a quaternion, sin/cos"""
end_qtype = "tanh({sq})".format(sq=self.qtype)
abs_v = self.abs_of_vector()
if abs_v.t == 0:
return QH([math.tanh(self.t), 0, 0, 0], qtype=end_qtype)
sinhq = self.sinh()
coshq = self.cosh()
q_out = sinhq.divide_by(coshq)
q_out.qtype = end_qtype
q_out.representation = self.representation
return q_out
def exp(self, qtype="exp"):
"""Take the exponential of a quaternion."""
        # exp(q) = (exp(t) cos(|R|), exp(t) sin(|R|) R/|R|)
end_qtype = "exp({st})".format(st=self.qtype)
abs_v = self.abs_of_vector()
et = math.exp(self.t)
if (abs_v.t == 0):
return QH([et, 0, 0, 0], qtype=end_qtype)
cosR = math.cos(abs_v.t)
sinR = math.sin(abs_v.t)
k = et * sinR / abs_v.t
expq = QH([et * cosR, k * self.x, k * self.y, k * self.z], qtype=end_qtype, representation=self.representation)
return expq
def ln(self, qtype="ln"):
"""Take the natural log of a quaternion."""
        # ln(q) = (0.5 ln(t^2 + R.R), atan2(|R|, t) R/|R|)
end_qtype = "ln({st})".format(st=self.qtype)
abs_v = self.abs_of_vector()
if (abs_v.t == 0):
if self.t > 0:
return(QH([math.log(self.t), 0, 0, 0], qtype=end_qtype))
else:
                # I don't understand this, but Mathematica does the same thing.
                return QH([math.log(-self.t), math.pi, 0, 0], qtype=end_qtype)
t_value = 0.5 * math.log(self.t * self.t + abs_v.t * abs_v.t)
k = math.atan2(abs_v.t, self.t) / abs_v.t
expq = QH([t_value, k * self.x, k * self.y, k * self.z], qtype=end_qtype, representation=self.representation)
return expq
def q_2_q(self, q1, qtype="P"):
"""Take the natural log of a quaternion."""
# q^p = exp(ln(q) * p)
self.check_representations(q1)
end_qtype = "{st}^P".format(st=self.qtype)
q2q = self.ln().product(q1).exp()
q2q.qtype = end_qtype
q2q.representation = self.representation
return q2q
def trunc(self):
"""Truncates values."""
self.t = math.trunc(self.t)
self.x = math.trunc(self.x)
self.y = math.trunc(self.y)
self.z = math.trunc(self.z)
return self
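# A small illustrative cell (an addition, not part of the original notebook):
# exercise the rotation, boost, and gravitational shift methods defined above.
# The demo names (q_demo, h_demo) are made up here; the boost quaternion h_demo
# is built directly from cosh/sinh so only QH methods and the math module are used.
q_demo = QH([1, -2, -3, -4], qtype="demo")
print("rotated about x: ", q_demo.rotate(QH([0, 1, 0, 0])))
h_demo = QH([math.cosh(0.01), math.sinh(0.01), 0, 0])
print("boosted: ", q_demo.boost(h_demo))
print("g-shifted (minimal): ", q_demo.g_shift(0.003, g_form="minimal"))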
# Write tests for the QH class.
# In[4]:
class TestQH(unittest.TestCase):
"""Class to make sure all the functions work as expected."""
Q = QH([1, -2, -3, -4], qtype="Q")
P = QH([0, 4, -3, 0], qtype="P")
R = QH([3, 0, 0, 0], qtype="R")
C = QH([2, 4, 0, 0], qtype="C")
t, x, y, z = sp.symbols("t x y z")
q_sym = QH([t, x, y, x * y * z])
def test_qt(self):
self.assertTrue(self.Q.t == 1)
def test_subs(self):
q_z = self.q_sym.subs({self.t:1, self.x:2, self.y:3, self.z:4})
print("t x y xyz sub 1 2 3 4: ", q_z)
self.assertTrue(q_z.equals(QH([1, 2, 3, 24])))
def test_scalar(self):
q_z = self.Q.scalar()
print("scalar(q): ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_vector(self):
q_z = self.Q.vector()
print("vector(q): ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == -2)
self.assertTrue(q_z.y == -3)
self.assertTrue(q_z.z == -4)
def test_xyz(self):
q_z = self.Q.xyz()
print("q.xyz()): ", q_z)
self.assertTrue(q_z[0] == -2)
self.assertTrue(q_z[1] == -3)
self.assertTrue(q_z[2] == -4)
def test_q_0(self):
q_z = self.Q.q_0()
print("q_0: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_q_1(self):
q_z = self.Q.q_1()
print("q_1: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_q_i(self):
q_z = self.Q.q_i()
print("q_i: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == 1)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_q_j(self):
q_z = self.Q.q_j()
print("q_j: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 1)
self.assertTrue(q_z.z == 0)
def test_q_k(self):
q_z = self.Q.q_k()
print("q_k: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 1)
def test_q_random(self):
q_z = QH().q_random()
print("q_random():", q_z)
self.assertTrue(q_z.t >= 0 and q_z.t <= 1)
self.assertTrue(q_z.x >= 0 and q_z.x <= 1)
self.assertTrue(q_z.y >= 0 and q_z.y <= 1)
self.assertTrue(q_z.z >= 0 and q_z.z <= 1)
def test_equals(self):
self.assertTrue(self.Q.equals(self.Q))
self.assertFalse(self.Q.equals(self.P))
def test_conj_0(self):
q_z = self.Q.conj()
print("q_conj 0: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == 4)
def test_conj_1(self):
q_z = self.Q.conj(1)
print("q_conj 1: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == -2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == 4)
def test_conj_2(self):
q_z = self.Q.conj(2)
print("q_conj 2: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == -3)
self.assertTrue(q_z.z == 4)
def test_conj_q(self):
q_z = self.Q.conj_q(self.Q)
print("conj_q(conj_q): ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == -4)
def sign_flips(self):
q_z = self.Q.sign_flips()
print("sign_flips: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == 4)
def test_vahlen_conj_minus(self):
q_z = self.Q.vahlen_conj()
print("q_vahlen_conj -: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == 4)
def test_vahlen_conj_star(self):
q_z = self.Q.vahlen_conj('*')
print("q_vahlen_conj *: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == -2)
self.assertTrue(q_z.y == -3)
self.assertTrue(q_z.z == 4)
def test_vahlen_conj_prime(self):
q_z = self.Q.vahlen_conj("'")
print("q_vahlen_conj ': ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == -4)
def test_square(self):
q_z = self.Q.square()
print("square: ", q_z)
self.assertTrue(q_z.t == -28)
self.assertTrue(q_z.x == -4)
self.assertTrue(q_z.y == -6)
self.assertTrue(q_z.z == -8)
def test_norm_squared(self):
q_z = self.Q.norm_squared()
print("norm_squared: ", q_z)
self.assertTrue(q_z.t == 30)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_norm_squared_of_vector(self):
q_z = self.Q.norm_squared_of_vector()
print("norm_squared_of_vector: ", q_z)
self.assertTrue(q_z.t == 29)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_abs_of_q(self):
q_z = self.P.abs_of_q()
print("abs_of_q: ", q_z)
self.assertTrue(q_z.t == 5)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_normalize(self):
q_z = self.P.normalize()
print("q_normalized: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == 0.8)
self.assertAlmostEqual(q_z.y, -0.6)
self.assertTrue(q_z.z == 0)
def test_abs_of_vector(self):
q_z = self.P.abs_of_vector()
print("abs_of_vector: ", q_z)
self.assertTrue(q_z.t == 5)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_add(self):
q_z = self.Q.add(self.P)
print("add: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 2)
self.assertTrue(q_z.y == -6)
self.assertTrue(q_z.z == -4)
def test_dif(self):
q_z = self.Q.dif(self.P)
print("dif: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == -6)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == -4)
def test_product(self):
q_z = self.Q.product(self.P)
print("product: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == -8)
self.assertTrue(q_z.y == -19)
self.assertTrue(q_z.z == 18)
def test_product_even(self):
q_z = self.Q.product(self.P, kind="even")
print("product, kind even: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == 4)
self.assertTrue(q_z.y == -3)
self.assertTrue(q_z.z == 0)
def test_product_odd(self):
q_z = self.Q.product(self.P, kind="odd")
print("product, kind odd: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == -12)
self.assertTrue(q_z.y == -16)
self.assertTrue(q_z.z == 18)
def test_product_even_minus_odd(self):
q_z = self.Q.product(self.P, kind="even_minus_odd")
print("product, kind even_minus_odd: ", q_z)
self.assertTrue(q_z.t == -1)
self.assertTrue(q_z.x == 16)
self.assertTrue(q_z.y == 13)
self.assertTrue(q_z.z == -18)
def test_product_reverse(self):
q1q2_rev = self.Q.product(self.P, reverse=True)
q2q1 = self.P.product(self.Q)
self.assertTrue(q1q2_rev.equals(q2q1))
def test_Euclidean_product(self):
q_z = self.Q.Euclidean_product(self.P)
print("Euclidean product: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 16)
self.assertTrue(q_z.y == 13)
self.assertTrue(q_z.z == -18)
def test_inverse(self):
q_z = self.P.inverse()
print("inverse: ", q_z)
self.assertTrue(q_z.t == 0)
self.assertTrue(q_z.x == -0.16)
self.assertTrue(q_z.y == 0.12)
self.assertTrue(q_z.z == 0)
def test_divide_by(self):
q_z = self.Q.divide_by(self.Q)
print("divide_by: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == 0)
self.assertTrue(q_z.y == 0)
self.assertTrue(q_z.z == 0)
def test_triple_product(self):
q_z = self.Q.triple_product(self.P, self.Q)
print("triple product: ", q_z)
self.assertTrue(q_z.t == -2)
self.assertTrue(q_z.x == 124)
self.assertTrue(q_z.y == -84)
self.assertTrue(q_z.z == 8)
def test_rotate(self):
q_z = self.Q.rotate(QH([0, 1, 0, 0]))
print("rotate: ", q_z)
self.assertTrue(q_z.t == 1)
self.assertTrue(q_z.x == -2)
self.assertTrue(q_z.y == 3)
self.assertTrue(q_z.z == 4)
def test_boost(self):
q1_sq = self.Q.square()
h = QH(sr_gamma_betas(0.003))
q_z = self.Q.boost(h)
q_z2 = q_z.square()
print("q1_sq: ", q1_sq)
print("boosted: ", q_z)
print("boosted squared: ", q_z2)
self.assertTrue(round(q_z2.t, 5) == round(q1_sq.t, 5))
def test_g_shift(self):
q1_sq = self.Q.square()
q_z = self.Q.g_shift(0.003)
q_z2 = q_z.square()
q_z_minimal = self.Q.g_shift(0.003, g_form="minimal")
q_z2_minimal = q_z_minimal.square()
print("q1_sq: ", q1_sq)
print("g_shift: ", q_z)
print("g squared: ", q_z2)
self.assertTrue(q_z2.t != q1_sq.t)
self.assertTrue(q_z2.x == q1_sq.x)
self.assertTrue(q_z2.y == q1_sq.y)
self.assertTrue(q_z2.z == q1_sq.z)
self.assertTrue(q_z2_minimal.t != q1_sq.t)
self.assertTrue(q_z2_minimal.x == q1_sq.x)
self.assertTrue(q_z2_minimal.y == q1_sq.y)
self.assertTrue(q_z2_minimal.z == q1_sq.z)
def test_sin(self):
self.assertTrue(QH([0, 0, 0, 0]).sin().equals(QH().q_0()))
self.assertTrue(self.Q.sin().equals(QH([91.7837157840346691, -21.8864868530291758, -32.8297302795437673, -43.7729737060583517])))
self.assertTrue(self.P.sin().equals(QH([0, 59.3625684622310033, -44.5219263466732542, 0])))
self.assertTrue(self.R.sin().equals(QH([0.1411200080598672, 0, 0, 0])))
self.assertTrue(self.C.sin().equals(QH([24.8313058489463785, -11.3566127112181743, 0, 0])))
def test_cos(self):
self.assertTrue(QH([0, 0, 0, 0]).cos().equals(QH().q_1()))
self.assertTrue(self.Q.cos().equals(QH([58.9336461679439481, 34.0861836904655959, 51.1292755356983974, 68.1723673809311919])))
self.assertTrue(self.P.cos().equals(QH([74.2099485247878476, 0, 0, 0])))
self.assertTrue(self.R.cos().equals(QH([-0.9899924966004454, 0, 0, 0])))
self.assertTrue(self.C.cos().equals(QH([-11.3642347064010600, -24.8146514856341867, 0, 0])))
def test_tan(self):
self.assertTrue(QH([0, 0, 0, 0]).tan().equals(QH().q_0()))
self.assertTrue(self.Q.tan().equals(QH([0.0000382163172501, -0.3713971716439372, -0.5570957574659058, -0.7427943432878743])))
self.assertTrue(self.P.tan().equals(QH([0, 0.7999273634100760, -0.5999455225575570, 0])))
self.assertTrue(self.R.tan().equals(QH([-0.1425465430742778, 0, 0, 0])))
self.assertTrue(self.C.tan().equals(QH([-0.0005079806234700, 1.0004385132020521, 0, 0])))
def test_sinh(self):
self.assertTrue(QH([0, 0, 0, 0]).sinh().equals(QH().q_0()))
self.assertTrue(self.Q.sinh().equals(QH([0.7323376060463428, 0.4482074499805421, 0.6723111749708131, 0.8964148999610841])))
self.assertTrue(self.P.sinh().equals(QH([0, -0.7671394197305108, 0.5753545647978831, 0])))
self.assertTrue(self.R.sinh().equals(QH([10.0178749274099026, 0, 0, 0])))
self.assertTrue(self.C.sinh().equals(QH([-2.3706741693520015, -2.8472390868488278, 0, 0])))
def test_cosh(self):
self.assertTrue(QH([0, 0, 0, 0]).cosh().equals(QH().q_1()))
self.assertTrue(self.Q.cosh().equals(QH([0.9615851176369565, 0.3413521745610167, 0.5120282618415251, 0.6827043491220334])))
self.assertTrue(self.P.cosh().equals(QH([0.2836621854632263, 0, 0, 0])))
self.assertTrue(self.R.cosh().equals(QH([10.0676619957777653, 0, 0, 0])))
self.assertTrue(self.C.cosh().equals(QH([-2.4591352139173837, -2.7448170067921538, 0, 0])))
def test_tanh(self):
self.assertTrue(QH([0, 0, 0, 0]).tanh().equals(QH().q_0()))
self.assertTrue(self.Q.tanh().equals(QH([1.0248695360556623, 0.1022956817887642, 0.1534435226831462, 0.2045913635775283])))
self.assertTrue(self.P.tanh().equals(QH([0, -2.7044120049972684, 2.0283090037479505, 0])))
self.assertTrue(self.R.tanh().equals(QH([0.9950547536867305, 0, 0, 0])))
self.assertTrue(self.C.tanh().equals(QH([1.0046823121902353, 0.0364233692474038, 0, 0])))
def test_exp(self):
self.assertTrue(QH([0, 0, 0, 0]).exp().equals(QH().q_1()))
self.assertTrue(self.Q.exp().equals(QH([1.6939227236832994, 0.7895596245415588, 1.1843394368123383, 1.5791192490831176])))
self.assertTrue(self.P.exp().equals(QH([0.2836621854632263, -0.7671394197305108, 0.5753545647978831, 0])))
self.assertTrue(self.R.exp().equals(QH([20.0855369231876679, 0, 0, 0])))
self.assertTrue(self.C.exp().equals(QH([-4.8298093832693851, -5.5920560936409816, 0, 0])))
def test_ln(self):
self.assertTrue(self.Q.ln().exp().equals(self.Q))
self.assertTrue(self.Q.ln().equals(QH([1.7005986908310777, -0.5151902926640850, -0.7727854389961275, -1.0303805853281700])))
self.assertTrue(self.P.ln().equals(QH([1.6094379124341003, 1.2566370614359172, -0.9424777960769379, 0])))
self.assertTrue(self.R.ln().equals(QH([1.0986122886681098, 0, 0, 0])))
self.assertTrue(self.C.ln().equals(QH([1.4978661367769954, 1.1071487177940904, 0, 0])))
def test_q_2_q(self):
self.assertTrue(self.Q.q_2_q(self.P).equals(QH([-0.0197219653530713, -0.2613955437374326, 0.6496281248064009, -0.3265786562423951])))
suite = unittest.TestLoader().loadTestsFromModule(TestQH())
unittest.TextTestRunner().run(suite);
# In[5]:
class TestQHRep(unittest.TestCase):
Q12 = QH([1, 2, 0, 0])
Q1123 = QH([1, 1, 2, 3])
Q11p = QH([1, 1, 0, 0], representation="polar")
Q12p = QH([1, 2, 0, 0], representation="polar")
Q12np = QH([1, -2, 0, 0], representation="polar")
Q21p = QH([2, 1, 0, 0], representation="polar")
Q23p = QH([2, 3, 0, 0], representation="polar")
Q13p = QH([1, 3, 0, 0], representation="polar")
Q5p = QH([5, 0, 0, 0], representation="polar")
def test_txyz_2_representation(self):
qr = QH(self.Q12.txyz_2_representation(""))
self.assertTrue(qr.equals(self.Q12))
qr = QH(self.Q12.txyz_2_representation("polar"))
self.assertTrue(qr.equals(QH([2.23606797749979, 1.10714871779409, 0, 0])))
qr = QH(self.Q1123.txyz_2_representation("spherical"))
self.assertTrue(qr.equals(QH([1.0, 3.7416573867739413, 0.640522312679424, 1.10714871779409])))
def test_representation_2_txyz(self):
qr = QH(self.Q12.representation_2_txyz(""))
self.assertTrue(qr.equals(self.Q12))
qr = QH(self.Q12.representation_2_txyz("polar"))
self.assertTrue(qr.equals(QH([-0.4161468365471424, 0.9092974268256817, 0, 0])))
qr = QH(self.Q1123.representation_2_txyz("spherical"))
self.assertTrue(qr.equals(QH([1.0, -0.9001976297355174, 0.12832006020245673, -0.4161468365471424])))
def test_polar_products(self):
qr = self.Q11p.product(self.Q12p)
print("polar 1 1 0 0 * 1 2 0 0: ", qr)
self.assertTrue(qr.equals(self.Q13p))
qr = self.Q12p.product(self.Q21p)
print("polar 1 2 0 0 * 2 1 0 0: ", qr)
self.assertTrue(qr.equals(self.Q23p))
def test_polar_conj(self):
qr = self.Q12p.conj()
print("polar conj of 1 2 0 0: ", qr)
self.assertTrue(qr.equals(self.Q12np))
suite = unittest.TestLoader().loadTestsFromModule(TestQHRep())
unittest.TextTestRunner().run(suite);
# ## Using More Numbers via Doublets
# My long term goal is to deal with quaternions on a quaternion manifold. This will have 4 pairs of doublets. Each doublet is paired with its additive inverse. Instead of using real numbers, one uses (3, 0) and (0, 2) to represent +3 and -2 respectively. Numbers such as (5, 6) are allowed. That can be "reduced" to (0, 1). My sense is that somewhere deep in the depths of relativistic quantum field theory, this will be a "good thing". For now, it is a minor pain to program.
# In[6]:
class Doublet(object):
"""A pair of number that are additive inverses. It can take
ints, floats, Symbols, or strings."""
def __init__(self, numbers=None):
if numbers is None:
self.p = 0
self.n = 0
elif isinstance(numbers, (int, float)):
if numbers < 0:
self.n = -1 * numbers
self.p = 0
else:
self.p = numbers
self.n = 0
elif isinstance(numbers, sp.Symbol):
self.p = numbers
self.n = 0
elif isinstance(numbers, list):
if len(numbers) == 2:
self.p, self.n = numbers[0], numbers[1]
elif isinstance(numbers, str):
n_list = numbers.split()
if (len(n_list) == 1):
                if n_list[0].isnumeric():
                    n_value = float(n_list[0])
                    if n_value < 0:
                        self.n = -1 * n_value
                        self.p = 0
                    else:
                        self.p = n_value
                        self.n = 0
else:
self.p = sp.Symbol(n_list[0])
self.n = 0
if (len(n_list) == 2):
if n_list[0].isnumeric():
self.p = float(n_list[0])
else:
self.p = sp.Symbol(n_list[0])
if n_list[1].isnumeric():
self.n = float(n_list[1])
else:
self.n = sp.Symbol(n_list[1])
else:
print ("unable to parse this Double.")
def __str__(self):
"""Customize the output."""
return "{p}p {n}n".format(p=self.p, n=self.n)
def d_add(self, d1):
"""Add a doublet to another."""
        p0, n0 = self.p, self.n
        p1, n1 = d1.p, d1.n
        return Doublet([p0 + p1, n0 + n1])
def d_reduce(self):
"""If p and n are not zero, subtract """
if self.p == 0 or self.n == 0:
return Doublet([self.p, self.n])
elif self.p > self.n:
return Doublet([self.p - self.n, 0])
elif self.p < self.n:
return Doublet([0, self.n - self.p])
else:
return Doublet()
def d_additive_inverse_up_to_an_automorphism(self, n=0):
"""Creates one additive inverses up to an arbitrary positive n."""
if n == 0:
return Doublet([self.n + n, self.p + n])
else:
red = self.d_reduce()
return Doublet([red.n + n, red.p + n])
def d_dif(self, d1, n=0):
"""Take the difference by flipping and adding."""
d2 = d1.d_additive_inverse_up_to_an_automorphism(n)
return self.d_add(d2)
def d_equals(self, d1):
"""Figure out if two doublets are equal up to an equivalence relation."""
self_red = self.d_reduce()
d1_red = d1.d_reduce()
if math.isclose(self_red.p, d1_red.p) and math.isclose(self_red.n, d1_red.n):
return True
else:
return False
def Z2_product(self, d1):
"""Uset the Abelian cyclic group Z2 to form the product of 2 doublets."""
p1 = self.p * d1.p + self.n * d1.n
n1 = self.p * d1.n + self.n * d1.p
return Doublet([p1, n1])
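# A short illustrative example (an addition, not part of the original notebook):
# the pair (5, 6) from the discussion above reduces to (0, 1), i.e. the number -1,
# and the Z2 product of "1 2" and "3 2" gives 7p 8n, as the tests below confirm.
print("reduce (5, 6): ", Doublet([5, 6]).d_reduce())
print("Z2 product of '1 2' and '3 2': ", Doublet("1 2").Z2_product(Doublet("3 2")))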
# In[7]:
class TestDoublet(unittest.TestCase):
"""Class to make sure all the functions work as expected."""
d1 = Doublet()
d2 = Doublet(2)
d3 = Doublet(-3)
d4 = Doublet([5, 3])
dstr12 = Doublet("1 2")
dstr13 = Doublet("3 2")
def test_null(self):
self.assertTrue(self.d1.p == 0)
self.assertTrue(self.d1.n == 0)
def test_2(self):
self.assertTrue(self.d2.p == 2)
self.assertTrue(self.d2.n == 0)
def test_3(self):
self.assertTrue(self.d3.p == 0)
self.assertTrue(self.d3.n == 3)
def test_str12(self):
self.assertTrue(self.dstr12.p == 1)
self.assertTrue(self.dstr12.n == 2)
def test_add(self):
d_add = self.d2.d_add(self.d3)
self.assertTrue(d_add.p == 2)
self.assertTrue(d_add.n == 3)
def test_d_additive_inverse_up_to_an_automorphism(self):
d_f = self.d2.d_additive_inverse_up_to_an_automorphism()
self.assertTrue(d_f.p == 0)
self.assertTrue(d_f.n == 2)
def test_dif(self):
d_d = self.d2.d_dif(self.d3)
self.assertTrue(d_d.p == 5)
self.assertTrue(d_d.n == 0)
def test_reduce(self):
d_add = self.d2.d_add(self.d3)
d_r = d_add.d_reduce()
self.assertTrue(d_r.p == 0)
self.assertTrue(d_r.n == 1)
def test_Z2_product(self):
Z2p = self.dstr12.Z2_product(self.dstr13)
self.assertTrue(Z2p.p == 7)
self.assertTrue(Z2p.n == 8)
def test_d_equals(self):
self.assertTrue(self.d2.d_equals(self.d4))
self.assertFalse(self.d2.d_equals(self.d1))
def test_reduced_product(self):
"""Reduce before or after, should make no difference."""
Z2p_1 = self.dstr12.Z2_product(self.dstr13)
Z2p_red = Z2p_1.d_reduce()
d_r_1 = self.dstr12.d_reduce()
d_r_2 = self.dstr13.d_reduce()
Z2p_2 = d_r_1.Z2_product(d_r_2)
self.assertTrue(Z2p_red.p == Z2p_2.p)
self.assertTrue(Z2p_red.n == Z2p_2.n)
suite = unittest.TestLoader().loadTestsFromModule(TestDoublet())
unittest.TextTestRunner().run(suite);
# Repeat the exercise for arrays.
# In[8]:
class Doubleta(object):
"""A pair of number that are additive inverses. It can take
ints, floats, Symbols, or strings."""
def __init__(self, numbers=None):
if numbers is None:
            self.d = np.array([0.0, 0.0])
from typing import Callable, Optional
import numpy as np
from numpy.typing import ArrayLike
from ._helpers import (
Identity,
Info,
LinearOperator,
Product,
aslinearoperator,
get_default_inner,
)
def cg(
A: LinearOperator,
b: ArrayLike,
M: Optional[LinearOperator] = None,
Ml: Optional[LinearOperator] = None,
inner: Optional[Callable] = None,
x0: Optional[ArrayLike] = None,
tol: float = 1e-5,
atol: float = 1.0e-15,
maxiter: Optional[int] = None,
return_arnoldi: bool = False,
callback: Optional[Callable] = None,
):
r"""Preconditioned CG method.
The *preconditioned conjugate gradient method* can be used to solve a system of
linear algebraic equations where the linear operator is self-adjoint and positive
definite. Let the following linear algebraic system be given:
.. math::
M M_l A M_r y = M M_l b,
where :math:`x=M_r y` and :math:`M_l A M_r` is self-adjoint and positive definite
with respect to the inner product :math:`\langle \cdot,\cdot \rangle` defined by
``inner``. The preconditioned CG method then computes (in exact arithmetics!)
iterates :math:`x_k \in x_0 + M_r K_k` with :math:`K_k:= K_k(M M_l A M_r, r_0)` such
that
.. math::
\|x - x_k\|_A = \min_{z \in x_0 + M_r K_k} \|x - z\|_A.
The Lanczos algorithm is used with the operator :math:`M M_l A M_r` and the inner
product defined by :math:`\langle x,y \rangle_{M^{-1}} = \langle M^{-1}x,y \rangle`.
The initial vector for Lanczos is :math:`r_0 = M M_l (b - Ax_0)` - note that
:math:`M_r` is not used for the initial vector.
Memory consumption is:
* if ``return_arnoldi==False``: 3 vectors or 6 vectors if :math:`M` is used.
* if ``return_arnoldi==True``: about iter+1 vectors for the Lanczos
basis. If :math:`M` is used the memory consumption is 2*(iter+1).
**Caution:** CG's convergence may be delayed significantly due to round-off errors,
cf. chapter 5.9 in [LieS13]_.
"""
def _get_xk(yk):
"""Compute approximate solution from initial guess and approximate solution of
the preconditioned linear system."""
# Mr_yk = yk if Mr is None else Mr @ yk
Mr_yk = yk
return x0 + Mr_yk
def get_residual_and_norm2(z):
r"""Compute residual.
For a given :math:`z\in\mathbb{C}^N`, the residual
.. math::
r = M M_l ( b - A z )
:param z: approximate solution and the absolute residual norm
.. math::
\\|M M_l (b-Az)\\|_{M^{-1}}
"""
r = b - A @ z
Ml_r = Ml @ r
M_Ml_r = M @ Ml_r
norm2 = inner(Ml_r, M_Ml_r)
if np.any(norm2.imag != 0.0):
raise ValueError("inner product <x, M x> gave nonzero imaginary part")
norm2 = norm2.real
return M_Ml_r, Ml_r, norm2
b = np.asarray(b)
assert len(A.shape) == 2
assert A.shape[0] == A.shape[1]
assert A.shape[1] == b.shape[0]
N = A.shape[0]
inner = get_default_inner(b.shape) if inner is None else inner
M = Identity() if M is None else aslinearoperator(M)
Ml = Identity() if Ml is None else aslinearoperator(Ml)
Ml_A_Mr = Product(Ml, A)
maxiter = N if maxiter is None else maxiter
x0 = np.zeros_like(b) if x0 is None else x0
# get initial residual
M_Ml_r0, Ml_r0, M_Ml_r0_norm2 = get_residual_and_norm2(x0)
M_Ml_r0_norm = np.sqrt(M_Ml_r0_norm2)
if callback is not None:
callback(x0, Ml_r0)
# TODO: reortho
resnorms = [M_Ml_r0_norm]
# resulting approximation is xk = x0 + Mr*yk
yk = np.zeros(x0.shape, dtype=M_Ml_r0.dtype)
xk = None
# square of the old residual norm
rhos = [None, M_Ml_r0_norm2]
# will be updated by _compute_rkn if explicit_residual is True
Ml_rk = Ml_r0.copy()
M_Ml_rk = M_Ml_r0.copy()
# search direction
p = M_Ml_rk.copy()
# store Lanczos vectors + matrix?
if return_arnoldi:
V = []
V.append(M_Ml_r0 / np.where(M_Ml_r0_norm > 0.0, M_Ml_r0_norm, 1.0))
if M is not None:
P = []
            P.append(Ml_r0 / np.where(M_Ml_r0_norm > 0.0, M_Ml_r0_norm, 1.0))
"""DEP WEPP cli editor. One "tile" at a time.
Usage:
python daily_climate_editor.py <xtile> <ytile> <tilesz>
<scenario> <YYYY> <mm> <dd>
Where tiles start in the lower left corner and are 5x5 deg in size
development laptop has data for 3 March 2019, 23 May 2009, and 8 Jun 2009
"""
try:
from zoneinfo import ZoneInfo # type: ignore
except ImportError:
from backports.zoneinfo import ZoneInfo # type: ignore
from collections import namedtuple
import datetime
import sys
import os
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
from tqdm import tqdm
import numpy as np
from scipy.interpolate import NearestNDInterpolator
from osgeo import gdal
from pyiem import iemre
from pyiem.dep import SOUTH, WEST, NORTH, EAST, get_cli_fname
from pyiem.util import ncopen, logger, convert_value, utc
LOG = logger()
CENTRAL = ZoneInfo("America/Chicago")
UTC = datetime.timezone.utc
ST4PATH = "/mesonet/data/stage4"
# used for breakpoint logic
ZEROHOUR = datetime.datetime(2000, 1, 1, 0, 0)
# How many CPUs are we going to burn
CPUCOUNT = min([4, int(cpu_count() / 4)])
MEMORY = {"stamp": datetime.datetime.now()}
BOUNDS = namedtuple("Bounds", ["south", "north", "east", "west"])
def get_sts_ets_at_localhour(date, local_hour):
"""Return a Day Interval in UTC for the given date at CST/CDT hour."""
    # ZoneInfo is supposed to get this right at instantiation
sts = datetime.datetime(
date.year,
date.month,
date.day,
local_hour,
tzinfo=CENTRAL,
)
date2 = datetime.date(
date.year, date.month, date.day
) + datetime.timedelta(days=1)
ets = datetime.datetime(
date2.year,
date2.month,
date2.day,
local_hour,
tzinfo=CENTRAL,
)
return (
sts.replace(hour=local_hour).astimezone(UTC),
ets.replace(hour=local_hour).astimezone(UTC),
)
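# Example (a sketch, not part of the original script): 8 June 2009 falls in CDT
# (UTC-5), so local_hour=7 gives a window from 2009-06-08 12:00 UTC through
# 2009-06-09 12:00 UTC:
#     sts, ets = get_sts_ets_at_localhour(datetime.date(2009, 6, 8), 7)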
def iemre_bounds_check(name, val, lower, upper):
"""Make sure our data is within bounds, if not, exit!"""
if np.isnan(val).all():
LOG.warning("FATAL: iemre %s all NaN", name)
sys.exit(3)
minval = np.nanmin(val)
maxval = np.nanmax(val)
if minval < lower or maxval > upper:
LOG.warning(
"FATAL: iemre failure %s %.3f to %.3f [%.3f to %.3f]",
name,
minval,
maxval,
lower,
upper,
)
sys.exit(3)
return val
def load_iemre(nc, data, valid):
"""Use IEM Reanalysis for non-precip data
24km product is smoothed down to the 0.01 degree grid
"""
offset = iemre.daily_offset(valid)
lats = nc.variables["lat"][:]
lons = nc.variables["lon"][:]
lons, lats = np.meshgrid(lons, lats)
# Storage is W m-2, we want langleys per day
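    # (86400 s/day turns W m-2 into J m-2 per day; one langley is 41840 J m-2,
    # so multiplying by 23.9 / 1e6 converts J m-2 to langleys.)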
ncdata = (
nc.variables["rsds"][offset, :, :].filled(np.nan)
* 86400.0
/ 1000000.0
* 23.9
)
# Default to a value of 300 when this data is missing, for some reason
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["solar"][:] = iemre_bounds_check(
"rsds", nn(data["lon"], data["lat"]), 0, 1000
)
ncdata = convert_value(
nc.variables["high_tmpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["high"][:] = iemre_bounds_check(
"high_tmpk", nn(data["lon"], data["lat"]), -60, 60
)
ncdata = convert_value(
nc.variables["low_tmpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["low"][:] = iemre_bounds_check(
"low_tmpk", nn(data["lon"], data["lat"]), -60, 60
)
ncdata = convert_value(
nc.variables["avg_dwpk"][offset, :, :].filled(np.nan), "degK", "degC"
)
nn = NearestNDInterpolator(
(np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
)
data["dwpt"][:] = iemre_bounds_check(
"avg_dwpk", nn(data["lon"], data["lat"]), -60, 60
)
# Wind is already in m/s, but could be masked
ncdata = nc.variables["wind_speed"][offset, :, :].filled(np.nan)
nn = NearestNDInterpolator(
        (np.ravel(lons), np.ravel(lats)), np.ravel(ncdata)
    )
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from hypothesis import given, strategies as st
from parameterized import parameterized
from aitkens import accelerate, second_differences
class TestAitkens(TestCase):
@given(
st.floats(min_value=1, max_value=1e9),
st.floats(min_value=-1, max_value=1).filter(
            lambda initial: abs(initial) > 1e-6
),
st.floats(min_value=1, max_value=1e4),
)
def test_geometric_decay(self, limit, initial, rate):
"""Geometrically (exponentially) converging sequences converge
well when accelerated.
"""
# TODO the ranges are a bit too conservative and the tolerances
# very high. This is to make the tests pass (#2), but is it
# possible to get better estimates on the error bounds?
lst = limit + initial * np.exp(-rate * np.array(range(12)))
accelerated_lst = accelerate(lst)
np.testing.assert_allclose(
accelerated_lst[-1], [limit], atol=1/rate**2
)
@given(st.floats(allow_infinity=False, allow_nan=False))
def test_handles_constant_sequence(self, val):
lst = [val, val, val]
accelerated_lst = accelerate(lst)
self.assertEqual(accelerated_lst, [val])
def test_forward_differences(self):
xs = [1, 4, 9, 16]
txs, dxs, d2xs = second_differences(xs, direction='forward')
self.assertListEqual([1, 4], list(txs))
self.assertListEqual([3, 5], list(dxs))
self.assertListEqual([2, 2], list(d2xs))
def test_central_differences(self):
xs = [1, 4, 9, 16]
txs, dxs, d2xs = second_differences(xs, direction='central')
self.assertListEqual([4, 9], list(txs))
self.assertListEqual([4, 6], list(dxs))
self.assertListEqual([2, 2], list(d2xs))
def test_central_differences_have_expected_lengths(self):
xs = np.random.rand(8)
axs = accelerate(xs, direction='central')
self.assertTupleEqual((6,), axs.shape)
def test_forward_differences_have_expected_lengths(self):
xs = np.random.rand(8)
axs = accelerate(xs, direction='forward')
self.assertTupleEqual((6,), axs.shape)
@patch('aitkens.second_differences')
def test_default_is_forward_differences(self, m):
        m.return_value = (np.array([]), np.array([]), np.array([]))
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.patches import Patch
from sklearn.metrics import roc_auc_score
FIGURES_DIR = Path(__file__).parent.parent.parent / "figures"
def uncertainty_plot(
ues,
ood_ues,
accuracy=None,
title="Uncertainty CIFAR100",
directory=FIGURES_DIR,
file_name="ood_boxplot",
show=False,
):
directory = Path(directory)
if not directory.exists():
directory.mkdir(parents=True, exist_ok=True)
df = pd.DataFrame(
{
"Uncertainty": np.hstack((ues, ood_ues)),
"Type": np.hstack((["InD"] * len(ues), ["OOD"] * len(ood_ues))),
}
)
plt.rc("font", size=14)
plt.figure(figsize=(9, 7))
sns.boxplot(x="Type", y="Uncertainty", data=df)
plt.title(title)
patches = []
ood_score = ood_roc_auc(ues, ood_ues)
if accuracy is not None:
patches.append(Patch(color="none", label=f"InD accuracy {accuracy}"))
print("Accuracy", accuracy)
print("OOD ROC AUC", ood_score)
patches.append(Patch(color="none", label=f"OOD roc-auc {ood_score}"))
plt.legend(
handles=patches, handlelength=0, handletextpad=0, loc="upper left"
)
if show:
plt.show()
else:
plt.savefig(directory / f"{file_name}.png", dpi=150)
return ood_score
def boxplots(
ues,
ood_ues,
ood_name,
extras="",
show=False,
directory=FIGURES_DIR,
title_extras="",
):
df = pd.DataFrame(
{
"Uncertainty": np.hstack((ues, ood_ues)),
"Type": np.hstack((["InD"] * len(ues), ["OOD"] * len(ood_ues))),
}
)
plt.rc("font", size=14)
plt.figure(figsize=(12, 10))
sns.boxplot(x="Type", y="Uncertainty", data=df)
plt.title(f"Uncertainty on CIFAR100 ({ood_name} OOD){title_extras}")
if show:
plt.show()
else:
plt.savefig(
directory / f"ood_boxplot_{extras}_{ood_name}.png", dpi=100
)
def scatterplots(ues, ood_ues, ood_name, show=False, directory=FIGURES_DIR):
plt.rc("font", size=14)
plt.figure(figsize=(12, 10))
alpha = 0.1
size = 50
gibberish = np.random.random(len(ues)) * 0.05
plt.scatter(gibberish + 0.1, ues, alpha=alpha, s=size)
plt.scatter(gibberish + 0.2, ood_ues, alpha=alpha, s=size)
if show:
plt.show()
else:
plt.savefig(directory / f"ood_scatterplot_{ood_name}.png", dpi=100)
def ood_roc_auc(ues, ood_ues):
labels = np.concatenate((np.zeros_like(ues), np.ones_like(ood_ues)))
scores = np.concatenate((ues, ood_ues))
return roc_auc_score(labels, scores).round(3)
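# Small illustration (hypothetical numbers): low InD uncertainties and high OOD
# uncertainties separate perfectly, giving a score of 1.0:
#     ood_roc_auc(np.array([0.1, 0.2]), np.array([0.8, 0.9]))  # -> 1.0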
def count_alphas(ues, ood_ues, show=False):
correct_ue = np.sum(np.array(ues) < 0.1)
    correct_ood = np.sum(np.array(ood_ues) > 0.1)
import numpy as np
from Project_Clean_data import raw
from Project_Clean_data import header
from matplotlib.pyplot import figure, plot, title, xlabel, ylabel, show, legend, hold, subplot, xticks, yticks
from scipy.linalg import svd
classDict = {'Negative': 0, 'Positive': 1 }
classNames = classDict.keys()
Dx = list(header).index('Dx')
y = raw[:, Dx]
C = len(classNames)
raw = np.delete(raw,list(header).index('Dx'), 1)
header = np.delete(header,list(header).index('Dx'), 0)
X = raw
N = raw.shape[0]
# Subtract mean value from data
Y = (X - np.ones((N,1))*X.mean(0))
# PCA by computing SVD of Y
U,S,V = svd(Y,full_matrices=False)
V = V.T
print(V.shape)
# Project the centered data onto principal component space
Z = np.dot(Y, V)
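# Follow-up sketch (an addition, not in the original script): the squared singular
# values give the fraction of variance explained by each principal component.
rho = (S * S) / (S * S).sum()
print("Variance explained by each principal component:", rho)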
from numbers import Number
from abc import ABC, abstractmethod
import numpy as np
class Key(tuple):
def __new__(cls, value):
try:
if isinstance(value, str) or isinstance(value, Number):
return tuple.__new__(cls, (value,))
return tuple.__new__(cls, value)
except TypeError: # e.g. key is int
return tuple.__new__(cls, (value,))
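# For example: Key("a") -> ("a",), Key(3) -> (3,), and Key((1, 2)) -> (1, 2), so
# scalar levels and tuple levels end up with the same tuple-based key type.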
class RandomVariable(ABC):
def __init__(self, size):
self.size = size
@abstractmethod
def to_key(self, *args, **kwargs):
pass
@abstractmethod
def to_dict_key(self, *args, **kwargs):
pass
class DiscreteRV(RandomVariable):
"""Store the details of single discrete random variable."""
def __init__(self, name, first_row):
"""Store the details of single discrete random variable.
Args:
name (str):
The discrete random variable's name.
first_row (object):
An example of independent variable to extract the
information about random variable.
"""
super().__init__(size=1)
self.name = str(name)
self.is_numeric = isinstance(first_row, Number)
def to_key(self, *args, **kwargs):
total_size = len(args) + len(kwargs.keys())
if total_size != 1:
raise ValueError(
f"Random variable '{self.name}' can accept one level,"
f" {total_size} provided."
)
if len(args) == 1:
return args[0]
key_name = next(iter(kwargs.keys()))
if key_name != self.name:
raise ValueError(
f"The provided name '{key_name}' is not"
f" defined for random variable '{self.name}'"
)
return kwargs[key_name]
def to_dict_key(self, *args, **kwargs):
return {self.name: self.to_key(*args, **kwargs)}
def __len__(self):
return 1
def __str__(self):
return f"'{self.name}'"
__repr__ = __str__
class MultiDiscreteRV(RandomVariable):
"""Store the details of or more discrete random variables."""
def __init__(self, first_row, names=None, variable_name="X"):
"""Store the details of or more discrete random variables.
it can generate the names of random variables if it is Note
provided.
Args:
first_row (object or a tuple):
An example of independent variables to extract the
information about random variable(s).
names (list, optional):
A list of random variable names. Defaults to None.
variable_name (str, optional):
The prefix for automatic name generation.
Defaults to "X".
Raises:
            ValueError: When the length of the provided names is not equal
                to the number of random variables in 'first_row'.
"""
first_row_key = Key(first_row)
rv_len = len(first_row_key)
# Check names, if it is None, create one equal to length
# of random variables
if names is None:
self.names = np.array([f"{variable_name}{i+1}" for i in range(rv_len)])
elif len(names) != rv_len:
raise ValueError(
f"'factors' has {rv_len} random variables while"
f"'names' argument has {len(names)}."
)
else:
self.names = np.array(names)
# Store the DiscreteRV as a dictionary
if rv_len > 1:
self.multi_rvs = {
name: DiscreteRV(name, item)
for name, item in zip(self.names, first_row)
}
else:
self.multi_rvs = {self.names[0]: DiscreteRV(self.names[0], first_row)}
# The size of the MultiDiscreteRV is the same
# as the number RVs or their names
size = len(self.names)
super().__init__(size=size)
#
def to_key(self, *args, **kwargs):
total_size = len(args) + len(kwargs.keys())
if total_size != self.size:
raise ValueError(
f"Multi-random variables '{self.names}' can accept {self.size} "
f"number of levels, {total_size} provided."
)
if len(args) == self.size and self.size > 1:
return tuple(args)
elif len(args) == self.size: # self.size == 1
return args[0]
return_list = [None] * self.size
for key_name in kwargs:
if key_name not in self.names:
raise ValueError(
f"The provided name '{key_name}' is not"
" defined in multi-random variables (make"
f" sure of upper/lower cases too):'{self.names}'"
)
# find the index of the name
index = self.index_of(key_name)
# fill the return_list in the right place
return_list[index] = kwargs[key_name]
# the remaining values are filled in between
moving_index = 0
for i, value in enumerate(return_list):
if value is None:
return_list[i] = args[moving_index]
moving_index += 1
if self.size == 1:
return return_list[0]
else:
return tuple(return_list)
def to_dict_key(self, *args, **kwargs):
return {
key: value for key, value in zip(self.names, self.to_key(*args, **kwargs))
}
def __getitem__(self, rv_index):
"""An indexer by position (int) or name (str).
Args:
rv_index (int or str): Random variable's name or index.
Returns:
DiscreteRV: An instance of DiscreteRV.
"""
if isinstance(rv_index, int):
name = self.names[rv_index]
return self.multi_rvs[name]
elif isinstance(rv_index, str):
return self.multi_rvs[rv_index]
else:
raise ValueError("The provided index is not 'int' or 'str'.")
def index_of(self, name):
"""Finds the index of the random variable from its name.
Args:
name (str): Name of the random variable.
Returns:
int: Index of the random variable.
"""
indices = [i for i, n in enumerate(self.names) if n == name]
if len(indices) == 0:
return -1
else:
return indices[0]
def __len__(self):
return len(self.names)
def __str__(self):
return "".join([f"{s}\n" for s in self.multi_rvs.values()])
__repr__ = __str__
def __contains__(self, name):
"""Check the name of the random variable.
Args:
name (str): Name of the random variables.
Returns:
[bool]: True or False
"""
return name in self.multi_rvs
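# Usage sketch (illustrative values, not from the original module):
#     rvs = MultiDiscreteRV(("a", 1), names=["colour", "count"])
#     rvs.to_key("a", 1)                    # -> ("a", 1)
#     rvs.to_dict_key(count=1, colour="a")  # -> {"colour": "a", "count": 1}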
class Distribution(ABC):
def prob(self, *args, **kwargs):
key = self.get_random_variable().to_key(*args, **kwargs)
return self.probability(key)
def __iter__(self):
return iter(self.keys())
def levels(self):
arr = self._to_2d_array_()
# the last column is count, so we drop it
if len(arr.shape) < 2: # empty dist
            return np.array([])
#!/usr/bin/env python3
#! -*- coding: utf-8 -*-
import cv2
import numpy as np
def green_extraction(img, hsv_img):
hsv_min = np.array([30, 64, 0])
hsv_max = np.array([90, 255, 255])
img_mask = cv2.inRange(hsv_img, hsv_min, hsv_max)
green_img = cv2.bitwise_and(img, img, mask = img_mask)
return green_img
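# Usage sketch (the file name is hypothetical): load a BGR image, convert it to
# HSV, then keep only the green pixels.
#     img = cv2.imread("plant.jpg")
#     hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
#     green_img = green_extraction(img, hsv_img)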
def blue_extraction(img, hsv_img):
    hsv_min = np.array([90, 0, 0])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Plotting functions."""
import logging
import logging.config
import math
import os
import warnings
from pathlib import Path
import cartopy.crs as ccrs
import iris
import matplotlib as mpl
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.colors import from_levels_and_colors
from tqdm import tqdm
from ..data import dummy_lat_lon_cube
from ..logging_config import LOGGING
from ..utils import (
CoordinateSystemError,
in_360_longitude_system,
multiline,
select_valid_subset,
translate_longitude_system,
)
logger = logging.getLogger(__name__)
class PlottingError(Exception):
"""Base class for exception in the plotting module."""
class MaskedDataError(PlottingError):
"""Raised when trying to plot fully-masked data."""
class FigureSaver:
"""Save figures using pre-defined options and directories.
If `debug`, `debug_options` will be used. Otherwise `options` are used.
Figure(s) are saved automatically:
with FigureSaver("filename"):
plt.figure()
...
with FigureSaver(("filename1", "filename2")):
plt.figure()
...
plt.figure()
...
Manual saving using the defined options is also possible.
with FigureSaver() as saver:
fig = plt.figure()
...
saver.save_figure(fig, "plot_name")
"""
debug = False
directory = "."
    # These options serve as default values that may be overridden during
# initialisation.
options = {
"bbox_inches": "tight",
"transparent": True,
"filetype": "pdf",
"dpi": 600,
}
debug_options = {
"bbox_inches": "tight",
"transparent": False,
"filetype": "png",
"dpi": 350,
}
def __init__(self, filenames=None, *, directories=None, debug=None, **kwargs):
"""Initialise figure saver.
The initialised FigureSaver instance can be used repeatedly (as a context
manager) to save figures by calling it with at least filenames and optionally
directories, debug state, and saving options.
Args:
filenames ((iterable of) str or pathlib.Path, or None): If None, the
FigureSaver instance must be called with a list of filenames and used
as a context manager for automatic saving. Otherwise, the number of
strings or Paths given must match the number of figures opened within
the context.
            directories ((iterable of) str or pathlib.Path, or None): The directory
                or directories to save figures in. If None, use
                `FigureSaver.directory`. New directories will be created if they do
                not exist.
debug (bool or None): Select the pre-set settings with which figures will
be saved. If None, use `FigureSaver.debug`.
**kwargs: Optional kwargs which are passed to plt.savefig().
"""
# Backwards compatibility.
if "filename" in kwargs:
warnings.warn(
"The `filename` argument is deprecated in favour of the `filenames` "
"argument, which takes precedence.",
FutureWarning,
)
# Only use the deprecated argument if the updated version is not used.
if filenames is None:
filenames = kwargs.pop("filename")
if "directory" in kwargs:
warnings.warn(
"The `directory` argument is deprecated in favour of the `directories` "
"argument, which takes precedence.",
FutureWarning,
)
# Only use the deprecated argument if the updated version is not used.
if directories is None:
directories = kwargs.pop("directory")
# Set instance defaults.
if debug is not None:
self.debug = debug
directories = directories if directories is not None else self.directory
self.directories = (
(directories,) if isinstance(directories, (str, Path)) else directories
)
if filenames is not None:
self.filenames = (
(filenames,) if isinstance(filenames, (str, Path)) else filenames
)
if len(self.directories) != 1 and len(self.directories) != len(
self.filenames
):
raise ValueError(
multiline(
f"""If multiple directories are given, their number has to match
the number of file names, but got {len(self.directories)}
directories and {len(self.filenames)} file names."""
)
)
# Make sure to resolve the home directory.
self.directories = tuple(map(os.path.expanduser, self.directories))
self.options = self.debug_options.copy() if self.debug else self.options.copy()
self.options.update(kwargs)
def __call__(self, filenames=None, sub_directory=None, **kwargs):
"""Return a copy containing the given filenames for figure saving.
An optional sub-directory can also be specified for figures saved by the
returned object.
This is meant to be used as a context manager:
>>> figure_saver = FigureSaver(**options) # doctest: +SKIP
>>> with figure_saver("filename"): # doctest: +SKIP
... plt.figure() # doctest: +SKIP
Directories, options, etc... which the FigureSaver instance was initialised
with will be used to save the figures.
Args:
filenames ((iterable of) str): Filenames used to save created figures.
sub_directory (str): If given, figures will be saved in a sub-directory
`sub_directory` of the pre-specified directory/directories.
**kwargs: Optional kwargs which are passed to plt.savefig().
"""
new_inst = type(self)(
filenames,
directories=[os.path.join(orig, sub_directory) for orig in self.directories]
if sub_directory is not None
else self.directories,
debug=self.debug,
)
new_inst.options = {**self.options, **kwargs}
return new_inst
@property
def suffix(self):
return (
self.options["filetype"]
if "." in self.options["filetype"]
else "." + self.options["filetype"]
)
def __enter__(self):
self.old_fignums = plt.get_fignums()
return self
def __exit__(self, type, value, traceback):
if type is not None:
return False # Re-raise exception.
new_figure_numbers = plt.get_fignums()
if new_figure_numbers == self.old_fignums:
raise RuntimeError("No new figures detected.")
fignums_save = [
num for num in new_figure_numbers if num not in self.old_fignums
]
if len(fignums_save) != len(self.filenames):
raise RuntimeError(
f"Expected {len(self.filenames)} figures, but got {len(fignums_save)}."
)
saved_figures = [
num
if not plt.figure(num).get_label()
else (num, plt.figure(num).get_label())
for num in fignums_save
]
logger.debug(f"Saving figures {saved_figures}.")
if len(self.directories) == 1:
# Adapt to the number of figures.
directories = [self.directories[0]] * len(self.filenames)
else:
directories = self.directories
for fignum, directory, filename in zip(
fignums_save, directories, self.filenames
):
fig = plt.figure(fignum)
self.save_figure(fig, filename, directory)
def save_figure(self, fig, filename, directory=None, sub_directory=None, **kwargs):
"""Save a single figure.
Args:
fig (matplotlib.figure.Figure): Figure to save.
filename (str): Filename where the figure will be saved.
directory (str): Directory to save the figure in.
sub_directory (str): If given, figures will be saved in a sub-directory
`sub_directory` of the pre-specified directory/directories.
**kwargs: Optional kwargs which are passed to plt.savefig().
Raises:
ValueError: If multiple default directories were specified and no explicit
directory is supplied here. Since only one figure is being saved here,
it is unclear which directory to choose. In this case, the context
manager interface is to be used.
"""
if directory is None:
if len(self.directories) > 1:
raise ValueError("More than 1 default directory specified.")
# Use default.
directory = self.directories[0]
if sub_directory is not None:
directory = os.path.join(directory, sub_directory)
os.makedirs(directory, exist_ok=True)
if "." in filename:
filename = "".join(filename.split(".")[:-1])
filepath = (
os.path.expanduser(
os.path.abspath(os.path.expanduser(os.path.join(directory, filename)))
)
+ self.suffix
)
logger.debug("Saving figure to '{}'.".format(filepath))
fig.savefig(
filepath,
**{
**{
option: value
for option, value in self.options.items()
if option != "filetype"
},
**kwargs,
},
)
def get_cubes_vmin_vmax(cubes, vmin_vmax_percentiles=(0.0, 100.0)):
"""Get vmin and vmax from a list of cubes given two percentiles.
Args:
cubes (iris.cube.CubeList): List of cubes.
vmin_vmax_percentiles (tuple or None): The two percentiles, used to set the minimum
and maximum values on the colorbar. If `None`, use the minimum and maximum
of the data (equivalent to percentiles of (0, 100)).
Returns:
tuple: tuple of floats (vmin, vmax) if `vmin_vmax_percentiles` is not (0, 100) in which case
(None, None) will be returned.
"""
if vmin_vmax_percentiles is None or np.all(
        np.isclose(np.array(vmin_vmax_percentiles), np.array([0, 100]))
    ):
"""
Module contains some common functions.
"""
# standard imports
import os
import sys
import time
import numpy
import pysph
from pysph.solver.output import load, dump, output_formats # noqa: 401
from pysph.solver.output import gather_array_data as _gather_array_data
ASCII_FMT = " 123456789#"
try:
uni_chr = unichr
except NameError:
uni_chr = chr
UTF_FMT = u" " + u''.join(map(uni_chr, range(0x258F, 0x2587, -1)))
def _supports_unicode(fp):
# Taken somewhat from the tqdm package.
if not hasattr(fp, 'encoding'):
return False
else:
encoding = fp.encoding
try:
u'\u2588\u2589'.encode(encoding)
except UnicodeEncodeError:
return False
except Exception:
try:
return encoding.lower().startswith('utf-') or ('U8' == encoding)
except:
return False
else:
return True
def check_array(x, y):
"""Check if two arrays are equal with an absolute tolerance of
1e-16."""
    return numpy.allclose(x, y, atol=1e-16, rtol=0)
""":mod:`pandas.io.html` is a module containing functionality for dealing with
HTML IO.
"""
import os
import re
import numbers
import collections
import warnings
from distutils.version import LooseVersion
import numpy as np
from pandas.io.common import _is_url, urlopen, parse_url
from pandas.io.parsers import TextParser
from pandas.compat import (lrange, lmap, u, string_types, iteritems, text_type,
raise_with_traceback)
from pandas.core import common as com
from pandas import Series
try:
import bs4
except ImportError:
_HAS_BS4 = False
else:
_HAS_BS4 = True
try:
import lxml
except ImportError:
_HAS_LXML = False
else:
_HAS_LXML = True
try:
import html5lib
except ImportError:
_HAS_HTML5LIB = False
else:
_HAS_HTML5LIB = True
#############
# READ HTML #
#############
_RE_WHITESPACE = re.compile(r'[\r\n]+|\s{2,}')
def _remove_whitespace(s, regex=_RE_WHITESPACE):
"""Replace extra whitespace inside of a string with a single space.
Parameters
----------
s : str or unicode
The string from which to remove extra whitespace.
regex : regex
The regular expression to use to remove extra whitespace.
Returns
-------
subd : str or unicode
`s` with all extra whitespace replaced with a single space.
"""
return regex.sub(' ', s.strip())
def _get_skiprows(skiprows):
"""Get an iterator given an integer, slice or container.
Parameters
----------
skiprows : int, slice, container
The iterator to use to skip rows; can also be a slice.
Raises
------
TypeError
* If `skiprows` is not a slice, integer, or Container
Returns
-------
it : iterable
A proper iterator to use to skip rows of a DataFrame.
"""
if isinstance(skiprows, slice):
return lrange(skiprows.start or 0, skiprows.stop, skiprows.step or 1)
elif isinstance(skiprows, numbers.Integral) or com.is_list_like(skiprows):
return skiprows
elif skiprows is None:
return 0
raise TypeError('%r is not a valid type for skipping rows' %
type(skiprows).__name__)
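# For example, _get_skiprows(slice(2, 10, 2)) yields [2, 4, 6, 8], while an
# integer or a list of integers is passed through unchanged.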
def _read(obj):
"""Try to read from a url, file or string.
Parameters
----------
obj : str, unicode, or file-like
Returns
-------
raw_text : str
"""
if _is_url(obj):
with urlopen(obj) as url:
text = url.read()
elif hasattr(obj, 'read'):
text = obj.read()
elif isinstance(obj, string_types):
text = obj
try:
if os.path.isfile(text):
with open(text, 'rb') as f:
return f.read()
except TypeError:
pass
else:
raise TypeError("Cannot read object of type %r" % type(obj).__name__)
return text
class _HtmlFrameParser(object):
"""Base class for parsers that parse HTML into DataFrames.
Parameters
----------
io : str or file-like
This can be either a string of raw HTML, a valid URL using the HTTP,
FTP, or FILE protocols or a file-like object.
match : str or regex
The text to match in the document.
attrs : dict
List of HTML <table> element attributes to match.
Attributes
----------
io : str or file-like
raw HTML, URL, or file-like object
match : regex
The text to match in the raw HTML
attrs : dict-like
A dictionary of valid table attributes to use to search for table
elements.
Notes
-----
To subclass this class effectively you must override the following methods:
* :func:`_build_doc`
* :func:`_text_getter`
* :func:`_parse_td`
* :func:`_parse_tables`
* :func:`_parse_tr`
* :func:`_parse_thead`
* :func:`_parse_tbody`
* :func:`_parse_tfoot`
See each method's respective documentation for details on their
functionality.
"""
def __init__(self, io, match, attrs, encoding):
self.io = io
self.match = match
self.attrs = attrs
self.encoding = encoding
def parse_tables(self):
tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
return (self._build_table(table) for table in tables)
def _parse_raw_data(self, rows):
"""Parse the raw data into a list of lists.
Parameters
----------
rows : iterable of node-like
A list of row elements.
text_getter : callable
A callable that gets the text from an individual node. This must be
defined by subclasses.
column_finder : callable
A callable that takes a row node as input and returns a list of the
column node in that row. This must be defined by subclasses.
Returns
-------
data : list of list of strings
"""
data = [[_remove_whitespace(self._text_getter(col)) for col in
self._parse_td(row)] for row in rows]
return data
def _text_getter(self, obj):
"""Return the text of an individual DOM node.
Parameters
----------
obj : node-like
A DOM node.
Returns
-------
text : str or unicode
The text from an individual DOM node.
"""
raise NotImplementedError
def _parse_td(self, obj):
"""Return the td elements from a row element.
Parameters
----------
obj : node-like
Returns
-------
columns : list of node-like
These are the elements of each row, i.e., the columns.
"""
raise NotImplementedError
def _parse_tables(self, doc, match, attrs):
"""Return all tables from the parsed DOM.
Parameters
----------
doc : tree-like
The DOM from which to parse the table element.
match : str or regular expression
The text to search for in the DOM tree.
attrs : dict
A dictionary of table attributes that can be used to disambiguate
            multiple tables on a page.
Raises
------
ValueError
* If `match` does not match any text in the document.
Returns
-------
tables : list of node-like
A list of <table> elements to be parsed into raw data.
"""
raise NotImplementedError
def _parse_tr(self, table):
"""Return the list of row elements from the parsed table element.
Parameters
----------
table : node-like
A table element that contains row elements.
Returns
-------
rows : list of node-like
A list row elements of a table, usually <tr> or <th> elements.
"""
raise NotImplementedError
def _parse_thead(self, table):
"""Return the header of a table.
Parameters
----------
table : node-like
A table element that contains row elements.
Returns
-------
thead : node-like
A <thead>...</thead> element.
"""
raise NotImplementedError
def _parse_tbody(self, table):
"""Return the body of the table.
Parameters
----------
table : node-like
A table element that contains row elements.
Returns
-------
tbody : node-like
A <tbody>...</tbody> element.
"""
raise NotImplementedError
def _parse_tfoot(self, table):
"""Return the footer of the table if any.
Parameters
----------
table : node-like
A table element that contains row elements.
Returns
-------
tfoot : node-like
A <tfoot>...</tfoot> element.
"""
raise NotImplementedError
def _build_doc(self):
"""Return a tree-like object that can be used to iterate over the DOM.
Returns
-------
obj : tree-like
"""
raise NotImplementedError
def _build_table(self, table):
header = self._parse_raw_thead(table)
body = self._parse_raw_tbody(table)
footer = self._parse_raw_tfoot(table)
return header, body, footer
def _parse_raw_thead(self, table):
thead = self._parse_thead(table)
res = []
if thead:
res = lmap(self._text_getter, self._parse_th(thead[0]))
        return np.array(res)
"""
Python API for CSR matrices.
"""
import warnings
import logging
import numpy as np
import scipy.sparse as sps
from numba import config
from numba.experimental import structref
from csr.kernels import get_kernel, releasing
from . import _struct, _rows
INTC = np.iinfo(np.intc)
_log = logging.getLogger(__name__)
# ugly hack for a bug on Numba < 0.53
if config.DISABLE_JIT:
class _csr_base:
def __init__(self, nrows, ncols, nnz, ptrs, inds, vals, _cast=True):
self.nrows = nrows
self.ncols = ncols
self.nnz = nnz
if _cast and np.max(ptrs, initial=0) <= INTC.max:
self.rowptrs = np.require(ptrs, np.intc, 'C')
else:
self.rowptrs = np.require(ptrs, requirements='C')
self.colinds = np.require(inds, np.intc, 'C')
if vals is not None:
self._values = np.require(vals, requirements='C')
else:
self._values = None
def _numba_box_(self, *args):
raise NotImplementedError()
NUMBA_ENABLED = False
else:
_csr_base = structref.StructRefProxy
NUMBA_ENABLED = True
class CSR(_csr_base):
"""
Simple compressed sparse row matrix. This is like :py:class:`scipy.sparse.csr_matrix`, with
a few useful differences:
* The value array is optional, for cases in which only the matrix structure is required.
* The value array, if present, is always double-precision.
* It is usable from code compiled in Numba's nopython mode.
You generally don't want to create this class yourself with the constructor. Instead, use one
of its class or static methods. If you do use the constructor, be advised that the class may
reuse the arrays that you pass, but does not guarantee that they will be used.
Not all methods are available from Numba, and a few have restricted signatures. The
documentation for each method notes deviations when in Numba-compiled code.
At the Numba level, matrices with and without value arrays have different types. For the
most part, this is transparent, but if you want to write a Numba function that works on
the values array but only if it is present, it requires writing two versions of the
function and using :py:func:`numba.extending.overload` to dispatch to the correct one.
There are several examples of doing this in the CSR source code. The method
:py:meth:`CSRType.has_values` lets you quickly see if a CSR type instance has
values or not.
Attributes:
nrows(int): the number of rows.
ncols(int): the number of columns.
nnz(int): the number of entries.
rowptrs(numpy.ndarray): the row pointers.
colinds(numpy.ndarray): the column indices.
values(numpy.ndarray or None): the values.
"""
def __new__(cls, nrows, ncols, nnz, rps, cis, vs, _cast=True):
assert nrows >= 0
assert nrows <= INTC.max
assert ncols >= 0
assert ncols <= INTC.max
assert nnz >= 0
nrows = np.intc(nrows)
ncols = np.intc(ncols)
if _cast:
cis = np.require(cis, np.intc, 'C')
if nnz <= INTC.max:
rps = np.require(rps, np.intc, 'C')
else:
rps = np.require(rps, np.int64, 'C')
if vs is not None:
vs = np.require(vs, requirements='C')
if NUMBA_ENABLED:
return _csr_base.__new__(cls, nrows, ncols, nnz, rps, cis, vs)
else:
return _csr_base.__new__(cls)
@classmethod
def empty(cls, nrows, ncols, row_nnzs=None, values=True):
"""
Create an uninitialized CSR matrix.
Args:
nrows(int): the number of rows.
ncols(int): the number of columns.
row_nnzs(array-like):
the number of nonzero entries for each row, or None for an empty matrix.
values(bool, str, or numpy.dtype):
whether it has values or only structure; can be a NumPy data type to
specify a type other than `f8`.
"""
from .constructors import create_empty
assert nrows >= 0
assert ncols >= 0
if row_nnzs is not None:
assert len(row_nnzs) == nrows
nnz = np.sum(row_nnzs, dtype=np.int64)
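# ---------------------------------------------------------------------------
# Illustrative sketch: wrapping a scipy.sparse matrix's raw arrays in the CSR
# class defined above via the constructor shown. (The csr package also ships
# dedicated converters; only the constructor visible here is assumed.)
import numpy as np
import scipy.sparse as sps


def csr_from_scipy_sketch(mat):
    mat = mat.tocsr()
    nrows, ncols = mat.shape
    return CSR(nrows, ncols, mat.nnz, mat.indptr, mat.indices, mat.data)

# Example:
#   m = sps.random(5, 4, density=0.3, format='csr')
#   c = csr_from_scipy_sketch(m)
# ---------------------------------------------------------------------------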
import os
from collections import defaultdict
import random
import numpy as np
from skimage.morphology import erosion, disk
import cv2
import torch
import torch.nn.functional as F
from .core import Callback
# @TODO: refactor
class InferCallback(Callback):
def __init__(self, out_dir=None, out_prefix=None):
self.out_dir = out_dir
self.out_prefix = out_prefix
self.predictions = defaultdict(lambda: [])
self._keys_from_state = ["out_dir", "out_prefix"]
def on_stage_start(self, state):
for key in self._keys_from_state:
value = getattr(state, key, None)
if value is not None:
setattr(self, key, value)
# assert self.out_prefix is not None
if self.out_dir is not None:
self.out_prefix = str(self.out_dir) + "/" + str(self.out_prefix)
if self.out_prefix is not None:
os.makedirs(os.path.dirname(self.out_prefix), exist_ok=True)
def on_loader_start(self, state):
self.predictions = defaultdict(lambda: [])
def on_batch_end(self, state):
dct = state.output
dct = {key: value.detach().cpu().numpy() for key, value in dct.items()}
for key, value in dct.items():
self.predictions[key].append(value)
def on_loader_end(self, state):
self.predictions = {
key: np.concatenate(value, axis=0)
for key, value in self.predictions.items()
}
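# ---------------------------------------------------------------------------
# Standalone sketch of the accumulation pattern used above: per-batch outputs
# are appended per key, then concatenated along the batch axis at loader end.
import numpy as np
from collections import defaultdict

_preds = defaultdict(list)
for _ in range(3):                                   # three fake batches
    _preds["logits"].append(np.random.rand(8, 10))
_stacked = {k: np.concatenate(v, axis=0) for k, v in _preds.items()}
assert _stacked["logits"].shape == (24, 10)
# ---------------------------------------------------------------------------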
# -*- coding: utf-8 -*-
"""
Class for reading data from pCLAMP and AxoScope
files (.abf version 1 and 2), developed by Molecular device/Axon technologies.
- abf = Axon binary file
- atf is a text file based format from axon that could be
read by AsciiIO (but this file is less efficient.)
This code is a port of abfload and abf2load
written in Matlab (BSD-2-Clause licence) by :
- Copyright (c) 2009, <NAME>, <EMAIL>
- Copyright (c) 2004, <NAME>
and available here:
http://www.mathworks.com/matlabcentral/fileexchange/22114-abf2load
Information on abf 1 and 2 formats is available here:
http://www.moleculardevices.com/pages/software/developer_info.html
This file supports the old (ABF1) and new (ABF2) format.
ABF1 (clampfit <=9) and ABF2 (clampfit >10)
All possible modes are supported:
- event-driven variable-length mode 1 -> return several Segments per Block
- event-driven fixed-length mode 2 or 5 -> return several Segments
- gap free mode -> return one (or several) Segment in the Block
Supported : Read
Author: <NAME>, <NAME>
Note: <EMAIL> has a C++ library with SWIG bindings which also
reads abf files - would be good to cross-check
"""
from __future__ import print_function, division, absolute_import
# from __future__ import unicode_literals is not compatible with numpy.dtype both py2 py3
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
_event_channel_dtype)
import numpy as np
import struct
import datetime
import os
from io import open, BufferedReader
import numpy as np
class AxonRawIO(BaseRawIO):
extensions = ['abf']
rawmode = 'one-file'
def __init__(self, filename=''):
BaseRawIO.__init__(self)
self.filename = filename
def _parse_header(self):
info = self._axon_info = parse_axon_soup(self.filename)
version = info['fFileVersionNumber']
# file format
if info['nDataFormat'] == 0:
sig_dtype = np.dtype('i2')
elif info['nDataFormat'] == 1:
sig_dtype = np.dtype('f4')
if version < 2.:
nbchannel = info['nADCNumChannels']
head_offset = info['lDataSectionPtr'] * BLOCKSIZE + info[
'nNumPointsIgnored'] * sig_dtype.itemsize
totalsize = info['lActualAcqLength']
elif version >= 2.:
nbchannel = info['sections']['ADCSection']['llNumEntries']
head_offset = info['sections']['DataSection'][
'uBlockIndex'] * BLOCKSIZE
totalsize = info['sections']['DataSection']['llNumEntries']
self._raw_data = np.memmap(self.filename, dtype=sig_dtype, mode='r',
shape=(totalsize,), offset=head_offset)
# 3 possible modes
if version < 2.:
mode = info['nOperationMode']
elif version >= 2.:
mode = info['protocol']['nOperationMode']
assert mode in [1, 2, 3, 5], 'Mode {} is not supported'.format(mode)
# event-driven variable-length mode (mode 1)
# event-driven fixed-length mode (mode 2 or 5)
# gap free mode (mode 3) can be in several episodes
# read sweep pos
if version < 2.:
nbepisod = info['lSynchArraySize']
offset_episode = info['lSynchArrayPtr'] * BLOCKSIZE
elif version >= 2.:
nbepisod = info['sections']['SynchArraySection'][
'llNumEntries']
offset_episode = info['sections']['SynchArraySection'][
'uBlockIndex'] * BLOCKSIZE
if nbepisod > 0:
episode_array = np.memmap(
self.filename, [('offset', 'i4'), ('len', 'i4')], 'r',
shape=nbepisod, offset=offset_episode)
else:
episode_array = np.empty(1, [('offset', 'i4'), ('len', 'i4')])
episode_array[0]['len'] = self._raw_data.size
episode_array[0]['offset'] = 0
# sampling_rate
if version < 2.:
self._sampling_rate = 1. / (info['fADCSampleInterval'] *
nbchannel * 1.e-6)
elif version >= 2.:
self._sampling_rate = 1.e6 / \
info['protocol']['fADCSequenceInterval']
# one sweep = one segment
nb_segment = episode_array.size
# Get raw data by segment
self._raw_signals = {}
self._t_starts = {}
pos = 0
for seg_index in range(nb_segment):
length = episode_array[seg_index]['len']
if version < 2.:
fSynchTimeUnit = info['fSynchTimeUnit']
elif version >= 2.:
fSynchTimeUnit = info['protocol']['fSynchTimeUnit']
if (fSynchTimeUnit != 0) and (mode == 1):
length /= fSynchTimeUnit
self._raw_signals[seg_index] = self._raw_data[pos:pos + length].reshape(-1, nbchannel)
pos += length
t_start = float(episode_array[seg_index]['offset'])
if (fSynchTimeUnit == 0):
t_start = t_start / self._sampling_rate
else:
t_start = t_start * fSynchTimeUnit * 1e-6
self._t_starts[seg_index] = t_start
# Create channel header
if version < 2.:
channel_ids = [chan_num for chan_num in
info['nADCSamplingSeq'] if chan_num >= 0]
else:
channel_ids = list(range(nbchannel))
sig_channels = []
adc_nums = []
for chan_index, chan_id in enumerate(channel_ids):
if version < 2.:
name = info['sADCChannelName'][chan_id].replace(b' ', b'')
units = info['sADCUnits'][chan_id].replace(b'\xb5', b'u'). \
replace(b' ', b'').decode('utf-8') # \xb5 is µ
adc_num = info['nADCPtoLChannelMap'][chan_id]
elif version >= 2.:
ADCInfo = info['listADCInfo'][chan_id]
name = ADCInfo['ADCChNames'].replace(b' ', b'')
units = ADCInfo['ADCChUnits'].replace(b'\xb5', b'u'). \
replace(b' ', b'').decode('utf-8')
adc_num = ADCInfo['nADCNum']
adc_nums.append(adc_num)
if version < 2.:
gain = info['fADCRange']
gain /= info['fInstrumentScaleFactor'][chan_id]
gain /= info['fSignalGain'][chan_id]
gain /= info['fADCProgrammableGain'][chan_id]
gain /= info['lADCResolution']
if info['nTelegraphEnable'][chan_id]:
gain /= info['fTelegraphAdditGain'][chan_id]
offset = info['fInstrumentOffset'][chan_id]
offset -= info['fSignalOffset'][chan_id]
elif version >= 2.:
gain = info['protocol']['fADCRange']
gain /= info['listADCInfo'][chan_id]['fInstrumentScaleFactor']
gain /= info['listADCInfo'][chan_id]['fSignalGain']
gain /= info['listADCInfo'][chan_id]['fADCProgrammableGain']
gain /= info['protocol']['lADCResolution']
if info['listADCInfo'][chan_id]['nTelegraphEnable']:
gain /= info['listADCInfo'][chan_id]['fTelegraphAdditGain']
offset = info['listADCInfo'][chan_id]['fInstrumentOffset']
offset -= info['listADCInfo'][chan_id]['fSignalOffset']
group_id = 0
sig_channels.append((name, chan_id, self._sampling_rate,
sig_dtype, units, gain, offset, group_id))
sig_channels = np.array(sig_channels, dtype=_signal_channel_dtype)
# only one events channel : tag
if mode in [3, 5]:  # TODO check if tags exist in other modes
# In ABF, timestamps are not attached to any particular segment,
# so each segment accesses all events
timestamps = []
labels = []
comments = []
for i, tag in enumerate(info['listTag']):
timestamps.append(tag['lTagTime'])
labels.append(str(tag['nTagType']))
comments.append(clean_string(tag['sComment']))
self._raw_ev_timestamps = np.array(timestamps)
self._ev_labels = np.array(labels, dtype='U')
self._ev_comments = np.array(comments, dtype='U')
event_channels = []
event_channels.append(('Tag', '', 'event'))
event_channels = np.array(event_channels, dtype=_event_channel_dtype)
# No spikes
unit_channels = []
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
# fill into header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [nb_segment]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['event_channels'] = event_channels
# insert some annotation at some place
self._generate_minimal_annotations()
bl_annotations = self.raw_annotations['blocks'][0]
bl_annotations['rec_datetime'] = info['rec_datetime']
bl_annotations['abf_version'] = version
for seg_index in range(nb_segment):
seg_annotations = bl_annotations['segments'][seg_index]
seg_annotations['abf_version'] = version
for c in range(sig_channels.size):
anasig_an = seg_annotations['signals'][c]
anasig_an['nADCNum'] = adc_nums[c]
for c in range(event_channels.size):
ev_ann = seg_annotations['events'][c]
ev_ann['comments'] = self._ev_comments
def _source_name(self):
return self.filename
def _segment_t_start(self, block_index, seg_index):
return self._t_starts[seg_index]
def _segment_t_stop(self, block_index, seg_index):
t_stop = self._t_starts[seg_index] + \
self._raw_signals[seg_index].shape[0] / self._sampling_rate
return t_stop
def _get_signal_size(self, block_index, seg_index, channel_indexes):
shape = self._raw_signals[seg_index].shape
return shape[0]
def _get_signal_t_start(self, block_index, seg_index, channel_indexes):
return self._t_starts[seg_index]
def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, channel_indexes):
if channel_indexes is None:
channel_indexes = slice(None)
raw_signals = self._raw_signals[seg_index][slice(i_start, i_stop), channel_indexes]
return raw_signals
def _event_count(self, block_index, seg_index, event_channel_index):
return self._raw_ev_timestamps.size
def _get_event_timestamps(self, block_index, seg_index, event_channel_index, t_start, t_stop):
# In ABF, timestamps are not attached to any particular segment,
# so each segment accesses all events
timestamp = self._raw_ev_timestamps
labels = self._ev_labels
durations = None
if t_start is not None:
keep = timestamp >= int(t_start * self._sampling_rate)
timestamp = timestamp[keep]
labels = labels[keep]
if t_stop is not None:
keep = timestamp <= int(t_stop * self._sampling_rate)
timestamp = timestamp[keep]
labels = labels[keep]
return timestamp, durations, labels
def _rescale_event_timestamp(self, event_timestamps, dtype):
event_times = event_timestamps.astype(dtype) / self._sampling_rate
return event_times
def read_raw_protocol(self):
"""
Read the protocol waveform of the file, if present;
function works with ABF2 only. Protocols can be reconstructed
from the ABF1 header.
Returns: list of segments (one for every episode)
with a list of analog signals (one for every DAC).
Author: <NAME>
"""
info = self._axon_info
if info['fFileVersionNumber'] < 2.:
raise IOError("Protocol section is only present in ABF2 files.")
nADC = info['sections']['ADCSection'][
'llNumEntries'] # Number of ADC channels
nDAC = info['sections']['DACSection'][
'llNumEntries'] # Number of DAC channels
nSam = int(info['protocol'][
'lNumSamplesPerEpisode'] / nADC) # Number of samples per episode
nEpi = info['lActualEpisodes'] # Actual number of episodes
# Make a list of segments with analog signals with just holding levels
# List of segments relates to number of episodes, as for recorded data
sigs_by_segments = []
for epiNum in range(nEpi):
# One analog signal for each DAC in segment (episode)
signals = []
for DACNum in range(nDAC):
sig = np.ones(nSam) * info['listDACInfo'][DACNum]['fDACHoldingLevel']
# If there are epoch infos for this DAC
if DACNum in info['dictEpochInfoPerDAC']:
# Save last sample index
i_last = int(nSam * 15625 / 10 ** 6)
# TODO guess for first holding
# Go over EpochInfoPerDAC and change the analog signal
# according to the epochs
epochInfo = info['dictEpochInfoPerDAC'][DACNum]
for epochNum, epoch in epochInfo.items():
i_begin = i_last
i_end = i_last + epoch['lEpochInitDuration'] + \
epoch['lEpochDurationInc'] * epiNum
dif = i_end - i_begin
sig[i_begin:i_end] = np.ones((dif)) * \
(epoch['fEpochInitLevel'] + epoch['fEpochLevelInc'] *
epiNum)
i_last += epoch['lEpochInitDuration'] + \
epoch['lEpochDurationInc'] * epiNum
signals.append(sig)
sigs_by_segments.append(signals)
sig_names = []
sig_units = []
for DACNum in range(nDAC):
name = info['listDACInfo'][DACNum]['DACChNames'].decode("utf-8")
units = info['listDACInfo'][DACNum]['DACChUnits']. \
replace(b'\xb5', b'u').decode('utf-8') # \xb5 is µ
sig_names.append(name)
sig_units.append(units)
return sigs_by_segments, sig_names, sig_units
def parse_axon_soup(filename):
"""
read the header of the file
The strategy here differs from the original script under Matlab.
In the original script for ABF2, it completes the header with
information that is located in other structures.
In ABF2 this function returns info with sub dict:
sections (ABF2)
protocol (ABF2)
listTags (ABF1&2)
listADCInfo (ABF2)
listDACInfo (ABF2)
dictEpochInfoPerDAC (ABF2)
that contains more information.
"""
with open(filename, 'rb') as fid:
f = StructFile(fid)
# version
f_file_signature = f.read(4)
if f_file_signature == b'ABF ':
header_description = headerDescriptionV1
elif f_file_signature == b'ABF2':
header_description = headerDescriptionV2
else:
return None
# construct dict
header = {}
for key, offset, fmt in header_description:
val = f.read_f(fmt, offset=offset)
if len(val) == 1:
header[key] = val[0]
else:
header[key] = np.array(val)
# correction of version number and starttime
if f_file_signature == b'ABF ':
header['lFileStartTime'] += header[
'nFileStartMillisecs'] * .001
elif f_file_signature == b'ABF2':
n = header['fFileVersionNumber']
header['fFileVersionNumber'] = n[3] + 0.1 * n[2] + \
0.01 * n[1] + 0.001 * n[0]
header['lFileStartTime'] = header['uFileStartTimeMS'] * .001
if header['fFileVersionNumber'] < 2.:
# tags
listTag = []
for i in range(header['lNumTagEntries']):
f.seek(header['lTagSectionPtr'] + i * 64)
tag = {}
for key, fmt in TagInfoDescription:
val = f.read_f(fmt)
if len(val) == 1:
tag[key] = val[0]
else:
tag[key] = np.array(val)
listTag.append(tag)
header['listTag'] = listTag
# protocol name formatting
header['sProtocolPath'] = clean_string(header['sProtocolPath'])
header['sProtocolPath'] = header['sProtocolPath']. \
replace(b'\\', b'/')
elif header['fFileVersionNumber'] >= 2.:
# in abf2 some info are in other place
# sections
sections = {}
for s, sectionName in enumerate(sectionNames):
uBlockIndex, uBytes, llNumEntries = \
f.read_f('IIl', offset=76 + s * 16)
sections[sectionName] = {}
sections[sectionName]['uBlockIndex'] = uBlockIndex
sections[sectionName]['uBytes'] = uBytes
sections[sectionName]['llNumEntries'] = llNumEntries
header['sections'] = sections
# strings sections
# hack for reading channel names and units
# this section is not very detailed and so the code is
# not very robust. The idea is to remove the first
# part by finding one of the following KEYs.
# Unfortunately, the later part contains the file name, which can
# by accident also contain one of these keys...
f.seek(sections['StringsSection']['uBlockIndex'] * BLOCKSIZE)
big_string = f.read(sections['StringsSection']['uBytes'])
goodstart = -1
for key in [b'AXENGN', b'clampex', b'Clampex',
b'CLAMPEX', b'axoscope', b'AxoScope', b'Clampfit']:
# goodstart = big_string.lower().find(key)
goodstart = big_string.find(b'\x00'+key)
if goodstart != -1:
break
assert goodstart != -1, \
'This file does not contain clampex, axoscope or clampfit in the header'
big_string = big_string[goodstart+1:]
strings = big_string.split(b'\x00')
# ADC sections
header['listADCInfo'] = []
for i in range(sections['ADCSection']['llNumEntries']):
# read ADCInfo
f.seek(sections['ADCSection']['uBlockIndex'] *
BLOCKSIZE + sections['ADCSection']['uBytes'] * i)
ADCInfo = {}
for key, fmt in ADCInfoDescription:
val = f.read_f(fmt)
if len(val) == 1:
ADCInfo[key] = val[0]
else:
ADCInfo[key] = np.array(val)
ADCInfo['ADCChNames'] = strings[ADCInfo['lADCChannelNameIndex'] - 1]
ADCInfo['ADCChUnits'] = strings[ADCInfo['lADCUnitsIndex'] - 1]
header['listADCInfo'].append(ADCInfo)
# protocol sections
protocol = {}
f.seek(sections['ProtocolSection']['uBlockIndex'] * BLOCKSIZE)
for key, fmt in protocolInfoDescription:
val = f.read_f(fmt)
if len(val) == 1:
protocol[key] = val[0]
else:
protocol[key] = np.array(val)
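# ---------------------------------------------------------------------------
# Typical usage sketch for the reader above. It assumes the public wrappers
# that neo's BaseRawIO provides around the private methods implemented here
# (parse_header, get_analogsignal_chunk, rescale_signal_raw_to_float);
# 'recording.abf' is a placeholder path.
#
#   reader = AxonRawIO(filename='recording.abf')
#   reader.parse_header()
#   raw = reader.get_analogsignal_chunk(block_index=0, seg_index=0,
#                                       i_start=0, i_stop=1000,
#                                       channel_indexes=None)
#   volts = reader.rescale_signal_raw_to_float(raw, dtype='float64')
# ---------------------------------------------------------------------------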
import numpy as np
import copy
from io import StringIO
import sys
import h5py
from scipy.optimize import minimize
import lmfit
import emcee
from dynesty import NestedSampler
from dynesty.utils import resample_equal
from .parameters import Parameters
from .likelihood import computeRedChiSq, lnprob, ln_like, ptform
from . import plots_s5 as plots
#FINDME: Keep reload statements for easy testing
from importlib import reload
reload(plots)
def lsqfitter(lc, model, meta, log, calling_function='lsq', **kwargs):
"""Perform least-squares fit.
Parameters
----------
lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
The lightcurve data object
model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model to fit
meta: MetaClass
The metadata object
log: logedit.Logedit
The open log in which notes from this step can be added.
**kwargs:
Arbitrary keyword arguments.
Returns
-------
best_model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model after fitting
Notes
-----
History:
- December 29-30, 2021 <NAME>
Updated documentation and arguments. Reduced repeated code.
Also saving covariance matrix for later estimation of sampler step size.
- January 7-22, 2022 <NAME>
Adding ability to do a single shared fit across all channels
- February 28-March 1, 2022 <NAME>
Adding scatter_ppm parameter
"""
# Group the different variable types
freenames, freepars, prior1, prior2, priortype, indep_vars = group_variables(model)
neg_lnprob = lambda theta, lc, model, prior1, prior2, priortype, freenames: -lnprob(theta, lc, model, prior1, prior2, priortype, freenames)
if not hasattr(meta, 'lsq_method'):
log.writelog('No lsq optimization method specified - using Nelder-Mead by default.')
meta.lsq_method = 'Nelder-Mead'
if not hasattr(meta, 'lsq_tol'):
log.writelog('No lsq tolerance specified - using 1e-6 by default.')
meta.lsq_tol = 1e-6
results = minimize(neg_lnprob, freepars, args=(lc, model, prior1, prior2, priortype, freenames), method=meta.lsq_method, tol=meta.lsq_tol)
if meta.run_verbose:
log.writelog("\nVerbose lsq results: {}\n".format(results))
else:
log.writelog("Success?: {}".format(results.success))
log.writelog(results.message)
# Get the best fit params
fit_params = results.x
# Save the fit ASAP
save_fit(meta, lc, calling_function, fit_params, freenames)
# Make a new model instance
best_model = copy.copy(model)
best_model.components[0].update(fit_params, freenames)
model.update(fit_params, freenames)
if "scatter_ppm" in freenames:
ind = [i for i in np.arange(len(freenames)) if freenames[i][0:11] == "scatter_ppm"]
lc.unc_fit = np.ones_like(lc.flux) * fit_params[ind[0]] * 1e-6
if len(ind)>1:
for chan in np.arange(lc.flux.size//lc.time.size):
lc.unc_fit[chan*lc.time.size:(chan+1)*lc.time.size] = fit_params[ind[chan]] * 1e-6
# Save the covariance matrix in case it's needed to estimate step size for a sampler
model_lc = model.eval()
# FINDME
# Commented out for now because op.least_squares() doesn't provide covariance matrix
# Need to compute using Jacobian matrix instead (hess_inv = (J.T J)^{-1})
# if results[1] is not None:
# residuals = (lc.flux - model_lc)
# cov_mat = results[1]*np.var(residuals)
# else:
# # Sometimes lsq will fail to converge and will return a None covariance matrix
# cov_mat = None
cov_mat = None
best_model.__setattr__('cov_mat',cov_mat)
# Plot fit
if meta.isplots_S5 >= 1:
plots.plot_fit(lc, model, meta, fitter=calling_function)
# Compute reduced chi-squared
chi2red = computeRedChiSq(lc, model, meta, freenames, log)
print('\nLSQ RESULTS:')
for freenames_i, fit_params_i in zip(freenames, fit_params):
log.writelog('{0}: {1}'.format(freenames_i, fit_params_i))
log.writelog('')
# Plot Allan plot
if meta.isplots_S5 >= 3 and calling_function=='lsq':
# This plot is only really useful if you're actually using the lsq fitter, otherwise don't make it
plots.plot_rms(lc, model, meta, fitter=calling_function)
# Plot residuals distribution
if meta.isplots_S5 >= 3 and calling_function=='lsq':
plots.plot_res_distr(lc, model, meta, fitter=calling_function)
best_model.__setattr__('chi2red',chi2red)
best_model.__setattr__('fit_params',fit_params)
return best_model
def demcfitter(lc, model, meta, log, **kwargs):
"""Perform sampling using Differential Evolution Markov Chain.
This is an empty placeholder function to be filled later.
Parameters
----------
lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
The lightcurve data object
model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model to fit
meta: MetaClass
The metadata object
log: logedit.Logedit
The open log in which notes from this step can be added.
**kwargs:
Arbitrary keyword arguments.
Returns
-------
best_model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model after fitting
Notes
-----
History:
- December 29, 2021 <NAME>
Updated documentation and arguments
"""
best_model = None
return best_model
def emceefitter(lc, model, meta, log, **kwargs):
"""Perform sampling using emcee.
Parameters
----------
lc: eureka.S5_lightcurve_fitting.lightcurve.LightCurve
The lightcurve data object
model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model to fit
meta: MetaClass
The metadata object
log: logedit.Logedit
The open log in which notes from this step can be added.
**kwargs:
Arbitrary keyword arguments.
Returns
-------
best_model: eureka.S5_lightcurve_fitting.models.CompositeModel
The composite model after fitting
Notes
-----
History:
- December 29, 2021 <NAME>
Updated documentation. Reduced repeated code.
- January 7-22, 2022 <NAME>
Adding ability to do a single shared fit across all channels
- February 23-25, 2022 <NAME>
Added log-uniform and Gaussian priors.
- February 28-March 1, 2022 <NAME>
Adding scatter_ppm parameter. Added statements to avoid some initial
state issues.
"""
if not hasattr(meta, 'lsq_first') or meta.lsq_first:
# Only call lsq fitter first if asked or lsq_first option wasn't passed (allowing backwards compatibility)
log.writelog('\nCalling lsqfitter first...')
# RUN LEAST SQUARES
lsq_sol = lsqfitter(lc, model, meta, log, calling_function='emcee_lsq', **kwargs)
# SCALE UNCERTAINTIES WITH REDUCED CHI2
if meta.rescale_err:
lc.unc *= np.sqrt(lsq_sol.chi2red)
else:
lsq_sol = None
# Group the different variable types
freenames, freepars, prior1, prior2, priortype, indep_vars = group_variables(model)
if lsq_sol is not None and lsq_sol.cov_mat is not None:
step_size = np.diag(lsq_sol.cov_mat)
ind_zero = np.where(step_size==0.)[0]
if len(ind_zero):
step_size[ind_zero] = 0.001*np.abs(freepars[ind_zero])
else:
# Sometimes the lsq fitter won't converge and will give None as the covariance matrix
# In that case, we need to establish the step size in another way. A fractional step compared
# to the value can work okay, but it may fail if the step size is larger than the bounds
# which is not uncommon for precisely known values like t0 and period
log.writelog('No covariance matrix from LSQ - falling back on a 0.1% step size')
step_size = 0.001*np.abs(freepars)
ndim = len(step_size)
nwalkers = meta.run_nwalkers
# make it robust to lsq hitting the upper or lower bound of the param space
ind_max = np.where(np.logical_and(freepars - prior2 == 0., priortype=='U'))
ind_min = np.where(np.logical_and(freepars - prior1 == 0., priortype=='U'))
pmid = (prior2+prior1)/2.
if len(ind_max[0]):
log.writelog('Warning: >=1 params hit the upper bound in the lsq fit. Setting to the middle of the interval.')
freepars[ind_max] = pmid[ind_max]
if len(ind_min[0]):
log.writelog('Warning: >=1 params hit the lower bound in the lsq fit. Setting to the middle of the interval.')
freepars[ind_min] = pmid[ind_min]
ind_zero_step = np.where(step_size==0.)
if len(ind_zero_step[0]):
log.writelog('Warning: >=1 params would have a zero step. changing to 0.001 * prior range')
step_size[ind_zero_step] = 0.001*(prior2[ind_zero_step] - prior1[ind_zero_step])
pos = np.array([freepars + np.array(step_size)*np.random.randn(ndim) for i in range(nwalkers)])
uniformprior=np.where(priortype=='U')
loguniformprior=np.where(priortype=='LU')
in_range = np.array([((prior1[uniformprior] <= ii) & (ii <= prior2[uniformprior])).all() for ii in pos[:,uniformprior]])
in_range2 = np.array([((prior1[loguniformprior] <= np.log(ii)) & (np.log(ii) <= prior2[loguniformprior])).all() for ii in pos[:,loguniformprior]])
if not (np.all(in_range))&(np.all(in_range2)):
log.writelog('Not all walkers were initialized within the priors, using a smaller proposal distribution')
pos = pos[in_range]
# Make sure the step size is well within the limits
step_size_options = np.append(step_size.reshape(-1,1), np.abs(np.append((prior2-freepars).reshape(-1,1)/10, (freepars-prior1).reshape(-1,1)/10, axis=1)), axis=1)
step_size = np.min(step_size_options, axis=1)
if pos.shape[0]==0:
remove_zeroth = True
new_nwalkers = nwalkers-len(pos)
pos = np.zeros((1,ndim))
else:
remove_zeroth = False
new_nwalkers = nwalkers-len(pos)
pos = np.append(pos, np.array([freepars + np.array(step_size)*np.random.randn(ndim) for i in range(new_nwalkers)]).reshape(-1,ndim), axis=0)
if remove_zeroth:
pos = pos[1:]
in_range = np.array([((prior1[uniformprior] <= ii) & (ii <= prior2[uniformprior])).all() for ii in pos[:,uniformprior]])
in_range2 = np.array([((prior1[loguniformprior] <= np.log(ii)) & (np.log(ii) <= prior2[loguniformprior])).all() for ii in pos[:,loguniformprior]])
if not (np.any(in_range))&(np.any(in_range2)):
raise AssertionError('Failed to initialize any walkers within the set bounds for all parameters!\n'+
'Check your starting position, decrease your step size, or increase the bounds on your parameters')
elif not (np.all(in_range))&(np.all(in_range2)):
log.writelog('Warning: Failed to initialize all walkers within the set bounds for all parameters!')
log.writelog('Using {} walkers instead of the initially requested {} walkers'.format(np.sum(in_range), nwalkers))
pos = pos[in_range & in_range2]
nwalkers = pos.shape[0]
log.writelog('Running emcee...')
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(lc, model, prior1, prior2, priortype, freenames))
sampler.run_mcmc(pos, meta.run_nsteps, progress=True)
samples = sampler.get_chain(flat=True, discard=meta.run_nburn)
medians = []
for i in range(len(step_size)):
q = np.percentile(samples[:, i], [16, 50, 84])
medians.append(q[1])
fit_params = np.array(medians)
# Save the fit ASAP so plotting errors don't make you lose everything
save_fit(meta, lc, 'emcee', fit_params, freenames, samples)
if meta.isplots_S5 >= 5:
plots.plot_chain(sampler.get_chain(), lc, meta, freenames, fitter='emcee', full=True, nburn=meta.run_nburn)
plots.plot_chain(sampler.get_chain(discard=meta.run_nburn), lc, meta, freenames, fitter='emcee', full=False)
plots.plot_corner(samples, lc, meta, freenames, fitter='emcee')
# Make a new model instance
best_model = copy.copy(model)
best_model.components[0].update(fit_params, freenames)
model.update(fit_params, freenames)
if "scatter_ppm" in freenames:
ind = [i for i in np.arange(len(freenames)) if freenames[i][0:11] == "scatter_ppm"]
lc.unc_fit = np.ones_like(lc.flux) * fit_params[ind[0]] * 1e-6
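# ---------------------------------------------------------------------------
# Standalone sketch of the walker-initialisation idea used above: perturb the
# best-fit parameters with Gaussian noise scaled by step_size and keep only
# the walkers that start inside the uniform prior bounds. Names are local to
# this sketch, not Eureka API.
import numpy as np


def init_walkers_sketch(freepars, step_size, prior_lo, prior_hi, nwalkers, seed=0):
    rng = np.random.default_rng(seed)
    pos = freepars + step_size * rng.standard_normal((nwalkers, freepars.size))
    ok = np.all((pos >= prior_lo) & (pos <= prior_hi), axis=1)
    return pos[ok]

# Example:
#   pos = init_walkers_sketch(np.array([1.0, 0.5]), np.array([0.01, 0.01]),
#                             np.array([0.0, 0.0]), np.array([2.0, 1.0]), 100)
# ---------------------------------------------------------------------------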
"""
Model Classes
<NAME>, January 21, 2020
"""
import golly as g
import model_parameters as mparam
import random as rand
import numpy as np
import copy
"""
Make a class for seeds.
"""
#
# Note: Golly locates cells by x (horizontal) and y (vertical) coordinates,
# usually given in the format (x, y). On the other hand, we are storing
# these cells in matrices, where the coordinates are usually given in the
# format [row][column], where row is a vertical coordinate and column
# is a horizontal coordinate. Although it may be somewhat confusing, we
# use [x][y] for our matrices (x = row index, y = column index). That is:
#
# self.xspan = self.cells.shape[0]
# self.yspan = self.cells.shape[1]
#
class Seed:
"""
A class for seeds.
"""
#
# __init__(self, xspan, yspan, pop_size) -- returns NULL
#
def __init__(self, xspan, yspan, pop_size):
"""
Make an empty seed (all zeros).
"""
# width of seed on the x-axis
self.xspan = xspan
# height of seed on the y-axis
self.yspan = yspan
# initial seed of zeros, to be modified later
self.cells = np.zeros((xspan, yspan), dtype=np.int)
# initial history of zeros
self.history = np.zeros(pop_size, dtype=np.float)
# initial similarities of zeros
self.similarities = np.zeros(pop_size, dtype=np.float)
# position of seed in the population array, to be modified later
self.address = 0
# count of living cells (ones) in the seed, to be modified later
self.num_living = 0
#
# randomize(self, seed_density) -- returns NULL
#
def randomize(self, seed_density):
"""
Randomly set some cells to state 1. It is assumed that the
cells in the given seed are initially all in state 0. The
result is a seed in which the fraction of cells in state 1
is approximately equal to seed_density (with some random
variation). Strictly speaking, seed_density is the
expected value of the fraction of cells in state 1.
"""
for x in range(self.xspan):
for y in range(self.yspan):
if (rand.random() <= seed_density):
self.cells[x][y] = 1
#
# shuffle(self) -- returns a shuffled copy of the given seed
#
def shuffle(self):
"""
Make a copy of the given seed and then shuffle the cells in
the seed. The new shuffled seed will have the same dimensions
and the same density of 1s and 0s as the given seed, but the
locations of the 1s and 0s will be different. (There is a very
small probability that shuffling might not result in any change,
just as shuffling a deck of cards might not change the deck.)
The density of shuffled_seed is exactly the same as the density
of the given seed.
"""
#
shuffled_seed = copy.deepcopy(self)
#
# for each location [x0][y0], randomly choose another location
# [x1][y1] and swap the values of the cells in the two locations.
#
for x0 in range(self.xspan):
for y0 in range(self.yspan):
x1 = rand.randrange(self.xspan)
y1 = rand.randrange(self.yspan)
temp = shuffled_seed.cells[x0][y0]
shuffled_seed.cells[x0][y0] = shuffled_seed.cells[x1][y1]
shuffled_seed.cells[x1][y1] = temp
#
return shuffled_seed
#
#
# red2blue(self) -- returns NULL
#
def red2blue(self):
"""
Switch cells from state 1 (red) to state 2 (blue).
"""
for x in range(self.xspan):
for y in range(self.yspan):
if (self.cells[x][y] == 1):
self.cells[x][y] = 2
#
# insert(self, g, g_xmin, g_xmax, g_ymin, g_ymax) -- returns NULL
#
def insert(self, g, g_xmin, g_xmax, g_ymin, g_ymax):
"""
Write the seed into the Golly grid at a random location
within the given bounds.
g = the Golly universe
s = a seed
"""
step = 1
g_xstart = rand.randrange(g_xmin, g_xmax - self.xspan, step)
g_ystart = rand.randrange(g_ymin, g_ymax - self.yspan, step)
for s_x in range(self.xspan):
for s_y in range(self.yspan):
g_x = g_xstart + s_x
g_y = g_ystart + s_y
s_state = self.cells[s_x][s_y]
g.setcell(g_x, g_y, s_state)
#
# random_rotate(self) -- returns new_seed
#
def random_rotate(self):
"""
Randomly rotate and flip the given seed and return a new seed.
"""
rotation = rand.randrange(0, 4, 1) # 0, 1, 2, 3
flip = rand.randrange(0, 2, 1) # 0, 1
new_seed = copy.deepcopy(self)
# rotate by 90 degrees * rotation (0, 90, 180 270)
new_seed.cells = np.rot90(new_seed.cells, rotation)
if (flip == 1):
# flip upside down
new_seed.cells = np.flipud(new_seed.cells)
new_seed.xspan = new_seed.cells.shape[0]
new_seed.yspan = new_seed.cells.shape[1]
return new_seed
#
# fitness(self) -- returns fitness
#
def fitness(self):
"""
Calculate a seed's fitness from its history.
"""
history = self.history
return sum(history) / len(history)
#
# mutate(self, prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate)
# -- returns mutant
#
def mutate(self, prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate):
"""
Make a copy of self and return a mutated version of the copy.
"""
#
mutant = copy.deepcopy(self)
#
# prob_grow = probability of invoking grow()
# prob_flip = probability of invoking flip_bits()
# prob_shrink = probability of invoking shrink()
# seed_density = target density of ones in an initial random seed
# mutation_rate = probability of flipping an individual bit
#
assert prob_grow + prob_flip + prob_shrink == 1.0
#
uniform_random = rand.uniform(0, 1)
#
if (uniform_random < prob_grow):
# this will be invoked with a probability of prob_grow
mutant.grow(seed_density)
elif (uniform_random < (prob_grow + prob_flip)):
# this will be invoked with a probability of prob_flip
mutant.flip_bits(mutation_rate)
else:
# this will be invoked with a probability of prob_shrink
mutant.shrink()
# erase the parent's history from the child
pop_size = len(self.history)
mutant.history = np.zeros(pop_size, dtype=np.float)
return mutant
#
# flip_bits(self, mutation_rate) -- returns NULL
#
def flip_bits(self, mutation_rate):
"""
Mutate a seed by randomly flipping bits. Assumes the seed
contains 0s and 1s.
"""
num_mutations = 0
for s_x in range(self.xspan):
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < mutation_rate):
# flip cell value: 0 becomes 1 and 1 becomes 0
self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]
# count the number of mutations so far
num_mutations = num_mutations + 1
# force a minimum of one mutation -- there is no value
# in having duplicates in the population
if (num_mutations == 0):
s_x = rand.randrange(self.xspan)
s_y = rand.randrange(self.yspan)
self.cells[s_x][s_y] = 1 - self.cells[s_x][s_y]
#
# shrink(self) -- returns NULL
#
def shrink(self):
"""
Randomly remove rows or columns from a seed.
"""
# first we need to decide how to shrink
choice = rand.choice([0, 1, 2, 3])
# now do it
if ((choice == 0) and (self.xspan > mparam.min_s_xspan)):
# delete first row
self.cells = np.delete(self.cells, (0), axis=0)
elif ((choice == 1) and (self.xspan > mparam.min_s_xspan)):
# delete last row
self.cells = np.delete(self.cells, (-1), axis=0)
elif ((choice == 2) and (self.yspan > mparam.min_s_yspan)):
# delete first column
self.cells = np.delete(self.cells, (0), axis=1)
elif ((choice == 3) and (self.yspan > mparam.min_s_yspan)):
# delete last column
self.cells = np.delete(self.cells, (-1), axis=1)
# now let's update xspan and yspan to the new size
self.xspan = self.cells.shape[0]
self.yspan = self.cells.shape[1]
#
#
# grow(self, seed_density) -- returns NULL
#
def grow(self, seed_density):
"""
Randomly add or remove rows or columns from a seed. Assumes
the seed contains 0s and 1s.
"""
# - first we need to decide how to grow
choice = rand.choice([0, 1, 2, 3])
# - now do it
if (choice == 0):
# add a new row before the first row
self.cells = np.vstack([np.zeros(self.yspan, dtype=np.int), self.cells])
# initialize the new row with a density of approximately seed_density
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[0][s_y] = 1
#
elif (choice == 1):
# add a new row after the last row
self.cells = np.vstack([self.cells, np.zeros(self.yspan, dtype=np.int)])
# initialize the new row with a density of approximately seed_density
for s_y in range(self.yspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[-1][s_y] = 1
#
elif (choice == 2):
# add a new column before the first column
self.cells = np.hstack([np.zeros((self.xspan, 1), dtype=np.int), self.cells])
# initialize the new column with a density of approximately seed_density
for s_x in range(self.xspan):
if (rand.uniform(0, 1) < seed_density):
self.cells[s_x][0] = 1
#
elif (choice == 3):
# add a new column after the last column
self.cells = np.hstack([self.cells, np.zeros((self.xspan, 1), dtype=np.int)])
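# ---------------------------------------------------------------------------
# Standalone sketch of the grow/shrink array edits used above (the Seed class
# itself only runs inside Golly because of the `import golly as g` at the top
# of this module).
import numpy as np

cells = np.zeros((3, 4), dtype=int)
cells = np.vstack([np.zeros(cells.shape[1], dtype=int), cells])       # row before first
cells = np.hstack([cells, np.zeros((cells.shape[0], 1), dtype=int)])  # column after last
cells = np.delete(cells, 0, axis=0)                                   # drop first row
assert cells.shape == (3, 5)
# ---------------------------------------------------------------------------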
import numpy as np
import numba as nb
import numpy.linalg as la
from math import pi # pylint: disable=no-name-in-module
@nb.jit(nb.float32[:, :](
nb.float32
), nopython=True, nogil=True )
def rot2d(alpha):
""" get a 2d rotation matrix
:param alpha: in radians
:returns rotation matrix
"""
R = np.array([
[np.cos(alpha), -np.sin(alpha)],
[np.sin(alpha), np.cos(alpha)]
], np.float32)
return np.ascontiguousarray(R)
@nb.jit(nb.float32[:, ](
nb.float32[:, ], nb.float32[:, ]
), nopython=True, nogil=True)
def get_2d_normal(left, right):
""" gets the 2d normal
:param left: [x, y]
:param right: [x, y]
"""
lr = left - right
lr = lr/la.norm(lr)
R = rot2d(-pi/2)
R = np.ascontiguousarray(R)
n = R @ lr
return n
@nb.jit(nb.float32(
nb.float32[:, ]
), nopython=True, nogil=True)
def get_2d_rotation_for_upward(p):
""" gets alpha to rotate point p to [0, 1]
:param p: [x, y]
:return angle in radians
"""
p = p / la.norm(p) # normalize (just to be sure)
up = np.array([0, 1], np.float32)
alpha = np.arccos(up @ p)
if p[0] < 0:
alpha *= -1
return alpha
# === 3 dimensions ===
def distances(human1, human2):
""" calculate distances between two humans for each joint
:param human1: [ (x, y, z), ... ]
:param human2: [ (x, y, z), ... ]
"""
J = len(human1)
assert len(human2) == J
return _distances(human1, human2)
@nb.jit(nb.float32[:, ](
nb.float32[:, :], nb.float32[:, :]
), nopython=True, nogil=True)
def _distances(human1, human2):
""" calculate distances between two humans for each joint
:param human1: [ (x, y, z), ... ]
:param human2: [ (x, y, z), ... ]
"""
J = len(human1)
results = np.empty((J, ), np.float32)
for jid in range(J):
a = human1[jid]
b = human2[jid]
results[jid] = la.norm(a - b)
return results
@nb.jit(nb.float32[:, ](
nb.float32[:, ], nb.float32[:, ], nb.int32[:, ]
), nopython=True, nogil=True)
def get_3d_normal_on_plane(left, right, plane):
""" calculate 3d normal on defined plane
:param left: (x, y, z)
:param right: (x, y, z)
:return
"""
oo_plane_dim = -1 # find out-of-plane dimension
for i in range(3):
i_is_not_in_plane = True
for j in plane:
if i == j:
i_is_not_in_plane = False
if i_is_not_in_plane:
oo_plane_dim = i
break
oo_mean = (left[oo_plane_dim] + right[oo_plane_dim]) / 2
left = left[plane]
right = right[plane]
n_2d = get_2d_normal(left, right)
result = np.empty((3, ), np.float32)
result[plane[0]] = n_2d[0]
result[plane[1]] = n_2d[1]
result[oo_plane_dim] = oo_mean
return result
@nb.jit(nb.float32[:, :](
nb.float32, nb.float32, nb.float32
), nopython=True, nogil=True)
def rot3d(a, b, c):
"""
"""
Rx = np.array([
[1., 0., 0.],
[0, np.cos(a), -np.sin(a)],
[0, np.sin(a), np.cos(a)]
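# ---------------------------------------------------------------------------
# Quick sanity check for the 2-d helpers above (a sketch; the jitted functions
# compile with the declared signatures). rot2d(pi/2) should map the x-axis
# onto the y-axis, and the normal of a segment pointing "up" should point
# along +x.
import numpy as np
from math import pi

R = rot2d(np.float32(pi / 2))
assert np.allclose(R @ np.array([1.0, 0.0], np.float32), [0.0, 1.0], atol=1e-6)

n = get_2d_normal(np.array([0.0, 1.0], np.float32),
                  np.array([0.0, -1.0], np.float32))
assert np.allclose(n, [1.0, 0.0], atol=1e-6)
# ---------------------------------------------------------------------------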
import sys
import numpy as np
import math
import random
import ast
SAFE_FX = {
'exp': math.exp,
'input': input,
'int': int,
'float': float,
'round': round,
'sqrt': math.sqrt,
'floor': math.floor,
'ceil': math.ceil,
'abs': abs,
'all': all,
'any': any,
'bool': bool,
'complex': complex,
'divmod': divmod,
'max': max,
'min': min,
'pow': pow,
'ord': ord,
'sum': sum,
'tuple': tuple,
'copysign': math.copysign,
'fabs': math.fabs,
'factorial': math.factorial,
'fmod': math.fmod,
'frexp': math.frexp,
'ldexp': math.ldexp,
'trunc': math.trunc,
'log': math.log,
'acos': math.acos,
'atan': math.atan,
'asin': math.asin,
'sin': math.sin,
'cos': math.cos,
'tan': math.tan,
'atan2': math.atan2,
'hypot': math.hypot,
'degrees': math.degrees,
'radians': math.radians,
'acosh': math.acosh,
'asinh': math.asinh,
'atanh': math.atanh,
'cosh': math.cosh,
'sinh': math.sinh,
'tanh': math.tanh,
}
SAFE_NODES = set(
(ast.Expression,
ast.Num,
ast.Str,
ast.Constant,  # literals parse as ast.Constant on Python >= 3.8
ast.Call,
ast.Name,
ast.Load,
ast.BinOp,
ast.Add,
ast.Sub,
ast.Mult,
ast.Div,
ast.FloorDiv,
ast.Mod,
ast.Pow,
ast.LShift,
ast.RShift,
ast.BitOr,
ast.BitXor,
ast.BitAnd,
ast.keyword,
ast.Tuple,
ast.UnaryOp,
ast.USub,
ast.Lambda,
ast.arguments,
ast.arg,
ast.IfExp,
ast.Compare,
ast.Eq,
ast.BoolOp,
ast.And,
ast.Or,
ast.GtE,
ast.Not,
ast.LtE,
ast.Lt,
ast.Gt,
ast.Invert,
)
)
class CleansingNodeVisitor(ast.NodeVisitor):
def __init__(self,nodelist,fxlist):
self.nodelist = nodelist
self.fxlist = fxlist
def generic_visit(self, node):
if type(node) not in self.nodelist:
raise Exception("%s not in SAFE_NODES" % type(node))
super(CleansingNodeVisitor, self).generic_visit(node)
def visit_Call(self, call):
try:
if call.func.id not in self.fxlist:
raise Exception("Unknown function: %s" % call.func.id)
except AttributeError:
print(call)
def safe_eval(s):
tree = ast.parse(s, mode='eval')
cnv = CleansingNodeVisitor(SAFE_NODES,SAFE_FX)
cnv.visit(tree)
compiled = compile(tree, s, "eval")
return(eval(compiled, SAFE_FX))
def recursive_eval(s):
safe_fx = SAFE_FX
safe_fx['eval'] = safe_eval
tree = ast.parse(s, mode='eval')
cnv = CleansingNodeVisitor(SAFE_NODES,safe_fx)
cnv.visit(tree)
compiled = compile(tree, s, "eval")
return(eval(compiled, safe_fx))
class Photon:
class Halt(RuntimeError):
pass
@staticmethod
def normalized(a):
if any(a!=0):
return a/np.linalg.norm(a)
return a
def doprint(self):
if self.verbose:
print(self.printbuffer)
self.printbuffer = ""
def vprint(self,text):
self.printbuffer += text+"\n"
def maybeclearbuffer(self):
if not self.showmiss:
self.printbuffer = ""
def __init__(self,listing,filename,verbose=False,showmiss=False):
self.verbose = verbose
self.showmiss = showmiss
self.printbuffer = ""
parts = listing.splitlines()
if "[verbose]" in parts[0]:
if "[showmiss]" in parts[0]:
self.showmiss = True
self.verbose = True
parts.pop(0)
start = parts[0]
x,y,z,vx,vy,vz = recursive_eval(start)
self.start=np.array([x,y,z])
self.grad=np.array([vx,vy,vz])
self.center=np.array([round(x) for x in self.start])
expr="lambda x,y,z:"+"".join(parts[1:])
self.fx = recursive_eval(expr)
self.t = 0
def curpoint(self):
return self.start+self.t*self.grad
def update_t(self):
next_t=np.inf
# if round(self.curpoint()[1]+4.5)==0:
# import pdb;pdb.set_trace()
for i,dim in enumerate(self.curpoint()):
if self.grad[i]==0:
continue
if abs(dim-0.5-round(dim-0.5))<0.000000000001:
nextboundary=dim+np.sign(self.grad)[i]
elif self.grad[i]<0:
nextboundary=math.floor(dim+0.5)-0.5
else:
nextboundary=math.ceil(dim+0.5)-0.5
candidate_t = np.roots([self.grad[i],self.start[i]-nextboundary])[0]
if next_t>candidate_t>self.t:
next_t=candidate_t
self.t=next_t
def identify_cube(self):
candidates=[[],[],[]]
for i,dim in enumerate(self.curpoint()):
if abs(dim-0.5-round(dim-0.5))<0.00000000001:
candidates[i]+=[round(dim-0.5),round(dim+0.5)]
else:
candidates[i]+=[round(dim)]
center=[]
dist=np.inf
for x in candidates[0]:
for y in candidates[1]:
for z in candidates[2]:
point = np.array([x, y, z])
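# ---------------------------------------------------------------------------
# Usage sketch for the whitelisting evaluator above: expressions may only use
# node types in SAFE_NODES and call names in SAFE_FX (plus `eval` itself for
# recursive_eval); anything else raises. Note that ast.Constant was added to
# SAFE_NODES above so that numeric/string literals pass on Python >= 3.8.
assert safe_eval("sqrt(16) + max(1, 2)") == 6.0
assert recursive_eval("eval('floor(2.9)') * 3") == 6
# ---------------------------------------------------------------------------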
import tensorflow as tf
import numpy as np
import math
import matplotlib.pyplot as plt
def corrupt(x):
r = tf.add(x, tf.cast(tf.random_uniform(shape=tf.shape(x),minval=0,maxval=0.1,dtype=tf.float32), tf.float32))
# r = tf.multiply(x,tf.cast(tf.random_uniform(shape=tf.shape(x), minval=0.5, maxval=1.5, dtype=tf.float32), tf.float32))
return r
def kl_divergence(p, p_hat):
# return tf.reduce_mean(p * tf.log(tf.abs(p)) - p * tf.log(tf.abs(p_hat)) + (1 - p) * tf.log(tf.abs(1 - p)) - (1 - p) * tf.log(tf.abs(1 - p_hat)))
return tf.reduce_mean(p * tf.log(tf.abs(p)) - p * tf.log(tf.abs(p_hat)) + (1 - p) * tf.log(1 - p) - (1 - p) * tf.log(1 - p_hat))
def autoencoder(dimensions=[784, 512, 256, 64]):
x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')
corrupt_prob = tf.placeholder(tf.float32, [1])
current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
noise_input = current_input
# Build the encoder
print("========= encoder begin ==========")
encoder = []
for layer_i, n_output in enumerate(dimensions[1:]):
n_input = int(current_input.get_shape()[1])
print("encoder : layer_i - n_output - n_input",layer_i,n_output,n_input)
W = tf.Variable(tf.random_uniform([n_input, n_output],-1.0 / math.sqrt(n_input),1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
print("========= encoder finish =========")
# latent representation
encoder_out = current_input
print(encoder_out.shape)
encoder.reverse()
# Build the decoder using the same weights
print("========= decoder begin ==========")
for layer_i, n_output in enumerate(dimensions[:-1][::-1]):
print("decoder : layer_i - n_output", layer_i, n_output)
W = tf.transpose(encoder[layer_i]) # transpose of the weights
b = tf.Variable(tf.zeros([n_output]))
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
print("========= decoder finish =========")
# now have the reconstruction through the network
reconstruction = current_input
# kl = tf.reduce_mean(-tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=z/0.01))
p_hat = tf.reduce_mean(encoder_out,0)
p = np.repeat([-0.05], 200).astype(np.float32)
dummy = np.repeat([1], 200).astype(np.float32)
kl = kl_divergence(p_hat,p)
cost = tf.reduce_mean(tf.square(reconstruction - x)) + 0.01*kl
# cost = 0.5 * tf.reduce_sum(tf.square(y - x))
return {
'x': x,
'encoder_out': encoder_out,
'reconstruction': reconstruction,
'corrupt_prob': corrupt_prob,
'cost': cost,
'noise_input' : noise_input,
'kl' : kl
}
def train_DOA():
from get_csv_data import HandleData
import csv
################ TEST DATA ################
data = HandleData(total_data=880, data_per_angle=110, num_angles=8)
antenna_data, label_data = data.get_synthatic_data(test_data=False)
antenna_data_mean = np.mean(antenna_data, axis=0)
###########################################
################ learning parameters ######
learning_rate = 0.001
batch_size = 20
n_epochs = 1000
###########################################
################ AutoEncoder ##############
ae = autoencoder(dimensions=[4, 200])
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(ae['cost'])
###########################################
################ Training #################
sess = tf.Session()
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
########### restore ###########
# saver_restore = tf.train.import_meta_graph('./DAE_save/DenoisingAE_save_noise_add.meta')
# saver_restore = tf.train.import_meta_graph('DenoisingAE_save_noise_multiply.meta')
# saver_restore.restore(sess, tf.train.latest_checkpoint('./DAE_save/'))
###############################
train=0
for epoch_i in range(n_epochs):
for batch_i in range(data.total_data//batch_size):
batch_xs, _ = data.next_batch(batch_size)
train = np.array([img - antenna_data_mean for img in batch_xs])
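# ---------------------------------------------------------------------------
# Numpy sketch of the sparsity penalty used above: the KL divergence between a
# target activation level p and the mean hidden activation p_hat. (The graph
# code above folds abs() into the log arguments slightly differently; this is
# the textbook form.)
import numpy as np


def kl_sparsity_sketch(p, p_hat, eps=1e-8):
    p_hat = np.clip(np.abs(p_hat), eps, 1 - eps)
    return np.mean(p * np.log(p / p_hat) + (1 - p) * np.log((1 - p) / (1 - p_hat)))

# kl_sparsity_sketch(0.05, np.full(200, 0.05))  -> ~0
# kl_sparsity_sketch(0.05, np.full(200, 0.50))  -> > 0
# ---------------------------------------------------------------------------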
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
#Created by: <NAME>
#BE department, University of Pennsylvania
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import numpy as np
import nibabel as nib
import os
from tqdm import tqdm
from functools import partial
import matplotlib.pyplot as plt
from multiprocessing import Pool
from scipy.spatial.distance import directed_hausdorff
import data_utils.surface_distance as surface_distance
def estimate_weights_mfb(labels):
labels = labels.astype(np.float64)
class_weights = np.zeros_like(labels)
unique, counts = np.unique(labels, return_counts=True)
median_freq = np.median(counts)
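# ---------------------------------------------------------------------------
# Standalone sketch of median frequency balancing (the function above is cut
# off here): each class is weighted by median_freq / freq(class), so rare
# classes are up-weighted relative to common ones.
import numpy as np


def mfb_weights_sketch(labels):
    unique, counts = np.unique(labels, return_counts=True)
    median_freq = np.median(counts)
    weights = np.zeros_like(labels, dtype=np.float64)
    for cls, cnt in zip(unique, counts):
        weights[labels == cls] = median_freq / cnt
    return weights

# mfb_weights_sketch(np.array([0, 0, 0, 1]))  -> [0.667, 0.667, 0.667, 2.0]
# ---------------------------------------------------------------------------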
import tensorflow as tf
import numpy as np
import copy
class PolicyWithValue:
def __init__(self, observation_space, action_space, name, temp=0.1):
self.ob_space = observation_space
self.act_space = action_space
with tf.variable_scope(name):
self.obs = tf.placeholder(dtype=tf.float32, shape=[None] + list(self.ob_space), name='observation')
with tf.variable_scope('policy_net'):
layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
layer_3 = tf.layers.dense(inputs=layer_2, units=self.act_space, activation=tf.tanh)
self.act_probs = tf.layers.dense(inputs=tf.divide(layer_3, temp), units=self.act_space,
activation=tf.nn.softmax)
with tf.variable_scope('value_net'):
layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
self.v_preds = tf.layers.dense(inputs=layer_2, units=1, activation=None)
# for stochastic
self.act_stochastic = tf.multinomial(tf.log(self.act_probs), num_samples=1)
self.act_stochastic = tf.reshape(self.act_stochastic, shape=[-1])
# for deterministic
self.act_deterministic = tf.argmax(self.act_probs, axis=1)
self.scope = tf.get_variable_scope().name
def _get_action(self, sess, obs, stochastic=True):
if stochastic:
return sess.run([self.act_stochastic, self.v_preds], feed_dict={self.obs: obs})
else:
return sess.run([self.act_deterministic, self.v_preds], feed_dict={self.obs: obs})
def _get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
class PPOAgent:
def __init__(self, policy, old_policy, horizon, learning_rate, epochs,
batch_size, gamma, lmbd, clip_value, value_coeff, entropy_coeff):
self.sess = tf.Session()
self.writer = tf.summary.FileWriter('./log/train', self.sess.graph)
self.policy = policy
self.old_policy = old_policy
self.horizon = horizon
self.batch_size = batch_size
self.learning_rate = learning_rate
self.epochs = epochs
self.gamma = gamma
self.lmbd = lmbd
print('horizon : {}'.format(self.horizon))
print('batch_size : {}'.format(self.batch_size))
print('epochs : {}'.format(self.epochs))
print('learning_rate : {}'.format(self.learning_rate))
print('gamma : {}'.format(self.gamma))
print('lambda : {}'.format(self.lmbd))
self.iteration = 0
self.list_observations = []
self.list_actions = []
self.list_v_preds = []
self.list_rewards = []
pi_trainable = self.policy._get_trainable_variables()
old_pi_trainable = self.old_policy._get_trainable_variables()
# assignment operation to update old_policy with policy
with tf.variable_scope('assign_op'):
self.assign_ops = []
for v_old, v in zip(old_pi_trainable, pi_trainable):
self.assign_ops.append(tf.assign(v_old, v))
# inputs for train operation
with tf.variable_scope('train_input'):
self.actions = tf.placeholder(dtype=tf.int32, shape=[None], name='actions')
self.rewards = tf.placeholder(dtype=tf.float32, shape=[None], name='rewards')
self.v_preds_next = tf.placeholder(dtype=tf.float32, shape=[None], name='v_preds_next')
self.gaes = tf.placeholder(dtype=tf.float32, shape=[None], name='gaes')
act_probs = self.policy.act_probs
act_probs_old = self.old_policy.act_probs
# probability of actions chosen with the policy
act_probs = act_probs * tf.one_hot(indices=self.actions, depth=act_probs.shape[1])
act_probs = tf.reduce_sum(act_probs, axis=1)
# probabilities of actions which agent took with old policy
act_probs_old = act_probs_old * tf.one_hot(indices=self.actions, depth=act_probs_old.shape[1])
act_probs_old = tf.reduce_sum(act_probs_old, axis=1)
# clipped surrogate objective (7)
# TODO adaptive KL penalty coefficient can be added (8)
with tf.variable_scope('loss/clip'):
ratios = tf.exp(tf.log(act_probs) - tf.log(act_probs_old))
clipped_ratios = tf.clip_by_value(ratios, clip_value_min=1 - clip_value, clip_value_max=1 + clip_value)
loss_clip = tf.minimum(tf.multiply(self.gaes, ratios), tf.multiply(self.gaes, clipped_ratios))
loss_clip = tf.reduce_mean(loss_clip)
tf.summary.scalar('loss_clip', loss_clip)
# squared difference between value (9)
with tf.variable_scope('loss/value'):
v_preds = self.policy.v_preds
loss_v = tf.squared_difference(self.rewards + self.gamma * self.v_preds_next, v_preds)
loss_v = tf.reduce_mean(loss_v)
tf.summary.scalar('loss_value', loss_v)
# entropy bonus (9)
with tf.variable_scope('loss/entropy'):
entropy = -tf.reduce_sum(self.policy.act_probs *
tf.log(tf.clip_by_value(self.policy.act_probs, 1e-10, 1.0)), axis=1)
entropy = tf.reduce_mean(entropy, axis=0)
tf.summary.scalar('entropy', entropy)
# loss (9)
with tf.variable_scope('loss'):
# c_1 = value_coeff, c_2 = entropy_coeff
# loss = (clipped loss) - c_1 * (value loss) + c_2 * (entropy bonus)
loss = loss_clip - value_coeff * loss_v + entropy_coeff * entropy
# loss : up
# clipped loss : up
# value loss : down
# entropy : up
tf.summary.scalar('loss', loss)
# gradient ascent using adam optimizer
loss = -loss
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, epsilon=1e-5)
self.train_op = optimizer.minimize(loss, var_list=pi_trainable)
self.sess.run(tf.global_variables_initializer())
self.merge_op = tf.summary.merge_all()
def action(self, obs, stochastic=True):
obs = np.stack([obs]).astype(dtype=np.float32)
act, v_pred = self.policy._get_action(sess=self.sess, obs=obs, stochastic=stochastic)
act = np.asscalar(act)
v_pred = np.asscalar(v_pred)
self.list_observations.append(obs)
self.list_actions.append(act)
self.list_v_preds.append(v_pred)
return act, v_pred
def observe_and_learn(self, reward, terminal, score=False):
self.list_rewards.append(reward)
if terminal == False:
# if have not reached end of the episode yet, wait
return
else:
# if have reached end of the episode, train
# make v_preds_next from v_preds
self.list_v_preds_next = self.list_v_preds[1:] + [0]
# get generalized advantage estimations
self.list_gaes = self._get_gaes(self.list_rewards,
self.list_v_preds,
self.list_v_preds_next)
# make list_* into numpy array to feed to placeholders
np_observations = np.reshape(self.list_observations, newshape=[-1] + list(self.policy.ob_space))
np_actions = np.array(self.list_actions).astype(dtype=np.int32)
np_rewards = np.array(self.list_rewards).astype(dtype=np.float32)
np_v_preds_next = np.array(self.list_v_preds_next).astype(dtype=np.float32)
np_gaes = np.array(self.list_gaes).astype(dtype=np.float32)
np_gaes = (np_gaes - np_gaes.mean()) / np_gaes.std()
input_samples = [np_observations, np_actions, np_rewards, np_v_preds_next, np_gaes]
# update old policy with current policy
self._update_old_policy()
# sample horizon
if self.horizon != -1:
horizon_indices = np.random.randint(low=0, high=np_observations.shape[0], size=self.horizon)
horizon_samples = [ | np.take(a=input_sample, indices=horizon_indices, axis=0) | numpy.take |
import numpy as np
from astropy.io import fits
import os
import re
import glob
import copy
from vorbin.voronoi_2d_binning import voronoi_2d_binning
import matplotlib.pyplot as plt
from scipy import interpolate, stats, optimize
import gc
from matplotlib import gridspec, animation
try:
import tqdm
except:
tqdm = None
from joblib import Parallel, delayed
plt.style.use('dark_background')
def read_muse_ifu(fits_file,z=0):
"""
Read in a MUSE-formatted IFU cube
:param fits_file: str
File path to the FITS IFU cube
:param z: float, optional
The redshift of the spectrum, since MUSE cubes often do not provide this information
:return nx: int
x-dimension (horizontal axis) of the cube
:return ny: int
y-dimension (vertical axis) of the cube
:return nz: int
z-dimension (wavelength axis) of the cube
:return ra: float
Right ascension
:return dec: float
Declination
:return museid: str
The MUSE ID of the observation
:return wave: array
1-D Wavelength array with dimension (nz,)
:return flux: array
3-D flux array with dimensions (nz, ny, nx)
:return ivar: array
3-D inverse variance array with dimensions (nz, ny, nx)
:return specres: array
1-D spectral resolution ("R") array with dimension (nz,)
:return mask: array
3-D mask array with dimensions (nz, ny, nx)
:return object_name: str
The name of the object, if provided in the FITS header
"""
# Load the file
# https://www.eso.org/rm/api/v1/public/releaseDescriptions/78
with fits.open(fits_file) as hdu:
# First axis is wavelength, then 2nd and 3rd are image x/y
try:
nx, ny, nz = hdu[1].header['NAXIS1'], hdu[1].header['NAXIS2'], hdu[1].header['NAXIS3']
ra = hdu[0].header['RA']
dec = hdu[0].header['DEC']
except:
# ra = hdu[0].header['ESO ADA GUID RA']
# dec = hdu[0].header['ESO ADA GUID DEC']
nx, ny, nz = hdu[0].header['NAXIS1'], hdu[0].header['NAXIS2'], hdu[0].header['NAXIS3']
ra = hdu[0].header['CRVAL1']
dec = hdu[0].header['CRVAL2']
primary = hdu[0].header
try:
object_name = primary['OBJECT']
except:
object_name = None
i = 1
museid = []
while True:
try:
museid.append(primary['OBID'+str(i)])
i += 1
except:
break
# Get unit of flux, assuming 10^-x erg/s/cm2/Angstrom/spaxel
# unit = hdu[0].header['BUNIT']
# power = int(re.search('10\*\*(\(?)(.+?)(\))?\s', unit).group(2))
# scale = 10**(-17) / 10**power
try:
# 3d rectified cube in units of 10(-20) erg/s/cm2/Angstrom/spaxel [NX x NY x NWAVE], convert to 10(-17)
flux = hdu[1].data
# Variance (sigma2) for the above [NX x NY x NWAVE], convert to 10(-17)
var = hdu[2].data
# Wavelength vector must be reconstructed, convert from nm to angstroms
header = hdu[1].header
wave = np.array(header['CRVAL3'] + header['CD3_3']*np.arange(header['NAXIS3']))
# wave = np.linspace(primary['WAVELMIN'], primary['WAVELMAX'], nz) * 10
# Median spectral resolution at (wavelmin + wavelmax)/2
# dlambda = cwave / primary['SPEC_RES']
# specres = wave / dlambda
# Default behavior for MUSE data cubes using https://www.aanda.org/articles/aa/pdf/2017/12/aa30833-17.pdf equation 7
dlambda = 5.835e-8 * wave**2 - 9.080e-4 * wave + 5.983
specres = wave / dlambda
# Scale by the measured spec_res at the central wavelength
spec_cent = primary['SPEC_RES']
cwave = np.nanmedian(wave)
c_dlambda = 5.835e-8 * cwave**2 - 9.080e-4 * cwave + 5.983
scale = 1 + (spec_cent - cwave/c_dlambda) / spec_cent
specres *= scale
except:
flux = hdu[0].data
var = (0.1 * flux)**2
wave = np.arange(primary['CRVAL3'], primary['CRVAL3']+primary['CDELT3']*(nz-1), primary['CDELT3'])
# specres = wave / 2.6
dlambda = 5.835e-8 * wave**2 - 9.080e-4 * wave + 5.983
specres = wave / dlambda
ivar = 1/var
mask = np.zeros_like(flux)
return nx,ny,nz,ra,dec,museid,wave,flux,ivar,specres,mask,object_name
def read_manga_ifu(fits_file,z=0):
"""
Read in a MANGA-formatted IFU cube
:param fits_file: str
File path to the FITS IFU cube
:param z: float, optional
The redshift of the spectrum, this is unused.
:return nx: int
x-dimension (horizontal axis) of the cube
:return ny: int
y-dimension (vertical axis) of the cube
:return nz: int
z-dimension (wavelength axis) of the cube
:return ra: float
Right ascension
:return dec: float
Declination
:return mangaid: str
The MANGA ID of the observation
:return wave: array
1-D Wavelength array with dimension (nz,)
:return flux: array
3-D flux array with dimensions (nz, ny, nx)
:return ivar: array
3-D inverse variance array with dimensions (nz, ny, nx)
:return specres: array
1-D spectral resolution ("R") array with dimension (nz,)
:return mask: array
3-D mask array with dimensions (nz, ny, nx)
:return None:
To mirror the output length of read_muse_ifu
"""
# Load the file
# https://data.sdss.org/datamodel/files/MANGA_SPECTRO_REDUX/DRPVER/PLATE4/stack/manga-CUBE.html#hdu1
with fits.open(fits_file) as hdu:
# First axis is wavelength, then 2nd and 3rd are image x/y
nx, ny, nz = hdu[1].header['NAXIS1'], hdu[1].header['NAXIS2'], hdu[1].header['NAXIS3']
try:
ra = hdu[0].header['OBJRA']
dec = hdu[0].header['OBJDEC']
except:
ra = hdu[1].header['IFURA']
dec = hdu[1].header['IFUDEC']
primary = hdu[0].header
ebv = primary['EBVGAL']
mangaid = primary['MANGAID']
# 3d rectified cube in units of 10(-17) erg/s/cm2/Angstrom/spaxel [NX x NY x NWAVE]
flux = hdu[1].data
# Inverse variance (1/sigma2) for the above [NX x NY x NWAVE]
ivar = hdu[2].data
# Pixel mask [NX x NY x NWAVE]. Defined values are set in sdssMaskbits.par
mask = hdu[3].data
# Wavelength vector [NWAVE]
wave = hdu[6].data
# Median spectral resolution as a function of wavelength for the fibers in this IFU [NWAVE]
specres = hdu[7].data
# ebv = hdu[0].header['EBVGAL']
return nx,ny,nz,ra,dec,mangaid,wave,flux,ivar,specres,mask,None
def prepare_ifu(fits_file,z,format,aperture=None,voronoi_binning=True,fixed_binning=False,targetsn=None,cvt=True,voronoi_plot=True,quiet=True,wvt=False,
maxbins=800,snr_threshold=0.5,fixed_bin_size=10,use_and_mask=True,nx=None,ny=None,nz=None,ra=None,dec=None,dataid=None,wave=None,flux=None,ivar=None,
specres=None,mask=None,objname=None):
"""
Deconstruct an IFU cube into individual spaxel files for fitting with BADASS
:param fits_file: str
The file path to the IFU FITS file; if format == 'user', this field may be left as None, '', or any other filler value
:param z: float
The redshift of the spectrum
:param aperture: array, optional
The lower-left and upper-right corners of a square aperture, formatted as [y0, y1, x0, x1]
:param voronoi_binning: bool
Whether or not to bin spaxels using the voronoi method (grouping to read a certain SNR threshold). Default True.
Mutually exclusive with fixed_binning.
:param fixed_binning: bool
Whether or not to bin spaxels using a fixed size. Default False.
Mutually exclusive with voronoi_binning.
:param targetsn: float, optional
The target SNR to bin by, if using voronoi binning.
:param cvt: bool
Vorbin CVT option (see the vorbin package docs). Default True.
:param voronoi_plot: bool
Whether or not to plot the voronoi bin structure. Default True.
:param quiet: bool
Vorbin quiet option (see the vorbin package docs). Default True.
:param wvt: bool
Vorbin wvt option (see the vorbin package docs). Default False.
:param maxbins: int
If no target SNR is provided for voronoi binning, maxbins may be specified, which will automatically calculate
the target SNR required to reach the number of bins desired. Default 800.
:param snr_threshold: float
Minimum SNR threshold, below which spaxel data will be removed and not fit.
:param fixed_bin_size: int
If using fixed binning, this is the side length of the square bins, in units of spaxels.
:param use_and_mask: bool
Whether or not to save the and_mask data.
:param nx: int, optional
x-dimension of the cube, only required if format == 'user'
:param ny: int, optional
y-dimension of the cube, only required if format == 'user'
:param nz: int, optional
z-dimension of the cube, only required if format == 'user'
:param ra: float, optional
Right ascension of the cube, only required if format == 'user'
:param dec: float, optional
Declination of the cube, only required if format == 'user'
:param dataid: str, optional
ID of the cube, only required if format == 'user'
:param wave: array, optional
1-D wavelength array with shape (nz,), only required if format == 'user'
:param flux: array, optional
3-D flux array with shape (nz, ny, nx), only required if format == 'user'
:param ivar: array, optional
3-D inverse variance array with shape (nz, ny, nx), only required if format == 'user'
:param specres: array, optional
1-D spectral resolution ("R") array with shape (nz,), only required if format == 'user'
:param mask: array, optional
3-D mask array with shape (nz, ny, nx), only required if format == 'user'
:param objname: str, optional
The name of the object, only required if format == 'user'
:return wave: array
1-D wavelength array with shape (nz,)
:return flux: array
3-D masked flux array with shape (nz, ny, nx)
:return ivar: array
3-D masked inverse variance array with shape (nz, ny, nx)
:return mask: array
3-D mask array with shape (nz, ny, nx)
:return fwhm_res: array
1-D FWHM resolution array with shape (nz,)
:return binnum: array
Bin number array that specifies which spaxels are in each bin (see the vorbin docs)
:return npixels: array
Number of spaxels in each bin (see the vorbin docs)
:return xpixbin: array
The x positions of spaxels in each bin
:return ypixbin: array
The y positions of spaxels in each bin
:return z: float
The redshift
:return dataid: str
The data ID
:return objname: str
The object name
"""
assert format in ('manga', 'muse', 'user'), "format must be either 'manga' or 'muse'; no others currently supported!"
# Read the FITS file using the appropriate parsing function
# no more eval 🥲
if format == 'manga':
nx,ny,nz,ra,dec,dataid,wave,flux,ivar,specres,mask,objname = read_manga_ifu(fits_file,z)
elif format == 'muse':
nx,ny,nz,ra,dec,dataid,wave,flux,ivar,specres,mask,objname = read_muse_ifu(fits_file,z)
else:
# wave array shape = (nz,)
# flux, ivar array shape = (nz, ny, nx)
# specres can be a single value or an array of shape (nz,)
# VALIDATE THAT USER INPUTS ARE IN THE CORRECT FORMAT
for value in (nx, ny, nz, ra, dec, wave, flux, specres):
assert value is not None, "For user spec, all of (nx, ny, nz, ra, dec, wave, flux, specres) must be specified!"
if ivar is None:
print("WARNING: No ivar was input. Defaulting to sqrt(flux).")
ivar = np.sqrt(flux)
if mask is None:
mask = np.zeros(flux.shape, dtype=int)
assert wave.shape == (nz,), "Wave array shape should be (nz,)"
assert flux.shape == (nz, ny, nx), "Flux array shape should be (nz, ny, nx)"
assert ivar.shape == (nz, ny, nx), "Ivar array shape should be (nz, ny, nx)"
assert mask.shape == (nz, ny, nx), "Mask array shape should be (nz, ny, nx)"
assert (type(specres) in (int, float, np.int_, np.float_)) or (specres.shape == (nz,)), "Specres should be a float or an array of shape (nz,)"
loglam = np.log10(wave)
# FWHM Resolution in angstroms:
fwhm_res = wave / specres # dlambda = lambda / R; R = lambda / dlambda
if not use_and_mask:
mask = np.zeros(flux.shape, dtype=int)
# Converting to wdisp -- so that 2.355*wdisp*dlam_gal = fwhm_res
# if format == 'manga':
# c = 299792.458 # speed of light in km/s
# frac = wave[1]/wave[0] # Constant lambda fraction per pixel
# dlam_gal = (frac-1)*wave # Size of every pixel in Angstrom
# vdisp = c / (2.355*specres) # delta v = c / R in km/s
# velscale = np.log(frac) * c # Constant velocity scale in km/s per pixel
# wdisp = vdisp / velscale # Intrinsic dispersion of every pixel, in pixels units
minx, maxx = 0, nx
miny, maxy = 0, ny
if aperture:
miny, maxy, minx, maxx = aperture
maxy += 1
maxx += 1
x = np.arange(minx, maxx, 1)
y = np.arange(miny, maxy, 1)
# Create x/y grid for the voronoi binning
X, Y = np.meshgrid(x, y)
_x, _y = X.ravel(), Y.ravel()
if voronoi_binning:
# Average along the wavelength axis so each spaxel has one s/n value
# Note to self: Y AXIS IS ALWAYS FIRST ON NUMPY ARRAYS
signal = np.nanmean(flux[:, miny:maxy, minx:maxx], axis=0)
noise = np.sqrt(1 / np.nanmean(ivar[:, miny:maxy, minx:maxx], axis=0))
sr = signal.ravel()
nr = noise.ravel()
good = np.where(np.isfinite(sr) & np.isfinite(nr) & (sr > 0) & (nr > 0))[0]
# Target S/N ratio to bin for. If none, defaults to value such that the highest pixel isnt binned
# In general this isn't a great choice. Should want to maximize resolution without sacrificing too much
# computation time.
if not targetsn:
# binnum = np.array([maxbins+1])
targetsn0 = np.max([np.sort((sr / nr)[good], kind='quicksort')[-1] / 16, 10])
def objective(targetsn, return_data=False):
vplot = voronoi_plot if return_data else False
qt = quiet if return_data else True
try:
binnum, xbin, ybin, xbar, ybar, sn, npixels, scale = voronoi_2d_binning(_x[good], _y[good], sr[good], nr[good],
targetsn, cvt=cvt, pixelsize=1, plot=vplot,
quiet=qt, wvt=wvt)
except ValueError:
return np.inf
if return_data:
return binnum, xbin, ybin, xbar, ybar, sn, npixels, scale
return (np.max(binnum)+1 - maxbins)**2
print(f'Performing S/N optimization to reach {maxbins} bins. This may take a while...')
soln = optimize.minimize(objective, [targetsn0], method='Nelder-Mead', bounds=[(1, X.size)])
targetsn = soln.x[0]
binnum, xbin, ybin, xbar, ybar, SNR, npixels, scale = objective(targetsn, return_data=True)
else:
binnum, xbin, ybin, xbar, ybar, SNR, npixels, scale = voronoi_2d_binning(_x[good], _y[good], sr[good], nr[good],
targetsn, cvt=cvt, pixelsize=1, plot=voronoi_plot,
quiet=quiet, wvt=wvt)
print(f'Voronoi binning successful with target S/N = {targetsn}! Created {np.max(binnum)+1} bins.')
if voronoi_plot:
# For some reason voronoi makes the plot but doesnt save it or anything
filename = os.path.join(os.path.dirname(fits_file), 'voronoi_binning.pdf')
plt.savefig(filename, bbox_inches='tight', dpi=300)
plt.close()
_x = _x[good]
_y = _y[good]
# Create output arrays for flux, ivar, mask
out_flux = np.zeros((flux.shape[0], np.nanmax(binnum)+1))
out_ivar = np.zeros((ivar.shape[0], np.nanmax(binnum)+1))
out_mask = np.zeros((mask.shape[0], np.nanmax(binnum)+1))
xpixbin = np.full(np.nanmax(binnum)+1, fill_value=np.nan, dtype=object)
ypixbin = np.full(np.nanmax(binnum)+1, fill_value=np.nan, dtype=object)
for j in range(xpixbin.size):
xpixbin[j] = []
ypixbin[j] = []
# Average flux/ivar in each bin
for i, bin in enumerate(binnum):
# there is probably a better way to do this, but I'm lazy
xi, yi = _x[i], _y[i]
out_flux[:, bin] += flux[:, yi, xi]
out_ivar[:, bin] += ivar[:, yi, xi]
out_mask[:, bin] += mask[:, yi, xi]
xpixbin[bin].append(xi)
ypixbin[bin].append(yi)
out_flux /= npixels
out_ivar /= npixels
irange = np.nanmax(binnum)+1
for bin in binnum:
if SNR[bin] < snr_threshold:
flux[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = np.nan
ivar[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = np.nan
mask[:, np.asarray(ypixbin[bin]), np.asarray(xpixbin[bin])] = 1
elif fixed_binning:
print(f'Performing binning with fixed bin size of {fixed_bin_size}')
# Create square bins of a fixed size
binnum = np.zeros((maxy-miny, maxx-minx), dtype=int)
wy = int(np.ceil((maxy-miny)/fixed_bin_size))
wx = int(np.ceil((maxx-minx)/fixed_bin_size))
indx = 0
nbins = wy*wx
out_flux = np.zeros((flux.shape[0], nbins))
out_ivar = np.zeros((ivar.shape[0], nbins))
out_mask = np.zeros((mask.shape[0], nbins))
xpixbin = np.full(nbins, fill_value=np.nan, dtype=object)
ypixbin = np.full(nbins, fill_value=np.nan, dtype=object)
npixels = np.zeros((nbins,), dtype=int)
SNR = np.zeros((nbins,))
for iy in range(wy):
for ix in range(wx):
# Relative axes indices
ylo = iy*fixed_bin_size
yhi = np.min([(iy+1)*fixed_bin_size, binnum.shape[0]])
xlo = ix*fixed_bin_size
xhi = np.min([(ix+1)*fixed_bin_size, binnum.shape[1]])
binnum[ylo:yhi, xlo:xhi] = indx
# Shift axes limits by the aperture
ylo += miny
yhi += miny
xlo += minx
xhi += minx
ybin, xbin = np.meshgrid(np.arange(ylo, yhi, 1), np.arange(xlo, xhi, 1))
ypixbin[indx] = ybin.flatten().tolist()
xpixbin[indx] = xbin.flatten().tolist()
out_flux[:, indx] = np.apply_over_axes(np.nanmean, flux[:, ylo:yhi, xlo:xhi], (1,2)).flatten()
out_ivar[:, indx] = np.apply_over_axes(np.nanmean, ivar[:, ylo:yhi, xlo:xhi], (1,2)).flatten()
out_mask[:, indx] = np.apply_over_axes(np.nansum, mask[:, ylo:yhi, xlo:xhi], (1,2)).flatten()
npixels[indx] = len(ybin)
signal = np.nanmean(flux[:, ylo:yhi, xlo:xhi], axis=0)
noise = np.sqrt(1/np.nanmean(ivar[:, ylo:yhi, xlo:xhi], axis=0))
SNR[indx] = np.nansum(signal) / np.sqrt(np.nansum(noise**2))
if SNR[indx] < snr_threshold:
flux[:, ylo:yhi, xlo:xhi] = np.nan
ivar[:, ylo:yhi, xlo:xhi] = np.nan
mask[:, ylo:yhi, xlo:xhi] = 1
indx += 1
binnum = binnum.flatten()
irange = nbins
print(f'Fixed binning successful, created {nbins} bins')
else:
xpixbin = None
ypixbin = None
out_flux = flux[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny))
out_ivar = ivar[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny))
out_mask = mask[:, miny:maxy, minx:maxx].reshape(nz, (maxx-minx)*(maxy-miny))
binnum = np.zeros((maxx-minx)*(maxy-miny))
npixels = np.ones((maxx-minx)*(maxy-miny)) * (maxx-minx)*(maxy-miny)
irange = (maxx-minx)*(maxy-miny)
signal = np.nanmean(flux, axis=0)
noise = np.sqrt(1 / np.nanmean(ivar, axis=0))
SNR = signal / noise
flux[:, SNR < snr_threshold] = np.nan
ivar[:, SNR < snr_threshold] = np.nan
mask[:, SNR < snr_threshold] = 1
for i in range(irange):
# Unpack the spaxel
galaxy_spaxel = out_flux[:,i] # observed flux
ivar_spaxel = out_ivar[:,i] # 1-sigma spectral noise
mask_spaxel = out_mask[:,i] # bad pixels
if voronoi_binning or fixed_binning:
xi = xpixbin[i] # x and y pixel position
yi = ypixbin[i]
snr_thresh = SNR[i] >= snr_threshold # make sure bin has an overall SNR greater than the threshold
else:
xi = [_x[i]]
yi = [_y[i]]
snr_thresh = SNR[_y[i], _x[i]] >= snr_threshold # make sure spaxel has an SNR greater than the threshold
binnum_i = 0 if (not voronoi_binning) and (not fixed_binning) else i # Voronoi bin index that this pixel belongs to
# Package into a FITS file -- but only if the SNR is high enough, otherwise throw out the data
if snr_thresh:
primaryhdu = fits.PrimaryHDU()
primaryhdu.header.append(("FORMAT", format.upper(), "Data format"), end=True)
if type(dataid) is list:
for j, did in enumerate(dataid):
primaryhdu.header.append((f'{format.upper()}ID{j}', did, f'{"MANGA" if format == "manga" else "MUSE"} ID number'), end=True)
else:
primaryhdu.header.append((f'{format.upper()}ID', dataid, f'{"MANGA" if format == "manga" else "MUSE"} ID number'), end=True)
primaryhdu.header.append(('OBJNAME', objname, 'Object Name'), end=True)
primaryhdu.header.append(('RA', ra, 'Right ascension'), end=True)
primaryhdu.header.append(('DEC', dec, 'Declination'), end=True)
primaryhdu.header.append(('BINNUM', binnum_i, 'bin index of the spaxel (Voronoi)'), end=True)
primaryhdu.header.append(('NX', nx, 'x dimension of the full MANGA cube'), end=True)
primaryhdu.header.append(('NY', ny, 'y dimension of the full MANGA cube'), end=True)
coadd = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='flux', array=galaxy_spaxel, format='D'),
fits.Column(name='loglam', array=loglam, format='D'),
fits.Column(name='ivar', array=ivar_spaxel, format='D'),
fits.Column(name='and_mask', array=mask_spaxel, format='D'),
fits.Column(name='fwhm_res', array=fwhm_res, format='D')
]))
specobj = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='z', array=np.array([z]), format='D'),
# fits.Column(name='ebv', array=np.array([ebv]), format='E')
]))
specobj.header.append(('PLUG_RA', ra, 'Right ascension'), end=True)
specobj.header.append(('PLUG_DEC', dec, 'Declination'), end=True)
binobj = fits.BinTableHDU.from_columns(fits.ColDefs([
fits.Column(name='spaxelx', array= | np.array(xi) | numpy.array |
from six.moves import range
import numpy as np
import scipy.sparse as sparse
import scipy.sparse.linalg as linalg
from landlab.grid.base import BAD_INDEX_VALUE
# these ones only so we can run this module ad-hoc:
# import pylab
from landlab import ModelParameterDictionary, Component
from landlab.utils.decorators import use_file_name_or_kwds
# from copy import copy
# Things to add: 1. Explicit stability check.
# 2. Implicit handling of scenarios where kappa*dt exceeds critical step -
# subdivide dt automatically.
class PerronNLDiffuse(Component):
"""Nonlinear diffusion, following Perron (2011).
This module uses Taylor Perron's implicit (2011) method to solve the
nonlinear hillslope diffusion equation across a rectangular, regular grid
for a single timestep. Note it works with the mass flux implicitly, and
thus does not actually calculate it. Grid must be at least 5x5.
Boundary condition handling assumes each edge uses the same BC for each of
its nodes.
This component cannot yet handle looped boundary conditions, but all others
should be fine.
This component has KNOWN STABILITY ISSUES which will be resolved in a
future release; use at your own risk.
The primary method of this class is :func:`run_one_step`.
Examples
--------
>>> from landlab.components import PerronNLDiffuse
>>> from landlab import RasterModelGrid
>>> import numpy as np
>>> mg = RasterModelGrid((5, 5))
>>> z = mg.add_zeros('node', 'topographic__elevation')
>>> nl = PerronNLDiffuse(mg, nonlinear_diffusivity=1.)
>>> dt = 100.
>>> nt = 20
>>> uplift_rate = 0.001
>>> for i in range(nt):
... z[mg.core_nodes] += uplift_rate*dt
... nl.run_one_step(dt)
>>> z_target = np.array(
... [ 0. , 0. , 0. , 0. , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0.0075553 , 0.0078053 , 0.0075553 , 0. ,
... 0. , 0.00778637, 0.0075553 , 0.00778637, 0. ,
... 0. , 0. , 0. , 0. , 0. ])
>>> np.allclose(z, z_target)
True
"""
_name = 'PerronNLDiffuse'
_input_var_names = ('topographic__elevation', )
_output_var_names = ('topographic__elevation', )
_var_units = {'topographic__elevation': 'm'}
_var_mapping = {'topographic__elevation': 'node'}
_var_doc = {
'topographic__elevation': ('Land surface topographic elevation; can ' +
'be overwritten in initialization')}
@use_file_name_or_kwds
def __init__(self, grid, nonlinear_diffusivity=None, S_crit=33.*np.pi/180.,
rock_density=2700., sed_density=2700., **kwds):
"""
Parameters
----------
grid : RasterModelGrid
A Landlab raster grid
nonlinear_diffusivity : float, array or field name
The nonlinear diffusivity
S_crit : float (radians)
The critical hillslope angle
rock_density : float (kg*m**-3)
The density of intact rock
sed_density : float (kg*m**-3)
The density of the mobile (sediment) layer
"""
# disable internal_uplift option:
internal_uplift = None
self._grid = grid
self._bc_set_code = self.grid.bc_set_code
self.values_to_diffuse = 'topographic__elevation'
if nonlinear_diffusivity is not None:
if nonlinear_diffusivity is not str:
self._kappa = nonlinear_diffusivity
else:
self._kappa = self.grid.at_node[nonlinear_diffusivity]
else:
try:
self._kappa = kwds.pop('kappa', None)
except KeyError:
raise KeyError("nonlinear_diffusivity must be provided to " +
"the PerronNLDiffuse component")
if internal_uplift is None:
self.internal_uplifts = False
self._uplift = 0.
else:
self.internal_uplifts = True
self._uplift = float(internal_uplift)
# self._uplift = self.grid.zeros('node', dtype=float)
# self._uplift[self.grid.core_nodes] = internal_uplift
self._rock_density = rock_density
self._sed_density = sed_density
self._S_crit = S_crit
# for component back compatibility (undocumented):
# ###
self.timestep_in = kwds.pop('dt', None)
if 'values_to_diffuse' in kwds.keys():
self.values_to_diffuse = kwds.pop('values_to_diffuse')
for mytups in (self._input_var_names, self._output_var_names):
myset = set(mytups)
myset.remove('topographic__elevation')
myset.add(self.values_to_diffuse)
mytups = tuple(myset)
for mydicts in (self._var_units, self._var_mapping, self._var_doc):
mydicts[self.values_to_diffuse] = mydicts.pop(
'topographic__elevation')
self._delta_x = grid.dx
self._delta_y = grid.dy
self._one_over_delta_x = 1. / self._delta_x
self._one_over_delta_y = 1. / self._delta_y
self._one_over_delta_x_sqd = self._one_over_delta_x**2.
self._one_over_delta_y_sqd = self._one_over_delta_y**2.
self._b = 1. / self._S_crit**2.
ncols = grid.number_of_node_columns
self.ncols = ncols
nrows = grid.number_of_node_rows
self.nrows = nrows
nnodes = grid.number_of_nodes
self.nnodes = nnodes
ninteriornodes = grid.number_of_interior_nodes
ncorenodes = ninteriornodes - 2 * (ncols + nrows - 6)
self.ninteriornodes = ninteriornodes
self.interior_grid_width = ncols - 2
self.core_cell_width = ncols - 4
self._interior_corners = np.array([ncols + 1, 2 * ncols - 2,
nnodes - 2 * ncols + 1,
nnodes - ncols - 2])
_left_list = np.array(
range(2 * ncols + 1, nnodes - 2 * ncols, ncols))
# ^these are still real IDs
_right_list = np.array(
range(3 * ncols - 2, nnodes - 2 * ncols, ncols))
_bottom_list = np.array(range(ncols + 2, 2 * ncols - 2))
_top_list = np.array(
range(nnodes - 2 * ncols + 2, nnodes - ncols - 2))
self._left_list = _left_list
self._right_list = _right_list
self._bottom_list = _bottom_list
self._top_list = _top_list
self._core_nodes = self._coreIDtoreal(np.arange(
ncorenodes, dtype=int))
self.corenodesbyintIDs = self._realIDtointerior(self._core_nodes)
self.ncorenodes = len(self._core_nodes)
self.corner_interior_IDs = self._realIDtointerior(
self._interior_corners)
# ^i.e., interior corners as interior IDs
self.bottom_interior_IDs = self._realIDtointerior(np.array(
_bottom_list))
self.top_interior_IDs = self._realIDtointerior(np.array(_top_list))
self.left_interior_IDs = self._realIDtointerior(np.array(_left_list))
self.right_interior_IDs = self._realIDtointerior(np.array(
_right_list))
# build an ID map to let us easily map the variables of the core nodes
# onto the operating matrix:
# This array is ninteriornodes long, but the IDs it contains are
# REAL IDs
operating_matrix_ID_map = np.empty((ninteriornodes, 9))
self.interior_IDs_as_real = self._interiorIDtoreal(
np.arange(ninteriornodes))
for j in range(ninteriornodes):
i = self.interior_IDs_as_real[j]
operating_matrix_ID_map[j, :] = np.array(
[(i-ncols-1), (i-ncols), (i-ncols+1), (i-1), i, (i+1),
(i+ncols-1), (i+ncols), (i+ncols+1)])
self.operating_matrix_ID_map = operating_matrix_ID_map
self.operating_matrix_core_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[self.corenodesbyintIDs, :])
# ^shape(ncorenodes,9)
# see below for corner and edge maps
# Build masks for the edges and corners to be applied to the operating
# matrix map.
# Antimasks are the boundary nodes, masks are "normal"
self.topleft_mask = [1, 2, 4, 5]
topleft_antimask = [0, 3, 6, 7, 8]
self.topright_mask = [0, 1, 3, 4]
topright_antimask = [2, 5, 6, 7, 8]
self.bottomleft_mask = [4, 5, 7, 8]
bottomleft_antimask = [0, 1, 2, 3, 6]
self.bottomright_mask = [3, 4, 6, 7]
bottomright_antimask = [0, 1, 2, 5, 8]
self.corners_masks = (np.vstack((self.bottomleft_mask,
self.bottomright_mask,
self.topleft_mask,
self.topright_mask)))
# ^(each_corner,mask_for_each_corner)
self.corners_antimasks = (np.vstack((bottomleft_antimask,
bottomright_antimask,
topleft_antimask,
topright_antimask)))
# ^so shape becomes (4,5)
self.left_mask = [1, 2, 4, 5, 7, 8]
self.left_antimask = [0, 3, 6]
self.top_mask = [0, 1, 2, 3, 4, 5]
self.top_antimask = [6, 7, 8]
self.right_mask = [0, 1, 3, 4, 6, 7]
self.right_antimask = [2, 5, 8]
self.bottom_mask = [3, 4, 5, 6, 7, 8]
self.bottom_antimask = [0, 1, 2]
self.antimask_corner_position = [0, 2, 2, 4]
# ^this is the position w/i the corner antimasks that the true corner
# actually occupies
self.modulator_mask = np.array([-ncols - 1, -ncols, -ncols + 1, -1,
0, 1, ncols - 1, ncols, ncols + 1])
self.updated_boundary_conditions()
def updated_boundary_conditions(self):
"""Call if grid BCs are updated after component instantiation.
"""
grid = self.grid
nrows = self.nrows
ncols = self.ncols
# ^Set up terms for BC handling (still feels very clumsy)
bottom_edge = grid.nodes_at_bottom_edge[1: -1]
top_edge = grid.nodes_at_top_edge[1: -1]
left_edge = grid.nodes_at_left_edge[1: -1]
right_edge = grid.nodes_at_right_edge[1: -1]
self.bottom_flag = 1
self.top_flag = 1
self.left_flag = 1
self.right_flag = 1
# self.corner_flags = [1,1,1,1] #In ID order, so BL,BR,TL,TR
if np.all(grid.status_at_node[bottom_edge] == 4):
# ^This should be all of them, or none of them
self.bottom_flag = 4
elif np.all(grid.status_at_node[bottom_edge] == 3):
self.bottom_flag = 3
elif np.all(grid.status_at_node[bottom_edge] == 2):
self.bottom_flag = 2
elif np.all(grid.status_at_node[bottom_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
# Note this could get fraught if we need to open a cell to let
# water flow out...
if np.all(grid.status_at_node[top_edge] == 4):
self.top_flag = 4
elif np.all(grid.status_at_node[top_edge] == 3):
self.top_flag = 3
elif np.all(grid.status_at_node[top_edge] == 2):
self.top_flag = 2
elif np.all(grid.status_at_node[top_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
if np.all(grid.status_at_node[left_edge] == 4):
self.left_flag = 4
elif np.all(grid.status_at_node[left_edge] == 3):
self.left_flag = 3
elif np.all(grid.status_at_node[left_edge] == 2):
self.left_flag = 2
elif np.all(grid.status_at_node[left_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
if np.all(grid.status_at_node[right_edge] == 4):
self.right_flag = 4
elif np.all(grid.status_at_node[right_edge] == 3):
self.right_flag = 3
elif np.all(grid.status_at_node[right_edge] == 2):
self.right_flag = 2
elif np.all(grid.status_at_node[right_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
self.fixed_grad_BCs_present = (self.bottom_flag == 2 or
self.top_flag == 2 or
self.left_flag == 2 or
self.right_flag == 2)
self.looped_BCs_present = (self.bottom_flag == 3 or
self.top_flag == 3 or
self.left_flag == 3 or
self.right_flag == 3)
if self.fixed_grad_BCs_present:
if self.values_to_diffuse != grid.fixed_gradient_of:
raise ValueError("Boundary conditions set in the grid don't "
"apply to the data the diffuser is trying to "
"work with")
if np.any(grid.status_at_node == 2):
self.fixed_grad_offset_map = np.empty(
nrows * ncols, dtype=float)
self.fixed_grad_anchor_map = np.empty_like(
self.fixed_grad_offset_map)
self.fixed_grad_offset_map[grid.fixed_gradient_node_properties[
'boundary_node_IDs']] = grid.fixed_gradient_node_properties[
'values_to_add']
self.corner_flags = grid.status_at_node[[0, ncols - 1, -ncols, -1]]
op_mat_just_corners = self.operating_matrix_ID_map[
self.corner_interior_IDs, :]
op_mat_cnr0 = op_mat_just_corners[0, self.bottomleft_mask]
op_mat_cnr1 = op_mat_just_corners[1, self.bottomright_mask]
op_mat_cnr2 = op_mat_just_corners[2, self.topleft_mask]
op_mat_cnr3 = op_mat_just_corners[3, self.topright_mask]
op_mat_just_active_cnrs = np.vstack((op_mat_cnr0, op_mat_cnr1,
op_mat_cnr2, op_mat_cnr3))
self.operating_matrix_corner_int_IDs = self._realIDtointerior(
op_mat_just_active_cnrs)
# ^(4corners,4nodesactivepercorner)
self.operating_matrix_bottom_int_IDs = self._realIDtointerior(
self.operating_matrix_ID_map[
self.bottom_interior_IDs, :][:, self.bottom_mask])
# ^(nbottomnodes,6activenodeseach)
self.operating_matrix_top_int_IDs = self._realIDtointerior(
self.operating_matrix_ID_map[
self.top_interior_IDs, :][:, self.top_mask])
self.operating_matrix_left_int_IDs = self._realIDtointerior(
self.operating_matrix_ID_map[
self.left_interior_IDs, :][:, self.left_mask])
self.operating_matrix_right_int_IDs = self._realIDtointerior(
self.operating_matrix_ID_map[
self.right_interior_IDs, :][:, self.right_mask])
def _initialize(self, grid, input_stream):
inputs = ModelParameterDictionary(input_stream)
self.inputs = inputs
self.grid = grid
self.internal_uplifts = False
if self.internal_uplifts:
try:
self._uplift = inputs.read_float('uplift')
except:
self._uplift = inputs.read_float('uplift_rate')
else:
self._uplift = 0.
self._rock_density = inputs.read_float('rock_density')
self._sed_density = inputs.read_float('sed_density')
self._kappa = inputs.read_float('kappa') # ==_a
self._S_crit = inputs.read_float('S_crit')
try:
self.values_to_diffuse = inputs.read_str('values_to_diffuse')
except:
self.values_to_diffuse = 'topographic__elevation'
try:
self.timestep_in = inputs.read_float('dt')
except:
raise NameError('''No fixed timestep supplied, it must be set
dynamically somewhere else. Be sure to call
input_timestep(timestep_in) as part of your run
loop.''')
self._delta_x = grid.dx
self._delta_y = grid.dy
self._one_over_delta_x = 1. / self._delta_x
self._one_over_delta_y = 1. / self._delta_y
self._one_over_delta_x_sqd = self._one_over_delta_x**2.
self._one_over_delta_y_sqd = self._one_over_delta_y**2.
self._b = 1. / self._S_crit**2.
ncols = grid.number_of_node_columns
self.ncols = ncols
nrows = grid.number_of_node_rows
self.nrows = nrows
nnodes = grid.number_of_nodes
self.nnodes = nnodes
ninteriornodes = grid.number_of_interior_nodes
ncorenodes = ninteriornodes - 2 * (ncols + nrows - 6)
self.ninteriornodes = ninteriornodes
self.interior_grid_width = ncols - 2
self.core_cell_width = ncols - 4
self._interior_corners = np.array([ncols + 1, 2 * ncols - 2,
nnodes - 2 * ncols + 1,
nnodes - ncols - 2])
_left_list = np.array(
range(2 * ncols + 1, nnodes - 2 * ncols, ncols))
# ^these are still real IDs
_right_list = np.array(
range(3 * ncols - 2, nnodes - 2 * ncols, ncols))
_bottom_list = np.array(range(ncols + 2, 2 * ncols - 2))
_top_list = np.array(
range(nnodes - 2 * ncols + 2, nnodes - ncols - 2))
self._left_list = _left_list
self._right_list = _right_list
self._bottom_list = _bottom_list
self._top_list = _top_list
self._core_nodes = self._coreIDtoreal(np.arange(
ncorenodes, dtype=int))
self.corenodesbyintIDs = self._realIDtointerior(self._core_nodes)
self.ncorenodes = len(self._core_nodes)
self.corner_interior_IDs = self._realIDtointerior(
self._interior_corners)
# ^i.e., interior corners as interior IDs
self.bottom_interior_IDs = self._realIDtointerior(np.array(
_bottom_list))
self.top_interior_IDs = self._realIDtointerior(np.array(_top_list))
self.left_interior_IDs = self._realIDtointerior(np.array(_left_list))
self.right_interior_IDs = self._realIDtointerior(np.array(
_right_list))
# build an ID map to let us easily map the variables of the core nodes
# onto the operating matrix:
# This array is ninteriornodes long, but the IDs it contains are
# REAL IDs
operating_matrix_ID_map = np.empty((ninteriornodes, 9))
self.interior_IDs_as_real = self._interiorIDtoreal(
np.arange(ninteriornodes))
for j in range(ninteriornodes):
i = self.interior_IDs_as_real[j]
operating_matrix_ID_map[j, :] = np.array(
[(i-ncols-1), (i-ncols), (i-ncols+1), (i-1), i, (i+1),
(i+ncols-1), (i+ncols), (i+ncols+1)])
self.operating_matrix_ID_map = operating_matrix_ID_map
self.operating_matrix_core_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[self.corenodesbyintIDs, :])
# ^shape(ncorenodes,9)
# see below for corner and edge maps
# Build masks for the edges and corners to be applied to the operating
# matrix map.
# Antimasks are the boundary nodes, masks are "normal"
topleft_mask = [1, 2, 4, 5]
topleft_antimask = [0, 3, 6, 7, 8]
topright_mask = [0, 1, 3, 4]
topright_antimask = [2, 5, 6, 7, 8]
bottomleft_mask = [4, 5, 7, 8]
bottomleft_antimask = [0, 1, 2, 3, 6]
bottomright_mask = [3, 4, 6, 7]
bottomright_antimask = [0, 1, 2, 5, 8]
self.corners_masks = (np.vstack((bottomleft_mask, bottomright_mask,
topleft_mask, topright_mask)))
# ^(each_corner,mask_for_each_corner)
self.corners_antimasks = (np.vstack((bottomleft_antimask,
bottomright_antimask,
topleft_antimask,
topright_antimask)))
# ^so shape becomes (4,5)
self.left_mask = [1, 2, 4, 5, 7, 8]
self.left_antimask = [0, 3, 6]
self.top_mask = [0, 1, 2, 3, 4, 5]
self.top_antimask = [6, 7, 8]
self.right_mask = [0, 1, 3, 4, 6, 7]
self.right_antimask = [2, 5, 8]
self.bottom_mask = [3, 4, 5, 6, 7, 8]
self.bottom_antimask = [0, 1, 2]
self.antimask_corner_position = [0, 2, 2, 4]
# ^this is the position w/i the corner antimasks that the true corner
# actually occupies
self.modulator_mask = np.array([-ncols - 1, -ncols, -ncols + 1, -1,
0, 1, ncols - 1, ncols, ncols + 1])
# ^Set up terms for BC handling (still feels very clumsy)
bottom_edge = grid.nodes_at_bottom_edge[1: -1]
top_edge = grid.nodes_at_top_edge[1: -1]
left_edge = grid.nodes_at_left_edge[1: -1]
right_edge = grid.nodes_at_right_edge[1: -1]
self.bottom_flag = 1
self.top_flag = 1
self.left_flag = 1
self.right_flag = 1
# self.corner_flags = [1,1,1,1] #In ID order, so BL,BR,TL,TR
if np.all(grid.status_at_node[bottom_edge] == 4):
# ^This should be all of them, or none of them
self.bottom_flag = 4
elif np.all(grid.status_at_node[bottom_edge] == 3):
self.bottom_flag = 3
elif np.all(grid.status_at_node[bottom_edge] == 2):
self.bottom_flag = 2
elif np.all(grid.status_at_node[bottom_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
# Note this could get fraught if we need to open a cell to let
# water flow out...
if np.all(grid.status_at_node[top_edge] == 4):
self.top_flag = 4
elif np.all(grid.status_at_node[top_edge] == 3):
self.top_flag = 3
elif np.all(grid.status_at_node[top_edge] == 2):
self.top_flag = 2
elif np.all(grid.status_at_node[top_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
if np.all(grid.status_at_node[left_edge] == 4):
self.left_flag = 4
elif np.all(grid.status_at_node[left_edge] == 3):
self.left_flag = 3
elif np.all(grid.status_at_node[left_edge] == 2):
self.left_flag = 2
elif np.all(grid.status_at_node[left_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
if np.all(grid.status_at_node[right_edge] == 4):
self.right_flag = 4
elif np.all(grid.status_at_node[right_edge] == 3):
self.right_flag = 3
elif np.all(grid.status_at_node[right_edge] == 2):
self.right_flag = 2
elif np.all(grid.status_at_node[right_edge] == 1):
pass
else:
raise NameError("Different cells on the same grid edge have "
"different boundary statuses")
self.fixed_grad_BCs_present = (self.bottom_flag == 2 or
self.top_flag == 2 or
self.left_flag == 2 or
self.right_flag == 2)
self.looped_BCs_present = (self.bottom_flag == 3 or
self.top_flag == 3 or
self.left_flag == 3 or
self.right_flag == 3)
if self.fixed_grad_BCs_present:
if self.values_to_diffuse != grid.fixed_gradient_of:
raise ValueError("Boundary conditions set in the grid don't "
"apply to the data the diffuser is trying to "
"work with")
if np.any(grid.status_at_node == 2):
self.fixed_grad_offset_map = np.empty(
nrows * ncols, dtype=float)
self.fixed_grad_anchor_map = np.empty_like(
self.fixed_grad_offset_map)
self.fixed_grad_offset_map[grid.fixed_gradient_node_properties[
'boundary_node_IDs']] = grid.fixed_gradient_node_properties[
'values_to_add']
self.corner_flags = grid.status_at_node[[0, ncols - 1, -ncols, -1]]
op_mat_just_corners = operating_matrix_ID_map[self.corner_interior_IDs,
:]
op_mat_cnr0 = op_mat_just_corners[0, bottomleft_mask]
op_mat_cnr1 = op_mat_just_corners[1, bottomright_mask]
op_mat_cnr2 = op_mat_just_corners[2, topleft_mask]
op_mat_cnr3 = op_mat_just_corners[3, topright_mask]
op_mat_just_active_cnrs = np.vstack((op_mat_cnr0, op_mat_cnr1,
op_mat_cnr2, op_mat_cnr3))
self.operating_matrix_corner_int_IDs = self._realIDtointerior(
op_mat_just_active_cnrs)
# ^(4corners,4nodesactivepercorner)
self.operating_matrix_bottom_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[
self.bottom_interior_IDs, :][:, self.bottom_mask])
# ^(nbottomnodes,6activenodeseach)
self.operating_matrix_top_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[
self.top_interior_IDs, :][:, self.top_mask])
self.operating_matrix_left_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[
self.left_interior_IDs, :][:, self.left_mask])
self.operating_matrix_right_int_IDs = self._realIDtointerior(
operating_matrix_ID_map[
self.right_interior_IDs, :][:, self.right_mask])
def input_timestep(self, timestep_in):
"""
Allows the user to set a dynamic (evolving) timestep manually as part
of a run loop.
"""
self.timestep_in = timestep_in
def _gear_timestep(self, timestep_in, new_grid):
"""
This method allows the gearing between the model run step and the
component (shorter) step.
The method becomes unstable if S>Scrit, so we test to prevent this.
We implicitly assume the initial condition does not contain
slopes > Scrit. If the method persistently explodes, this may be the
problem.
"""
extended_elevs = np.empty(
self.grid.number_of_nodes + 1, dtype=float)
extended_elevs[-1] = np.nan
node_neighbors = self.grid.active_adjacent_nodes_at_node
extended_elevs[:-1] = new_grid['node'][self.values_to_diffuse]
max_offset = np.nanmax(np.fabs(
extended_elevs[:-1][node_neighbors] -
extended_elevs[:-1].reshape((self.grid.number_of_nodes, 1))))
if max_offset > np.tan(self._S_crit) * min(self.grid.dx,
self.grid.dy):
# ^using S not tan(S) adds a buffer - but not appropriate
self.internal_repeats = int(max_offset // (
np.tan(self._S_crit) * min(self.grid.dx, self.grid.dy))) + 1
# now we rig it so the actual timestep is an integer divisor
# of T_in:
self._delta_t = timestep_in / self.internal_repeats
self.uplift_per_step = (new_grid['node'][self.values_to_diffuse] -
self.grid['node'][self.values_to_diffuse]
) / self.internal_repeats
if self.internal_repeats > 10000:
raise ValueError('''Uplift rate is too high; solution is not
stable!!''')
else:
self.internal_repeats = 1
self._delta_t = timestep_in
self.uplift_per_step = new_grid['node'][
self.values_to_diffuse] - self.grid['node'][
self.values_to_diffuse]
return self._delta_t
def _set_variables(self, grid):
'''
This function sets the variables needed for update().
Now vectorized, shouold run faster.
At the moment, this method can only handle fixed value BCs.
'''
n_interior_nodes = grid.number_of_interior_nodes
# Initialize the local builder lists
_mat_RHS = np.zeros(n_interior_nodes)
try:
elev = grid['node'][self.values_to_diffuse]
except:
raise NameError('elevations not found in grid!')
try:
_delta_t = self._delta_t
except:
raise NameError('''Timestep not set! Call _gear_timestep(tstep)
after initializing the component, but before
running it.''')
_one_over_delta_x = self._one_over_delta_x
_one_over_delta_x_sqd = self._one_over_delta_x_sqd
_one_over_delta_y = self._one_over_delta_y
_one_over_delta_y_sqd = self._one_over_delta_y_sqd
_kappa = self._kappa
_b = self._b
_S_crit = self._S_crit
_core_nodes = self._core_nodes
corenodesbyintIDs = self.corenodesbyintIDs
operating_matrix_core_int_IDs = self.operating_matrix_core_int_IDs
operating_matrix_corner_int_IDs = self.operating_matrix_corner_int_IDs
_interior_corners = self._interior_corners
corners_antimasks = self.corners_antimasks
corner_interior_IDs = self.corner_interior_IDs
modulator_mask = self.modulator_mask
corner_flags = self.corner_flags
bottom_interior_IDs = self.bottom_interior_IDs
top_interior_IDs = self.top_interior_IDs
left_interior_IDs = self.left_interior_IDs
right_interior_IDs = self.right_interior_IDs
bottom_antimask = self.bottom_antimask
_bottom_list = self._bottom_list
top_antimask = self.top_antimask
_top_list = self._top_list
left_antimask = self.left_antimask
_left_list = self._left_list
right_antimask = self.right_antimask
_right_list = self._right_list
# Need to modify the "effective" values of the edge nodes if any of
# the edges are inactive:
if self.bottom_flag == 4:
bottom_edge, inside_bottom_edge = grid.nodes[(0, 1), :]
elev[bottom_edge] = elev[inside_bottom_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[bottom_edge[0]] = elev[inside_bottom_edge[1]]
elev[bottom_edge[-1]] = elev[inside_bottom_edge[-2]]
if self.top_flag == 4:
top_edge, inside_top_edge = grid.nodes[(-1, -2), :]
elev[top_edge] = elev[inside_top_edge]
# corners are special cases, and assumed linked to the bottom and
# top edge BCs...
elev[top_edge[0]] = elev[inside_top_edge[1]]
elev[top_edge[-1]] = elev[inside_top_edge[-2]]
if self.left_flag == 4:
left_edge = grid.nodes[1: -1, 0]
inside_left_edge = grid.nodes[1: -1, 1]
elev[left_edge] = elev[inside_left_edge]
if self.right_flag == 4:
right_edge = grid.nodes[1: -1, -1]
inside_right_edge = grid.nodes[1: -1, -2]
elev[right_edge] = elev[inside_right_edge]
# replacing loop:
cell_neighbors = grid.active_adjacent_nodes_at_node
# ^E,N,W,S
cell_diagonals = grid.diagonal_adjacent_nodes_at_node # NE,NW,SW,SE
# ^this should be dealt with by active_neighbors... (skips bad nodes)
_z_x = (elev[cell_neighbors[:, 0]] - elev[cell_neighbors[:, 2]]
) * 0.5 * _one_over_delta_x
_z_y = (elev[cell_neighbors[:, 1]] - elev[cell_neighbors[:, 3]]
) * 0.5 * _one_over_delta_y
_z_xx = (elev[cell_neighbors[:, 0]] - 2. * elev + elev[
cell_neighbors[:, 2]]) * _one_over_delta_x_sqd
_z_yy = (elev[cell_neighbors[:, 1]] - 2. * elev + elev[
cell_neighbors[:, 3]]) * _one_over_delta_y_sqd
_z_xy = (elev[cell_diagonals[:, 0]] - elev[cell_diagonals[:, 1]] -
elev[cell_diagonals[:, 3]] + elev[cell_diagonals[:, 2]]
) * 0.25 * _one_over_delta_x * _one_over_delta_y
_d = 1. / (1. - _b * (_z_x * _z_x + _z_y * _z_y))
_abd_sqd = _kappa * _b * _d * _d
_F_ij = (-2.*_kappa*_d*(_one_over_delta_x_sqd+_one_over_delta_y_sqd) -
4.*_abd_sqd*(_z_x*_z_x*_one_over_delta_x_sqd +
_z_y*_z_y*_one_over_delta_y_sqd))
_F_ijminus1 = (
_kappa*_d*_one_over_delta_x_sqd - _abd_sqd*_z_x*(_z_xx+_z_yy) *
_one_over_delta_x - 4.*_abd_sqd*_b*_d*(_z_x*_z_x*_z_xx+_z_y*_z_y *
_z_yy+2.*_z_x*_z_y*_z_xy) *
_z_x*_one_over_delta_x - 2.*_abd_sqd*(
_z_x*_z_xx*_one_over_delta_x -
_z_x*_z_x*_one_over_delta_x_sqd +
_z_y*_z_xy*_one_over_delta_x))
_F_ijplus1 = (
_kappa*_d*_one_over_delta_x_sqd + _abd_sqd*_z_x*(_z_xx+_z_yy) *
_one_over_delta_x + 4.*_abd_sqd*_b*_d*(_z_x*_z_x*_z_xx+_z_y*_z_y *
_z_yy+2.*_z_x*_z_y*_z_xy) *
_z_x*_one_over_delta_x + 2.*_abd_sqd*(
_z_x*_z_xx*_one_over_delta_x +
_z_x*_z_x*_one_over_delta_x_sqd +
_z_y*_z_xy*_one_over_delta_x))
_F_iminus1j = (
_kappa*_d*_one_over_delta_y_sqd - _abd_sqd*_z_y*(_z_xx+_z_yy) *
_one_over_delta_y - 4.*_abd_sqd*_b*_d*(_z_x*_z_x*_z_xx+_z_y*_z_y *
_z_yy+2.*_z_x*_z_y*_z_xy) *
_z_y*_one_over_delta_y - 2.*_abd_sqd*(
_z_y*_z_yy*_one_over_delta_y -
_z_y*_z_y*_one_over_delta_y_sqd +
_z_x*_z_xy*_one_over_delta_y))
_F_iplus1j = (
_kappa*_d*_one_over_delta_y_sqd + _abd_sqd*_z_y*(_z_xx+_z_yy) *
_one_over_delta_y + 4.*_abd_sqd*_b*_d*(_z_x*_z_x*_z_xx+_z_y*_z_y *
_z_yy+2.*_z_x*_z_y*_z_xy) *
_z_y*_one_over_delta_y + 2.*_abd_sqd*(
_z_y*_z_yy*_one_over_delta_y +
_z_y*_z_y*_one_over_delta_y_sqd +
_z_x*_z_xy*_one_over_delta_y))
_F_iplus1jplus1 = (
_abd_sqd*_z_x*_z_y*_one_over_delta_x*_one_over_delta_y)
_F_iminus1jminus1 = _F_iplus1jplus1
_F_iplus1jminus1 = -_F_iplus1jplus1
_F_iminus1jplus1 = _F_iplus1jminus1
_equ_RHS_calc_frag = (_F_ij * elev + _F_ijminus1 *
elev[cell_neighbors[:, 2]] + _F_ijplus1 *
elev[cell_neighbors[:, 0]] + _F_iminus1j *
elev[cell_neighbors[:, 3]] + _F_iplus1j *
elev[cell_neighbors[:, 1]] + _F_iminus1jminus1 *
elev[cell_diagonals[:, 2]] + _F_iplus1jplus1 *
elev[cell_diagonals[:, 0]] + _F_iplus1jminus1 *
elev[cell_diagonals[:, 1]] + _F_iminus1jplus1 *
elev[cell_diagonals[:, 3]])
# NB- all _z_... and _F_... variables are nnodes long, and thus use
# real IDs (tho calcs will be flawed for Bnodes)
# RHS of equ 6 (see para [20])
_func_on_z = (
self._rock_density/self._sed_density*self._uplift + _kappa*(
(_z_xx+_z_yy)/(1.-(_z_x*_z_x+_z_y*_z_y)/_S_crit*_S_crit) +
2.*(_z_x*_z_x*_z_xx+_z_y*_z_y*_z_yy+2.*_z_x*_z_y*_z_xy) /
(_S_crit*_S_crit*(1.-(_z_x*_z_x+_z_y*_z_y) /
_S_crit*_S_crit)**2.)))
# Remember, the RHS is getting wiped each loop as part of
# self._set_variables()
# _mat_RHS is ninteriornodes long, but were only working on a
# ncorenodes long subset here
_mat_RHS[corenodesbyintIDs] += elev[_core_nodes] + _delta_t * (
_func_on_z[_core_nodes] - _equ_RHS_calc_frag[_core_nodes])
low_row = np.vstack((_F_iminus1jminus1, _F_iminus1j,
_F_iminus1jplus1)) * -_delta_t
mid_row = np.vstack((-_delta_t * _F_ijminus1, 1. - _delta_t * _F_ij,
-_delta_t * _F_ijplus1))
top_row = np.vstack((_F_iplus1jminus1, _F_iplus1j,
_F_iplus1jplus1)) * -_delta_t
nine_node_map = np.vstack((low_row, mid_row, top_row)).T
# ^Note shape is (nnodes,9); it's realID indexed
core_op_mat_row = np.repeat(corenodesbyintIDs, 9)
core_op_mat_col = operating_matrix_core_int_IDs.astype(int).flatten()
core_op_mat_data = nine_node_map[_core_nodes, :].flatten()
# Now the interior corners; BL,BR,TL,TR
_mat_RHS[corner_interior_IDs] += (elev[_interior_corners] + _delta_t *
(_func_on_z[_interior_corners] -
_equ_RHS_calc_frag[
_interior_corners]))
corners_op_mat_row = np.repeat(self.corner_interior_IDs, 4)
corners_op_mat_col = operating_matrix_corner_int_IDs.astype(
int).flatten()
corners_op_mat_data = nine_node_map[_interior_corners, :][
( | np.arange(4) | numpy.arange |
import numpy as np
import enum
from scipy.spatial.transform import Rotation
from scipy.spatial.distance import pdist, squareform
from casadi import *
from scipy.optimize import nnls
from rrc_iprl_package.control.contact_point import ContactPoint
from trifinger_simulation.tasks import move_cube
from rrc_iprl_package.traj_opt.fixed_contact_point_opt import FixedContactPointOpt
from rrc_iprl_package.traj_opt.fixed_contact_point_system import FixedContactPointSystem
from rrc_iprl_package.traj_opt.static_object_opt import StaticObjectOpt
class PolicyMode(enum.Enum):
RESET = enum.auto()
TRAJ_OPT = enum.auto()
IMPEDANCE = enum.auto()
RL_PUSH = enum.auto()
RESIDUAL = enum.auto()
# Object properties
OBJ_MASS = 0.016 # 16 grams
OBJ_SIZE = move_cube._CUBOID_SIZE
OBJ_SIZE_OFFSET = 0.012
OBJ_MU = 1
# Here, hard code the base position of the fingers (as angle on the arena)
r = 0.15
theta_0 = 80
theta_1 = 310
theta_2 = 200
FINGER_BASE_POSITIONS = [
np.array([[np.cos(theta_0*(np.pi/180))*r, np.sin(theta_0*(np.pi/180))*r, 0]]),
np.array([[np.cos(theta_1*(np.pi/180))*r, np.sin(theta_1*(np.pi/180))*r, 0]]),
np.array([[np.cos(theta_2*(np.pi/180))*r, np.sin(theta_2*(np.pi/180))*r, 0]]),
]
BASE_ANGLE_DEGREES = [0, -120, -240]
# Information about object faces given face_id
OBJ_FACES_INFO = {
1: {"center_param": np.array([0.,-1.,0.]),
"face_down_default_quat": np.array([0.707,0,0,0.707]),
"adjacent_faces": [6,4,3,5],
"opposite_face": 2,
"up_axis": np.array([0.,1.,0.]), # UP axis when this face is ground face
},
2: {"center_param": np.array([0.,1.,0.]),
"face_down_default_quat": np.array([-0.707,0,0,0.707]),
"adjacent_faces": [6,4,3,5],
"opposite_face": 1,
"up_axis": np.array([0.,-1.,0.]),
},
3: {"center_param": np.array([1.,0.,0.]),
"face_down_default_quat": np.array([0,0.707,0,0.707]),
"adjacent_faces": [1,2,4,6],
"opposite_face": 5,
"up_axis": np.array([-1.,0.,0.]),
},
4: {"center_param": np.array([0.,0.,1.]),
"face_down_default_quat": np.array([0,1,0,0]),
"adjacent_faces": [1,2,3,5],
"opposite_face": 6,
"up_axis": np.array([0.,0.,-1.]),
},
5: {"center_param": np.array([-1.,0.,0.]),
"face_down_default_quat": np.array([0,-0.707,0,0.707]),
"adjacent_faces": [1,2,4,6],
"opposite_face": 3,
"up_axis": np.array([1.,0.,0.]),
},
6: {"center_param": np.array([0.,0.,-1.]),
"face_down_default_quat": np.array([0,0,0,1]),
"adjacent_faces": [1,2,3,5],
"opposite_face": 4,
"up_axis": np.array([0.,0.,1.]),
},
}
CUBOID_SHORT_FACES = [1,2]
CUBOID_LONG_FACES = [3,4,5,6]
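# Example (illustrative sketch only): looking up face information from the
# table above, e.g. which object-frame axis points up when a given face is
# resting on the ground.
#
#   ground_face_id = 4
#   up_axis_of = OBJ_FACES_INFO[ground_face_id]["up_axis"]
#   opposite_id = OBJ_FACES_INFO[ground_face_id]["opposite_face"]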
"""
Compute wrench that needs to be applied to object to maintain it on desired trajectory
"""
def track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv):
#print(x_des)
#print(x_cur.position, x_cur.orientation)
#print(dx_des)
#print(dx_cur)
g = np.array([0, 0, -9.81, 0, 0, 0]) # Gravity vector
# Force (compute position error)
p_delta = (x_des[0:3] - x_cur.position)
dp_delta = (dx_des[0:3] - dx_cur[0:3])
# Moment (compute orientation error)
# Compute difference between desired and current quaternion
R_des = Rotation.from_quat(x_des[3:])
R_cur = Rotation.from_quat(x_cur.orientation)
o_delta = np.zeros(3)
for i in range(3):
o_delta += -0.5 * np.cross(R_cur.as_matrix()[:,i], R_des.as_matrix()[:,i])
do_delta = (dx_des[3:] - dx_cur[3:]) # is this the angular velocity?
#print("p_delta: {}".format(p_delta))
#print("dp_delta: {}".format(dp_delta))
#print("o_delta: {}".format(o_delta))
#print("do_delta: {}".format(do_delta))
# Compute wrench W (6x1) with PD feedback law
x_delta = np.concatenate((p_delta, -1*o_delta))
dx_delta = np.concatenate((dp_delta, do_delta))
W = Kp @ x_delta + Kv @ dx_delta - OBJ_MASS * g
print("x_delta: {}".format(x_delta))
print("dx_delta: {}".format(dx_delta))
#print(W)
return W
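# Example call (minimal sketch, commented out so nothing runs at import time;
# the gains and the SimpleNamespace standing in for the observed object state
# are made-up values for illustration only):
#
#   from types import SimpleNamespace
#   x_des = np.array([0., 0., 0.0325, 0., 0., 0., 1.])   # position + xyzw quaternion
#   dx_des = np.zeros(6)
#   x_cur = SimpleNamespace(position=np.array([0.01, 0., 0.0325]),
#                           orientation=np.array([0., 0., 0., 1.]))
#   dx_cur = np.zeros(6)
#   Kp = np.diag([200.]*3 + [10.]*3)
#   Kv = np.diag([10.]*3 + [1.]*3)
#   W = track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv)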
"""
Compute fingertip forces necessary to keep object on desired trajectory
"""
def get_ft_forces(x_des, dx_des, x_cur, dx_cur, Kp, Kv, cp_params):
# Get desired wrench for object COM to track obj traj
W = track_obj_traj_controller(x_des, dx_des, x_cur, dx_cur, Kp, Kv)
# Get list of contact point positions and orientations in object frame
# By converting cp_params to contactPoints
cp_list = []
for cp_param in cp_params:
if cp_param is not None:
cp = get_cp_of_from_cp_param(cp_param)
cp_list.append(cp)
fnum = len(cp_list)
# To compute grasp matrix
G = __get_grasp_matrix(np.concatenate((x_cur.position, x_cur.orientation)), cp_list)
# Solve for fingertip forces via optimization
# TODO use casadi for now, make new problem every time. If too slow, try cvxopt or scipy.minimize,
# Or make a parametrized problem??
# Contact-surface normal vector for each contact point
n = np.array([1, 0, 0]) # contact point frame x axis points into object
# Tangent vectors d_i for each contact point
d = [np.array([0, 1, 0]),
np.array([0, -1, 0]),
np.array([0, 0, 1]),
np.array([0, 0, -1])]
    # Build the 9x12 matrix V whose columns are the linearized friction-cone
    # edge vectors (n + OBJ_MU * d_j): rows i*3:(i+1)*3 belong to finger i and
    # column i*4+j to its j-th cone edge, so the weight vector B_soln is
    # ordered finger-major with len(d) = 4 weights per finger.
    V = np.zeros((9, 12))
    for i in range(3):
        for j in range(4):
            V[i*3:(i+1)*3, i*4+j] = n + OBJ_MU * d[j]
    # Non-negative least squares: find B >= 0 minimizing ||G @ V @ B - W||
    B_soln = nnls(G @ V, W)[0]
    L = V @ B_soln
# Formulate optimization problem
#B = SX.sym("B", len(d) * fnum) # Scaling weights for each of the cone vectors
#B0 = np.zeros(B.shape[0]) # Initial guess for weights
## Fill lambda vector
#l_list = []
#for j in range(fnum):
# l = 0 # contact force
# for i in range(len(d)):
# v = n + OBJ_MU * d[i]
# l += B[j*fnum + i] * v
# l_list.append(l)
#L = vertcat(*l_list) # (9x1) lambda vector
#f = G @ L - W # == 0
## Formulate constraints
    #g = f # constraint function
#g_lb = np.zeros(f.shape[0]) # constraint lower bound
#g_ub = np.zeros(f.shape[0]) # constraint upper bound
## Constraints on B
#z_lb = np.zeros(B.shape[0]) # Lower bound on beta
#z_ub = np.ones(B.shape[0]) * np.inf # Upper bound on beta
#cost = L.T @ L
#problem = {"x": B, "f": cost, "g": g}
#options = {"ipopt.print_level":5,
# "ipopt.max_iter":10000,
# "ipopt.tol": 1e-4,
# "print_time": 1
# }
#solver = nlpsol("S", "ipopt", problem, options)
#r = solver(x0=B0, lbg=g_lb, ubg=g_ub, lbx=z_lb, ubx=z_ub)
#B_soln = r["x"]
# Compute contact forces in contact point frames from B_soln
# TODO fix list length when there are only 2 contact points
# save for later since we always have a 3 fingered grasp
l_wf_soln = []
    for j in range(fnum):
        l_cf = 0 # contact force of finger j, in its contact-point frame
        for i in range(len(d)):
            v = n + OBJ_MU * d[i]
            # B_soln is ordered finger-major with len(d) weights per finger
            # (matching the column layout of V above), so index with len(d)
            # rather than fnum
            l_cf += B_soln[j*len(d) + i] * v
# Convert from contact point frame to world frame
cp = cp_list[j]
R_cp_2_o = Rotation.from_quat(cp.quat_of)
R_o_2_w = Rotation.from_quat(x_cur.orientation)
l_wf = R_o_2_w.apply(R_cp_2_o.apply(np.squeeze(l_cf)))
l_wf_soln.append(l_wf)
return l_wf_soln, W
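# Note on the solve above: the Coulomb friction cone at each contact point is
# approximated by the four edge vectors n + OBJ_MU * d_i, so any force V @ B
# with B >= 0 lies inside the linearized cones, and nnls picks the
# non-negative weights B that best satisfy the object wrench balance
# G @ V @ B = W.
#
# Example call (illustrative sketch only; x_des, dx_des, x_cur, dx_cur, Kp, Kv
# as in the track_obj_traj_controller example above, and cp_params chosen here
# purely for illustration):
#
#   cp_params = [np.array([0., -1., 0.]), np.array([1., 0., 0.]), np.array([-1., 0., 0.])]
#   l_wf_list, W = get_ft_forces(x_des, dx_des, x_cur, dx_cur, Kp, Kv, cp_params)
#   tip_forces_wf = np.concatenate(l_wf_list)   # flat 9-vector, as impedance_controller expects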
"""
Compute joint torques to move fingertips to desired locations
Inputs:
tip_pos_desired_list: List of desired fingertip positions for each finger
q_current: Current joint angles
dq_current: Current joint velocities
tip_forces_wf: fingertip forces in world frame
tol: tolerance for determining when fingers have reached goal
"""
def impedance_controller(
tip_pos_desired_list,
tip_vel_desired_list,
q_current,
dq_current,
custom_pinocchio_utils,
tip_forces_wf = None,
Kp = [25,25,25,25,25,25,25,25,25],
Kv = [1,1,1,1,1,1,1,1,1],
):
torque = 0
for finger_id in range(3):
# Get contact forces for single finger
if tip_forces_wf is None:
f_wf = None
else:
f_wf = np.expand_dims(np.array(tip_forces_wf[finger_id * 3:finger_id*3 + 3]),1)
finger_torque = impedance_controller_single_finger(
finger_id,
tip_pos_desired_list[finger_id],
tip_vel_desired_list[finger_id],
q_current,
dq_current,
custom_pinocchio_utils,
tip_force_wf = f_wf,
Kp = Kp,
Kv = Kv,
)
torque += finger_torque
return torque
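# Example (illustrative sketch only, commented out; fingertip_goal_list,
# l_wf_list, q_current, dq_current and custom_pinocchio_utils are assumed to
# come from the calling policy / robot interface and are not defined here):
#
#   tip_forces_wf = np.concatenate(l_wf_list)   # flatten per-finger forces from get_ft_forces
#   torque = impedance_controller(fingertip_goal_list, [np.zeros(3)]*3,
#                                 q_current, dq_current, custom_pinocchio_utils,
#                                 tip_forces_wf=tip_forces_wf)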
"""
Compute joint torques to move fingertip to desired location
Inputs:
finger_id: Finger 0, 1, or 2
tip_pos_desired: Desired fingertip position (orientation is not tracked yet;
to track it, transform the fingertip reference frame to the world frame,
taking the object orientation into account)
q_current: Current joint angles
dq_current: Current joint velocities
tip_force_wf: fingertip force in world frame (optional feedforward term)
Kp, Kv: task-space proportional and derivative gains (three values per finger)
"""
def impedance_controller_single_finger(
finger_id,
tip_pos_desired,
tip_vel_desired,
q_current,
dq_current,
custom_pinocchio_utils,
tip_force_wf = None,
Kp = [25,25,25,25,25,25,25,25,25],
Kv = [1,1,1,1,1,1,1,1,1]
):
Kp_x = Kp[finger_id*3 + 0]
Kp_y = Kp[finger_id*3 + 1]
Kp_z = Kp[finger_id*3 + 2]
Kp = np.diag([Kp_x, Kp_y, Kp_z])
Kv_x = Kv[finger_id*3 + 0]
Kv_y = Kv[finger_id*3 + 1]
Kv_z = Kv[finger_id*3 + 2]
Kv = np.diag([Kv_x, Kv_y, Kv_z])
# Compute current fingertip position
x_current = custom_pinocchio_utils.forward_kinematics(q_current)[finger_id]
delta_x = np.expand_dims(np.array(tip_pos_desired) - np.array(x_current), 1)
#print("Current x: {}".format(x_current))
#print("Desired x: {}".format(tip_desired))
#print("Delta: {}".format(delta_x))
# Get full Jacobian for finger
Ji = custom_pinocchio_utils.get_tip_link_jacobian(finger_id, q_current)
# Just take first 3 rows, which correspond to linear velocities of fingertip
Ji = Ji[:3, :]
# Get g matrix for gravity compensation
_, g = custom_pinocchio_utils.get_lambda_and_g_matrix(finger_id, q_current, Ji)
# Get current fingertip velocity
dx_current = Ji @ np.expand_dims(np.array(dq_current), 1)
delta_dx = np.expand_dims(np.array(tip_vel_desired),1) - np.array(dx_current)
if tip_force_wf is not None:
torque = np.squeeze(Ji.T @ (Kp @ delta_x + Kv @ delta_dx) + Ji.T @ tip_force_wf) + g
else:
torque = np.squeeze(Ji.T @ (Kp @ delta_x + Kv @ delta_dx)) + g
#print("Finger {} delta".format(finger_id))
#print(np.linalg.norm(delta_x))
return torque
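
# Minimal numeric sketch of the task-space impedance law used above,
# torque = J^T (Kp dx + Kv d(dx)) + g. The Jacobian, gains, errors, and gravity
# vector are made-up values, not taken from the real finger model.
def _example_impedance_law():
    import numpy as np
    J = np.array([[0.1, 0.0, 0.2],
                  [0.0, 0.1, 0.1],
                  [0.2, 0.1, 0.0]])  # hypothetical 3x3 fingertip Jacobian
    Kp = np.diag([25.0, 25.0, 25.0])  # position gains
    Kv = np.diag([1.0, 1.0, 1.0])  # velocity gains
    delta_x = np.array([[0.01], [0.0], [-0.02]])  # position error (m)
    delta_dx = np.array([[0.0], [0.005], [0.0]])  # velocity error (m/s)
    g = np.array([0.0, 0.0, 0.1])  # hypothetical gravity-compensation torque
    torque = np.squeeze(J.T @ (Kp @ delta_x + Kv @ delta_dx)) + g
    return torque  # joint torques for the three finger joints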
"""
Compute contact point position in world frame
Inputs:
cp_param: Contact point param [px, py, pz]
cube: Block object, which contains object shape info
"""
def get_cp_pos_wf_from_cp_param(cp_param, cube_pos_wf, cube_quat_wf, use_obj_size_offset = False):
cp = get_cp_of_from_cp_param(cp_param, use_obj_size_offset = use_obj_size_offset)
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(cp.pos_of) + translation
"""
Get contact point positions in world frame from cp_params
"""
def get_cp_pos_wf_from_cp_params(cp_params, cube_pos, cube_quat, use_obj_size_offset = False):
# Get contact points in wf
fingertip_goal_list = []
for i in range(len(cp_params)):
if cp_params[i] is None:
fingertip_goal_list.append(None)
else:
fingertip_goal_list.append(get_cp_pos_wf_from_cp_param(cp_params[i], cube_pos, cube_quat, use_obj_size_offset = use_obj_size_offset))
return fingertip_goal_list
"""
Compute contact point position in object frame
Inputs:
cp_param: Contact point param [px, py, pz]
"""
def get_cp_of_from_cp_param(cp_param, use_obj_size_offset = False):
cp_of = []
# Get cp position in OF
for i in range(3):
if use_obj_size_offset:
cp_of.append(-(OBJ_SIZE[i] + OBJ_SIZE_OFFSET)/2 + (cp_param[i]+1)*(OBJ_SIZE[i] + OBJ_SIZE_OFFSET)/2)
else:
cp_of.append(-OBJ_SIZE[i]/2 + (cp_param[i]+1)*OBJ_SIZE[i]/2)
cp_of = np.asarray(cp_of)
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
quat = (np.sqrt(2)/2, 0, 0, np.sqrt(2)/2)
elif y_param == 1:
quat = (np.sqrt(2)/2, 0, 0, -np.sqrt(2)/2)
elif x_param == 1:
quat = (0, 0, 1, 0)
elif z_param == 1:
quat = (np.sqrt(2)/2, 0, np.sqrt(2)/2, 0)
elif x_param == -1:
quat = (1, 0, 0, 0)
elif z_param == -1:
quat = (np.sqrt(2)/2, 0, -np.sqrt(2)/2, 0)
cp = ContactPoint(cp_of, quat)
return cp
"""
Get face id on cube, given cp_param
cp_param: [x,y,z]
"""
def get_face_from_cp_param(cp_param):
x_param = cp_param[0]
y_param = cp_param[1]
z_param = cp_param[2]
# For now, just hard code quat
if y_param == -1:
face = 1
elif y_param == 1:
face = 2
elif x_param == 1:
face = 3
elif z_param == 1:
face = 4
elif x_param == -1:
face = 5
elif z_param == -1:
face = 6
return face
"""
Transform point p from object frame to world frame, given object pose
"""
def get_wf_from_of(p, obj_pose):
cube_pos_wf = obj_pose.position
cube_quat_wf = obj_pose.orientation
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
return rotation.apply(p) + translation
"""
Transform point p from world frame to object frame, given object pose
"""
def get_of_from_wf(p, obj_pose):
cube_pos_wf = obj_pose.position
cube_quat_wf = obj_pose.orientation
rotation = Rotation.from_quat(cube_quat_wf)
translation = np.asarray(cube_pos_wf)
rotation_inv = rotation.inv()
translation_inv = -rotation_inv.apply(translation)
return rotation_inv.apply(p) + translation_inv
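
# Round-trip sanity check for the two transforms above: a point mapped
# object frame -> world frame -> object frame should come back unchanged.
# The pose below is a hypothetical stand-in exposing the same .position and
# .orientation (xyzw quaternion) attributes as the real pose objects.
def _example_frame_round_trip():
    from collections import namedtuple
    Pose = namedtuple("Pose", ["position", "orientation"])
    pose = Pose(position=np.array([0.1, 0.0, 0.0325]),
                orientation=np.array([0.0, 0.0, 0.38268343, 0.92387953]))  # 45 deg about z
    p_of = np.array([0.01, -0.02, 0.03])
    p_wf = get_wf_from_of(p_of, pose)
    p_of_back = get_of_from_wf(p_wf, pose)
    assert np.allclose(p_of_back, p_of)
    return p_wf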
##############################################################################
# Lift mode functions
##############################################################################
"""
Run trajectory optimization
obj_pose: current object pose (for getting contact points)
current_position: current joint positions of robot
x0: object initial position for traj opt
x_goal: object goal position for traj opt
nGrid: number of grid points
dt: delta t
"""
def run_fixed_cp_traj_opt(obj_pose, cp_params, current_position, custom_pinocchio_utils, x0, x_goal, nGrid, dt, npz_filepath = None):
cp_params_on_obj = []
for cp in cp_params:
if cp is not None: cp_params_on_obj.append(cp)
fnum = len(cp_params_on_obj)
# Formulate and solve optimization problem
opt_problem = FixedContactPointOpt(
nGrid = nGrid, # Number of timesteps
dt = dt, # Length of each timestep (seconds)
fnum = fnum,
cp_params = cp_params_on_obj,
x0 = x0,
x_goal = x_goal,
obj_shape = OBJ_SIZE,
obj_mass = OBJ_MASS,
npz_filepath = npz_filepath
)
x_soln = np.array(opt_problem.x_soln)
dx_soln = np.array(opt_problem.dx_soln)
l_wf_soln = np.array(opt_problem.l_wf_soln)
return x_soln, dx_soln, l_wf_soln
"""
Get initial contact points on cube
Assign closest cube face to each finger
Since we are lifting the object, ignore the wf z-axis and only consider the wf xy-plane
"""
def get_lifting_cp_params(obj_pose):
# TODO this is assuming that cuboid is always resting on one of its long sides
# face that is touching the ground
ground_face = get_closest_ground_face(obj_pose)
# Transform finger base positions to object frame
finger_base_of = []
for f_wf in FINGER_BASE_POSITIONS:
f_of = get_of_from_wf(f_wf, obj_pose)
finger_base_of.append(f_of)
# Find distance from x axis and y axis, and store in xy_distances
# Need some additional logic to prevent multiple fingers from being assigned to same face
x_axis = np.array([1,0])
y_axis = np.array([0,1])
# Object frame axis corresponding to plane parallel to ground plane
x_ind, y_ind = __get_parallel_ground_plane_xy(ground_face)
xy_distances = np.zeros((3, 2)) # Row corresponds to a finger, columns are x and y axis distances
for f_i, f_of in enumerate(finger_base_of):
point_in_plane = np.array([f_of[0,x_ind], f_of[0,y_ind]]) # Ignore dimension of point that's not in the plane
x_dist = __get_distance_from_pt_2_line(x_axis, np.array([0,0]), point_in_plane)
y_dist = __get_distance_from_pt_2_line(y_axis, np.array([0,0]), point_in_plane)
xy_distances[f_i, 0] = np.sign(f_of[0,y_ind]) * x_dist
xy_distances[f_i, 1] = np.sign(f_of[0,x_ind]) * y_dist
free_faces = \
[x for x in OBJ_FACES_INFO[ground_face]["adjacent_faces"] if x not in CUBOID_SHORT_FACES]
# For each face, choose closest finger
finger_assignments = {}
for face in free_faces:
face_ind = OBJ_FACES_INFO[ground_face]["adjacent_faces"].index(face)
if face_ind in [2,3]:
# Check y_ind column for finger that is furthest away
if OBJ_FACES_INFO[face]["center_param"][x_ind] < 0:
# Want most negative value
f_i = np.nanargmin(xy_distances[:,1])
else:
# Want most positive value
f_i = np.nanargmax(xy_distances[:,1])
else:
# Check x_ind column for finger that is furthest away
if OBJ_FACES_INFO[face]["center_param"][y_ind] < 0:
f_i = np.nanargmin(xy_distances[:,0])
else:
f_i = np.nanargmax(xy_distances[:,0])
finger_assignments[face] = [f_i]
xy_distances[f_i, :] = np.nan
# Assign last finger to one of the long faces
max_ind = np.unravel_index(np.nanargmax(xy_distances), xy_distances.shape)
curr_finger_id = max_ind[0]
face = assign_faces_to_fingers(obj_pose, [curr_finger_id], free_faces)[curr_finger_id]
finger_assignments[face].append(curr_finger_id)
print("finger assignments: {}".format(finger_assignments))
# Set contact point params for two long faces
cp_params = [None, None, None]
height_param = -0.85 # Always want cps to be at this height
width_param = 0.5 # Offset of the cps along the short (width) axis of the face
for face, finger_id_list in finger_assignments.items():
param = OBJ_FACES_INFO[face]["center_param"].copy()
param += OBJ_FACES_INFO[OBJ_FACES_INFO[ground_face]["opposite_face"]]["center_param"] * height_param
if len(finger_id_list) == 2:
# Find the closest short face to each finger
nearest_short_faces = assign_faces_to_fingers(obj_pose,
finger_id_list,
CUBOID_SHORT_FACES.copy())
for f_i, short_face in nearest_short_faces.items():
new_param = param.copy()
new_param += OBJ_FACES_INFO[short_face]["center_param"] * width_param
cp_params[f_i] = new_param
else:
cp_params[finger_id_list[0]] = param
print("LIFT CP PARAMS: {}".format(cp_params))
return cp_params
"""
Assign each finger in finger_id_list to its closest face among free_faces (at most one finger per face)
"""
def assign_faces_to_fingers(obj_pose, finger_id_list, free_faces):
ground_face = get_closest_ground_face(obj_pose)
# Find distance from x axis and y axis, and store in xy_distances
# Need some additional logic to prevent multiple fingers from being assigned to same face
x_axis = np.array([1,0])
y_axis = np.array([0,1])
# Object frame axis corresponding to plane parallel to ground plane
x_ind, y_ind = __get_parallel_ground_plane_xy(ground_face)
# Transform finger base positions to object frame
finger_base_of = []
for f_wf in FINGER_BASE_POSITIONS:
f_of = get_of_from_wf(f_wf, obj_pose)
finger_base_of.append(f_of)
xy_distances = np.zeros((3, 2)) # Rows: fingers, columns are x and y axis distances
for f_i, f_of in enumerate(finger_base_of):
point_in_plane = np.array([f_of[0,x_ind], f_of[0,y_ind]]) # Ignore dimension of point that's not in the plane
x_dist = __get_distance_from_pt_2_line(x_axis, np.array([0,0]), point_in_plane)
y_dist = __get_distance_from_pt_2_line(y_axis, np.array([0,0]), point_in_plane)
xy_distances[f_i, 0] = x_dist
xy_distances[f_i, 1] = y_dist
assignments = {}
for i in range(3):
max_ind = np.unravel_index(np.nanargmax(xy_distances), xy_distances.shape)
f_i = max_ind[0]
if f_i not in finger_id_list:
xy_distances[f_i, :] = np.nan
continue
furthest_axis = max_ind[1]
x_dist = xy_distances[f_i, 0]
y_dist = xy_distances[f_i, 1]
if furthest_axis == 0: # distance to x axis is greater than to y axis
if finger_base_of[f_i][0, y_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][1] # 2
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][0] # 1
else:
if finger_base_of[f_i][0, x_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][2] # 3
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][3] # 5
# Get alternate closest face
if face not in free_faces:
alternate_axis = abs(furthest_axis - 1)
if alternate_axis == 0:
if finger_base_of[f_i][0, y_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][1] # 2
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][0] # 1
else:
if finger_base_of[f_i][0, x_ind] > 0:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][2] # 3
else:
face = OBJ_FACES_INFO[ground_face]["adjacent_faces"][3] # 5
assignments[f_i] = face
xy_distances[f_i, :] = np.nan
free_faces.remove(face)
return assignments
def get_pre_grasp_ft_goal(obj_pose, fingertips_current_wf, cp_params):
ft_goal = np.zeros(9)
incr = 0.03
# Get list of desired fingertip positions
cp_wf_list = get_cp_pos_wf_from_cp_params(cp_params, obj_pose.position, obj_pose.orientation, use_obj_size_offset = True)
for f_i in range(3):
f_wf = cp_wf_list[f_i]
if cp_params[f_i] is None:
f_new_wf = fingertips_current_wf[f_i]
else:
# Get face that finger is on
face = get_face_from_cp_param(cp_params[f_i])
f_of = get_of_from_wf(f_wf, obj_pose)
# Release object
f_new_of = f_of - incr * OBJ_FACES_INFO[face]["up_axis"]
# Convert back to wf
f_new_wf = get_wf_from_of(f_new_of, obj_pose)
ft_goal[3*f_i:3*f_i+3] = f_new_wf
return ft_goal
"""
Set up traj opt for fingers and static object
"""
def define_static_object_opt(nGrid, dt):
problem = StaticObjectOpt(
nGrid = nGrid,
dt = dt,
obj_shape = OBJ_SIZE,
)
return problem
"""
Solve traj opt to get finger waypoints
"""
def get_finger_waypoints(nlp, ft_goal, q_cur, obj_pose, npz_filepath = None):
nlp.solve_nlp(ft_goal, q_cur, obj_pose = obj_pose, npz_filepath = npz_filepath)
ft_pos = nlp.ft_pos_soln
ft_vel = nlp.ft_vel_soln
return ft_pos, ft_vel
##############################################################################
# Flip mode functions
##############################################################################
"""
Determine face that is closest to ground
"""
def get_closest_ground_face(obj_pose):
min_z = np.inf
min_face = None
for i in range(1,7):
c = OBJ_FACES_INFO[i]["center_param"].copy()
c_wf = get_wf_from_of(c, obj_pose)
if c_wf[2] < min_z:
min_z = c_wf[2]
min_face = i
return min_face
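
# Small usage sketch for the check above: with an identity orientation, the face
# whose centre sits lowest in the world frame is reported as the ground face.
# The pose object is a hypothetical stand-in with .position / .orientation.
def _example_closest_ground_face():
    from collections import namedtuple
    Pose = namedtuple("Pose", ["position", "orientation"])
    pose = Pose(position=np.array([0.0, 0.0, 0.0325]),
                orientation=np.array([0.0, 0.0, 0.0, 1.0]))  # identity quaternion
    return get_closest_ground_face(pose)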
"""
Get flipping contact points
"""
def get_flipping_cp_params(
init_pose,
goal_pose,
):
# Get goal face
init_face = get_closest_ground_face(init_pose)
#print("Init face: {}".format(init_face))
# Get goal face
goal_face = get_closest_ground_face(goal_pose)
#print("Goal face: {}".format(goal_face))
if goal_face not in OBJ_FACES_INFO[init_face]["adjacent_faces"]:
#print("Goal face not adjacent to initial face")
goal_face = OBJ_FACES_INFO[init_face]["adjacent_faces"][0]
#print("Intermediate goal face: {}".format(goal_face))
# Common adjacent faces to init_face and goal_face
common_adjacent_faces = list(set(OBJ_FACES_INFO[init_face]["adjacent_faces"]).intersection(OBJ_FACES_INFO[goal_face]["adjacent_faces"]))
opposite_goal_face = OBJ_FACES_INFO[goal_face]["opposite_face"]
#print("place fingers on faces {}, towards face {}".format(common_adjacent_faces, opposite_goal_face))
# Find closest fingers to each of the common_adjacent_faces
# Transform finger tip positions to object frame
finger_base_of = []
for f_wf in FINGER_BASE_POSITIONS:
f_of = get_of_from_wf(f_wf, init_pose)
#f_of = np.squeeze(get_of_from_wf(f_wf, init_pose))
finger_base_of.append(f_of)
# Object frame axis corresponding to plane parallel to ground plane
x_ind, y_ind = __get_parallel_ground_plane_xy(init_face)
# Find distance from x axis and y axis, and store in xy_distances
x_axis = np.array([1,0])
y_axis = np.array([0,1])
xy_distances = np.zeros((3, 2)) # Row corresponds to a finger, columns are x and y axis distances
for f_i, f_of in enumerate(finger_base_of):
point_in_plane = np.array([f_of[0,x_ind], f_of[0,y_ind]]) # Ignore dimension of point that's not in the plane
x_dist = __get_distance_from_pt_2_line(x_axis, np.array([0,0]), point_in_plane)
y_dist = __get_distance_from_pt_2_line(y_axis, np.array([0,0]), point_in_plane)
xy_distances[f_i, 0] = np.sign(f_of[0,y_ind]) * x_dist
xy_distances[f_i, 1] = np.sign(f_of[0,x_ind]) * y_dist
finger_assignments = {}
for face in common_adjacent_faces:
face_ind = OBJ_FACES_INFO[init_face]["adjacent_faces"].index(face)
if face_ind in [2,3]:
# Check y_ind column for finger that is furthest away
if OBJ_FACES_INFO[face]["center_param"][x_ind] < 0:
# Want most negative value
f_i = np.nanargmin(xy_distances[:,1])
else:
# Want most positive value
f_i = | np.nanargmax(xy_distances[:,1]) | numpy.nanargmax |
import numpy as np
import IPython
from .module import Module
from .parameter import Parameter
from .activation import Sigmoid, Tanh, ReLU
class RNN(Module):
"""Vanilla recurrent neural network layer.
The single time step forward transformation is
h[t+1] = tanh(h[t] @ Whh.T + X[t] @ Wxh.T + b)
with the following dimensions
X: (T, N, D)
h[t]: (N, H)
Wxh: (H, D)
Whh: (H, H)
b: (H)
where
D: input dimension
T: input sequence length
H: hidden dimension
Parameters
----------
input_size : int
    Input feature dimension D.
hidden_size : int
    Hidden state dimension H.
output_size : int
    Output dimension used to allocate the output weights `Why`.
bias : bool, optional
    If True, include a bias term in the hidden-state update.
nonlinearity : Module, optional
    Activation applied to the hidden-state update, default Tanh().
time_first : bool, optional
    If True, inputs are shaped (T, N, D); otherwise (N, T, D).
bptt_truncate : int, optional
    Maximum number of time steps to unroll in truncated backpropagation through time.

Returns
-------
RNN
    The constructed layer; forward(X) returns hidden states of shape (T + 1, N, H).
"""
def __init__(self, input_size, hidden_size, output_size, bias=True, nonlinearity=Tanh(), time_first=True, bptt_truncate=0):
super(RNN, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.nonlinearity = nonlinearity
self.time_first = time_first
self.bptt_truncate = bptt_truncate
self.Wxh = Parameter(np.zeros((hidden_size, input_size)))
self.Whh = Parameter(np.zeros((hidden_size, hidden_size)))
self.Why = Parameter(np.zeros((output_size, hidden_size)))
if bias:
self.b = Parameter(np.zeros(hidden_size))
else:
self.b = None
if time_first:
self.t_dim = 0
self.n_dim = 1
self.d_dim = 2
else:
self.t_dim = 1
self.n_dim = 0
self.d_dim = 2
self.reset_parameters()
def reset_parameters(self):
stdhh = np.sqrt(1. / self.hidden_size)
stdhx = np.sqrt(1. / self.input_size)
self.Wxh.data = np.random.uniform(-stdhx, stdhx, size=(self.hidden_size, self.input_size))
self.Whh.data = np.random.uniform(-stdhh, stdhh, size=(self.hidden_size, self.hidden_size))
if self.b is not None:
self.b.data = np.zeros(self.hidden_size)
def forward_step(self, x, h):
"""Compute state k from the previous state (sk) and current input (xk),
by use of the input weights (wx) and recursive weights (wRec).
"""
return self.nonlinearity.forward(h @ self.Whh.data.T + x @ self.Wxh.data.T + self.b.data)
def forward(self, X, h0=None):
"""Unfold the network and compute all state activations given the input X,
and input weights (wx) and recursive weights (wRec).
Return the state activations in a matrix, the last column S[:,-1] contains the
final activations.
"""
# Initialise the matrix that holds all states for all input sequences.
# The initial state s0 is set to 0.
if not self.time_first:
X = X.transpose(self.t_dim, self.n_dim, self.d_dim) # [N, T, D] --> [T, N, D]
h = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
if h0 is not None:
h[0] = h0
# Use the recurrence relation defined by forward_step to update the states trough time.
for t in range(0, X.shape[self.t_dim]):
h[t + 1] = self.nonlinearity.forward(np.dot(X[t], self.Wxh.data.T) + np.dot(h[t], self.Whh.data.T) + self.b.data)
# h[t + 1] = self.forward_step(X[t, :], h[t])
# np.dot(self.Wxh.data, X[t][5])
# np.dot(X[t], self.Wxh.data.T)
# Cache
self.X = X
self.h = h
return h
def backward_step_old_broken(self, dh, x_cache, h_cache):
"""Compute a single backwards time step.
"""
# https://gist.github.com/karpathy/d4dee566867f8291f086
# Activation
dh = self.nonlinearity.backward(dh, h_cache)
# Gradient of the linear layer parameters (accumulate)
self.Whh.grad += dh.T @ h_cache # np.outer(dh, h_cache)
self.Wxh.grad += dh.T @ x_cache # np.outer(dh, x_cache)
if self.b is not None:
self.b.grad += dh.sum(axis=0)
# Gradient at the output of the previous layer
dh_prev = dh @ self.Whh.data.T # self.Whh.data @ dh.T
return dh_prev
def backward_old_broken(self, delta):
"""Backpropagate the gradient computed at the output (delta) through the network.
Accumulate the parameter gradients for `Whx` and `Whh` by for each layer by addition.
Return the parameter gradients as a tuple, and the gradients at the output of each layer.
"""
# Initialise the array that stores the gradients of the cost with respect to the states.
dh = np.zeros((self.X.shape[self.t_dim] + 1, self.X.shape[self.n_dim], self.hidden_size))
dh[-1] = delta
for t in range(self.X.shape[self.t_dim], 0, -1):
dh[t - 1, :] = self.backward_step_old_broken(dh[t, :], self.X[t - 1, :], self.h[t - 1, :])
return dh
def backward(self, delta):
"""Backpropagate the gradient computed at the output (delta) through the network.
Accumulate the parameter gradients for `Whx` and `Whh` by for each layer by addition.
Return the parameter gradients as a tuple, and the gradients at the output of each layer.
delta can be
(N, H)
(N, H, T)
"""
# http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/
# Initialise the array that stores the gradients of the cost with respect to the states.
# dh = np.zeros((self.X.shape[self.t_dim] + 1, self.X.shape[self.n_dim], self.hidden_size))
# dh[-1] = delta
dh_t = delta
for t in range(self.X.shape[self.t_dim], 0, -1):
# IPython.embed()
# Initial delta calculation: dL/dz (TODO Don't really care about this)
# dLdz = self.V.T.dot(delta_o[t]) * (1 - (self.h[t] ** 2)) # (1 - (self.h[t] ** 2)) is Tanh()
dh_t = self.nonlinearity.backward(dh_t, self.h[t])
# Backpropagation through time (for at most self.bptt_truncate steps)
for bptt_step in np.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]:
# print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
# Add to gradients at each previous step
self.Whh.grad += np.einsum('NH,iNH->NH', dh_t, self.h[bptt_step - 1])
# self.Whh.grad += np.outer(dh_t, self.h[bptt_step - 1])
self.Wxh.grad[:, self.X[bptt_step]] += dh_t
# self.Wxh.grad[:, self.X[bptt_step]] += dLdz # TODO Really want dh/dU
# Update delta for next step dL/dz at t-1
dh_t = self.nonlinearity.backward(self.Whh.data.T.dot(dh_t), self.h[bptt_step-1]) # (1 - self.h[bptt_step-1] ** 2)
# dh[t - 1, :] = self.backward_step(dh[t, :], self.X[t - 1, :], self.h[t - 1, :])
return dh_t
def backward_step(self, dh, x_cache, h_cache):
pass
# return [dLdU, dLdV, dLdW]
def bptt(self, x, y):
T = len(y)
# Perform forward propagation
o, s = self.forward_propagation(x)
# We accumulate the gradients in these variables
dLdU = np.zeros(self.Wxh.shape)
dLdV = np.zeros(self.V.shape)
dLdW = np.zeros(self.Whh.shape)
delta_o = o
delta_o[np.arange(len(y)), y] -= 1.
# For each output backwards...
for t in np.arange(T)[::-1]:
dLdV += np.outer(delta_o[t], s[t].T)
# Initial delta calculation: dL/dz
delta_t = self.V.T.dot(delta_o[t]) * (1 - (s[t] ** 2)) # (1 - (s[t] ** 2)) is Tanh()
# Backpropagation through time (for at most self.bptt_truncate steps)
for bptt_step in np.arange(max(0, t - self.bptt_truncate), t + 1)[::-1]:
# print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
# Add to gradients at each previous step
dLdW += np.outer(delta_t, s[bptt_step - 1])
dLdU[:, x[bptt_step]] += delta_t
# Update delta for next step dL/dz at t-1
delta_t = self.Whh.data.T.dot(delta_t) * (1 - s[bptt_step-1] ** 2)
return [dLdU, dLdV, dLdW]
# http://willwolf.io/2016/10/18/recurrent-neural-network-gradients-and-lessons-learned-therein/
# https://github.com/go2carter/nn-learn/blob/master/grad-deriv-tex/rnn-grad-deriv.pdf
# https://peterroelants.github.io/posts/rnn-implementation-part01/
# http://www.wildml.com/2015/10/recurrent-neural-networks-tutorial-part-3-backpropagation-through-time-and-vanishing-gradients/
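
# Minimal forward-pass sketch for the RNN above: a random (T, N, D) batch goes
# in, hidden states of shape (T + 1, N, H) come back (index 0 is the zero
# initial state). The sizes below are arbitrary.
def _example_rnn_forward():
    T, N, D, H = 5, 2, 3, 4
    rnn = RNN(input_size=D, hidden_size=H, output_size=D)
    X = np.random.randn(T, N, D)
    h = rnn.forward(X)
    return h.shape  # (T + 1, N, H)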
class GRU(Module):
def __init__(self):
pass
class LSTM(Module):
def __init__(self, input_size, hidden_size=128, bias=True, time_first=True):
super(LSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.time_first = time_first
if time_first:
self.t_dim = 0
self.n_dim = 1
self.d_dim = 2
else:
self.t_dim = 1
self.n_dim = 0
self.d_dim = 2
D = self.input_size
H = self.hidden_size
Z = D + H # Concatenation
self.Wf = Parameter(np.zeros((Z, H)))
self.Wi = Parameter(np.zeros((Z, H)))
self.Wc = Parameter(np.zeros((Z, H)))
self.Wo = Parameter(np.zeros((Z, H)))
self.Wy = Parameter(np.zeros((H, D)))
if bias:
self.bf = Parameter(np.zeros((1, H)))
self.bi = Parameter(np.zeros((1, H)))
self.bc = Parameter(np.zeros((1, H)))
self.bo = Parameter(np.zeros((1, H)))
self.by = Parameter(np.zeros((1, D)))
else:
self.bf = None
self.bi = None
self.bc = None
self.bo = None
self.by = None
self.reset_parameters()
def reset_parameters(self):
# TODO Add orthogonal initialization
D = self.input_size
H = self.hidden_size
Z = D + H # Concatenation
self.Wf.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wi.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wc.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wo.data = np.random.randn(Z, H) / np.sqrt(Z / 2.)
self.Wy.data = np.random.randn(H, D) / np.sqrt(D / 2.)
if self.bf is not None:
self.bf.data = np.zeros((1, H))
self.bi.data = np.zeros((1, H))
self.bc.data = np.zeros((1, H))
self.bo.data = np.zeros((1, H))
self.by.data = np.zeros((1, D))
else:
self.bf = None
self.bi = None
self.bc = None
self.bo = None
self.by = None
self.sigmoidf = Sigmoid()
self.sigmoidi = Sigmoid()
self.sigmoido = Sigmoid()
self.tanhc = Tanh()
self.tanh = Tanh()
def forward_step(self, x, state):
h_old, c_old = state
# # One-hot encode
# X_one_hot = np.zeros(D)
# X_one_hot[X] = 1.
# X_one_hot = X_one_hot.reshape(1, -1)
# Concatenate old state with current input
hx = np.column_stack((h_old, x))
hf = self.sigmoidf.forward(hx @ self.Wf.data + self.bf.data)
hi = self.sigmoidi.forward(hx @ self.Wi.data + self.bi.data)
ho = self.sigmoido.forward(hx @ self.Wo.data + self.bo.data)
hc = self.tanhc.forward(hx @ self.Wc.data + self.bc.data)
c = hf * c_old + hi * hc
h = ho * self.tanh.forward(c)
# y = h @ Wy + by
# prob = softmax(y)
self.cache = dict(hx=[*self.cache['hx'], hx],
hf=[*self.cache['hf'], hf],
hi=[*self.cache['hi'], hi],
ho=[*self.cache['ho'], ho],
hc=[*self.cache['hc'], hc],
c=[*self.cache['c'], c],
c_old=[*self.cache['c_old'], c_old])
return (h, c)
def forward(self, X):
self.cache = dict(hx=[],
hf=[],
hi=[],
ho=[],
hc=[],
c=[],
c_old=[])
if not self.time_first:
X = X.transpose(self.t_dim, self.n_dim, self.d_dim) # [N, T, D] --> [T, N, D]
h = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
c = np.zeros((X.shape[self.t_dim] + 1, X.shape[self.n_dim], self.hidden_size)) # (T, N, H)
# Use the recurrence relation defined by forward_step to update the states trough time.
for t in range(0, X.shape[self.t_dim]):
h[t + 1], c[t + 1] = self.forward_step(X[t, :], (h[t], c[t]))
return h[-1]
def backward_step(self, dh_next, dc_next, t):
# Unpack the cache variable to get the intermediate variables used in forward step
hx = self.cache['hx'][t]
hf = self.cache['hf'][t]
hi = self.cache['hi'][t]
ho = self.cache['ho'][t]
hc = self.cache['hc'][t]
c = self.cache['c'][t]
c_old = self.cache['c_old'][t]
# IPython.embed()  # leftover debugging breakpoint
# # Softmax loss gradient
# dy = prob.copy()
# dy[1, y_train] -= 1.
# # Hidden to output gradient
# dWy = h.T @ dy
# dby = dy
# # Note we're adding dh_next here
# dh = dy @ Wy.T + dh_next
# Gradient for ho in h = ho * tanh(c)
dho = self.tanh.forward(c) * dh_next
dho = self.sigmoido.backward(ho) * dho
# Gradient for c in h = ho * tanh(c), note we're adding dc_next here
dc = ho * dh_next * self.tanh.backward(c)
dc = dc + dc_next
# Gradient for hf in c = hf * c_old + hi * hc
dhf = c_old * dc
dhf = self.sigmoidf.backward(hf) * dhf
# Gradient for hi in c = hf * c_old + hi * hc
dhi = hc * dc
dhi = self.sigmoidi.backward(hi) * dhi
# Gradient for hc in c = hf * c_old + hi * hc
dhc = hi * dc
dhc = self.tanhc.backward(hc) * dhc
# Gate gradients, just a normal fully connected layer gradient
self.Wf.grad += hx.T @ dhf
self.bf.grad += dhf.sum(axis=0)
dxf = dhf @ self.Wf.data.T
self.Wi.grad += hx.T @ dhi
self.bi.grad += dhi.sum(axis=0)
dxi = dhi @ self.Wi.data.T
self.Wo.grad += hx.T @ dho
self.bo.grad += dho.sum(axis=0)
dxo = dho @ self.Wo.data.T
self.Wc.grad += hx.T @ dhc
self.bc.grad += dhc.sum(axis=0)
dxc = dhc @ self.Wc.data.T
# As x was used in multiple gates, the gradient must be accumulated here
dx = dxo + dxc + dxi + dxf
# Split the concatenated X, so that we get our gradient of h_old
dh_next = dx[:, :self.hidden_size]
# Gradient for c_old in c = hf * c_old + hi * hc
dc_next = hf * dc
return dh_next, dc_next
def backward(self, delta):
# https://wiseodd.github.io/techblog/2016/08/12/lstm-backprop/
# https://gist.github.com/karpathy/d4dee566867f8291f086
dh_next = delta
dc_next = np.zeros_like(dh_next)
for t in range(len(self.cache['hx']) - 1, 0, -1):
dh_next, dc_next = self.backward_step(dh_next, dc_next, t)
def lstm_backward(prob, y_train, d_next, cache):
# Unpack the cache variable to get the intermediate variables used in forward step
# ... = cache
dh_next, dc_next = d_next
# Softmax loss gradient
dy = prob.copy()
dy[1, y_train] -= 1.
# Hidden to output gradient
dWy = h.T @ dy
dby = dy
# Note we're adding dh_next here
dh = dy @ Wy.T + dh_next
# Gradient for ho in h = ho * tanh(c)
dho = tanh(c) * dh
dho = dsigmoid(ho) * dho
# Gradient for c in h = ho * tanh(c), note we're adding dc_next here
dc = ho * dh * dtanh(c)
dc = dc + dc_next
# Gradient for hf in c = hf * c_old + hi * hc
dhf = c_old * dc
dhf = dsigmoid(hf) * dhf
# Gradient for hi in c = hf * c_old + hi * hc
dhi = hc * dc
dhi = dsigmoid(hi) * dhi
# Gradient for hc in c = hf * c_old + hi * hc
dhc = hi * dc
dhc = dtanh(hc) * dhc
# Gate gradients, just a normal fully connected layer gradient
dWf = X.T @ dhf
dbf = dhf
dXf = dhf @ Wf.T
dWi = X.T @ dhi
dbi = dhi
dXi = dhi @ Wi.T
dWo = X.T @ dho
dbo = dho
dXo = dho @ Wo.T
dWc = X.T @ dhc
dbc = dhc
dXc = dhc @ Wc.T
# As X was used in multiple gates, the gradient must be accumulated here
dX = dXo + dXc + dXi + dXf
# Split the concatenated X, so that we get our gradient of h_old
dh_next = dX[:, :H]
# Gradient for c_old in c = hf * c_old + hi * hc
dc_next = hf * dc
grad = dict(Wf=dWf, Wi=dWi, Wc=dWc, Wo=dWo, Wy=dWy, bf=dbf, bi=dbi, bc=dbc, bo=dbo, by=dby)
state = (dh_next, dc_next)
return grad, state
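
# Minimal forward-pass sketch for the numpy LSTM class defined above: a random
# (T, N, D) batch in, the final hidden state of shape (N, H) out. Sizes are
# arbitrary; this is illustrative only.
def _example_lstm_forward():
    T, N, D, H = 4, 2, 3, 8
    lstm = LSTM(input_size=D, hidden_size=H)
    X = np.random.randn(T, N, D)
    h_last = lstm.forward(X)
    return h_last.shape  # (N, H)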
import numpy as np
import code
class LSTM:
# https://gist.github.com/karpathy/587454dc0146a6ae21fc
@staticmethod
def init(input_size, hidden_size, fancy_forget_bias_init = 3):
"""
Initialize parameters of the LSTM (both weights and biases in one matrix)
One might want to use a positive fancy_forget_bias_init number (e.g. maybe even up to 5, in some papers)
"""
# +1 for the biases, which will be the first row of WLSTM
WLSTM = np.random.randn(input_size + hidden_size + 1, 4 * hidden_size) / np.sqrt(input_size + hidden_size)
WLSTM[0,:] = 0 # initialize biases to zero
if fancy_forget_bias_init != 0:
# forget gates get little bit negative bias initially to encourage them to be turned off
# remember that due to Xavier initialization above, the raw output activations from gates before
# nonlinearity are zero mean and on order of standard deviation ~1
WLSTM[0,hidden_size:2*hidden_size] = fancy_forget_bias_init
return WLSTM
@staticmethod
def forward(X, WLSTM, c0 = None, h0 = None):
"""
X should be of shape (n,b,input_size), where n = length of sequence, b = batch size
"""
n,b,input_size = X.shape
d = WLSTM.shape[1] // 4 # hidden size
if c0 is None: c0 = | np.zeros((b,d)) | numpy.zeros |
from pathlib import Path
from typing import List, Tuple, Dict
import h5py
import torch
import pandas as pd
import numpy as np
from tqdm import tqdm
from torch.utils.data import Dataset
from .datautils import (load_discharge,
load_forcings_lumped,
load_static_attributes,
reshape_data)
from .scaling import InputScaler, OutputScaler, StaticAttributeScaler
class LumpedBasin(Dataset):
"""PyTorch data set to work with the raw text files for lumped (daily basin-aggregated)
forcings and streamflow.
Parameters
----------
data_root : Path
Path to the main directory of the data set
basin : str
Gauge-id of the basin
forcing_vars : List
Names of forcing variables to use
dates : List
Start and end date of the period.
is_train : bool
If True, discharge observations are normalized and invalid discharge samples are removed
train_basins : List
List of basins used in the training of the experiment this Dataset is part of. Needed to
create the correct feature scalers (the ones that are calculated on these basins)
seq_length : int
Length of the input sequence
with_attributes : bool, optional
If True, loads and returns addtionaly attributes, by default False
concat_static : bool, optional
If true, adds catchment characteristics at each time step to the meteorological forcing
input data, by default True
db_path : str, optional
Path to sqlite3 database file containing the catchment characteristics, by default None
allow_negative_target : bool, optional
If False, will remove samples with negative target value from the dataset.
scalers : Tuple[InputScaler, OutputScaler, Dict[str, StaticAttributeScaler]], optional
Scalers to normalize and resale input, output, and static variables. If not provided,
the scalers will be initialized at runtime, which will result in poor performance if
many datasets are created. Instead, it makes sense to re-use the scalers across datasets.
"""
def __init__(self,
data_root: Path,
basin: str,
forcing_vars: List,
dates: List,
is_train: bool,
train_basins: List,
seq_length: int,
with_attributes: bool = False,
concat_static: bool = True,
db_path: str = None,
allow_negative_target: bool = False,
scalers: Tuple[InputScaler, OutputScaler,
Dict[str, StaticAttributeScaler]] = None):
self.data_root = data_root
self.basin = basin
self.forcing_vars = forcing_vars
self.seq_length = seq_length
self.is_train = is_train
self.train_basins = train_basins
self.dates = dates
self.with_attributes = with_attributes
self.concat_static = concat_static
self.db_path = db_path
self.allow_negative_target = allow_negative_target
if scalers is not None:
self.input_scalers, self.output_scalers, self.static_scalers = scalers
else:
self.input_scalers, self.output_scalers, self.static_scalers = None, None, {}
if self.input_scalers is None:
self.input_scalers = InputScaler(self.data_root, self.train_basins,
self.dates[0], self.dates[1],
self.forcing_vars)
if self.output_scalers is None:
self.output_scalers = OutputScaler(self.data_root, self.train_basins,
self.dates[0], self.dates[1])
# placeholder to store std of discharge, used for rescaling losses during training
self.q_std = None
# placeholder to store start and end date of entire period (incl warmup)
self.period_start = None
self.period_end = None
self.attribute_names = None
self.x, self.y = self._load_data()
if self.with_attributes:
self.attributes = self._load_attributes()
self.num_samples = self.x.shape[0]
def __len__(self):
return self.num_samples
def __getitem__(self, idx: int):
if self.with_attributes:
if self.concat_static:
x = torch.cat([self.x[idx], self.attributes.repeat((self.seq_length, 1))], dim=-1)
return x, self.y[idx]
else:
return self.x[idx], self.attributes, self.y[idx]
else:
return self.x[idx], self.y[idx]
def _load_data(self) -> Tuple[torch.Tensor, torch.Tensor]:
"""Loads input and output data from text files. """
# we use (seq_len) time steps before start for warmup
df = load_forcings_lumped(self.data_root, [self.basin])[self.basin]
qobs = load_discharge(self.data_root, basins=[self.basin]).set_index('date')['qobs']
if not self.is_train and len(qobs) == 0:
tqdm.write(f"Treating {self.basin} as validation basin (no streamflow data found).")
qobs = pd.Series(np.nan, index=df.index, name='qobs')
df = df.loc[self.dates[0]:self.dates[1]]
qobs = qobs.loc[self.dates[0]:self.dates[1]]
if len(qobs) != len(df):
print(f"Length of forcings {len(df)} and observations {len(qobs)} "
      f"doesn't match for basin {self.basin}")
df['qobs'] = qobs
# store first and last date of the selected period
self.period_start = df.index[0]
self.period_end = df.index[-1]
# use all meteorological variables as inputs
x = np.array([df[var].values for var in self.forcing_vars]).T
y = np.array([df['qobs'].values]).T
# normalize data, reshape for LSTM training and remove invalid samples
x = self.input_scalers.normalize(x)
x, y = reshape_data(x, y, self.seq_length)
if self.is_train:
# Delete all samples where discharge is NaN
if np.sum(np.isnan(y)) > 0:
tqdm.write(f"Deleted {np.sum(np.isnan(y))} NaNs in basin {self.basin}.")
x = np.delete(x, np.argwhere(np.isnan(y))[:, 0], axis=0)
y = np.delete(y, np.argwhere(np.isnan(y))[:, 0], axis=0)
# Deletes all records with invalid discharge
if not self.allow_negative_target and np.any(y < 0):
tqdm.write(f"Deleted {np.sum(y < 0)} negative values in basin {self.basin}.")
x = np.delete(x, np.argwhere(y < 0)[:, 0], axis=0)
y = np.delete(y, np.argwhere(y < 0)[:, 0], axis=0)
# store std of discharge before normalization
self.q_std = np.std(y)
y = self.output_scalers.normalize(y)
# convert arrays to torch tensors
x = torch.from_numpy(x.astype(np.float32))
y = torch.from_numpy(y.astype(np.float32))
return x, y
def _load_attributes(self) -> torch.Tensor:
df = load_static_attributes(self.db_path, [self.basin], drop_lat_lon=True)
# normalize data
for feature in [f for f in df.columns if f[:7] != 'onehot_']:
if feature not in self.static_scalers or self.static_scalers[feature] is None:
self.static_scalers[feature] = \
StaticAttributeScaler(self.db_path, self.train_basins, feature)
df[feature] = self.static_scalers[feature].normalize(df[feature])
# store attribute names
self.attribute_names = df.columns
# store feature as PyTorch Tensor
attributes = df.loc[df.index == self.basin].values
return torch.from_numpy(attributes.astype(np.float32))
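
# Hedged usage sketch for LumpedBasin: the data path, basin id, forcing-variable
# names, and dates below are placeholders and will not resolve to real files.
# It only shows the intended wiring with a PyTorch DataLoader.
def _example_lumped_basin_loader():
    from torch.utils.data import DataLoader
    dates = [pd.Timestamp("2000-10-01"), pd.Timestamp("2005-09-30")]
    ds = LumpedBasin(data_root=Path("/path/to/data"),        # hypothetical path
                     basin="12345",                          # hypothetical gauge id
                     forcing_vars=["prcp", "tmax", "tmin"],  # hypothetical variables
                     dates=dates,
                     is_train=True,
                     train_basins=["12345"],
                     seq_length=270)
    loader = DataLoader(ds, batch_size=256, shuffle=True)
    x, y = next(iter(loader))  # x: (batch, seq_length, n_features), y: (batch, 1)
    return x.shape, y.shape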
class LumpedH5(Dataset):
"""PyTorch data set to work with pre-packed hdf5 data base files.
Should be used only in combination with the files processed from `create_h5_files` in the
`utils` module.
Parameters
----------
h5_file : Path
Path to hdf5 file, containing the bundled data
basins : List
List containing the basin ids
db_path : str
Path to sqlite3 database file, containing the catchment characteristics
concat_static : bool
If true, adds catchment characteristics at each time step to the meteorological forcing
input data, by default True
cache : bool, optional
If True, loads the entire data into memory, by default False
no_static : bool, optional
If True, no catchment attributes are added to the inputs, by default False
"""
def __init__(self,
h5_file: Path,
basins: List,
db_path: str,
concat_static: bool = True,
cache: bool = False,
no_static: bool = False):
self.h5_file = h5_file
self.basins = basins
self.db_path = db_path
self.concat_static = concat_static
self.cache = cache
self.no_static = no_static
# Placeholder for catchment attributes stats
self.df = None
self.attribute_names = None
# preload data if cached is true
if self.cache:
(self.x, self.y, self.sample_2_basin, self.q_stds) = self._preload_data()
# load attributes into data frame
self._load_attributes()
# determine number of samples once
if self.cache:
self.num_samples = self.y.shape[0]
else:
with h5py.File(h5_file, 'r') as f:
self.num_samples = f["target_data"].shape[0]
def __len__(self):
return self.num_samples
def __getitem__(self, idx: int):
if self.cache:
x = self.x[idx]
y = self.y[idx]
basin = self.sample_2_basin[idx]
q_std = self.q_stds[idx]
else:
with h5py.File(self.h5_file, 'r') as f:
x = f["input_data"][idx]
y = f["target_data"][idx]
basin = f["sample_2_basin"][idx]
basin = basin.decode("ascii")
q_std = f["q_stds"][idx]
if not self.no_static:
# get attributes from data frame and create 2d array with copies
attributes = self.df.loc[self.df.index == basin].values
if self.concat_static:
attributes = | np.repeat(attributes, repeats=x.shape[0], axis=0) | numpy.repeat |
"""
Spherical Harmonic Grid classes
"""
import numpy as _np
import matplotlib as _mpl
import matplotlib.pyplot as _plt
from mpl_toolkits.axes_grid1 import make_axes_locatable as _make_axes_locatable
import copy as _copy
import xarray as _xr
import tempfile as _tempfile
from .. import shtools as _shtools
try:
import cartopy.crs as _ccrs
from cartopy.mpl.ticker import LongitudeFormatter as _LongitudeFormatter
from cartopy.mpl.ticker import LatitudeFormatter as _LatitudeFormatter
_cartopy_module = True
except ModuleNotFoundError:
_cartopy_module = False
try:
import pygmt as _pygmt
_pygmt_module = True
except ModuleNotFoundError:
_pygmt_module = False
class SHGrid(object):
"""
Class for spatial gridded data on the sphere.
Grids can be initialized from:
x = SHGrid.from_array(array)
x = SHGrid.from_xarray(data_array)
x = SHGrid.from_netcdf(netcdf)
x = SHGrid.from_file('fname.dat')
x = SHGrid.from_zeros(lmax)
x = SHGrid.from_cap(theta, clat, clon, lmax)
x = SHGrid.from_ellipsoid(lmax, a, b, c)
The class instance defines the following class attributes:
data : Gridded array of the data.
nlat, nlon : The number of latitude and longitude bands in the grid.
n : The number of samples in latitude for 'DH' grids.
lmax : The maximum spherical harmonic degree that can be resolved
by the grid sampling.
sampling : The longitudinal sampling for Driscoll and Healy grids. Either
1 for equally sampled grids (nlat=nlon) or 2 for equally
spaced grids in degrees.
kind : Either 'real' or 'complex' for the data type.
grid : Either 'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-
Legendre Quadrature grids.
units : The units of the gridded data.
zeros : The cos(colatitude) nodes used with Gauss-Legendre
Quadrature grids. Default is None.
weights : The latitudinal weights used with Gauss-Legendre
Quadrature grids. Default is None.
extend : True if the grid contains the redundant column for 360 E and
(for 'DH' grids) the unnecessary row for 90 S.
Each class instance provides the following methods:
to_array() : Return the raw gridded data as a numpy array.
to_xarray() : Return the gridded data as an xarray DataArray.
to_file() : Save gridded data to a text or binary file.
to_netcdf() : Return the gridded data as a netcdf formatted file or object.
to_real() : Return a new SHGrid class instance of the real component
of the data.
to_imag() : Return a new SHGrid class instance of the imaginary component
of the data.
lats() : Return a vector containing the latitudes of each row
of the gridded data.
lons() : Return a vector containing the longitudes of each column
of the gridded data.
expand() : Expand the grid into spherical harmonics.
max() : Return the maximum value of data using numpy.max().
min() : Return the minimum value of data using numpy.min().
copy() : Return a copy of the class instance.
plot() : Plot the data.
plotgmt() : Plot projected data using the generic mapping tools (GMT).
plot3d() : Plot a 3-dimensional representation of the data.
info() : Print a summary of the data stored in the SHGrid instance.
"""
def __init__(self):
"""Unused constructor of the super class."""
print('Initialize the class using one of the class methods:\n'
'>>> pyshtools.SHGrid.from_array\n'
'>>> pyshtools.SHGrid.from_xarray\n'
'>>> pyshtools.SHGrid.from_netcdf\n'
'>>> pyshtools.SHGrid.from_file\n'
'>>> pyshtools.SHGrid.from_zeros\n'
'>>> pyshtools.SHGrid.from_cap\n'
'>>> pyshtools.SHGrid.from_ellipsoid\n')
# ---- Factory methods ----
@classmethod
def from_array(self, array, grid='DH', units=None, copy=True):
"""
Initialize the class instance from an input array.
Usage
-----
x = SHGrid.from_array(array, [grid, units, copy])
Returns
-------
x : SHGrid class instance
Parameters
----------
array : ndarray, shape (nlat, nlon)
2-D numpy array of the gridded data, where nlat and nlon are the
number of latitudinal and longitudinal bands, respectively.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
units : str, optional, default = None
The units of the gridded data.
copy : bool, optional, default = True
If True (default), make a copy of array when initializing the class
instance. If False, initialize the class instance with a reference
to array.
"""
if _np.iscomplexobj(array):
kind = 'complex'
else:
kind = 'real'
if type(grid) != str:
raise ValueError('grid must be a string. Input type is {:s}.'
.format(str(type(grid))))
if grid.upper() not in set(['DH', 'GLQ']):
raise ValueError(
"grid must be 'DH' or 'GLQ'. Input value is {:s}."
.format(repr(grid))
)
for cls in self.__subclasses__():
if cls.istype(kind) and cls.isgrid(grid):
return cls(array, units=units, copy=copy)
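# Illustrative use (values are arbitrary): a 1-degree, equally spaced DH grid
# that includes the redundant 90 S row and 360 E column (181 x 361 samples),
#
#     import numpy as np
#     import pyshtools as pysh
#     data = np.random.randn(181, 361)
#     grid = pysh.SHGrid.from_array(data, grid='DH')
#     clm = grid.expand()   # expand to spherical harmonic coefficients
#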
@classmethod
def from_zeros(self, lmax, grid='DH', kind='real', sampling=2,
units=None, extend=True, empty=False):
"""
Initialize the class instance using an array of zeros.
Usage
-----
x = SHGrid.from_zeros(lmax, [grid, kind, sampling, units, extend,
empty])
Returns
-------
x : SHGrid class instance
Parameters
----------
lmax : int
The maximum spherical harmonic degree resolvable by the grid.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss Legendre
Quadrature grids, respectively.
kind : str, optional, default = 'real'
Either 'real' or 'complex' for the data type.
sampling : int, optional, default = 2
The longitudinal sampling for Driscoll and Healy grids. Either 1
for equally sampled grids (nlong=nlat) or 2 for equally spaced
grids in degrees (nlong=2*nlat with extend=False or nlong=2*nlat-1
with extend=True).
units : str, optional, default = None
The units of the gridded data.
extend : bool, optional, default = True
If True, include the longitudinal band for 360 E (DH and GLQ grids)
and the latitudinal band for 90 S (DH grids only).
empty : bool, optional, default = False
If True, create the data array using numpy.empty() and do not
initialize with zeros.
"""
if type(grid) != str:
raise ValueError('grid must be a string. Input type is {:s}.'
.format(str(type(grid))))
if grid.upper() not in set(['DH', 'GLQ']):
raise ValueError("grid must be 'DH' or 'GLQ'. " +
"Input value is {:s}.".format(repr(grid)))
if grid.upper() == 'DH':
nlat = 2 * lmax + 2
if sampling == 1:
nlon = nlat
else:
nlon = nlat * 2
if extend:
nlat += 1
nlon += 1
elif grid.upper() == 'GLQ':
nlat = lmax + 1
nlon = 2 * nlat - 1
if extend:
nlon += 1
if kind == 'real':
if empty:
array = _np.empty((nlat, nlon), dtype=_np.float_)
else:
array = _np.zeros((nlat, nlon), dtype=_np.float_)
else:
if empty:
array = _np.empty((nlat, nlon), dtype=_np.complex_)
else:
array = _np.zeros((nlat, nlon), dtype=_np.complex_)
for cls in self.__subclasses__():
if cls.istype(kind) and cls.isgrid(grid):
return cls(array, units=units, copy=False)
@classmethod
def from_ellipsoid(self, lmax, a, b=None, c=None, grid='DH', kind='real',
sampling=2, units=None, extend=True):
"""
Initialize the class instance with a triaxial ellipsoid whose principal
axes are aligned with the x, y, and z axes.
Usage
-----
x = SHGrid.from_ellipsoid(lmax, a, [b, c, grid, kind, sampling,
units, extend])
Returns
-------
x : SHGrid class instance
Parameters
----------
a : float
Length of the principal axis aligned with the x axis.
b : float, optional, default = a
Length of the principal axis aligned with the y axis.
c : float, optional, default = b
Length of the principal axis aligned with the z axis.
lmax : int
The maximum spherical harmonic degree resolvable by the grid.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
kind : str, optional, default = 'real'
Either 'real' or 'complex' for the data type.
sampling : int, optional, default = 2
The longitudinal sampling for Driscoll and Healy grids. Either 1
for equally sampled grids (nlong=nlat) or 2 for equally spaced
grids in degrees (nlong=2*nlat with extend=False or nlong=2*nlat-1
with extend=True).
units : str, optional, default = None
The units of the gridded data.
extend : bool, optional, default = True
If True, include the longitudinal band for 360 E (DH and GLQ grids)
and the latitudinal band for 90 S (DH grids only).
"""
temp = self.from_zeros(lmax, grid=grid, kind=kind, sampling=sampling,
units=units, extend=extend, empty=True)
if c is None and b is None:
temp.data[:, :] = a
elif c is not None and b is None:
for ilat, lat in enumerate(temp.lats()):
temp.data[ilat, :] = 1. / _np.sqrt(
_np.cos(_np.deg2rad(lat))**2 / a**2 +
_np.sin(_np.deg2rad(lat))**2 / c**2
)
else:
if c is None:
c = b
cos2 = _np.cos(_np.deg2rad(temp.lons()))**2
sin2 = _np.sin(_np.deg2rad(temp.lons()))**2
for ilat, lat in enumerate(temp.lats()):
temp.data[ilat, :] = 1. / _np.sqrt(
_np.cos(_np.deg2rad(lat))**2 * cos2 / a**2 +
_np.cos(_np.deg2rad(lat))**2 * sin2 / b**2 +
_np.sin(_np.deg2rad(lat))**2 / c**2
)
return temp
@classmethod
def from_cap(self, theta, clat, clon, lmax, grid='DH', kind='real',
sampling=2, degrees=True, units=None, extend=True):
"""
Initialize the class instance with an array equal to unity within
a spherical cap and zero elsewhere.
Usage
-----
x = SHGrid.from_cap(theta, clat, clon, lmax, [grid, kind, sampling,
degrees, units, extend])
Returns
-------
x : SHGrid class instance
Parameters
----------
theta : float
The angular radius of the spherical cap, default in degrees.
clat, clon : float
Latitude and longitude of the center of the rotated spherical cap
(default in degrees).
lmax : int
The maximum spherical harmonic degree resolvable by the grid.
grid : str, optional, default = 'DH'
'DH' or 'GLQ' for Driscoll and Healy grids or Gauss-Legendre
Quadrature grids, respectively.
kind : str, optional, default = 'real'
Either 'real' or 'complex' for the data type.
sampling : int, optional, default = 2
The longitudinal sampling for Driscoll and Healy grids. Either 1
for equally sampled grids (nlong=nlat) or 2 for equally spaced
grids in degrees (nlong=2*nlat with extend=False or nlong=2*nlat-1
with extend=True).
degrees : bool, optional = True
If True, theta, clat, and clon are in degrees.
units : str, optional, default = None
The units of the gridded data.
extend : bool, optional, default = True
If True, include the longitudinal band for 360 E (DH and GLQ grids)
and the latitudinal band for 90 S (DH grids only).
"""
temp = self.from_zeros(lmax, grid=grid, kind=kind, sampling=sampling,
units=units, extend=extend)
if degrees is True:
theta = _np.deg2rad(theta)
clat = _np.deg2rad(clat)
clon = _np.deg2rad(clon)
# Set array equal to 1 within the cap
lats = temp.lats(degrees=False)
lons = temp.lons(degrees=False)
imin = _np.inf
imax = 0
for i, lat in enumerate(lats):
if lat <= clat + theta:
if i <= imin:
imin = i
if lat >= clat - theta:
if i >= imax:
imax = i
x = _np.cos(clat) * _np.cos(clon)
y = _np.cos(clat) * _np.sin(clon)
z = _np.sin(clat)
coslon = _np.cos(lons)
sinlon = | _np.sin(lons) | numpy.sin |
# -*- coding: utf-8 -*-
__author__ = 'Adward'
# Python utils imports
import math
import os
import sys
from time import time
import sqlite3
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
# Import classifiers and performance metrics
from sklearn.preprocessing import *
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import StratifiedKFold, ShuffleSplit
from sklearn.metrics import *
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.decomposition import PCA
# Constant values
DATA_PATH = '/Users/Adward/OneDrive/YelpData/'
DB_PATH = os.path.join(DATA_PATH, 'yelp.sqlite')
n_sample = 2225213 # 1992542
review_class = [260492, 190048, 282115, 591618, 900940] # 2.6:1.9:2.8:5.9:9.0
earliest = {'day': 20041018, 'month': 200410, 'year': 2004}
latest = {'day': 20151224, 'month': 201512, 'year': 2015}
valid_states = ['AZ', 'NV', 'ON', 'WI', 'QC', 'SC', 'EDH', 'PA', 'MLN', 'BW', 'NC', "IL"]
applied_categories = {'Debt Relief Services', 'Armenian', 'Spine Surgeons', 'House Sitters', 'Taxidermy', 'Iberian', 'Pita', 'Beer Hall', 'Childproofing', 'Assisted Living Facilities', 'Rhinelandian', 'Oriental', 'Palatine', 'Carpenters', 'Choirs', 'Wok', 'Nursing Schools', 'Surf Shop', 'Perfume', 'Kitchen Incubators', 'Flowers', 'Swiss Food', 'Castles', 'Parenting Classes', 'Ferries', 'Donairs', 'Rest Stops', 'Gerontologists', 'Bike Sharing', 'Piano Stores', 'Trinidadian', 'Translation Services', 'Eastern European', 'College Counseling', 'Community Gardens', 'Wine Tasting Classes', 'Art Restoration', 'Slovakian', 'Backshop', 'Supper Clubs', 'Editorial Services', 'Dialysis Clinics', 'Childbirth Education', 'IP & Internet Law', 'Tax Law', 'Farming Equipment', 'Art Tours', 'Concept Shops', 'Mosques', 'Australian'}
# Loading samples from the database & pre-scale
def load_samples(attr_list, prescale=False, oversampling=(0, 0), elite_expand=False, state_all=False):
'''
:param attr_list: List[Str], containing the list of features to be selected and encoded
:param prescale: Bool, (when True) pre-scale features whose value ranges are very large, to speed up convergence
:param oversampling: Tuple(Int), double review samples with star classes in range
:param elite_expand: Bool, (when True) encode 12 features related to user.elite as [elite20**] & elite-year-sum;
(when False) only 1 feature stands for elite-year-sum
:param state_all: Bool, (when True) occupies 39 features; (when False) using only 12 prime states PLUS OTHERS
:return: List[Dict], List[Int]
'''
t = time()
with sqlite3.connect(DB_PATH) as conn:
# conn.execute('CREATE TEMP TABLE tmp_b1 (business_id TEXT, avg_star_elite REAL)')
# conn.execute('CREATE TEMP TABLE tmp_b2 (business_id TEXT, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b1 (business_id, avg_star_elite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_elite FROM '
# '(review JOIN user USING (user_id)) WHERE elite!="" GROUP BY business_id')
# conn.execute('INSERT INTO tmp_b2 (business_id, avg_star_nonelite) '
# 'SELECT business_id, AVG(average_stars) AS avg_star_nonelite FROM '
# '(review JOIN user USING (user_id)) WHERE elite="" GROUP BY business_id')
# conn.execute('DROP TABLE IF EXISTS bstat_by_elite')
# conn.execute('CREATE TABLE bstat_by_elite (business_id TEXT, avg_star_elite REAL, avg_star_nonelite REAL)')
# conn.execute('INSERT INTO tmp_b SELECT * FROM '
# '((business LEFT OUTER JOIN tmp_b1 USING (business_id)) '
# 'LEFT OUTER JOIN tmp_b2 USING (business_id))')
# conn.row_factory = sqlite3.Row
cur = conn.execute('SELECT ' + ','.join(attr_list) +
' FROM ('
'(review JOIN (business JOIN b_category_pca USING (business_id)) USING (business_id)) '
'JOIN user '
'USING (user_id) )')
sample_matrix = [] # feature matrix to return
targets = [] # class vector
row_num = 0
for row in cur:
targets.append(row[0]) # review.stars
# construct temp feature dict
sample = {}
for j in range(1, len(attr_list)):
sample[attr_list[j]] = row[j]
# encode features for business.state
if ('business.state' in attr_list) and (not state_all) and (sample['business.state'] not in valid_states):
sample['business.state'] = 'OTH' # other 17 states with few business recorded
if ('user_state' in attr_list) and (not state_all) and (sample['user_state'] not in valid_states):
sample['user_state'] = 'OTH'
# Create elite-related features || encode elite-year-number
# if elite_expand:
# for year in range(earliest['year']+1, latest['year']+1):
# sample['elite'+str(year)] = 0
# if len(sample['elite']):
# elite_years = [int(y) for y in sample['elite'].split('&')]
# sample['elite'] = len(elite_years)
# for year in elite_years:
# sample['elite'+str(year)] = 1
# else:
# sample['elite'] = 0
# else:
# if len(sample['elite']):
# sample['elite'] = len(sample['elite'].split('&'))
# else:
# sample['elite'] = 0
# encode features of friends_stat
# encode features of business_avg_stars_by_elite
nan_list = ['avg_review_count', 'avg_votes', 'avg_star_elite', 'avg_star_nonelite']
for feat in nan_list:
if feat in attr_list and not sample[feat]:
sample[feat] = 0
# encode business.categories features
if 'cas' in attr_list:
cas = sample['cas'].split(';')
del sample['cas']
for i in range(3):
sample['ca_'+str(i)] = float(cas[i])
# for ca in applied_categories:
# sample['ca_'+ca] = 0
# if len(sample['categories']):
# categories = sample['categories'].split('&')
# for j in range(len(categories)):
# if categories[j] in applied_categories:
# sample['ca_' + categories[j]] = 1
# del sample['categories']
# process control & display
row_num += 1
# print(sample)
if row_num % 100000 == 0:
print("%.1f %%" % (row_num * 100 / n_sample))
sample_matrix.append(sample)
# oversampling some review star classes
if oversampling[0] <= targets[-1] <= oversampling[1]:
sample_matrix.append(sample)
targets.append(targets[-1])
# if row_num == 10000:
# break
print('Done with joining & collecting data from database, using ', time()-t, 's')
return sample_matrix, targets
def reform_features(sample_matrix, scaling=False):
t = time()
print('Start reforming categorical features using one-hot encoding...')
print(sample_matrix[0])
dictVectorizer = DictVectorizer()
X = dictVectorizer.fit_transform(sample_matrix).toarray()
n_features = len(X[0])
if scaling:
# scaler = StandardScaler()
# X = scaler.fit_transform(X)
X = scale(X)
print('Feature Num.:', n_features)
print('Done with reforming categorical features, using ', time()-t, 's')
# print(X[0])
return X, n_features
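
# Small self-contained sketch of what DictVectorizer does inside
# reform_features: string-valued features become one-hot columns
# ('feature=value'), numeric features pass through unchanged. The feature
# names and values below are made up for illustration.
def _example_dict_vectorizer():
    dv = DictVectorizer()
    samples = [{'business.state': 'AZ', 'review_count': 12},
               {'business.state': 'NV', 'review_count': 3}]
    X = dv.fit_transform(samples).toarray()
    # feature names: ['business.state=AZ', 'business.state=NV', 'review_count']
    return dv.get_feature_names(), X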
def train_and_predict(X, y, div, model, n_features):
print('Starting 5-fold training & cross validating...')
# input()
# scores = cross_validation.cross_val_score(clf, data, target, cv=2, scoring='f1_weighted')
t = time()
scores = {'f1_by_star': [[] for i in range(5)], 'f1_weighted': [], 'mae': [], 'rmse': []}
feature_weights = np.zeros(n_features)
for train, test in div:
X_train = np.array([X[i] for i in train])
X_test = | np.array([X[i] for i in test]) | numpy.array |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import signal as sig
from scipy.interpolate import interp1d
from obspy import read
from obspy import Trace
from obspy.core.stream import Stream
from obspy.signal import PPSD
# plt.style.use('ggplot')
plt.style.use('seaborn')
# %% Peterson (1993) - OFR 93-322 - New High/Low Noise Model (NHNM/NLNM)
def to_dB(signal):
N = len(signal)
dB_series = np.zeros(N)
for i in range(N):
dB_series[i] = 10 * np.log10(signal[i])
return dB_series
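# Quick sanity check (illustrative, not in the original): powers of ten map to decades,
# e.g. to_dB(np.array([1.0, 10.0, 100.0])) -> array([ 0., 10., 20.]).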
def to_log(series):
log = np.log10(series)
return log
def to_linear(series):
linear = 10**series
return linear
def to_Hz(period):
N = len(period)
Hz_series = np.zeros(N)
for i in range(N):
Hz_series[i] = 1 / period[i]
return Hz_series
def to_Period(frequency):
N = len(frequency)
Hz_series = np.zeros(N)
for i in range(N):
if frequency[i] == 0.0:
Hz_series[i] = 1 / 0.00001
else:
Hz_series[i] = 1 / frequency[i]
return Hz_series
def get_coeffs(model="high"):
if model == "high":
NHNM_coeffs = pd.read_csv('./noise_models/NHNM-coeffs.txt')
P = np.array(NHNM_coeffs["P"])
A = np.array(NHNM_coeffs["A"])
B = np.array(NHNM_coeffs["B"])
return [P, A, B]
elif model == "low":
NLNM_coeffs = pd.read_csv('./noise_models/NLNM-coeffs.txt')
P = np.array(NLNM_coeffs["P"])
A = np.array(NLNM_coeffs["A"])
B = np.array(NLNM_coeffs["B"])
return [P, A, B]
else:
print("Invalid model choice. Select: 'high' or 'low'")
return None
def get_model_interp(interp_mode="log", model="high", quantity="acc", x_units="T", y_units="dB", npts=1798, delta=0.01):
# change delta to df or dT
if y_units == "dB" or y_units == "SI":
if model == "high":
x, acc = NHNM(quantity=quantity, units=y_units)
if interp_mode == "log":
log_x = np.log10(x)
delta = (max((log_x)) - min((log_x))) / (npts / 2)
interp = interp1d(log_x, acc, kind='linear')
log_x = np.arange(min(log_x), max(log_x), delta)
return log_x, interp
elif interp_mode == "linear":
interp = interp1d(x, acc, kind='linear')
x = np.arange(min(x), max(x), delta)
return x, interp
elif model == "low":
x, acc = NLNM(quantity=quantity, units=y_units)
if interp_mode == "log":
log_x = np.log10(x)
delta = (max((log_x)) - min((log_x))) / (npts / 2)
interp = interp1d(log_x, acc, kind='linear')
log_x = np.arange(min(log_x), max(log_x), delta)
return log_x, interp
elif interp_mode == "linear":
interp = interp1d(x, acc, kind='linear')
x = np.arange(min(x), max(x), delta)
return x, interp
else:
print("Invalid model choice. Select: 'high' or 'low'")
return None
else:
print("Invalid units. Choose dB or SI")
return None
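# Minimal usage sketch (mirrors how assemble_signal() below uses this helper):
# log_T, interp = get_model_interp(interp_mode="log", model="high",
#                                  quantity="acc", y_units="dB", npts=1798)
# psd_dB = interp(log_T)   # PSD in dB re (m/s^2)^2/Hz on the log10-period grid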
# def get_power(model="high", quantity="acc", units="dB", delta=0.01):
# if units == "dB" or units == "SI":
# if model == "high":
# log_T, NHNM = get_model_interp(model="high",
# quantity=quantity,
# units=units, delta=delta)
# P = np.zeros(len(log_T))
# for i in range(len(log_T)):
# P[i] = NHNM(log_T[i])[()]
# return [log_T, P]
# elif model == "low":
# log_T, NLNM = get_model_interp(model="low",
# quantity=quantity,
# units=units, delta=delta)
# P = np.zeros(len(log_T))
# for i in range(len(log_T)):
# P[i] = NLNM(log_T[i])[()]
# return [log_T, P]
# else:
# print("Invalid model choice. Select: 'high' or 'low'")
# return None
# else:
# print("Invalid units. Choose dB or SI")
# return None
def get_uniform_rand_phase(phase_min, phase_max, N, plot_checks=False):
phases = np.random.uniform(phase_min, phase_max, N)
rad = np.arange(0, N)
if plot_checks:
plt.close()
plt.title("Function Check: Phase via Numpy Random-Uniform")
plt.scatter(rad, phases)
plt.xlabel("N")
plt.ylabel("Phase (Random on 0 - 2pi)")
plt.show()
plt.close()
plt.title("Function Check: Phase via Numpy Random-Uniform")
plt.hist(phases, 20, label="Uniformly sampled mostly")
plt.ylabel("Counts")
plt.xlabel("Phase (Random on 0 - 2pi)")
plt.legend()
plt.show()
return phases
def get_spectral_amplitude(psd, interp_mode):
if any(val < 0 for val in psd):
print("\nNegative values, units likely in dB, attempting to convert ...\n")
psd_SI = np.zeros(len(psd))
for i in range(len(psd)):
psd_SI[i] = 10**(psd[i] / 10)
psd = psd_SI
amp = np.zeros_like(psd)
for i in range(len(psd)):
amp[i] = np.sqrt(2 * psd[i])
plt.close()
if interp_mode == 'log':
plt.semilogy(amp)
else:
plt.loglog(amp)
plt.title("Function Check: get_spectral_amplitude() output")
plt.xlabel("Sample N from PSD (corresponds to Period)")
plt.ylabel("Spectral Amplitude")
plt.show()
return amp
def rand_phase_PSD_signal(freq, psd, phase, interp_mode):
N = len(psd)
Z = np.zeros(N, dtype="complex")
A = get_spectral_amplitude(psd, interp_mode)
img = np.sqrt(-1 + 0j)
if len(freq) == len(psd) == len(phase):
for i in range(N):
Z[i] = A[i] * np.exp(img * phase[i])
return Z
else:
print("\nInput arrays must be of equal size\n")
return None
def NHNM(quantity="acc", units="dB", P=None):
NHNM_coeffs = pd.read_csv('./noise_models/NHNM-coeffs.txt')
NHNM_SI = pd.read_csv('./noise_models/NHNM.csv')
if units == "dB":
if P is None:
P = np.array(NHNM_coeffs["P"])
A = np.array(NHNM_coeffs["A"])
B = np.array(NHNM_coeffs["B"])
if quantity == "acc":
acc = A + (B * (np.log10(P)))
return [P, acc]
elif quantity == "vel":
p, acc = NHNM(quantity="acc")
vel = acc + (20.0 * np.log10(P / (2 * np.pi)))
return [P, vel]
elif quantity == "disp":
p, vel = NHNM(quantity="vel")
disp = vel + (20.0 * np.log10(P / (2 * np.pi)))  # displacement adds another 20*log10(P/2pi) to velocity
return [P, disp]
else:
print("Unacceptable argument for quantity")
elif units == "SI":
if P is None:
P = np.array(NHNM_SI["T [s]"])
if quantity == "acc":
acc = np.array(NHNM_SI["Pa [m2s-4/Hz]"])
return [P, acc]
elif quantity == "vel":
vel = np.array(NHNM_SI["Pv [m2s-2/Hz]"])
return [P, vel]
elif quantity == "disp":
disp = np.array(NHNM_SI["Pd [m2/Hz]"])
return [P, disp]
else:
print("Unacceptable argument for quantity")
else:
print("Invalid units. Choose dB or SI")
return None
def NLNM(quantity="acc", units="dB", P=None):
NLNM_coeffs = pd.read_csv('./noise_models/NLNM-coeffs.txt')
NLNM_SI = pd.read_csv('./noise_models/NLNM.csv')
if units == "dB":
if P is None:
P = np.array(NLNM_coeffs["P"])
A = np.array(NLNM_coeffs["A"])
B = np.array(NLNM_coeffs["B"])
if quantity == "acc":
acc = A + B * (np.log10(P))
return [P, acc]
elif quantity == "vel":
p, acc = NLNM(quantity="acc")
vel = acc + 20.0 * np.log10(P / (2 * np.pi))
return [P, vel]
elif quantity == "disp":
p, vel = NLNM(quantity="vel")
disp = vel + 20.0 * np.log10(P / (2 * np.pi))  # displacement adds another 20*log10(P/2pi) to velocity
return [P, disp]
else:
print("Unacceptable argument for quantity")
return None
elif units == "SI":
if P is None:
P = np.array(NLNM_SI["T [s]"])
if quantity == "acc":
acc = np.array(NLNM_SI["Pa [m2s-4/Hz]"])
return [P, acc]
elif quantity == "vel":
vel = np.array(NLNM_SI["Pv [m2s-2/Hz]"])
return [P, vel]
elif quantity == "disp":
disp = np.array(NLNM_SI["Pd [m2/Hz]"])
return [P, disp]
else:
print("Unacceptable argument for quantity")
return None
else:
print("Invalid units. Choose dB or SI")
return None
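# Hedged usage note: both model functions return [period, PSD] pairs, e.g.
# P, acc_dB = NHNM(quantity="acc", units="dB")   # from coefficients A + B*log10(P)
# P, acc_SI = NLNM(quantity="acc", units="SI")   # tabulated (m/s^2)^2/Hz values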
#%% Plotting both models
def plot_acc_NHNM_and_NLNM(log=True, save=False, path='./'):
[P_H, spectra_H] = NHNM(quantity="acc")
[P_L, spectra_L] = NLNM(quantity="acc")
fig = plt.figure()
plt.plot(P_H, spectra_H, label="NHNM")
plt.plot(P_L, spectra_L, label="NLNM")
plt.title("NHNM/NLNM PSD after Peterson (1993)")
plt.xlabel("Period (s)")
plt.ylabel("Power Spectral Density (m/s^2)^2/Hz")
ax = plt.gca()
if log:
ax.set_xscale('log')
plt.legend(loc=1)
if save:
plt.savefig(fname='/Users/gabriel/Documents/Research/USGS_Work/gmprocess/figs/models/NHNM_and_NLNM_power_spectra.png',dpi=500)
return fig
def plot_vel_NHNM_and_NLNM(log=True, save=False, path='./'):
[P_H, spectra_H] = NHNM(quantity="vel")
[P_L, spectra_L] = NLNM(quantity="vel")
fig = plt.figure()
plt.plot(P_H, spectra_H, label="NHNM")
plt.plot(P_L, spectra_L, label="NLNM")
plt.title("NHNM/NLNM Velocity/Hz after Peterson (1993)")
plt.xlabel("Period (s)")
plt.ylabel("Spectral Density (m/s)^2/Hz")
ax = plt.gca()
if log:
ax.set_xscale('log')
plt.legend(loc=1)
if save:
plt.savefig(fname='/Users/gabriel/Documents/Research/USGS_Work/gmprocess/figs/models/NHNM_and_NLNM_velocity_spectra.png',dpi=500)
return fig
def plot_disp_NHNM_and_NLNM(log=True, save=False, path='./'):
P_H, spectra_H = NHNM(quantity="disp")
P_L, spectra_L = NLNM(quantity="disp")
fig = plt.figure()
plt.plot(P_H, spectra_H, label="NHNM")
plt.plot(P_L, spectra_L, label="NLNM")
plt.title("NHNM/NLNM Displacement/Hz after Peterson (1993)")
plt.xlabel("Period (s)")
plt.ylabel("Spectral Density m^2/Hz")
ax = plt.gca()
if log:
ax.set_xscale('log')
plt.legend(loc=1)
if save:
plt.savefig(fname='/Users/gabriel/Documents/Research/USGS_Work/gmprocess/figs/models/NHNM_and_NLNM_displacement_spectra.png',dpi=500)
return fig
#%% More Functions
def assemble_signal(interp_mode="log", model="high",
quantity="acc", x_units="T", y_units="dB",
npts=1798, delta=0.02559485, plot_checks=False):
M = 2 * npts
[T, P] = get_model_interp(interp_mode=interp_mode,
model=model, quantity=quantity,
x_units=x_units, y_units=y_units,
npts=M, delta=delta)
amplitude_spectrum = P(T)
amplitude_spectrum = 10**(amplitude_spectrum / 10)
phase = get_uniform_rand_phase(0, (2 * np.pi), int(M / 2))
amplitude_r = amplitude_spectrum * np.cos(phase)
amplitude_i = amplitude_spectrum * np.sin(phase)
ifft_complex2 = amplitude_r + amplitude_i * 1j
signal = np.fft.ifft(ifft_complex2)
signal_r = np.real(signal)
signal_i = np.imag(signal)
# Build time array
tmax = (npts * delta)
t = np.arange(0, tmax, delta)
if plot_checks:
if model == "high":
label = "NHNM"
elif model == "low":
label = "NLNM"
plt.plot(t, signal_r, label=quantity)
plt.title(label + ": Reconstructed Time Series (Real)")
plt.xticks(np.arange(0, max(t), 5))
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
plt.legend()
plt.show()
plt.scatter(signal_r, signal_i, label="Discrete points in Complex Signal")
plt.title("Polar Plot of Reconstructed Time Series in Complex Plane")
plt.xlabel("Real Signal")
plt.ylabel("Imag. Signal")
plt.legend()
plt.show()
# Informative, but takes a little while to plot
# plt.figure()
# for p in signal:
# plt.polar([0, np.angle(p)], [0, np.abs(p)], marker='o')
# plt.title("Phase of Reconstructed Time Series in Complex Plane")
# plt.xlabel("Real", labelpad=10)
# plt.ylabel("Imaginary", labelpad=35)
# plt.tight_layout()
# plt.show()
# plt.title("Histogram of Signal")
# plt.hist(signal, bins=20, label=model)
# plt.legend()
# plt.show()
return [t, signal_r]
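# Usage sketch with the default sampling assumed above (1798 points, dt ~ 0.0256 s):
# t, noise = assemble_signal(model="low", quantity="acc", plot_checks=False)
# "noise" is the real part of the inverse FFT, i.e. a synthetic acceleration trace
# whose spectrum follows the NLNM with uniformly random phase.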
def generate_noise_boore(model='NHNM', npts=1798, dt = 0.02559485):
# Get appropriate model to use in the Boore (2003) method
model_coeffs = pd.DataFrame()
if model == 'NHNM':
print("\nGrabbing NHNM model coeffiecients ... \n")
model_coeffs = pd.read_csv('./noise_models/NHNM-coeffs.txt')
elif model == 'NLNM':
print("\nGrabbing NLNM model coeffiecients ... \n")
model_coeffs = pd.read_csv('./noise_models/NLNM-coeffs.txt')
else:
print("Invalid model selection ... Exiting ...")
A = np.array(model_coeffs["A"])
B = np.array(model_coeffs["B"])
# Calculate the model values from coefficients
model_period = | np.array(model_coeffs["P"]) | numpy.array |
import fnmatch
import os
import pprint
import feather
import numpy as np
import pandas as pd
import scipy.io as sio
import pdb
import matplotlib.pyplot as plt
import seaborn as sns
from copy import deepcopy
class Node():
'''Simple Node class. Each instance stores the names of its children and of its parent.'''
def __init__(self,name,C_list=[],P_list=[]):
self.name=name
self.C_name_list = C_list[P_list==name]
self.P_name = P_list[C_list==name]
return
def __repr__(self):
#Invoked when printing a list of Node objects
return self.name
def __str__(self):
#Invoked when printing a single Node object
return self.name
def __eq__(self,other):
if isinstance(other, self.__class__):
return self.name == other.name
else:
return False
def children(self,C_list=[],P_list=[]):
return [Node(n,C_list,P_list) for n in self.C_name_list]
def get_valid_classifications(current_node_list,C_list,P_list,valid_classes):
'''Recursively generates all possible classifications that are valid,
based on the hierarchical tree defined by `C_list` and `P_list` \n
`current_node_list` is a list of Node objects. It is initialized as a list with only the root Node.'''
current_node_list.sort(key=lambda x: x.name)
valid_classes.append(sorted([node.name for node in current_node_list]))
for node in current_node_list:
current_node_list_copy = current_node_list.copy()
children_node_list = node.children(C_list=C_list,P_list=P_list)
if len(children_node_list)>0:
current_node_list_copy.remove(node)
current_node_list_copy.extend(children_node_list)
if sorted([node.name for node in current_node_list_copy]) not in valid_classes:
valid_classes = get_valid_classifications(current_node_list_copy,C_list=C_list,P_list=P_list,valid_classes=valid_classes)
return valid_classes
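# Hedged example with hypothetical labels (C_list/P_list must be numpy arrays):
# C_list = np.array(['a', 'b', 'c', 'd'])
# P_list = np.array(['root', 'root', 'b', 'b'])
# root = Node('root', C_list=C_list, P_list=P_list)
# get_valid_classifications([root], C_list=C_list, P_list=P_list, valid_classes=[])
# # -> [['root'], ['a', 'b'], ['a', 'c', 'd']]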
class HTree():
'''Class to work with hierarchical tree .csv generated for the transcriptomic data.
`htree_file` is full path to a .csv. The original .csv was generated from dend.RData,
processed with `dend_functions.R` and `dend_parents.R` (Ref. Rohan/Zizhen)'''
def __init__(self,htree_df=None,htree_file=None):
#Load and rename columns from filename
if htree_file is not None:
htree_df = pd.read_csv(htree_file)
htree_df = htree_df[['x', 'y', 'leaf', 'label', 'parent', 'col']]
htree_df = htree_df.rename(columns={'label': 'child','leaf': 'isleaf'})
#Sanitize values
htree_df['isleaf'].fillna(False,inplace=True)
htree_df['y'].values[htree_df['isleaf'].values] = 0.0
htree_df['col'].fillna('#000000',inplace=True)
htree_df['parent'].fillna('root',inplace=True)
#Sorting for convenience
htree_df = htree_df.sort_values(by=['y', 'x'], axis=0, ascending=[True, True]).copy(deep=True)
htree_df = htree_df.reset_index(drop=True).copy(deep=True)
#Set class attributes using dataframe columns
for c in htree_df.columns:
setattr(self, c, htree_df[c].values)
return
def obj2df(self):
'''Convert HTree object to a pandas dataframe'''
htree_df = pd.DataFrame({key:val for (key,val) in self.__dict__.items()})
return htree_df
def df2obj(self,htree_df):
'''Convert a valid pandas dataframe to a HTree object'''
for key in htree_df.columns:
setattr(self, key, htree_df[key].values)
return
def plot(self,figsize=(15,10),fontsize=10,skeletononly=False,skeletoncol='#BBBBBB',skeletonalpha=1.0,ls='-',txtleafonly=False,fig=None):
if fig is None:
fig = plt.figure(figsize=figsize)
#Labels are shown only for children nodes
if skeletononly==False:
if txtleafonly==False:
for i, label in enumerate(self.child):
plt.text(self.x[i], self.y[i], label,
color=self.col[i],
horizontalalignment='center',
verticalalignment='top',
rotation=90,
fontsize=fontsize)
else:
for i in np.flatnonzero(self.isleaf):
label = self.child[i]
plt.text(self.x[i], self.y[i], label,
color=self.col[i],
horizontalalignment='center',
verticalalignment='top',
rotation=90,
fontsize=fontsize)
for parent in np.unique(self.parent):
#Get position of the parent node:
p_ind = np.flatnonzero(self.child==parent)
if p_ind.size==0: #Enters here for any root node
p_ind = np.flatnonzero(self.parent==parent)
xp = self.x[p_ind]
yp = 1.1*np.max(self.y)
else:
xp = self.x[p_ind]
yp = self.y[p_ind]
all_c_inds = np.flatnonzero(np.isin(self.parent,parent))
for c_ind in all_c_inds:
xc = self.x[c_ind]
yc = self.y[c_ind]
plt.plot([xc, xc], [yc, yp], color=skeletoncol,alpha=skeletonalpha,ls=ls,)
plt.plot([xc, xp], [yp, yp], color=skeletoncol,alpha=skeletonalpha,ls=ls)
if skeletononly==False:
ax = plt.gca()
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim([np.min(self.x) - 1, np.max(self.x) + 1])
ax.set_ylim([np.min(self.y), 1.2*np.max(self.y)])
plt.tight_layout()
fig.subplots_adjust(bottom=0.2)
return
def plotnodes(self,nodelist,fig=None):
ind = np.isin(self.child,nodelist)
plt.plot(self.x[ind], self.y[ind],'s',color='r')
return
def get_descendants(self,node:str,leafonly=False):
'''Return a list consisting of all descendants for a given node. Given node is excluded.\n
'node' is of type str \n
`leafonly=True` returns only leaf node descendants'''
descendants = []
current_node = self.child[self.parent == node].tolist()
descendants.extend(current_node)
while current_node:
parent = current_node.pop(0)
next_node = self.child[self.parent == parent].tolist()
current_node.extend(next_node)
descendants.extend(next_node)
if leafonly:
descendants = list(set(descendants) & set(self.child[self.isleaf]))
return descendants
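# Hedged example (hypothetical labels): if 'n1' has children 'n2' and 'n3',
# and 'n3' has leaf children 'n4' and 'n5', then
# tree.get_descendants('n1')                 # -> ['n2', 'n3', 'n4', 'n5']
# tree.get_descendants('n1', leafonly=True)  # -> leaf nodes only (order not guaranteed)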
def get_all_descendants(self,leafonly=False):
'''Return a dict consisting of node names as keys and, corresp. descendant list as values.\n
`leafonly=True` returns only leaf node descendants'''
descendant_dict = {}
for key in np.unique(np.concatenate([self.child,self.parent])):
descendant_dict[key]=self.get_descendants(node=key,leafonly=leafonly)
return descendant_dict
def get_ancestors(self,node,rootnode=None):
'''Return a list consisting of all ancestors
(till `rootnode` if provided) for a given node.'''
ancestors = []
current_node = node
while current_node:
current_node = self.parent[self.child == current_node]
ancestors.extend(current_node)
if current_node==rootnode:
current_node=[]
return ancestors
def get_mergeseq(self):
'''Returns `ordered_merges` consisting of \n
1. list of children to merge \n
2. parent label to merge the children into \n
3. number of remaining nodes in the tree'''
# Log changes for every merge step
ordered_merge_parents = np.setdiff1d(self.parent,self.child[self.isleaf])
y = []
for label in ordered_merge_parents:
if np.isin(label,self.child):
y.extend(self.y[self.child==label])
else:
y.extend([np.max(self.y)+0.1])
#Lowest value is merged first
ind = np.argsort(y)
ordered_merge_parents = ordered_merge_parents[ind].tolist()
ordered_merges = []
while len(ordered_merge_parents) > 1:
# Best merger based on sorted list
parent = ordered_merge_parents.pop(0)
children = self.child[self.parent == parent].tolist()
ordered_merges.append([children, parent])
return ordered_merges
def get_subtree(self, node):
'''Return a subtree from the current tree'''
subtree_node_list = self.get_descendants(node=node)+[node]
if len(subtree_node_list)>1:
subtree_df = self.obj2df()
subtree_df = subtree_df[subtree_df['child'].isin(subtree_node_list)]
else:
print('Node not found in current tree')
return HTree(htree_df=subtree_df)
def update_layout(self):
'''Update `x` positions of tree based on newly assigned leaf nodes.
'''
#Update x position for leaf nodes to evenly distribute them.
all_child = self.child[self.isleaf]
all_child_x = self.x[self.isleaf]
sortind = np.argsort(all_child_x)
new_x = 0
for (this_child,this_x) in zip(all_child[sortind],all_child_x[sortind]):
self.x[self.child==this_child]=new_x
new_x = new_x+1
parents = self.child[~self.isleaf].tolist()
for node in parents:
descendant_leaf_nodes = self.get_descendants(node=node,leafonly=True)
parent_ind = np.isin(self.child,[node])
descendant_leaf_ind = np.isin(self.child,descendant_leaf_nodes)
self.x[parent_ind] = | np.mean(self.x[descendant_leaf_ind]) | numpy.mean |
"""
"""
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from ..distribution_matching import distribution_matching_indices, resample_x_to_match_y
from ..distribution_matching import bijective_distribution_matching
__all__ = ('test_distribution_matching_indices1', )
fixed_seed = 43
def test_distribution_matching_indices1():
npts1, npts2 = int(1e5), int(1e4)
nselect = int(2e4)
with NumpyRNGContext(fixed_seed):
input_distribution = np.random.normal(loc=0, scale=1, size=npts1)
output_distribution = np.random.normal(loc=1, scale=0.5, size=npts2)
xmin = min(input_distribution.min(), output_distribution.min())
xmax = min(input_distribution.max(), output_distribution.max())
nbins = 50
bins = np.linspace(xmin, xmax, nbins)
indices = distribution_matching_indices(
input_distribution, output_distribution, nselect, bins, seed=fixed_seed)
result = input_distribution[indices]
percentile_table = np.linspace(0.01, 0.99, 25)
result_percentiles = np.percentile(result, percentile_table)
correct_percentiles = np.percentile(output_distribution, percentile_table)
assert np.allclose(result_percentiles, correct_percentiles, rtol=0.1)
def test_resample_x_to_match_y():
"""
"""
nx, ny = int(9.9999e5), int(1e6)
with NumpyRNGContext(fixed_seed):
x = np.random.normal(loc=0, size=nx, scale=1)
y = np.random.normal(loc=0.5, size=ny, scale=0.25)
bins = np.linspace(y.min(), y.max(), 100)
indices = resample_x_to_match_y(x, y, bins, seed=fixed_seed)
rescaled_x = x[indices]
idx_x_sorted = np.argsort(x)
assert np.all(np.diff(rescaled_x[idx_x_sorted]) >= 0)
try:
result, __ = np.histogram(rescaled_x, bins, density=True)
correct_result, __ = | np.histogram(y, bins, density=True) | numpy.histogram |
"""
Module: processGeom.py
Description: Series of functions to clean geometry from blocks, streets, and transport lines.
License: MIT, see full license in LICENSE.txt
Web: https://github.com/mateoneira/MultiplexSegregation
"""
import geopandas as gpd
from geopandas.tools import overlay
import shapely.geometry as geometry
import numpy as np
from scipy.spatial import Delaunay, Voronoi
from shapely.ops import cascaded_union, polygonize, linemerge
import math
# Geographical projection of OpenStreetMap data.
crs_osm = {'init': 'epsg:4326'}
def get_vertex_of_polygons(geom):
"""
Get list of vertices of all polygons in geoseries and return as list of points.
If no polygons are supplied in geoseries empty list is returned.
Parameters
----------
:param geom: geopandas.GeoSeries
geometries of city blocks.
Returns
-------
:return: list
list of vertices of polygons
"""
if type(geom) != gpd.geoseries.GeoSeries:
raise TypeError("geom should be a *geopandas.GeoSeries* type.")
# get vertex of polygons.
points = []
for poly in geom:
# check if geometry is polygon of multipolygon
# if polygon add vertices to points list
if poly.type == 'Polygon':
for pnt in poly.exterior.coords:
points.append(geometry.Point(pnt))
elif poly.type == 'MultiPolygon':
for parts in poly:
for pnt in parts.exterior.coords:
points.append(geometry.Point(pnt))
return points
def alpha_shape(points, alpha):
"""
Calculate alpha shape from set of points and alpha value.
Parameters
----------
:param points: list
list containing shapely.Geometry.Point objects
:param alpha: float
alpha value greater than 0
Returns
-------
:return: shapely.geometry
"""
if not all(isinstance(x, geometry.point.Point) for x in points):
raise TypeError("points list must contain only *geometry.Point* type.")
if alpha <= 0:
raise ValueError("alpha must be greater than zero.")
if len(points) < 3:
raise TypeError("points list must have at least 3 items.")
# create Delaunay triangulation
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
# create empty edge set and point list
edges = set()
edge_points = []
# helper function to calculate which edges to keep
def add_edge(i, j):
if (i, j) in edges or (j, i) in edges:
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
for ia, ib, ic in tri.simplices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# calculate length of side of triangles
a = math.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = math.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = math.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
# calculate semiperimeter of triangle
s = (a + b + c) / 2.0
# calculate area of triangle
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
if area == 0:
circum_r = 0
elif area > 0:
circum_r = a * b * c / (4.0 * area)
else:
pass
# radius filter
if circum_r < 1.0 / alpha:
add_edge(ia, ib)
add_edge(ib, ic)
add_edge(ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
res = cascaded_union(triangles)
return res
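# Hedged usage sketch with toy input (a unit square sampled on a 5x5 grid):
# pts = [geometry.Point(x, y) for x in np.linspace(0, 1, 5) for y in np.linspace(0, 1, 5)]
# hull_like = alpha_shape(pts, alpha=1.0)
# hull_like.area   # ~1.0; smaller alpha keeps more triangles (closer to the convex hull)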
def boundary_from_areas(blocks, alpha=1, buffer_dist=0):
"""
Create spatial boundary given unconnected block area geometries of
city through an alpha shape.
Parameters
----------
:param blocks: geopandas.GeoDataFrame
city block geometry
:param alpha: float
alpha value for alpha shape calculation
:param buffer_dist: float
distance to buffer alpha shape in meters.
:return: geopandas.GeoSeries
"""
if type(blocks) != gpd.geodataframe.GeoDataFrame:
raise TypeError("blocks must be a *geopandas.GeoDataFrame*.")
if alpha <= 0:
raise ValueError("alpha must be an float greater than 0.")
if buffer_dist < 0:
raise ValueError("buffer_dist must be a float greater than 0.")
# subset geometry from geodataframe.
geom = blocks.geometry
points = get_vertex_of_polygons(geom)
# calculate alpha shape
boundary = alpha_shape(points, alpha)
# buffer alpha shape
if buffer_dist > 0:
boundary = boundary.buffer(buffer_dist)
return gpd.GeoSeries(boundary)
def join_lines(line, line_list, tolerance=20):
"""
Join MultiLineStrings and returns SingleLineString through recursion.
Parameters
----------
:param line: list
list of coordinates of LineString
:param line_list: list
list of list of coordinates of LineStrings
:param tolerance: float
tolerance for checking whether two points are the same point (in meters).
Return
------
:return: list
"""
line_list = line_list.copy()
# get last coordinate of line and make a point
point_1 = geometry.Point(line[-1])
# list to store coords list and their reverse
coord_list = []
if line_list is not None:
for coords in line_list:
# store all lines and reversed lines in one list
coord_list.append(coords)
coord_list.append(list(reversed(coords)))
for coords in coord_list:
point_2 = geometry.Point(coords[0])
if point_1.distance(point_2) < tolerance+1:
line_list.remove(coords)
for coord in coords:
line.append(coord)
join_lines(line, line_list, tolerance)
else:
return line
def clean_stops(stops, boundary, group_by=None, tolerance=50, data=None):
"""
Create geodataframe containing point geometries representing stops in transport network.
Points are clustered based on the tolerance distance, and the cluster centroid is returned as a new point.
Parameters
----------
:param stops: geopandas.GeoDataFrame
transport stops geometry.
:param boundary: geopandas.GeoDataFrame
geodataframe of boundary polygon.
:param group_by: str
column name of group, if None, the whole dataset is processed as one. Default None.
:param tolerance: float
tolerance for checking whether two points are the same point (in meters).
:param data: dict
data that has to be retained and its mapping to the new column name.
Returns
-------
:return: geopandas.GeoDataFrame
"""
if data is None:
data = {}
temp = []
stops = stops.copy()
boundary_geom = boundary.unary_union
# check if data values need to be conserved
mapped_data = {new_column: [] for (old_column, new_column) in data.items()}
if 'geometry' not in mapped_data.keys():
mapped_data['geometry'] = []
# Define how data will be subset to process
if group_by is None:
stops['grouped'] = 0
else:
stops['grouped'] = stops[group_by]
mapped_data['grouped'] = []
# loop through groups, buffer, join, and append new point
for group in stops.grouped.unique():
stops_subset = stops[stops.grouped == group]
buffered_stops = stops_subset.buffer(tolerance).unary_union
# check if new geom is polygon, and convert to list
if isinstance(buffered_stops, geometry.Polygon):
buffered_stops = [buffered_stops]
for geom in buffered_stops:
mapped_data['grouped'].append(group)
mapped_data['geometry'].append(geom.centroid)
# map data from points to centroids
if data:
temp = stops_subset[stops_subset.intersects(geom)]
for column_name, new_column in data.items():
val = ', '.join(str(v) for v in temp[column_name].unique())
mapped_data[new_column].append(val)
stopsGPD = gpd.GeoDataFrame(mapped_data)
stopsGPD = stopsGPD[stopsGPD.intersects(boundary_geom)]
return stopsGPD
def clean_lines(lines, group_by=None, tolerance=20, data=None):
"""
Creates geodataframe containing geometries of LineString objects.
MultiLineStrings and LineStringZ is converted to LineStrings.
Parameters
----------
:param lines: geopandas.GeoDataFrame
transport line geometries
:param group_by: str
column name of group, if None, the whole dataset is processed as one. Default None.
:param tolerance: float
tolerance for checking whether two points are the same point (in meters).
:param data: dict
data that has to be retained and mapping to new column name.
Returns
-------
:return: geopandas.GeoDataFrame
"""
lines = lines.copy()
if data is None:
data = {}
# check if data values need to be conserved
mapped_data = {new_column: [] for (old_column, new_column) in data.items()}
if 'geometry' not in mapped_data.keys():
mapped_data['geometry'] = []
# Define how data will be subset to process
if group_by is None:
lines['grouped'] = 0
else:
lines['grouped'] = lines[group_by]
mapped_data['grouped'] = []
# loop through subset of data and join MultiLineString to SingleLineString
for group in lines.grouped.unique():
lines_subset = lines[lines.grouped == group]
# loop through individual geometries
for i, row in lines_subset.iterrows():
geom = row.geometry
# check if line is MultiLineString
if isinstance(geom, geometry.MultiLineString):
geom_list = geom.geoms
# create empty list to store coordinates of line
lines_coords = []
for line in geom_list:
# if line is not smaller than tolerance meters and not a self-loop
if line.length > tolerance and line.coords[0] != line.coords[-1]:
if line.has_z:
coord_list = []
for coord in line.coords:
coord_list.append(coord[0:2])
lines_coords.append(coord_list)
else:
coord_list = list(line.coords)
lines_coords.append(coord_list)
# choose first line and look for continuation
line_coord = lines_coords[0]
line_list = lines_coords[1:]
line_joined = join_lines(line_coord, line_list)
line_joined = join_lines(list(reversed(line_joined)), line_list)
line_geom = geometry.LineString(coor for coor in line_joined)
else:
if geom.has_z:
coord_list = []
for coord in geom.coords:
coord_list.append(coord[0:2])
line_geom = geometry.LineString(coor for coor in coord_list)
else:
line_geom = geom
mapped_data['geometry'].append(line_geom)
mapped_data['grouped'].append(row['grouped'])
# map values
for column_name, new_column in data.items():
mapped_data[new_column].append(row[column_name])
linesGPD = gpd.GeoDataFrame(mapped_data)
return linesGPD
def snap_stops_to_lines(lines, stops, tolerance=50):
"""
Snaps points to lines based on tolerance distance and route.
Parameters
----------
:param lines: geopandas.GeoDataFrame
geodataframe containing line geometries.
:param stops: geopandas.GeoDataFrame
geodataframe containing stop geometries.
:param tolerance: float
distance tolerance for snapping points (in meters).
Returns
-------
:return: geopandas.GeoDataFrame
geodataframe with point geometries snapped to closest transport route.
"""
snapped_stops = gpd.GeoDataFrame()
for group in lines.grouped.unique():
lines_subset = lines[lines.grouped == group]
stops_subset = stops[stops.grouped == group]
# snap points to lines
for i, line in lines_subset.iterrows():
geom = line.geometry
# get only points within buffer and inside area
buffer = geom.buffer(tolerance)
stops_inside = stops_subset[stops_subset.intersects(buffer)].copy()
points_proj = [geom.project(stop) for stop in stops_inside.geometry]
stops_inside.geometry = [geom.interpolate(point) for point in points_proj]
stops_inside['at_length'] = points_proj
stops_inside['line_id'] = [i for point in points_proj]
snapped_stops = snapped_stops.append(stops_inside, ignore_index=True)
snapped_stops = snapped_stops.drop_duplicates(subset=[col for col in snapped_stops.columns if col != 'geometry'])
snapped_stops = snapped_stops.dropna(how="all")
snapped_stops['stop_id'] = [i for i in range(len(snapped_stops))]
snapped_stops['x'] = [point.xy[0][0] for point in snapped_stops.geometry]
snapped_stops['y'] = [point.xy[1][0] for point in snapped_stops.geometry]
return snapped_stops
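# Hedged usage sketch chaining the helpers above ('route' is an assumed column name):
# stopsGPD = clean_stops(raw_stops, boundary, group_by='route', tolerance=50)
# linesGPD = clean_lines(raw_lines, group_by='route', tolerance=20)
# snapped = snap_stops_to_lines(linesGPD, stopsGPD, tolerance=50)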
def snap_lines_to_points(G):
pass
def cut_line(line, distance):
"""
Cuts line at a set distance.
Parameters
----------
:param line: shapely.LineString
line geometry to cut.
:param distance: float
distance at which to cut line.
Returns
-------
:return: list
list containing line segments resultant from the cut.
"""
if distance <= 0.0 or distance >= line.length:
return [line]
coords = list(line.coords)
for i, p in enumerate(coords):
current_distance = line.project(geometry.Point(p))
if current_distance == distance:
return [geometry.LineString(coords[:i+1]), geometry.LineString(coords[i:])]
elif current_distance > distance:
cut_point = line.interpolate(distance)
return [geometry.LineString(coords[:i] + [(cut_point.x, cut_point.y)]),
geometry.LineString([(cut_point.x, cut_point.y)] + coords[i:])]
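# Worked example (illustrative): cutting a 10 m straight segment 4 m from its
# start returns the two sub-segments on either side of the cut point.
# seg = geometry.LineString([(0, 0), (10, 0)])
# cut_line(seg, 4.0)
# # -> [LINESTRING (0 0, 4 0), LINESTRING (4 0, 10 0)]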
def find_nearest_node(data, nodes, spatial_index, buff=50):
pass
def voronoi_finite_polygons_2d(vor, radius=None):
"""
Reconstruct infinite voronoi regions in a 2D diagram to finite
regions.
Parameters
----------
vor : Voronoi
Input diagram
radius : float, optional
Distance to 'points at infinity'.
Returns
-------
regions : list of tuples
Indices of vertices in each revised Voronoi regions.
vertices : list of tuples
Coordinates for revised Voronoi vertices. Same as coordinates
of input vertices, with 'points at infinity' appended to the
end.
"""
if vor.points.shape[1] != 2:
raise ValueError("Requires 2D input")
new_regions = []
new_vertices = vor.vertices.tolist()
center = vor.points.mean(axis=0)
if radius is None:
radius = vor.points.ptp().max()
# Construct a map containing all ridges for a given point
all_ridges = {}
for (p1, p2), (v1, v2) in zip(vor.ridge_points, vor.ridge_vertices):
all_ridges.setdefault(p1, []).append((p2, v1, v2))
all_ridges.setdefault(p2, []).append((p1, v1, v2))
# Reconstruct infinite regions
for p1, region in enumerate(vor.point_region):
vertices = vor.regions[region]
if all(v >= 0 for v in vertices):
# finite region
new_regions.append(vertices)
continue
# reconstruct a non-finite region
ridges = all_ridges[p1]
new_region = [v for v in vertices if v >= 0]
for p2, v1, v2 in ridges:
if v2 < 0:
v1, v2 = v2, v1
if v1 >= 0:
# finite ridge: already in the region
continue
# Compute the missing endpoint of an infinite ridge
t = vor.points[p2] - vor.points[p1] # tangent
t /= np.linalg.norm(t)
n = | np.array([-t[1], t[0]]) | numpy.array |
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 26 19:46:39 2020
@author: giamm
"""
import numpy as np
import math
from scipy.interpolate import interp1d
from scipy.integrate import cumtrapz
import matplotlib.pyplot as plt
import datareader #routine created to properly read the files needed in the following
# from cumulative_frequency import cum_freq #routine created to evaluate the cumulative frequency for a given appliance, over 24h
# from profile_interpolation import interp_profile #routine created to interpolate a profile
###############################################################################
# This file contains a method that, for a given appliance (considering its
# frequency density, its cumulative frequency, its duty cycle and its nominal
# yearly energy consumption) returns the load profile for the appliance during
# one day (1440 min), with a resolution of 1 minute.
###############################################################################
def load_profiler(time_dict, app, day, season, appliances_data, **params):
''' The method returns a load profile for a given appliance in a total simulation time of 1440 min, with a timestep of 1 min.
Inputs:
app - str, name of the appliance
day - str, type of day (weekday: 'wd'| weekend: 'we')
season - str, season (summer: 's', winter: 'w', autumn or spring: 'ap')
appliances_data - dict, various input data related to the appliances
params - dict, simulation parameters
Output:
load_profile - 1d-array, load profile for the appliance (W)
'''
## Time
# Time discretization for the simulation
# Time-step (min)
dt = time_dict['dt']
# Total time of simulation (min)
time = time_dict['time']
# Vector of time from 00:00 to 23:59 (one day) (min)
time_sim = time_dict['time_sim']
## Parameters
# Simulation parameters that can be changed from the user
# Energy class of the appliances
en_class = params['en_class']
# Tolerance and standard deviation on the appliances' duration
toll = params['toll']
devsta = params['devsta']
# Average square footage of the household
ftg_avg = params['ftg_avg']
## Input data for the appliances
# Appliances' attributes, energy consumptions and user's coefficients
# apps is a 2d-array in which, for each appliance (rows) and attribute value is given (columns)
apps_ID = appliances_data['apps_ID']
# apps_ID is a dictionary in which, for each appliance (key), its ID number, type, weekly and seasonal behavior, class are given (value)
apps = appliances_data['apps']
# apps_attr is a dictionary in which the name of each attribute (value) is linked to its columns number in apps (key)
apps_attr = appliances_data['apps_attr']
# ec_yearly_energy is a 2d-array in which for each appliance, its yearly energy consumption is given for each energetic class
ec_yearly_energy = appliances_data['ec_yearly_energy']
# ec_levels_dict is a dictionary that links each energetic level (value) to its columns number in ec_yearly_energy
ec_levels_dict = appliances_data['ec_levels_dict']
# coeff_matrix is a 2d-array in which for each appliance, its coefficient k, related to user's behaviour in different seasons, is given
coeff_matrix = appliances_data['coeff_matrix']
# seasons_dict is a dictionary that links each season (value) to its columns number in coeff_matrix
seasons_dict = appliances_data['seasons_dict']
# apps_avg_lps is a dictionary where the average load profiles are stored for each appliance, according to the different seasonal and weeky behaviour
apps_avg_lps = appliances_data['apps_avg_lps']
# apps_dcs is a dictionary where the typical duty_cycle for "duty cycle" type appliances is stored
apps_dcs = appliances_data['apps_dcs']
## Appliance attributes
# Extracting the correct data for the appliance from the applinces's attributes
# The ID number of the appliance is stored in a variable since it will be used many times
app_ID = apps_ID[app][apps_attr['id_number']]
# T_on is the duration of the time period in which an appliance is switched on during a day (min)
T_on = apps[app_ID, apps_attr['time_on'] - (len(apps_attr) - np.size(apps, 1))]
# energy is the yearly energy consumption, according to the energy label (kWh/year)
energy = ec_yearly_energy[app_ID, ec_levels_dict[en_class]]
# kk is the coefficient accounting for the user's behaviour in different season, for each appliance(-)
kk = coeff_matrix[app_ID, seasons_dict[season]]
# Return a vector of zeros if the appliance is not used at all during the season
if kk == 0: return(np.zeros((np.shape(time_sim))))
# k_ftg is a coefficient that adapts the consumption from lux to the actual footage of the household
k_ftg = ftg_avg/100 #(m2/m2)
## Nominal power consumption
# The nominal power is evaluated, according to the yearly energy
# consumption (kWh/year) and the time-period in which the appliance is
# switched on during a day (min/day). The first one is given in kWh/year
# and it is multiplied by 1000 and divided by 365 in order to get Wh/day.
# The second one is given in minutes and it is divided by 60 to get hours.
# An if statement is used to avoid negative power when T_on = -1
if T_on == -1:
power = 0
else:
power = (energy*1000/365)/(T_on/60) #(W)
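# Worked check with illustrative numbers (not from the input data): energy = 200 kWh/year
# and T_on = 120 min/day give (200*1000/365)/(120/60) ~= 274 W of nominal power.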
## Input data for the appliance
# Selecting the correct key where to find the average daily load profile in the dictionary
# app_nickname is a 2 or 3 characters string identifying the appliance
app_nickname = apps_ID[app][apps_attr['nickname']]
# app_type depends from the work cycle for the appliance: 'continuous'|'no_duty_cycle'|'duty_cycle'|
app_type = apps_ID[app][apps_attr['type']]
# app_wbe (weekly behavior), different usage of the appliance in each type of days: 'wde'|'we','wd'
app_wbe = apps_ID[app][apps_attr['week_behaviour']]
# app_sbe (seasonal behavior), different usage of the appliance in each season: 'sawp'|'s','w','ap'
app_sbe = apps_ID[app][apps_attr['season_behaviour']]
# Default choice (no different behaviour for different seasons):
# if the appliance has got different profiles in different seasons, this will be changed
key_season = 'sawp'
if len(app_sbe) > 1: key_season = season
# Default choice (no different behaviour for different types of day):
# if the appliance has got different profiles in different days of the week, this will be changed
key_day = 'wde'
if len(app_wbe) > 1: key_day = day
avg_load_profile = apps_avg_lps[app][(key_season, key_day)]
### Different routines according to the appliance's type
## Routine to be followed for appliances which belong to "continuous" type (ac and lux)
# The load profile has already been evaluated therefore it only needs to be loaded and properly managed
if app_type == 'continuous':
load_profile = avg_load_profile
# Lighting: the load profile is taken as it is, since no information about the different
# yearly energy consumption are available. The value is just adjusted to the users' seasonal
# behavior (kk) and house's footage (ftg_avg)
if app_nickname == 'lux':
activate_kk = 0
if activate_kk == 0: kk = 1
load_profile = load_profile*kk*k_ftg
# Other types of continuous appliances: the load profile is adjusted to the yearly energy consumption
else:
activate_kk = 1
if activate_kk == 0: kk = 1
load_profile = load_profile/(np.trapz(load_profile, time_sim)/(time_sim[-1] - time_sim[0]))*power*kk
return(load_profile)
## Routine to be followed for appliances which belong to "duty cycle" or "uniform" types
# The load profile has to be evaluated according to the time-period in which they are switched on (T_on)
# and to the frequency density of their usage during the day
# Loading the duty-cycle, for those appliances which have one
if app_type == 'duty_cycle':
time_dc = apps_dcs[app]['time_dc']
duty_cycle = apps_dcs[app]['duty_cycle']
# Adjusting the duty cycle to the actual nominal power
# and the users' habits (coefficient kk, varying according to the season)
activate_kk = 0
if activate_kk == 0: kk = 1
duty_cycle = duty_cycle/(np.trapz(duty_cycle, time_dc)/(time_dc[-1] - time_dc[0]))*power*kk
# Building a uniform duty-cycle for those appliances which use a constant power,
# according to the time-period in a day in which they are used (T_on)
if app_type == 'uniform':
# The following procedure is performed only if the appliance is not used continously during the day
if T_on != time:
# A randomly variating (in a normal distribution) duration is
# assumed for appliances which don't have a duty-cycle
d = T_on
lim_up = d+(toll*(d)/100)
lim_low = d-(toll*(d)/100)
lim_up , lim_low = np.clip((lim_up, lim_low), 0, time)
T_on = 0
while (T_on > lim_up or T_on < lim_low):
T_on = int(np.random.normal(d, devsta))
# Adjusting T_on to the seasonal coefficient for the user's behaviour
T_on = T_on*kk
# Creating the time and power vectors for the (uniform) duty-cycle
time_dc = np.linspace(0, T_on, int(T_on/dt + 1))
duty_cycle = np.ones(np.shape(time_dc))
# Giving the duty cycle the same shape as "duty_cycle" type appliances,
# meaning that the first value for the power is 0
duty_cycle[0] = 0
# Adjusting the duty cycle to the actual nominal power
duty_cycle = duty_cycle/(np.trapz(duty_cycle, time_dc)/(time_dc[-1] - time_dc[0]))*power
## Usage probability distributions
# Selecting a time instant from the appliance's usage frequency distribution. The latter
# is equal to the average daily load profile (a normalization is performed since the latter is in W)
freq_dens = apps_avg_lps[app][(key_season, key_day)]
# Evaluating the cumulative frequency of the appliance's usage in one day
# cumfreq = cum_freq(time_sim, freq_dens)
cum_freq = cumtrapz(freq_dens, time_sim, initial = 0)
cum_freq = cum_freq/np.max(cum_freq)
## Switch-on instant
# Selecting a random istant in which the appliances starts working (according to the cumulative frequency)
# and using its duty-cycle (uniform for those appliances which don't have a duty-cycle) to create the load profile
# Selecting a random instant when to make the appliance start its cycle,
# according to the frequency density and the cumulative frequency
random_probability = np.random.rand()
# Evaluating a time instant at which the appliance starts its cycle through the cumulative frequency,
# extracting it from a probability distribution that follows the frequency density of the appliance's usage
switch_on_instant = time_sim[cum_freq >= random_probability][0]
switch_on_index = int(np.where(time_sim == switch_on_instant)[0])
duration_index = int(T_on/2/dt)
## Building the load profile using the appliance's duty-cycle.
# This is done by initializing a vector of zeros of the same length as time_sim
# then injecting the duty-cycle at the begininng and shifting it to the switch-on
# instant using np.roll
load_profile = np.zeros( | np.shape(time_sim) | numpy.shape |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def gaussian_radius(det_size, min_overlap=0.7):
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = np.sqrt(b1 ** 2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = np.sqrt(b2 ** 2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = | np.sqrt(b3 ** 2 - 4 * a3 * c3) | numpy.sqrt |
import unittest
import numpy as np
import pandas as pd
import MTSGL.data
class TestDfToData(unittest.TestCase):
def test_bad_y(self):
n = 10
p = 5
df = pd.DataFrame(data={
"y": np.random.normal(0, 1, n),
"w": np.random.uniform(0, 1, n),
"task": np.random.choice([0, 1, 2], n)
})
for i in range(p):
df["var" + str(i + 1)] = np.random.normal(0, 1, n)
self.assertRaises(
TypeError,
MTSGL.data.utils.df_to_data,
df=df,
y_cols=2,
task_col="task",
w_cols="w",
x_cols=["var1", "var2", "var3"]
)
def test_bad_col_name(self):
n = 10
p = 5
df = pd.DataFrame(data={
"y": np.random.normal(0, 1, n),
"w": np.random.uniform(0, 1, n),
"task": np.random.choice([0, 1, 2], n)
})
for i in range(p):
df["var" + str(i + 1)] = | np.random.normal(0, 1, n) | numpy.random.normal |
# -*- mode: python; coding: utf-8 -*-
# Copyright (c) 2019 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
import pytest
from _pytest.outcomes import Skipped
import os
import numpy as np
import pyuvdata.tests as uvtest
from pyuvdata import UVData, UVCal, utils as uvutils
from pyuvdata.data import DATA_PATH
from pyuvdata import UVFlag
from ..uvflag import lst_from_uv, flags2waterfall, and_rows_cols
from pyuvdata import __version__
import shutil
import copy
import warnings
import h5py
import pathlib
test_d_file = os.path.join(DATA_PATH, "zen.2457698.40355.xx.HH.uvcAA.uvh5")
test_c_file = os.path.join(DATA_PATH, "zen.2457555.42443.HH.uvcA.omni.calfits")
test_f_file = test_d_file.rstrip(".uvh5") + ".testuvflag.h5"
pyuvdata_version_str = " Read/written with pyuvdata version: " + __version__ + "."
pytestmark = pytest.mark.filterwarnings(
"ignore:telescope_location is not set. Using known values for HERA.",
"ignore:antenna_positions is not set. Using known values for HERA.",
)
@pytest.fixture(scope="session")
def uvdata_obj_main():
uvdata_object = UVData()
uvdata_object.read(test_d_file)
yield uvdata_object
# cleanup
del uvdata_object
return
@pytest.fixture(scope="function")
def uvdata_obj(uvdata_obj_main):
uvdata_object = uvdata_obj_main.copy()
yield uvdata_object
# cleanup
del uvdata_object
return
# The following three fixtures are used regularly
# to initizize UVFlag objects from standard files
# We need to define these here in order to set up
# some skips for developers who do not have `pytest-cases` installed
@pytest.fixture(scope="function")
def uvf_from_data(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj)
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvdata_obj)
@pytest.fixture(scope="function")
def uvf_from_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag()
uvf.from_uvcal(uvc)
# the antenna type test file is large, so downselect to speed up
if uvf.type == "antenna":
uvf.select(antenna_nums=uvf.ant_array[:5])
# yield the object for the test
yield uvf
# do some cleanup
del (uvf, uvc)
@pytest.fixture(scope="function")
def uvf_from_waterfall(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, waterfall=True)
# yield the object for the test
yield uvf
# do some cleanup
del uvf
# Try to import `pytest-cases` and define decorators used to
# iterate over the three main types of UVFlag objects
# otherwise make the decorators skip the tests that use these iterators
try:
pytest_cases = pytest.importorskip("pytest_cases", minversion="1.12.1")
cases_decorator = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
pytest_cases.fixture_ref(uvf_from_waterfall),
],
)
cases_decorator_no_waterfall = pytest_cases.parametrize(
"input_uvf",
[
pytest_cases.fixture_ref(uvf_from_data),
pytest_cases.fixture_ref(uvf_from_uvcal),
],
)
# This warning is raised by pytest_cases
# It is due to a feature the developer does
# not know how to handle yet. ignore for now.
warnings.filterwarnings(
"ignore",
message="WARNING the new order is not" + " taken into account !!",
append=True,
)
except Skipped:
cases_decorator = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
cases_decorator_no_waterfall = pytest.mark.skipif(
True, reason="pytest-cases not installed or not required version"
)
@pytest.fixture()
def test_outfile(tmp_path):
yield str(tmp_path / "outtest_uvflag.h5")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_check_flag_array(uvdata_obj):
uvf = UVFlag()
uvf.from_uvdata(uvdata_obj, mode="flag")
uvf.flag_array = np.ones((uvf.flag_array.shape), dtype=int)
with pytest.raises(
ValueError, match="UVParameter _flag_array is not the appropriate type.",
):
uvf.check()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_bad_mode(uvdata_obj):
uv = uvdata_obj
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
uv = UVCal()
uv.read_calfits(test_c_file)
with pytest.raises(ValueError) as cm:
UVFlag(uv, mode="bad_mode", history="I made a UVFlag object", label="test")
assert str(cm.value).startswith("Input mode must be within acceptable")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.metric_array.shape == uv.flag_array.shape
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == uv.flag_array.shape
assert np.all(uvf.weights_array == 1)
assert uvf.type == "baseline"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert "I made a UVFlag object" in uvf.history
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
assert uvf.label == "test"
assert uvf.filename == uv.filename
def test_add_extra_keywords(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
uvf.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
uvf.extra_keywords["keyword3"] = 3
assert "keyword3" in uvf.extra_keywords
assert uvf.extra_keywords.get("keyword1") == 1
assert uvf.extra_keywords.get("keyword2") == 2
assert uvf.extra_keywords.get("keyword3") == 3
def test_read_extra_keywords(uvdata_obj):
uv = uvdata_obj
uv.extra_keywords = {"keyword1": 1, "keyword2": 2}
assert "keyword1" in uv.extra_keywords
assert "keyword2" in uv.extra_keywords
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert "keyword1" in uvf.extra_keywords
assert "keyword2" in uvf.extra_keywords
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_x_orientation(uvdata_obj):
uv = uvdata_obj
uv.x_orientation = "east"
uvf = UVFlag(uv, history="I made a UVFlag object", label="test")
assert uvf.x_orientation == uv.x_orientation
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_uvdata_copy_flags(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
with uvtest.check_warnings(UserWarning, 'Copying flags to type=="baseline"'):
uvf = UVFlag(uv, copy_flags=True, mode="metric")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
if future_shapes:
assert np.array_equal(uvf.flag_array[:, 0, :, :], uv.flag_array)
else:
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
if future_shapes:
assert np.all(uvf.freq_array == uv.freq_array)
else:
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_uvdata_mode_flag(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag()
uvf.from_uvdata(uv, copy_flags=False, mode="flag")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.weights_array is None
assert uvf.type == "baseline"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uv.time_array)
assert np.all(uvf.lst_array == uv.lst_array)
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.ant_1_array == uv.ant_1_array)
assert np.all(uvf.ant_2_array == uv.ant_2_array)
assert 'Flag object with type "baseline"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_uvcal():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
assert uvf.metric_array.shape == uvc.flag_array.shape
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == uvc.flag_array.shape
assert np.all(uvf.weights_array == 1)
assert uvf.type == "antenna"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == uvc.time_array)
assert uvf.x_orientation == uvc.x_orientation
lst = lst_from_uv(uvc)
assert np.all(uvf.lst_array == lst)
assert np.all(uvf.freq_array == uvc.freq_array[0])
assert np.all(uvf.polarization_array == uvc.jones_array)
assert np.all(uvf.ant_array == uvc.ant_array)
assert 'Flag object with type "antenna"' in uvf.history
assert pyuvdata_version_str in uvf.history
assert uvf.filename == uvc.filename
def test_init_uvcal_mode_flag():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc, copy_flags=False, mode="flag")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uvc.flag_array)
assert uvf.weights_array is None
assert uvf.type == "antenna"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == uvc.time_array)
lst = lst_from_uv(uvc)
assert np.all(uvf.lst_array == lst)
assert np.all(uvf.freq_array == uvc.freq_array[0])
assert np.all(uvf.polarization_array == uvc.jones_array)
assert np.all(uvf.ant_array == uvc.ant_array)
assert 'Flag object with type "antenna"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_cal_copy_flags():
uv = UVCal()
uv.read_calfits(test_c_file)
with uvtest.check_warnings(UserWarning, 'Copying flags to type=="antenna"'):
uvf = UVFlag(uv, copy_flags=True, mode="metric")
# with copy flags uvf.metric_array should be none
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
assert np.array_equal(uvf.flag_array, uv.flag_array)
assert uvf.type == "antenna"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_init_waterfall_uvd(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
uvf = UVFlag(uv, waterfall=True)
assert uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert np.all(uvf.weights_array == 1)
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.lst_array == np.unique(uv.lst_array))
if future_shapes:
assert np.all(uvf.freq_array == uv.freq_array)
else:
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_waterfall_uvc():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True, history="input history check")
assert uvf.metric_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert np.all(uvf.metric_array == 0)
assert uvf.weights_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert np.all(uvf.weights_array == 1)
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert "input history check" in uvf.history
assert pyuvdata_version_str in uvf.history
def test_init_waterfall_flag_uvcal():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, waterfall=True, mode="flag")
assert uvf.flag_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Njones)
assert not np.any(uvf.flag_array)
assert uvf.weights_array is None
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.jones_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_waterfall_flag_uvdata(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, waterfall=True, mode="flag")
assert uvf.flag_array.shape == (uv.Ntimes, uv.Nfreqs, uv.Npols)
assert not np.any(uvf.flag_array)
assert uvf.weights_array is None
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert np.all(uvf.time_array == np.unique(uv.time_array))
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
assert 'Flag object with type "waterfall"' in uvf.history
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_waterfall_copy_flags(uvdata_obj):
uv = UVCal()
uv.read_calfits(test_c_file)
with pytest.raises(NotImplementedError) as cm:
UVFlag(uv, copy_flags=True, mode="flag", waterfall=True)
assert str(cm.value).startswith("Cannot copy flags when initializing")
uv = uvdata_obj
with pytest.raises(NotImplementedError) as cm:
UVFlag(uv, copy_flags=True, mode="flag", waterfall=True)
assert str(cm.value).startswith("Cannot copy flags when initializing")
def test_init_invalid_input():
# input is not UVData, UVCal, path, or list/tuple
with pytest.raises(ValueError) as cm:
UVFlag(14)
assert str(cm.value).startswith("input to UVFlag.__init__ must be one of:")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_from_uvcal_error(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag()
with pytest.raises(ValueError) as cm:
uvf.from_uvcal(uv)
assert str(cm.value).startswith("from_uvcal can only initialize a UVFlag object")
def test_from_uvdata_error():
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag()
with pytest.raises(ValueError) as cm:
uvf.from_uvdata(uv)
assert str(cm.value).startswith("from_uvdata can only initialize a UVFlag object")
def test_init_list_files_weights(tmpdir):
# Test that weights are preserved when reading list of files
tmp_path = tmpdir.strpath
# Create two files to read
uvf = UVFlag(test_f_file)
np.random.seed(0)
wts1 = np.random.rand(*uvf.weights_array.shape)
uvf.weights_array = wts1.copy()
uvf.write(os.path.join(tmp_path, "test1.h5"))
wts2 = np.random.rand(*uvf.weights_array.shape)
uvf.weights_array = wts2.copy()
uvf.write(os.path.join(tmp_path, "test2.h5"))
uvf2 = UVFlag(
[os.path.join(tmp_path, "test1.h5"), os.path.join(tmp_path, "test2.h5")]
)
assert np.all(uvf2.weights_array == np.concatenate([wts1, wts2], axis=0))
def test_init_posix():
    # Test that a pathlib.Path input gives the same result as a plain string path
testfile_posix = pathlib.Path(test_f_file)
uvf1 = UVFlag(test_f_file)
uvf2 = UVFlag(testfile_posix)
assert uvf1 == uvf2
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_data_like_property_mode_tamper(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.mode = "test"
with pytest.raises(ValueError) as cm:
list(uvf.data_like_parameters)
assert str(cm.value).startswith("Invalid mode. Mode must be one of")
def test_read_write_loop(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
assert uvf2.filename == [os.path.basename(test_outfile)]
def test_read_write_loop_with_optional_x_orientation(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.x_orientation = "east"
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_loop_waterfall(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.to_waterfall()
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_loop_ret_wt_sq(test_outfile):
uvf = UVFlag(test_f_file)
uvf.weights_array = 2 * np.ones_like(uvf.weights_array)
uvf.to_waterfall(return_weights_square=True)
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_bad_mode_savefile(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
# create the file so the clobber gets tested
with h5py.File(test_outfile, "w") as h5file:
h5file.create_dataset("Test", list(range(10)))
uvf.write(test_outfile, clobber=True)
# manually re-read and tamper with parameters
with h5py.File(test_outfile, "a") as h5:
mode = h5["Header/mode"]
mode[...] = np.string_("test")
with pytest.raises(ValueError) as cm:
uvf = UVFlag(test_outfile)
assert str(cm.value).startswith("File cannot be read. Received mode")
def test_bad_type_savefile(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True)
# manually re-read and tamper with parameters
with h5py.File(test_outfile, "a") as h5:
mode = h5["Header/type"]
mode[...] = np.string_("test")
with pytest.raises(ValueError) as cm:
uvf = UVFlag(test_outfile)
assert str(cm.value).startswith("File cannot be read. Received type")
def test_write_add_version_str(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
assert h5["Header/history"].dtype.type is np.string_
hist = h5["Header/history"][()].decode("utf8")
assert pyuvdata_version_str in hist
def test_read_add_version_str(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
assert pyuvdata_version_str in uvf.history
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
hist = h5["Header/history"]
del hist
uvf2 = UVFlag(test_outfile)
assert pyuvdata_version_str in uvf2.history
assert uvf == uvf2
def test_read_write_ant(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_missing_nants_data(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "a") as h5:
del h5["Header/Nants_data"]
with uvtest.check_warnings(UserWarning, "Nants_data not available in file,"):
uvf2 = UVFlag(test_outfile)
    # make sure Nants_data was reconstructed from the length of ant_array
assert uvf2.Nants_data == len(uvf2.ant_array)
uvf2.Nants_data = uvf.Nants_data
# verify no other elements were changed
assert uvf.__eq__(uvf2, check_history=True)
def test_read_missing_nspws(test_outfile):
uv = UVCal()
uv.read_calfits(test_c_file)
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "a") as h5:
del h5["Header/Nspws"]
uvf2 = UVFlag(test_outfile)
# make sure Nspws was calculated
assert uvf2.Nspws == 1
# verify no other elements were changed
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_nocompress(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_nocompress_flag(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, mode="flag", label="test")
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf.__eq__(uvf2, check_history=True)
def test_read_write_extra_keywords(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, label="test")
uvf.extra_keywords = {"keyword1": 1, "keyword2": "string"}
uvf.write(test_outfile, clobber=True, data_compression=None)
uvf2 = UVFlag(test_outfile)
assert uvf2.extra_keywords["keyword1"] == 1
assert uvf2.extra_keywords["keyword2"] == "string"
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_init_list(uvdata_obj):
uv = uvdata_obj
uv.time_array -= 1
uvf = UVFlag([uv, test_f_file])
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
assert np.array_equal(
np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0), uvf.metric_array
)
assert np.array_equal(
np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array,
)
assert np.array_equal(
np.concatenate((uvf1.time_array, uvf2.time_array)), uvf.time_array
)
assert np.array_equal(
np.concatenate((uvf1.baseline_array, uvf2.baseline_array)), uvf.baseline_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)), uvf.ant_1_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)), uvf.ant_2_array
)
assert uvf.mode == "metric"
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
def test_read_list(uvdata_obj, test_outfile):
uv = uvdata_obj
uv.time_array -= 1
uvf = UVFlag(uv)
uvf.write(test_outfile, clobber=True)
uvf.read([test_outfile, test_f_file])
assert uvf.filename == sorted(
os.path.basename(file) for file in [test_outfile, test_f_file]
)
uvf1 = UVFlag(uv)
uvf2 = UVFlag(test_f_file)
assert np.array_equal(
np.concatenate((uvf1.metric_array, uvf2.metric_array), axis=0), uvf.metric_array
)
assert np.array_equal(
np.concatenate((uvf1.weights_array, uvf2.weights_array), axis=0),
uvf.weights_array,
)
assert np.array_equal(
np.concatenate((uvf1.time_array, uvf2.time_array)), uvf.time_array
)
assert np.array_equal(
np.concatenate((uvf1.baseline_array, uvf2.baseline_array)), uvf.baseline_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_1_array, uvf2.ant_1_array)), uvf.ant_1_array
)
assert np.array_equal(
np.concatenate((uvf1.ant_2_array, uvf2.ant_2_array)), uvf.ant_2_array
)
assert uvf.mode == "metric"
assert np.all(uvf.freq_array == uv.freq_array[0])
assert np.all(uvf.polarization_array == uv.polarization_array)
def test_read_error():
with pytest.raises(IOError) as cm:
UVFlag("foo")
assert str(cm.value).startswith("foo not found")
def test_read_change_type(test_outfile):
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.write(test_outfile, clobber=True)
assert hasattr(uvf, "ant_array")
uvf.read(test_f_file)
# clear sets these to None now
assert hasattr(uvf, "ant_array")
assert uvf.ant_array is None
assert hasattr(uvf, "baseline_array")
assert hasattr(uvf, "ant_1_array")
assert hasattr(uvf, "ant_2_array")
uvf.read(test_outfile)
assert hasattr(uvf, "ant_array")
assert hasattr(uvf, "baseline_array")
assert uvf.baseline_array is None
assert hasattr(uvf, "ant_1_array")
assert uvf.ant_1_array is None
assert hasattr(uvf, "ant_2_array")
assert uvf.ant_2_array is None
def test_read_change_mode(uvdata_obj, test_outfile):
uv = uvdata_obj
uvf = UVFlag(uv, mode="flag")
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
uvf.write(test_outfile, clobber=True)
uvf.read(test_f_file)
assert hasattr(uvf, "metric_array")
assert hasattr(uvf, "flag_array")
assert uvf.flag_array is None
uvf.read(test_outfile)
assert hasattr(uvf, "flag_array")
assert hasattr(uvf, "metric_array")
assert uvf.metric_array is None
def test_write_no_clobber():
uvf = UVFlag(test_f_file)
with pytest.raises(ValueError) as cm:
uvf.write(test_f_file)
assert str(cm.value).startswith("File " + test_f_file + " exists;")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_lst_from_uv(uvdata_obj):
uv = uvdata_obj
lst_array = lst_from_uv(uv)
assert np.allclose(uv.lst_array, lst_array)
def test_lst_from_uv_error():
with pytest.raises(ValueError) as cm:
lst_from_uv(4)
assert str(cm.value).startswith("Function lst_from_uv can only operate on")
def test_add():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along time axis. " in uv3.history
def test_add_collapsed_pols():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf.collapse_pol()
uvf3 = uvf.copy()
uvf3.time_array += 1 # increment the time array
uvf4 = uvf + uvf3
assert uvf4.Ntimes == 2 * uvf.Ntimes
assert uvf4.check()
def test_add_add_version_str():
uv1 = UVFlag(test_f_file)
uv1.history = uv1.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uv1.history
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert pyuvdata_version_str in uv3.history
def test_add_baseline():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.baseline_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis="baseline")
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along baseline axis. " in uv3.history
def test_add_antenna():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv1 = UVFlag(uvc)
uv2 = copy.deepcopy(uv1)
uv2.ant_array += 100 # Arbitrary
uv3 = uv1.__add__(uv2, axis="antenna")
assert np.array_equal(np.concatenate((uv1.ant_array, uv2.ant_array)), uv3.ant_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=0), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=0),
uv3.weights_array,
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert uv3.type == "antenna"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along antenna axis. " in uv3.history
def test_add_frequency():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.freq_array += 1e4 # Arbitrary
uv3 = uv1.__add__(uv2, axis="frequency")
assert np.array_equal(
np.concatenate((uv1.freq_array, uv2.freq_array), axis=-1), uv3.freq_array
)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.baseline_array, uv3.baseline_array)
assert np.array_equal(uv1.ant_1_array, uv3.ant_1_array)
assert np.array_equal(uv1.ant_2_array, uv3.ant_2_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=2), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=2),
uv3.weights_array,
)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along frequency axis. " in uv3.history
def test_add_frequency_with_weights_square():
# Same test as above, just checking an optional parameter (also in waterfall mode)
uvf1 = UVFlag(test_f_file)
uvf1.weights_array = 2 * np.ones_like(uvf1.weights_array)
uvf1.to_waterfall(return_weights_square=True)
uvf2 = copy.deepcopy(uvf1)
uvf2.freq_array += 1e4
uvf3 = uvf1.__add__(uvf2, axis="frequency")
assert np.array_equal(
np.concatenate((uvf1.weights_square_array, uvf2.weights_square_array), axis=1),
uvf3.weights_square_array,
)
def test_add_frequency_mix_weights_square():
# Same test as above, checking some error handling
uvf1 = UVFlag(test_f_file)
uvf1.weights_array = 2 * np.ones_like(uvf1.weights_array)
uvf2 = copy.deepcopy(uvf1)
uvf1.to_waterfall(return_weights_square=True)
uvf2.to_waterfall(return_weights_square=False)
uvf2.freq_array += 1e4
with pytest.raises(
ValueError,
match="weights_square_array optional parameter is missing from second UVFlag",
):
uvf1.__add__(uvf2, axis="frequency", inplace=True)
def test_add_pol():
uv1 = UVFlag(test_f_file)
uv2 = copy.deepcopy(uv1)
uv2.polarization_array += 1 # Arbitrary
uv3 = uv1.__add__(uv2, axis="polarization")
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert np.array_equal(uv1.time_array, uv3.time_array)
assert np.array_equal(uv1.baseline_array, uv3.baseline_array)
assert np.array_equal(uv1.ant_1_array, uv3.ant_1_array)
assert np.array_equal(uv1.ant_2_array, uv3.ant_2_array)
assert np.array_equal(uv1.lst_array, uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.metric_array, uv2.metric_array), axis=3), uv3.metric_array
)
assert np.array_equal(
np.concatenate((uv1.weights_array, uv2.weights_array), axis=3),
uv3.weights_array,
)
assert uv3.type == "baseline"
assert uv3.mode == "metric"
assert np.array_equal(
np.concatenate((uv1.polarization_array, uv2.polarization_array)),
uv3.polarization_array,
)
assert "Data combined along polarization axis. " in uv3.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_flag(uvdata_obj):
uv = uvdata_obj
uv1 = UVFlag(uv, mode="flag")
uv2 = copy.deepcopy(uv1)
uv2.time_array += 1 # Add a day
uv3 = uv1 + uv2
assert np.array_equal(
np.concatenate((uv1.time_array, uv2.time_array)), uv3.time_array
)
assert np.array_equal(
np.concatenate((uv1.baseline_array, uv2.baseline_array)), uv3.baseline_array
)
assert np.array_equal(
np.concatenate((uv1.ant_1_array, uv2.ant_1_array)), uv3.ant_1_array
)
assert np.array_equal(
np.concatenate((uv1.ant_2_array, uv2.ant_2_array)), uv3.ant_2_array
)
assert np.array_equal(np.concatenate((uv1.lst_array, uv2.lst_array)), uv3.lst_array)
assert np.array_equal(
np.concatenate((uv1.flag_array, uv2.flag_array), axis=0), uv3.flag_array
)
assert np.array_equal(uv1.freq_array, uv3.freq_array)
assert uv3.type == "baseline"
assert uv3.mode == "flag"
assert np.array_equal(uv1.polarization_array, uv3.polarization_array)
assert "Data combined along time axis. " in uv3.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_add_errors(uvdata_obj):
uv = uvdata_obj
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv1 = UVFlag(uv)
# Mismatched classes
with pytest.raises(ValueError) as cm:
uv1.__add__(3)
assert str(cm.value).startswith(
"Only UVFlag objects can be added to a UVFlag object"
)
# Mismatched types
uv2 = UVFlag(uvc)
with pytest.raises(ValueError) as cm:
uv1.__add__(uv2)
assert str(cm.value).startswith("UVFlag object of type ")
# Mismatched modes
uv3 = UVFlag(uv, mode="flag")
with pytest.raises(ValueError) as cm:
uv1.__add__(uv3)
assert str(cm.value).startswith("UVFlag object of mode ")
# Invalid axes
with pytest.raises(ValueError) as cm:
uv1.__add__(uv1, axis="antenna")
assert str(cm.value).endswith("concatenated along antenna axis.")
with pytest.raises(ValueError) as cm:
uv2.__add__(uv2, axis="baseline")
assert str(cm.value).endswith("concatenated along baseline axis.")
def test_inplace_add():
uv1a = UVFlag(test_f_file)
uv1b = copy.deepcopy(uv1a)
uv2 = copy.deepcopy(uv1a)
uv2.time_array += 1
uv1a += uv2
assert uv1a.__eq__(uv1b + uv2)
def test_clear_unused_attributes():
uv = UVFlag(test_f_file)
assert hasattr(uv, "baseline_array")
assert hasattr(uv, "ant_1_array")
assert hasattr(uv, "ant_2_array")
assert hasattr(uv, "Nants_telescope")
uv._set_type_antenna()
uv.clear_unused_attributes()
# clear_unused_attributes now sets these to None
assert hasattr(uv, "baseline_array")
assert uv.baseline_array is None
assert hasattr(uv, "ant_1_array")
assert uv.ant_1_array is None
assert hasattr(uv, "ant_2_array")
assert uv.ant_2_array is None
assert hasattr(uv, "Nants_telescope")
assert uv.Nants_telescope is None
uv._set_mode_flag()
assert hasattr(uv, "metric_array")
uv.clear_unused_attributes()
assert hasattr(uv, "metric_array")
assert uv.metric_array is None
# Start over
uv = UVFlag(test_f_file)
uv.ant_array = np.array([4])
uv.flag_array = np.array([5])
uv.clear_unused_attributes()
assert hasattr(uv, "ant_array")
assert uv.ant_array is None
assert hasattr(uv, "flag_array")
assert uv.flag_array is None
def test_not_equal():
uvf1 = UVFlag(test_f_file)
# different class
assert not uvf1.__eq__(5)
# different mode
uvf2 = uvf1.copy()
uvf2.mode = "flag"
assert not uvf1.__eq__(uvf2)
# different type
uvf2 = uvf1.copy()
uvf2.type = "antenna"
assert not uvf1.__eq__(uvf2)
# array different
uvf2 = uvf1.copy()
uvf2.freq_array += 1
assert not uvf1.__eq__(uvf2)
# history different
uvf2 = uvf1.copy()
uvf2.history += "hello"
assert not uvf1.__eq__(uvf2, check_history=True)
def test_to_waterfall_bl():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
def test_to_waterfall_add_version_str():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_waterfall()
assert pyuvdata_version_str in uvf.history
def test_to_waterfall_bl_multi_pol():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy() # Keep a copy to run with keep_pol=False
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.polarization_array) == 2
# Repeat with keep_pol=False
uvf2.to_waterfall(keep_pol=False)
assert uvf2.type == "waterfall"
assert uvf2.metric_array.shape == (len(uvf2.time_array), len(uvf.freq_array), 1)
assert uvf2.weights_array.shape == uvf2.metric_array.shape
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
def test_to_waterfall_bl_ret_wt_sq():
uvf = UVFlag(test_f_file)
Nbls = uvf.Nbls
uvf.weights_array = 2 * np.ones_like(uvf.weights_array)
uvf.to_waterfall(return_weights_square=True)
assert np.all(uvf.weights_square_array == 4 * Nbls)
# Switch to flag and check that it is now set to None
uvf.to_flag()
assert uvf.weights_square_array is None
def test_collapse_pol(test_outfile):
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "metric"
assert hasattr(uvf2, "metric_array")
assert hasattr(uvf2, "flag_array")
assert uvf2.flag_array is None
# test check passes just to be sure
assert uvf2.check()
# test writing it out and reading in to make sure polarization_array has
# correct type
uvf2.write(test_outfile, clobber=True)
with h5py.File(test_outfile, "r") as h5:
assert h5["Header/polarization_array"].dtype.type is np.string_
uvf = UVFlag(test_outfile)
assert uvf._polarization_array.expected_type == str
assert uvf._polarization_array.acceptable_vals is None
assert uvf == uvf2
os.remove(test_outfile)
def test_collapse_pol_add_pol_axis():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
with pytest.raises(NotImplementedError) as cm:
uvf2.__add__(uvf2, axis="pol")
assert str(cm.value).startswith("Two UVFlag objects with their")
def test_collapse_pol_or():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol(method="or")
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "flag"
assert hasattr(uvf2, "flag_array")
assert hasattr(uvf2, "metric_array")
assert uvf2.metric_array is None
def test_collapse_pol_add_version_str():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf2 = uvf.copy()
uvf2.collapse_pol(method="or")
assert pyuvdata_version_str in uvf2.history
def test_collapse_single_pol():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf2 = uvf.copy()
with uvtest.check_warnings(UserWarning, "Cannot collapse polarization"):
uvf.collapse_pol()
assert uvf == uvf2
def test_collapse_pol_flag():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf2 = uvf.copy()
uvf2.polarization_array[0] = -4
uvf.__add__(uvf2, inplace=True, axis="pol") # Concatenate to form multi-pol object
uvf2 = uvf.copy()
uvf2.collapse_pol()
assert len(uvf2.polarization_array) == 1
assert uvf2.polarization_array[0] == np.str_(
",".join(map(str, uvf.polarization_array))
)
assert uvf2.mode == "metric"
assert hasattr(uvf2, "metric_array")
assert hasattr(uvf2, "flag_array")
assert uvf2.flag_array is None
def test_to_waterfall_bl_flags():
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.mode == "metric"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_bl_flags_or():
uvf = UVFlag(test_f_file)
uvf.to_flag()
assert uvf.weights_array is None
uvf.to_waterfall(method="or")
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert uvf.flag_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert len(uvf.lst_array) == len(uvf.time_array)
uvf = UVFlag(test_f_file)
uvf.to_flag()
uvf.to_waterfall(method="or")
assert uvf.type == "waterfall"
assert uvf.mode == "flag"
assert uvf.flag_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_ant():
uvc = UVCal()
uvc.read_calfits(test_c_file)
uvf = UVFlag(uvc)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
assert uvf.type == "waterfall"
assert uvf.metric_array.shape == (
len(uvf.time_array),
len(uvf.freq_array),
len(uvf.polarization_array),
)
assert uvf.weights_array.shape == uvf.metric_array.shape
assert len(uvf.lst_array) == len(uvf.time_array)
def test_to_waterfall_waterfall():
uvf = UVFlag(test_f_file)
uvf.weights_array = np.ones_like(uvf.weights_array)
uvf.to_waterfall()
with uvtest.check_warnings(UserWarning, "This object is already a waterfall"):
uvf.to_waterfall()
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_flags(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.to_baseline(uv)
assert uvf.type == "baseline"
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
times = np.unique(uvf.time_array)
ntrue = 0.0
ind = np.where(uvf.time_array == times[0])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 10, 0])
ind = np.where(uvf.time_array == times[1])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 15, 0])
assert uvf.flag_array.mean() == ntrue / uvf.flag_array.size
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
@pytest.mark.parametrize("future_shapes", [True, False])
def test_to_baseline_metric(uvdata_obj, future_shapes):
uv = uvdata_obj
if future_shapes:
uv.use_future_array_shapes()
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.to_baseline(uv)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
times = np.unique(uvf.time_array)
ind = np.where(uvf.time_array == times[0])[0]
nt0 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 10, 0] == 3.2)
ind = np.where(uvf.time_array == times[1])[0]
nt1 = len(ind)
assert np.all(uvf.metric_array[ind, 0, 15, 0] == 2.1)
assert np.isclose(
uvf.metric_array.mean(), (3.2 * nt0 + 2.1 * nt1) / uvf.metric_array.size
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_add_version_str(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.metric_array[0, 10, 0] = 3.2 # Fill in time0, chan10
uvf.metric_array[1, 15, 0] = 2.1 # Fill in time1, chan15
uvf.history = uvf.history.replace(pyuvdata_version_str, "")
assert pyuvdata_version_str not in uvf.history
uvf.to_baseline(uv)
assert pyuvdata_version_str in uvf.history
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_baseline_to_baseline(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf2 = uvf.copy()
uvf.to_baseline(uv)
assert uvf == uvf2
def test_to_baseline_metric_error(uvdata_obj, uvf_from_uvcal):
uvf = uvf_from_uvcal
uvf.select(polarizations=uvf.polarization_array[0])
uv = uvdata_obj
with pytest.raises(NotImplementedError) as cm:
uvf.to_baseline(uv, force_pol=True)
assert str(cm.value).startswith(
"Cannot currently convert from " "antenna type, metric mode"
)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_from_antenna(uvdata_obj, uvf_from_uvcal):
uvf = uvf_from_uvcal
uvf.select(polarizations=uvf.polarization_array[0])
uvf.to_flag()
uv = uvdata_obj
ants_data = np.unique(uv.ant_1_array.tolist() + uv.ant_2_array.tolist())
new_ants = np.setdiff1d(ants_data, uvf.ant_array)
old_baseline = (uvf.ant_array[0], uvf.ant_array[1])
old_times = np.unique(uvf.time_array)
or_flags = np.logical_or(uvf.flag_array[0], uvf.flag_array[1])
or_flags = np.transpose(or_flags, [2, 0, 1, 3])
uv2 = copy.deepcopy(uv)
uvf2 = uvf.copy()
# hack in the exact times so we can compare some values later
uv2.select(bls=old_baseline)
uv2.time_array[: uvf2.time_array.size] = uvf.time_array
uvf.to_baseline(uv, force_pol=True)
uvf2.to_baseline(uv2, force_pol=True)
assert uvf.check()
uvf2.select(bls=old_baseline, times=old_times)
assert np.allclose(or_flags, uvf2.flag_array)
    # all new antennas should be completely flagged
# checks auto correlations
uvf_new = uvf.select(antenna_nums=new_ants, inplace=False)
for bl in np.unique(uvf_new.baseline_array):
uvf2 = uvf_new.select(bls=uv.baseline_to_antnums(bl), inplace=False)
assert np.all(uvf2.flag_array)
# check for baselines with one new antenna
bls = [
uvf.baseline_to_antnums(bl)
for bl in uvf.baseline_array
if np.intersect1d(new_ants, uvf.baseline_to_antnums(bl)).size > 0
]
uvf_new = uvf.select(bls=bls, inplace=False)
for bl in np.unique(uvf_new.baseline_array):
uvf2 = uvf_new.select(bls=uv.baseline_to_antnums(bl), inplace=False)
assert np.all(uvf2.flag_array)
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_errors(uvdata_obj):
uvc = UVCal()
uvc.read_calfits(test_c_file)
uv = uvdata_obj
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
with pytest.raises(ValueError) as cm:
uvf.to_baseline(7.3) # invalid matching object
assert str(cm.value).startswith("Must pass in UVData object or UVFlag object")
uvf = UVFlag(test_f_file)
uvf.to_waterfall()
uvf2 = uvf.copy()
uvf.polarization_array[0] = -4
with pytest.raises(ValueError) as cm:
uvf.to_baseline(uv) # Mismatched pols
assert str(cm.value).startswith("Polarizations do not match.")
uvf.__iadd__(uvf2, axis="polarization")
with pytest.raises(ValueError) as cm:
uvf.to_baseline(uv) # Mismatched pols, can't be forced
assert str(cm.value).startswith("Polarizations could not be made to match.")
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_force_pol(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uvf.polarization_array[0] = -4 # Change pol, but force pol anyway
uvf.to_baseline(uv, force_pol=True)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
assert np.array_equal(uvf.polarization_array, uv.polarization_array)
times = np.unique(uvf.time_array)
ntrue = 0.0
ind = np.where(uvf.time_array == times[0])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 10, 0])
ind = np.where(uvf.time_array == times[1])[0]
ntrue += len(ind)
assert np.all(uvf.flag_array[ind, 0, 15, 0])
assert uvf.flag_array.mean() == ntrue / uvf.flag_array.size
@pytest.mark.filterwarnings("ignore:The uvw_array does not match the expected values")
def test_to_baseline_force_pol_npol_gt_1(uvdata_obj):
uv = uvdata_obj
uvf = UVFlag(uv)
uvf.to_waterfall()
uvf.to_flag()
uvf.flag_array[0, 10, 0] = True # Flag time0, chan10
uvf.flag_array[1, 15, 0] = True # Flag time1, chan15
uv2 = copy.deepcopy(uv)
uv2.polarization_array[0] = -6
uv += uv2
uvf.to_baseline(uv, force_pol=True)
assert np.all(uvf.baseline_array == uv.baseline_array)
assert np.all(uvf.time_array == uv.time_array)
    assert np.array_equal(uvf.polarization_array, uv.polarization_array)
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import time
import math
import os
import scipy.io
import yaml
from tqdm import tqdm
from sklearn.cluster import DBSCAN
from model import ft_net, ft_net_angle, ft_net_dense, ft_net_NAS, PCB, PCB_test, CPB
from evaluate_gpu import calculate_result
from evaluate_rerank import calculate_result_rerank
from re_ranking import re_ranking, re_ranking_one
from utils import load_network
from losses import L2Normalization
from shutil import copyfile
import pickle
import PIL
#fp16
try:
from apex.fp16_utils import *
except ImportError:  # apex is optional; it is only needed for fp16 inference
print('This is not an error. If you want to use low precision, i.e., fp16, please install the apex with cuda support (https://github.com/NVIDIA/apex) and update pytorch to 1.0')
######################################################################
# Options
# --------
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu_ids',default='0', type=str,help='gpu_ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--ms',default='1', type=str,help='multiple_scale: e.g. 1 1,1.1 1,1.1,1.2')
parser.add_argument('--which_epoch',default='59', type=str, help='0,1,2,3...or last')
parser.add_argument('--test_dir',default='./data/test_data',type=str, help='./test_data')
parser.add_argument('--crop_dir',default='./data/cropped_aicity',type=str, help='./test_data')
parser.add_argument('--names', default='ft_ResNet50,xxxx,xxxxx', type=str, help='save model path')
parser.add_argument('--batchsize', default=100, type=int, help='batchsize')
parser.add_argument('--inputsize', default=384, type=int, help='input image size')
parser.add_argument('--h', default=384, type=int, help='input image height')
parser.add_argument('--w', default=384, type=int, help='input image width')
parser.add_argument('--use_dense', action='store_true', help='use densenet121' )
parser.add_argument('--use_NAS', action='store_true', help='use NASNet' )
parser.add_argument('--PCB', action='store_true', help='use PCB' )
parser.add_argument('--CPB', action='store_true', help='use CPB' )
parser.add_argument('--multi', action='store_true', help='use multiple query' )
parser.add_argument('--fp16', action='store_true', help='use fp16.' )
parser.add_argument('--pool',default='avg', type=str, help='last pool')
parser.add_argument('--k1', default=70, type=int, help='k1 for k-reciprocal re-ranking')
parser.add_argument('--k2', default=10, type=int, help='k2 for k-reciprocal re-ranking')
parser.add_argument('--lam', default=0.2, type=float, help='lambda weight for re-ranking')
parser.add_argument('--dba', default=0, type=int, help='rounds of database-side feature augmentation (DBA)')
opt = parser.parse_args()
str_ids = opt.gpu_ids.split(',')
#which_epoch = opt.which_epoch
test_dir = opt.test_dir
gpu_ids = []
for str_id in str_ids:
id = int(str_id)
if id >=0:
gpu_ids.append(id)
str_ms = opt.ms.split(',')
ms = []
for s in str_ms:
s_f = float(s)
ms.append(math.sqrt(s_f))
# set gpu ids
if len(gpu_ids)>0:
torch.cuda.set_device(gpu_ids[0])
cudnn.benchmark = True
######################################################################
# Load Data
# ---------
#
# We will use torchvision and torch.utils.data packages for loading the
# data.
#
if opt.h == opt.w:
data_transforms = transforms.Compose([
transforms.Resize( ( round(opt.inputsize*1.1), round(opt.inputsize*1.1)), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
data_transforms = transforms.Compose([
transforms.Resize( (round(opt.h*1.1), round(opt.w*1.1)), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
if opt.PCB:
data_transforms = transforms.Compose([
transforms.Resize((384,192), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
data_dir = test_dir
image_datasets = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,data_transforms) for x in ['gallery','query']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=opt.batchsize,
shuffle=False, num_workers=16) for x in ['gallery','query']}
cropped_image_datasets = {x: datasets.ImageFolder( os.path.join(opt.crop_dir,x) ,data_transforms) for x in ['gallery','query']}
cropped_dataloaders = {x: torch.utils.data.DataLoader(cropped_image_datasets[x], batch_size=opt.batchsize,
shuffle=False, num_workers=16) for x in ['gallery','query']}
class_names = image_datasets['query'].classes
use_gpu = torch.cuda.is_available()
######################################################################
# Extract feature
# ----------------------
#
# Extract feature from a trained model.
#
def fliplr(img):
'''flip horizontal'''
inv_idx = torch.arange(img.size(3)-1,-1,-1).long() # N x C x H x W
img_flip = img.index_select(3,inv_idx)
return img_flip
def extract_feature(model,dataloaders):
features = torch.FloatTensor()
count = 0
for data in tqdm(dataloaders):
img, label = data
n, c, h, w = img.size()
count += n
#print(count)
ff = torch.FloatTensor(n,512).zero_().cuda()
for i in range(2):
if(i==1):
img = fliplr(img)
input_img = Variable(img.cuda())
for scale in ms:
if scale != 1:
input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bilinear', align_corners=False)
outputs = model(input_img)
ff += outputs
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
ff = ff.div(fnorm.expand_as(ff))
#print(ff.shape)
features = torch.cat((features,ff.data.cpu().float()), 0)
return features
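# Illustrative use of extract_feature (a sketch; this script actually loads precomputed
# descriptors from .mat/.pkl files further down): with a trained embedding model already
# in eval() mode on the GPU, per-image features for a split would be obtained as
#   query_feature_live = extract_feature(model, dataloaders['query'])
# Each row is the L2-normalized sum of the original and horizontally flipped views over
# the scales listed in ms.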
def extract_cam(model, dataloaders):
cams = torch.FloatTensor()
count = 0
for data in tqdm(dataloaders):
img, label = data
n, c, h, w = img.size()
count += n
input_img = Variable(img.cuda())
ff = torch.FloatTensor(n,512).zero_().cuda()
for scale in ms:
if scale != 1:
input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bilinear', align_corners=False)
ff += model(input_img)
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
ff = ff.div(fnorm.expand_as(ff))
#outputs = nn.functional.softmax(outputs, dim=1)
cams = torch.cat((cams, ff.data.cpu().float()), 0)
return cams
def predict_cam(model, dataloaders):
cams = torch.FloatTensor()
count = 0
for data in tqdm(dataloaders):
img, label = data
n, c, h, w = img.size()
count += n
input_img = Variable(img.cuda())
#for scale in ms:
# if scale != 1:
# input_img = nn.functional.interpolate(input_img, scale_factor=scale, mode='bilinear', align_corners=False)
outputs = model(input_img)
cams = torch.cat((cams, outputs.data.cpu().float()), 0)
return cams
def load_pickle(filename):
fr=open(filename,'rb')
try:
data = pickle.load(fr, encoding='latin1')
except:
data = pickle.load(fr)
index = 0
for name, f in data.items():
if index == 0:
feature = torch.zeros( len(data), len(f))
feature[int(name[:-4])-1,:] = torch.FloatTensor(f)
index +=1
feature = L2Normalization(feature, dim=1)
return feature
def load_attribute(filename):
fr=open(filename,'rb')
data = pickle.load(fr, encoding='latin1')
index = 0
direction_total = np.ndarray( len(data))
color_total = np.ndarray( len(data))
vtype_total = np.ndarray( len(data))
for name, value in data.items():
direction_total[int(name[:-4])-1] = value[0]
color_total[int(name[:-4])-1] = value[1]
vtype_total[int(name[:-4])-1] = value[2]
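    # only the vehicle-type attribute is returned below; the direction and color
    # arrays are filled in the loop above but discarded by this helper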
return vtype_total
def get_shape(path):
shape_total = np.zeros(len(path))
count = 0
for name, label in path:
img = np.asarray(PIL.Image.open(name))
shape_total[count] = img.shape[0] * img.shape[1]
count += 1
return shape_total
gallery_path = image_datasets['gallery'].imgs
query_path = image_datasets['query'].imgs
query_shape = get_shape(query_path)
gallery_shape = get_shape(gallery_path)
#with open('q_g_direct_sim.pkl','rb') as fid:
# q_g_direction_sim = pickle.load(fid)
with open('pkl_feas/q_g_direct_sim_track.pkl','rb') as fid:
q_g_direction_sim = pickle.load(fid)
with open('pkl_feas/q_q_direct_sim.pkl','rb') as fid:
q_q_direction_sim = pickle.load(fid)
#with open('pkl_feas/g_g_direct_sim.pkl','rb') as fid:
# g_g_direction_sim = pickle.load(fid)
with open('pkl_feas/g_g_direct_sim_track.pkl','rb') as fid:
g_g_direction_sim = pickle.load(fid)
######################################################################
# Extract feature
result = scipy.io.loadmat('feature/submit_result_ft_SE_imbalance_s1_384_p0.5_lr2_mt_d0_b24+v+aug.mat')
query_feature0 = torch.FloatTensor(result['query_f']).cuda()
gallery_feature0 = torch.FloatTensor(result['gallery_f']).cuda()
query_path = 'pkl_feas/query_fea_ResNeXt101_vd_64x4d_cos_alldata_final.pkl'
query_feature1 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature1 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/query_fea_ResNeXt101_vd_64x4d_twosource_alldata_final.pkl'
query_feature2 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature2 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/real_query_fea_ResNeXt101_32x8d_wsl_416_416_final.pkl'
query_feature3 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature3 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/query_fea_ResNeXt101_vd_64x4d_twosource_cos_autoaug_final2.pkl'
query_feature4 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature4 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/real_query_fea_ResNeXt101_32x16d_wsl_384_384_final.pkl'
query_feature5 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature5 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/real_query_fea_ResNeXt101_32x8d_wsl_384_384_final.pkl'
query_feature6 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature6 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/bzc_res50ibn_ensemble_query_4307.pkl'
query_feature7 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature7 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/real_query_fea_ResNeXt101_32x8d_wsl_400_400_final.pkl'
query_feature8 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature8 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/real_query_fea_ResNeXt101_32x8d_wsl_rect_final.pkl'
query_feature9 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature9 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/0403/query_fea_ResNeXt101_vd_64x4d_twosource_cos_trans_merge.pkl'
query_feature10 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature10 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/query_fea_Res2Net101_vd_final2.pkl'
query_feature11 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature11 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_path = 'pkl_feas/res50ibn_ensemble_query_bzc.pkl'
query_feature12 = torch.FloatTensor(load_pickle(query_path)).cuda()
gallery_feature12 = torch.FloatTensor(load_pickle(query_path.replace('query', 'gallery'))).cuda()
query_feature = torch.cat( (query_feature0, query_feature1, query_feature2, query_feature3, query_feature4, query_feature5, query_feature6, query_feature7, query_feature8, query_feature9, query_feature10, query_feature11,query_feature12), dim =1)
gallery_feature = torch.cat( (gallery_feature0, gallery_feature1, gallery_feature2, gallery_feature3, gallery_feature4, gallery_feature5, gallery_feature6, gallery_feature7, gallery_feature8, gallery_feature9, gallery_feature10, gallery_feature11, gallery_feature12), dim=1)
gallery_path = image_datasets['gallery'].imgs
query_path = image_datasets['query'].imgs
query_feature = L2Normalization(query_feature, dim=1)
gallery_feature = L2Normalization(gallery_feature, dim=1)
print(query_feature.shape)
threshold = 0.5
#query cluster
nq = query_feature.shape[0]
nf = query_feature.shape[1]
q_q_dist = torch.mm(query_feature, torch.transpose(query_feature, 0, 1))
q_q_dist = q_q_dist.cpu().numpy()
q_q_dist[q_q_dist>1] = 1 #due to the epsilon
q_q_dist = 2-2*q_q_dist
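# for L2-normalized features the squared Euclidean distance equals 2 - 2*cosine
# similarity, so q_q_dist is a valid dissimilarity matrix for metric='precomputed'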
eps = threshold
# first cluster
min_samples= 2
cluster1 = DBSCAN(eps=eps, min_samples=min_samples, metric='precomputed', algorithm='auto', n_jobs=-1)
cluster1 = cluster1.fit(q_q_dist)
qlabels = cluster1.labels_
nlabel_q = len(np.unique(cluster1.labels_))
# gallery cluster
ng = gallery_feature.shape[0]
### Using tracking ID
g_g_dist = torch.ones(ng,ng).numpy()
nlabel_g = 0
glabels = torch.zeros(ng).numpy() - 1
with open('data/test_track_id.txt','r') as f:
for line in f:
line = line.replace('\n','')
g_name = line.split(' ')
g_name.remove('')
g_name = list(map(int, g_name))
for i in g_name:
glabels[i-1] = nlabel_g
for j in g_name:
g_g_dist[i-1,j-1] = 0
nlabel_g +=1
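# each line of test_track_id.txt is expected to list the 1-based gallery image ids of
# one track, space separated (the parsing above drops the empty token left by a trailing
# space); all images of a track share one gallery label and zero mutual distance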
nimg_g = len(np.argwhere(glabels!=-1))
print('Gallery Cluster Class Number: %d'%nlabel_g)
print('Gallery Cluster Image per Class: %.2f'%(nimg_g/nlabel_g))
query_feature = L2Normalization(query_feature, dim=1)
gallery_feature = L2Normalization(gallery_feature, dim=1)
# Gallery Video fusion
gallery_feature_clone = gallery_feature.clone()
g_g_direction_sim_clone = g_g_direction_sim.copy()
junk_index_g = np.argwhere(gallery_shape< 15000).flatten() # 150x150
junk_index_q = np.argwhere(query_shape< 15000).flatten() # 150x150
print('Low Qualtiy Image in Query: %d'% len(junk_index_q))
print('Low Qualtiy Image in Gallery: %d'% len(junk_index_g))
for i in range(nlabel_g):
index = np.argwhere(glabels==i).flatten() #from small to large, start from 0
high_quality_index = np.setdiff1d(index, junk_index_g)
if len(high_quality_index) == 0:
high_quality_index = index
gf_mean = torch.mean(gallery_feature_clone[high_quality_index,:], dim=0)
gd_mean = np.mean(g_g_direction_sim_clone[high_quality_index,:], axis=0)
for j in range(len(index)):
gallery_feature[index[j],:] += 0.5*gf_mean
#g_g_direction_sim[index[j],:] = (g_g_direction_sim[index[j],:] + gd_mean)/2
# Query Feature fusion
query_feature_clone = query_feature.clone()
for i in range(nlabel_q-1):
index = np.argwhere(qlabels==i).flatten() #from small to large, start from 0
high_quality_index = np.setdiff1d(index, junk_index_q)
if len(high_quality_index) == 0:
high_quality_index = index
qf_mean = torch.mean(query_feature_clone[high_quality_index,:], dim=0)
for j in range(len(index)):
query_feature[index[j],:] = qf_mean
query_feature = L2Normalization(query_feature, dim=1)
gallery_feature = L2Normalization(gallery_feature, dim=1)
######################################################################
# Predict Camera
q_cam = []
g_cam = []
with open('query_cam_preds_baidu.txt','r') as f:
for line in f:
line = line.replace('\n','')
ID = line.split(' ')
q_cam.append(int(ID[1]))
with open('gallery_cam_preds_baidu.txt','r') as f:
for line in f:
line = line.replace('\n','')
ID = line.split(' ')
g_cam.append(int(ID[1]))
q_cam = np.asarray(q_cam)
g_cam = np.asarray(g_cam)
import numpy as np
from prml.linear.regressor import Regressor
class VariationalLinearRegressor_DR():
"""
variational bayesian estimation the parameters
p(w,alpha|X,t)
~ q(w)q(alpha)
= N(w|w_mean, w_var)Gamma(alpha|a,b)
Attributes
----------
a : float
a parameter of variational posterior gamma distribution
b : float
another parameter of variational posterior gamma distribution
w_mean : (n_features,) ndarray
mean of variational posterior gaussian distribution
w_var : (n_features, n_feautures) ndarray
variance of variational posterior gaussian distribution
n_iter : int
number of iterations performed
"""
def __init__(self, beta=1., a0=1., b0=1.):
"""
construct variational linear regressor
Parameters
----------
beta : float
precision of observation noise
a0 : float
a parameter of prior gamma distribution
Gamma(alpha|a0,b0)
b0 : float
another parameter of prior gamma distribution
Gamma(alpha|a0,b0)
"""
self.beta = beta
self.a0 = a0
self.b0 = b0
def fit(self, X_dr, iter_max=100):
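        """
        Run the variational updates for q(w) = N(w|w_mean, w_var) and
        q(alpha) = Gamma(alpha|a, b) until b stops changing.

        Parameters
        ----------
        X_dr : dict
            precomputed design-matrix products: 'XX' (= X^T X, shape
            (n_features, n_features)) and 'XY' (= X^T t, shape (n_features,))
        iter_max : int
            maximum number of variational iterations
        """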
self.a = self.a0 + 0.5 * np.size(X_dr['XY'], 0)
self.b = self.b0
I = np.eye(np.size(X_dr['XY'], 0))
for i in range(iter_max):
param = self.b
self.w_var = np.linalg.inv(
self.a * I / self.b
+ self.beta * X_dr['XX'])
self.w_mean = self.beta * self.w_var @ X_dr['XY']
self.b = self.b0 + 0.5 * (
np.sum(self.w_mean ** 2)
+ np.trace(self.w_var))
if np.allclose(self.b, param):
break
self.n_iter = i + 1
def predict(self, X, return_std=False):
assert X.ndim == 2
y = X @ self.w_mean
if return_std:
y_var = 1 / self.beta + np.sum(X @ self.w_var * X, axis=1)
            y_std = np.sqrt(y_var)
            return y, y_std
        return y
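# Minimal usage sketch, assuming X_dr is a dict of precomputed design-matrix
# products 'XX' = X^T X and 'XY' = X^T t (which is how fit() indexes it above);
# the synthetic data and beta value below are illustrative choices.
if __name__ == "__main__":
    np.random.seed(0)
    X = np.random.randn(50, 3)                   # design matrix
    w_true = np.array([0.5, -1.0, 2.0])
    t = X @ w_true + 0.1 * np.random.randn(50)   # targets with noise std 0.1
    X_dr = {"XX": X.T @ X, "XY": X.T @ t}
    model = VariationalLinearRegressor_DR(beta=100.)  # beta = 1 / noise variance
    model.fit(X_dr)
    y_mean, y_std = model.predict(X, return_std=True)
    print("posterior mean weights:", model.w_mean)
    print("predictive std of first 3 points:", y_std[:3])
    print("converged after", model.n_iter, "iterations")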
import argparse
import cv2
import numpy as np
from binary_thresholding import GetThresholdedBinary
from lane_tracking import TLaneTracker
from lens_correction import TLensCorrector
from moviepy.editor import VideoFileClip
from perspective_transform import TPerspectiveTransformer
# Global Variables ------------------------------------------------------------
LensCorrector = TLensCorrector("camera_calibration")
LaneTracker = TLaneTracker()
PerspectiveTransformer = TPerspectiveTransformer(1280, 720)
# Functions ------------------------------------------------------------
def ProcessImage(img):
""" Processes an RGB image by detecting the lane lines, radius of curvature and course deviation.
    The information is drawn as an overlay on the undistorted original image.
param: img: Image to process
returns: Processed RGB image
"""
# Convert the RGB image of MoviePy to BGR format of OpenCV
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
# Undistort the image
undistortedImg = LensCorrector.Undistort(img)
# Transform
thresholdedBinary = GetThresholdedBinary(undistortedImg)
# Generate a bird's eye view
warpedBinary = PerspectiveTransformer.Warp(thresholdedBinary)
# Detect the lane lines, radius of curvature and course deviation
leftCoefficients, rightCoefficients, curveRad, deviation = LaneTracker.ProcessLaneImage(warpedBinary)
# Generate x and y values for plotting
plotY = np.linspace(0, warpedBinary.shape[0] - 1, warpedBinary.shape[0])
leftPlotX = leftCoefficients[0] * plotY**2 + leftCoefficients[1] * plotY + leftCoefficients[2]
rightPlotX = rightCoefficients[0] * plotY**2 + rightCoefficients[1] * plotY + rightCoefficients[2]
# Fill the lane surface
laneImg = np.zeros_like(undistortedImg)
# Recast the x and y points into usable format for cv2.fillPoly()
leftPoints = np.array([np.transpose(np.vstack([leftPlotX, plotY]))])
rightPoints = np.array([np.flipud(np.transpose(np.vstack([rightPlotX, plotY])))])
lanePoints = np.hstack((leftPoints, rightPoints))
# Draw the lane onto the warped blank image
cv2.fillPoly(laneImg, np.int_([lanePoints]), (255, 0, 0))
# Draw the lane lines
cv2.polylines(laneImg, np.int_([leftPoints]), isClosed=False, color=(0, 0, 255), thickness=32)
cv2.polylines(laneImg, np.int_([rightPoints]), isClosed=False, color=(0, 255, 0), thickness=32)
# Convert the lane image from the bird's eye view to the original perspective
unwarpedLane = PerspectiveTransformer.Unwarp(laneImg)
# Add the lane lines overlay
outImg = cv2.addWeighted(undistortedImg, 1, unwarpedLane, 0.3, 0)
# Add the radius of curvature overlay
cv2.putText(outImg, "Curvature radius: %dm" % (curveRad),
(20, 70), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (255, 255, 255), 2)
# Add the course deviation overlay
if deviation < 0:
deviationDirection = "left"
else:
deviationDirection = "right"
    deviation = np.absolute(deviation)
    cv2.putText(outImg, "Course deviation: %.2fm %s" % (deviation, deviationDirection),
                (20, 120), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (255, 255, 255), 2)
    # Convert back to the RGB format expected by MoviePy
    return cv2.cvtColor(outImg, cv2.COLOR_BGR2RGB)
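# Illustrative entry point sketch: it wires ProcessImage into MoviePy's fl_image so
# every RGB frame of a driving video is annotated. The --input/--output argument
# names and the default file names are placeholder assumptions, not fixed by the
# modules imported above.
if __name__ == "__main__":
    argParser = argparse.ArgumentParser(description="Annotate lane lines, curvature and deviation on a video")
    argParser.add_argument("--input", type=str, default="project_video.mp4", help="Input video path (assumed default)")
    argParser.add_argument("--output", type=str, default="project_video_annotated.mp4", help="Output video path (assumed default)")
    args = argParser.parse_args()
    inputClip = VideoFileClip(args.input)
    annotatedClip = inputClip.fl_image(ProcessImage)  # RGB frame in, annotated RGB frame out
    annotatedClip.write_videofile(args.output, audio=False)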
from astropy.table import Table, Column, Row
#from astropy_healpix import healpy
import sys
import os, glob
import time
from astropy.cosmology import FlatLambdaCDM
import matplotlib.pyplot as plt
import astropy.units as u
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord
import astropy.constants as cc
import astropy.io.fits as fits
import scipy
from scipy.special import erf
from scipy.stats import norm
from scipy.interpolate import interp2d
from scipy.interpolate import interp1d
from scipy.stats import scoreatpercentile
import h5py
import numpy as np
from colossus.cosmology import cosmology
from colossus.lss import mass_function as mf
from colossus.lss import peaks
from sklearn import mixture
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from scipy.optimize import curve_fit
import ultranest
from ultranest.plot import cornerplot
import corner
import pandas as pd
import hydro_mc
import random
print('Looks for correlation between X-ray center-BCG displacement and Xoff')
print('------------------------------------------------')
print('------------------------------------------------')
#set cosmology
cosmo = cosmology.setCosmology('multidark-planck')
dc = peaks.collapseOverdensity(z = 0)
h = cosmo.Hz(z=0)/100
cosmo_astropy = FlatLambdaCDM(H0=67.77, Om0=0.307)
direct = '.'
path_2_BCG = os.path.join(direct, 'SpidersXclusterBCGs-v2.0.fits')
path_2_clusters = os.path.join(direct,'catCluster-SPIDERS_RASS_CLUS-v3.0.fits')
#path_2_bcg_eFEDS = os.path.join(direct, 'BCG_eFEDS.fits')
#path_2_clusters_eFEDS = os.path.join(direct,'eFEDS_properties_18_3_2020.fits')
path_2_clusters_eFEDS = os.path.join(direct,'wcen','decals_dr8_run_32_efeds_extendedSourceCatalog_mllist_ph_22_11_2019_v940_final_catalog.fit')
#path_2_model = os.path.join('..','quarantine','HMF','g_sigma','coeff','HMD','z_0.000','model.fit')
path_2_model = os.path.join('..','quarantine','gsigma','extended','3d','z_0.000','hsigma_params.fit')
path_2_zevo = os.path.join('..','quarantine','gsigma','extended','3d','zevo','hsigma_params.fit')
path_2_clusters_eRASS = os.path.join(direct,'eRASS','decals_dr8_run_redmapper_v0.6.6_lgt20_catalog2.fit')
path_2_clusters_shear_sel = os.path.join(direct,'distance_shearsel_xray.txt')
path2lightcone_MD10 = os.path.join(direct,'MD10_eRO_CLU_b10_CM_0_pixS_20.0.fits')
path2lightcone_MD40 = os.path.join(direct,'MD40_eRO_CLU_b8_CM_0_pixS_20.0.fits')
#read catalogs SDSS
print('reading catalogs...')
t_clus = Table.read(path_2_clusters)
dt_clus = t_clus.to_pandas()
id_clus = np.array(dt_clus.CLUS_ID)
ra_bcg = np.array(dt_clus.RA_OPT)
dec_bcg = np.array(dt_clus.DEC_OPT)
z_sp = np.array(dt_clus.SCREEN_CLUZSPEC)
ra_clus = np.array(dt_clus.RA)
dec_clus = np.array(dt_clus.DEC)
rich = np.array(dt_clus.LAMBDA_CHISQ_OPT)
z_ph = np.array(dt_clus.Z_LAMBDA)
r200 = np.array(dt_clus.R200C_DEG)
richness = np.array(dt_clus.LAMBDA_CHISQ_OPT)
Ncomp = np.array(dt_clus.NCOMPONENT)
index1comp = np.where(Ncomp==1)
print(r200)
print(np.average(z_sp))
ind1 = (ra_clus > 100) & (ra_clus<315)
print(min(ra_clus[ind1]),max(ra_clus[ind1]))
print(min(dec_clus[ind1]),max(dec_clus[ind1]))
ind1 = (ra_clus < 100) #or (ra_clus>315)
print(min(ra_clus[ind1]),max(ra_clus[ind1]))
print(min(dec_clus[ind1]),max(dec_clus[ind1]))
print(min(z_sp),max(z_sp))
mass_rich = 3e14*(richness/38.56*((1+z_sp)/1.18)**1.13)**(1/0.99)*(200/178)**3
print('%.3g'%min(mass_rich),'%.3g'%max(mass_rich))
print('computing offset...')
dist_col = cosmo.comovingDistance(z_min=0.,z_max=z_sp)*u.pc*1e6/0.6777 #/h
#print('colossus = ', dist_col)
dist = cosmo_astropy.comoving_distance(z_sp).to(u.pc)
#print('astropy = ', dist)
bcg = SkyCoord(ra_bcg[index1comp]*u.degree,dec_bcg[index1comp]*u.degree,frame='fk5',distance=dist[index1comp])
clus = SkyCoord(ra_clus[index1comp]*u.degree, dec_clus[index1comp]*u.degree, frame='fk5',distance=dist[index1comp])
print(clus)
sep = bcg.separation_3d(clus)
#get separation in kpc (same as Xoff in distinct catalogs)
sep = sep.value*1e-3
#print(sep)
r_200 = cosmo_astropy.kpc_comoving_per_arcmin(z_sp).value*(r200*60)
x = (cosmo.Om(z_sp)-1)
delta_vir = (18*(np.pi)**2+82*x-39*x**2)/cosmo.Om(z_sp)
#print(delta_vir)
rvir = (200/delta_vir)**3*r_200
#print(rvir)
#get xoff from the data and get its PDF
#xoff_data = sep/rvir
xoff_data = sep
#binning = np.linspace(-3.,0.,20)
binning = np.linspace(min(xoff_data),max(xoff_data),20)
pdf, b = np.histogram(np.log10(xoff_data),bins=binning,density=True)
bins = (b[:-1]+b[1:])/2
xoff_data = np.sort(xoff_data)
cdf = np.arange(1,len(xoff_data)+1)/len(xoff_data)
print(cdf)
fig = plt.figure(figsize=(10,10))
psf = np.array([10,30,60,120,180])
shift = np.zeros(len(psf))
for k,kernel in enumerate(psf):
#error = cosmo_astropy.kpc_comoving_per_arcmin(z_sp[index1comp]).value*(kernel/60)
#print('error = ',error,' kpc')
#compute a new centroid shifted by xoff and noise
print(bcg,clus)
offset = bcg.separation(clus)+Angle(np.random.normal(0,Angle(kernel/3600, unit=u.deg).value,len(clus)), unit=u.deg) #noise is a gaussian with sigma=kernel
angle = np.random.randint(0,360,size=len(clus))
print('angle = ',angle)
clus_new = clus.directional_offset_by(angle, offset)
clus_new = SkyCoord(clus_new,distance=dist[index1comp])
print('clus_new = ',clus_new)
sep_new = bcg.separation_3d(clus_new).value*1e-3
print('sep_new =', sep_new)
plt.scatter(sep, sep-sep_new, label='%d'%(kernel),s=12)
print('convolution effect = ',np.average(sep-sep_new))
shift[k] = np.sqrt(np.average((sep-sep_new)**2))
plt.ylabel('Xoff - Xoff degraded',fontsize=20)
plt.xlabel('Xoff',fontsize=20)
plt.legend(fontsize=15)
plt.tick_params(labelsize=15)
plt.title('SPIDERS',fontsize=20)
plt.grid(True)
plt.tight_layout()
outf = os.path.join(direct,'xoff_degraded.png')
plt.savefig(outf,overwrite=True)
t=Table()
t.add_column(Column(name='kernel',data=psf,unit=''))
t.add_column(Column(name='shift', data=shift,unit=''))
outt = os.path.join(direct,'shift.fit')
t.write(outt,overwrite=True)
#xoff_spiders_shift_ = np.abs(xoff_data*((xoff_data-shift[1])/xoff_data))
#print('xoff data = ',(xoff_data))
#print('xoff_data shift', xoff_spiders_shift_)
#print('shift = ',np.average(np.abs(shift)))
#xoff_spiders_shift = np.sort(xoff_spiders_shift_)
#cdf_spiders_shift = np.arange(1,len(xoff_spiders_shift)+1)/len(xoff_spiders_shift)
#sys.exit()
#read catalogs eFEDS
print('reading catalogs...')
t_clus_eFEDS = Table.read(path_2_clusters_eFEDS)
print(t_clus_eFEDS['id_src'])
id_clus_eFEDS = t_clus_eFEDS['id_src']
ra_bcg_eFEDS = t_clus_eFEDS['ra']
dec_bcg_eFEDS = t_clus_eFEDS['dec']
ra_clus_eFEDS = t_clus_eFEDS['ra_orig']
dec_clus_eFEDS = t_clus_eFEDS['dec_orig']
z_lambda_eFEDS = t_clus_eFEDS['z_lambda']
print('computing offset...')
dist_col_eFEDS = cosmo.comovingDistance(z_min=0.,z_max=np.array(z_lambda_eFEDS))*u.pc*1e6/0.6777 #/h
print('colossus = ', dist_col_eFEDS)
dist_eFEDS = cosmo_astropy.comoving_distance(np.array(z_lambda_eFEDS)).to(u.pc)
print('astropy = ', dist)
bcg_eFEDS = SkyCoord(np.array(ra_bcg_eFEDS)*u.degree,np.array(dec_bcg_eFEDS)*u.degree,frame='fk5',distance=dist_eFEDS)
clus_eFEDS = SkyCoord(np.array(ra_clus_eFEDS)*u.degree, np.array(dec_clus_eFEDS)*u.degree, frame='fk5',distance=dist_eFEDS)
sep_eFEDS = bcg_eFEDS.separation_3d(clus_eFEDS)
#get separation in kpc (same as Xoff in distinct catalogs)
sep_eFEDS = sep_eFEDS.value*1e-3
print(len(sep_eFEDS))
#x_eFEDS = (cosmo.Om(z_mcmf_eFEDS)-1)
#delta_vir_eFEDS = (18*(np.pi)**2+82*x_eFEDS-39*x_eFEDS**2)/cosmo.Om(z_mcmf_eFEDS)
#print(delta_vir)
#vir_eFEDS = (200/delta_vir_eFEDS)**3*r200_eFEDS
#print(rvir)
#get xoff from the data and get its PDF
#xoff_data = sep/rvir
xoff_data_eFEDS = sep_eFEDS
#binning_eFEDS = np.linspace(-3.,0.,20)
binning_eFEDS = np.linspace(min(xoff_data_eFEDS),max(xoff_data_eFEDS),20)
pdf_eFEDS, b_eFEDS = np.histogram(np.log10(xoff_data_eFEDS),bins=binning_eFEDS,density=True)
bins_eFEDS = (b_eFEDS[:-1]+b_eFEDS[1:])/2
indsort_eFEDS = np.argsort(xoff_data_eFEDS)
xoff_data_eFEDS_sort = xoff_data_eFEDS[indsort_eFEDS]
cdf_eFEDS = np.arange(1,len(xoff_data_eFEDS_sort)+1)/len(xoff_data_eFEDS_sort)
print(cdf_eFEDS)
ind_new_eFEDS = []
for i in range(len(cdf_eFEDS)):
ind_new_eFEDS.append(int(np.argwhere(indsort_eFEDS==i)))
cdf_eFEDS_back = cdf_eFEDS[ind_new_eFEDS]
t_clus_eFEDS.add_column(Column(name='Sep',data=xoff_data_eFEDS,unit=''))
t_clus_eFEDS.add_column(Column(name='cdf',data=cdf_eFEDS_back,unit=''))
outt_eFEDS = os.path.join(direct,'wcen','decals_dr8_run_32_efeds_extendedSourceCatalog_mllist_ph_22_11_2019_v940_final_catalog_cdf.fit')
t_clus_eFEDS.write(outt_eFEDS,overwrite=True)
#read catalogs eRASS
print('reading catalog eRASS...')
t_clus_eRASS_uncut = Table.read(path_2_clusters_eRASS)
richness_eRASS = t_clus_eRASS_uncut['lambda']
ext_like_eRASS = t_clus_eRASS_uncut['ext_like']
det_like_eRASS = t_clus_eRASS_uncut['det_like_0']
index = ((richness_eRASS > 30) & (ext_like_eRASS > 0) & (det_like_eRASS > 5))
print(index)
t_clus_eRASS = t_clus_eRASS_uncut[index]
id_clus_eRASS = t_clus_eRASS['id_src']#[index]
print(id_clus_eRASS)
ra_bcg_eRASS = t_clus_eRASS['ra']#[index]
dec_bcg_eRASS = t_clus_eRASS['dec']#[index]
ra_clus_eRASS = t_clus_eRASS['ra_orig']#[index]
dec_clus_eRASS = t_clus_eRASS['dec_orig']#[index]
z_lambda_eRASS = t_clus_eRASS['z_lambda']#[index]
print('computing offset...')
dist_col_eRASS = cosmo.comovingDistance(z_min=0.,z_max=np.array(z_lambda_eRASS))*u.pc*1e6/0.6777 #/h
print('colossus = ', dist_col_eRASS)
dist_eRASS = cosmo_astropy.comoving_distance(np.array(z_lambda_eRASS)).to(u.pc)
print('astropy = ', dist)
bcg_eRASS = SkyCoord(np.array(ra_bcg_eRASS)*u.degree,np.array(dec_bcg_eRASS)*u.degree,frame='fk5',distance=dist_eRASS)
clus_eRASS = SkyCoord(np.array(ra_clus_eRASS)*u.degree, np.array(dec_clus_eRASS)*u.degree, frame='fk5',distance=dist_eRASS)
sep_eRASS = bcg_eRASS.separation_3d(clus_eRASS)
#get separation in kpc (same as Xoff in distinct catalogs)
sep_eRASS = sep_eRASS.value*1e-3
#get xoff from the data and get its PDF
#xoff_data = sep/rvir
xoff_data_eRASS = sep_eRASS
binning_eRASS = np.linspace(min(xoff_data_eRASS),max(xoff_data_eRASS),20)
pdf_eRASS, b_eRASS = np.histogram(np.log10(xoff_data_eRASS),bins=binning_eRASS,density=True)
bins_eRASS = (b_eRASS[:-1]+b_eRASS[1:])/2
indsort_eRASS = np.argsort(xoff_data_eRASS)
xoff_data_eRASS_sort = xoff_data_eRASS[indsort_eRASS]
cdf_eRASS = np.arange(1,len(xoff_data_eRASS_sort)+1)/len(xoff_data_eRASS_sort)
ind_new_eRASS = []
for i in range(len(cdf_eRASS)):
ind_new_eRASS.append(int(np.argwhere(indsort_eRASS==i)))
cdf_eRASS_back = cdf_eRASS[ind_new_eRASS]
t_clus_eRASS.add_column(Column(name='Sep',data=xoff_data_eRASS,unit=''))
t_clus_eRASS.add_column(Column(name='cdf',data=cdf_eRASS_back,unit=''))
outt_eRASS = os.path.join(direct,'eRASS','decals_dr8_run_redmapper_v0.6.6_lgt30_catalog_eRASS_clusters_cdf.fit')
t_clus_eRASS.write(outt_eRASS,overwrite=True)
print(cdf_eRASS)
#work on shear selected efeds clusters
dfr = pd.read_csv(path_2_clusters_shear_sel, sep='\t', header=None, dtype='a')
print(dfr)
dist_shearsel = pd.to_numeric(dfr[9][1:].values)
dist_shear_sort = np.sort(dist_shearsel)
cdf_shear_sel = np.arange(1,len(dist_shear_sort)+1)/len(dist_shear_sort)
#ota2020
displ_ota = np.array([52,18,239,22,20,40,76,23,228,17,40,171,109,133,41,260,5,111,74,113,188,102,17,26,93,187,30,129,129,279,64,189,131,15,196,166,82])
displ_ota_sort = np.sort(displ_ota)
cdf_ota = np.arange(1,len(displ_ota_sort)+1)/len(displ_ota_sort)
#mann2012
displ_mann = np.array([357.3,279.3,50.7,23.7,130.3,98.1,69.7,72.5,32.7,463.1,138.8,90.8,316.5,147.5,61.8,23.5,180.1,107.3,88.9,96.1,319.7,129.1,44.8,31.4,155.8,79, 21.3,11.8,53.9,103.3,38.9,47.3,15.1,24.1,35.9,67.3,119.9,70.1,25.5,48.1,89.9,8.3,30.8,18,9.1,5.7,70.5,23.8,10.2,33.5,59.9,19.4,10.5,114,33.8,16.8,32.5,37.7,21.5,34.7, 15.5,7.1,2.5,14.1,7.2,4.1,14.8,5.7,20.5,19.5,25.6,9.9,5.6,22.0,10.9,14.4,21.4,9.9,5.4,14.6,20.8,19.2,20.1,7.6,7,27.3,2.5,32.6,10.3,5.9,4.9,5.3,10,10.8,12.2,22.2,12.9, 3.9,7.9,7.7,7.8,13.7,7.3,8.0,26.7,21.7,19.7])
displ_mann_sort = np.sort(displ_mann)
cdf_mann = np.arange(1,len(displ_mann_sort)+1)/len(displ_mann_sort)
#rossetti2016
displ_rossetti = np.array([143.8, 2.3, 48.4, 3.9, 7.2, 71.9, 2.8, 0.3, 20.1, 14, 2, 204.7, 8.6, 32.4, 3.9, 1015.8, 9.1, 185.7, 6.2, 54, 3.2, 157.1, 38.3, 53.1, 24.8, 0.7, 242.2, 341.3, 13.8, 7.2, 33.1, 4.8, 31.6, 160.5, 123.7, 716.9, 33.9, 96.2, 1.7, 250.2, 16.7, 45.6, 6.4, 3.7, 9.2, 2.7, 42.4, 58.9, 11.6, 7.1, 51.4, 7.9, 6.3, 8.4, 77.5, 10.5, 401, 2.6, 234.7, 6.3, 7.3, 12.2, 10.3, 11.4, 34.3, 192.6, 10, 218, 2.3, 726.4, 163.5, 225.3, 5.2, 65.4, 23.7, 15.7, 1004, 20.4, 1.3, 390.3, 29.3, 16.3, 89.6, 200.1, 29.2, 112.6, 349.6, 22.7, 18.8, 565.5, 13.8, 14.9, 2.3, 3.5, 581.5, 28.7, 24.8, 16.8, 7.5, 996.3, 87.9, 58.8, 168.9, 175.4, 25.8, 12.2, 69.3, 3.3, 814.2, 2.2, 5.7, 143.7, 3.2, 6.4, 1.7, 5.4, 89.5, 59.7, 1.6, 11.6, 7.6, 3.7, 12.4, 65.8, 3.3, 212, 7.1, 88.9, 15.1, 444.6, 25.3, 11.8])
displ_rossetti_sort = np.sort(displ_rossetti)
cdf_rossetti = np.arange(1,len(displ_rossetti_sort)+1)/len(displ_rossetti_sort)
#lightcone
t10 = Table.read(path2lightcone_MD10)
displ_lightcone_10 = t10['HALO_Xoff']/h*2/np.pi
#displ_lc_sort_10 = np.sort(displ_lightcone_10)
#cdf_lightcone_10 = np.arange(1,len(displ_lc_sort_10)+1)/len(displ_lc_sort_10)
t40 = Table.read(path2lightcone_MD40)
displ_lightcone_40 = t40['HALO_Xoff']/h*2/np.pi
#displ_lc_sort_40 = np.sort(displ_lightcone_40)
#cdf_lightcone_40 = np.arange(1,len(displ_lc_sort_40)+1)/len(displ_lc_sort_40)
index10_spiders = (t10['HALO_Mvir']/h>7e13) & (t10['redshift_S']<0.67) & (t10['redshift_S']>0.01) & (t10['HALO_pid']==-1) & (t10['CLUSTER_FX_soft']>1e-13) & ((t10['RA'].all()>110.2 and t10['RA'].all()<261.6 and t10['DEC'].all()>16 and t10['DEC'].all()<60.5) or (t10['RA'].all()>0 and t10['RA'].all()<43.2 and t10['DEC'].all()>-5.5 and t10['DEC'].all()<35.3))
#index40_spiders = (t40['HALO_Mvir']/h>7e13) & (t40['redshift_S']<0.67) & (t40['redshift_S']>0.01) & (t40['HALO_pid']==-1) & (t40['CLUSTER_FX_soft']>1e-13) & ((t40['RA'].all()>110.2 and t40['RA'].all()<261.6 and t40['DEC'].all()>16 and t40['DEC'].all()<60.5) or (t40['RA'].all()>0 and t40['RA'].all()<43.2 and t40['DEC'].all()>-5.5 and t40['DEC'].all()<35.3))
#displ_lightcone_concat_spiders_ = np.append(displ_lightcone_10[index10_spiders],displ_lightcone_40[index40_spiders])
displ_lightcone_concat_spiders_ = displ_lightcone_10[index10_spiders] #+ shift[2]
displ_lightcone_concat_spiders_low_ = displ_lightcone_10[index10_spiders] #+ shift[2] - shift[0]
displ_lightcone_concat_spiders_up_ = displ_lightcone_10[index10_spiders] + shift[4] #+ shift[0]
#displ_lc_concat_sort_spiders = np.sort(displ_lightcone_concat_spiders)
displ_lc_concat_sort_spiders_low = np.sort(displ_lightcone_concat_spiders_low_)
displ_lc_concat_sort_spiders_up = np.sort(displ_lightcone_concat_spiders_up_)
displ_lc_concat_sort_spiders = np.sort(displ_lightcone_concat_spiders_)
cdf_lightcone_concat_spiders_low = np.arange(1,len(displ_lc_concat_sort_spiders_low)+1)/len(displ_lc_concat_sort_spiders_low)
cdf_lightcone_concat_spiders_up = np.arange(1,len(displ_lc_concat_sort_spiders_up)+1)/len(displ_lc_concat_sort_spiders_up)
cdf_lightcone_concat_spiders = np.arange(1,len(displ_lc_concat_sort_spiders)+1)/len(displ_lc_concat_sort_spiders)
M_vir_ota = hydro_mc.mass_from_mm_relation('500c', 'vir', M=7e13, a=1/(1+0.37),omega_m = 0.307, omega_b = 0.048, sigma8=0.8228, h0=h)
print('%.3g'%(M_vir_ota))
M_vir_mann = hydro_mc.mass_from_mm_relation('500c', 'vir', M=7e13, a=1/(1+0.38),omega_m = 0.307, omega_b = 0.048, sigma8=0.8228, h0=h)
#index10_ota = (t10['HALO_Mvir']/h>M_vir_ota) & (t10['redshift_S']<1.1) & (t10['redshift_S']>0.1) & (t10['HALO_pid']==-1)
index10_ota = (t10['HALO_Mvir']/h>M_vir_ota) & (t10['redshift_S']<1.1) & (t10['redshift_S']>0.1) & (t10['HALO_pid']==-1) & (t10['CLUSTER_FX_soft']>2e-14) & ((t10['RA'].all()>0 and t10['RA'].all()<14.4 and t10['DEC'].all()>-7.2 and t10['DEC'].all()<7.2))
#displ_lightcone_concat_ota_ = np.append(displ_lightcone_10[index10_ota],displ_lightcone_40[index40_ota])
displ_lightcone_concat_ota_ = displ_lightcone_10[index10_ota]
displ_lc_concat_sort_ota = np.sort(displ_lightcone_concat_ota_)
cdf_lightcone_concat_ota = np.arange(1,len(displ_lc_concat_sort_ota)+1)/len(displ_lc_concat_sort_ota)
#index10_mann = (t10['HALO_Mvir']/h>M_vir_mann) & (t10['redshift_S']<0.7) & (t10['redshift_S']>0.15) & (t10['HALO_pid']==-1)
index10_mann = (t10['redshift_S']<0.7) & (t10['redshift_S']>0.15) & (t10['HALO_pid']==-1) & (t10['CLUSTER_FX_soft']>1e-12) & ((t10['DEC'].all()>-40 and t10['DEC'].all()<80)) & (t10['CLUSTER_LX_soft']>44.7)
#displ_lightcone_concat_mann_ = np.append(displ_lightcone_10[index10_mann],displ_lightcone_40[index40_mann])
displ_lightcone_concat_mann_ = displ_lightcone_10[index10_mann]
displ_lc_concat_sort_mann = np.sort(displ_lightcone_concat_mann_)
cdf_lightcone_concat_mann = np.arange(1,len(displ_lc_concat_sort_mann)+1)/len(displ_lc_concat_sort_mann)
#make prediction from the model
model = Table.read(path_2_model)
pars_model = model['pars']
zevo = Table.read(path_2_zevo)
zevo_pars = zevo['pars']
parameters = np.append(pars_model,zevo_pars)
#colossus wants masses in Msun/h, so if I want to use physical 5e13 Msun, I will give him 5e13*h= 3.39e13 Msun/h
M1 = 5e13*h
R1 = peaks.lagrangianR(M1)
sigma1 = cosmo.sigma(R1,z=0)
log1_sigma1 = np.log10(1/sigma1)
M2 = 2e14*h
R2 = peaks.lagrangianR(M2)
sigma2 = cosmo.sigma(R2,z=0)
log1_sigma2 = | np.log10(1/sigma2) | numpy.log10 |
''' This script is written to conduct a case-study reported in the paper
"Unsupervised learning-based SKU segmentation".
The script utilizes the free software machine learning library “scikit-learn” as a core,
complementing it with several additional algorithms.
The script uses the concept of a data pipeline to sequentially perform the following procedures:
to impute the missing data with nearest-neighbour imputation
to standardize the data
to identify and trim outliers and small 'blobs' with LOF and mean-shift
to cluster the data with k-means and DBSCAN
to improve the eventual clustering result via PCA
Since the ground truth is not provided, the clustering is validated only by internal evaluation, namely
by the silhouette index, the Calinski-Harabasz index and the Dunn index '''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from itertools import cycle
from fancyimpute import KNN
from sklearn import preprocessing
from sklearn.neighbors import LocalOutlierFactor
from sklearn.decomposition import PCA
from sklearn.cluster import MeanShift, KMeans, DBSCAN, estimate_bandwidth
from sklearn import metrics
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.decomposition import FactorAnalysis
from scipy import ndimage
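# The module docstring mentions validation by the Dunn index, which scikit-learn does
# not provide (unlike the silhouette and Calinski-Harabasz scores). Below is a minimal
# sketch of such a helper; the function name and the exact definition used in the
# original study are assumptions here.
def dunn_index(data, labels):
    """Ratio of the smallest inter-cluster distance to the largest cluster diameter."""
    distances = euclidean_distances(data)
    labels = np.asarray(labels)
    unique_labels = np.unique(labels)
    inter_cluster = np.inf
    max_diameter = 0.0
    for i in unique_labels:
        mask_i = labels == i
        # largest pairwise distance inside cluster i (its diameter)
        max_diameter = max(max_diameter, distances[np.ix_(mask_i, mask_i)].max())
        for j in unique_labels:
            if i < j:
                mask_j = labels == j
                # smallest distance between any point of cluster i and any point of cluster j
                inter_cluster = min(inter_cluster, distances[np.ix_(mask_i, mask_j)].min())
    return inter_cluster / max_diameter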
class Pipeline:
def __init__(self, methods):
self.methods = methods
def pump(self):
for method in self.methods:
method
w = Writer(pd.DataFrame(p.data))
w.write_to_excel()
class Processing:
def __init__(self, data, k=10, n_neighbors=20):
self.data = data
self.k = k
self.n_neighbors = n_neighbors
def knn_imputation(self):
self.data = pd.DataFrame(KNN(self.k).fit_transform(self.data))
def standardization(self):
self.data = preprocessing.scale(self.data)
def local_outlier_factor(self, drop_anomalies=True):
lof = LocalOutlierFactor(self.n_neighbors)
predicted = lof.fit_predict(self.data)
data_with_outliers = pd.DataFrame(self.data)
data_with_outliers['outliers'] = pd.Series(predicted, index=data_with_outliers.index)
if drop_anomalies is True:
def drop_outliers(data):
data = data_with_outliers
data = data.sort_values(by=['outliers'])
outliers_number = -data[data.outliers == -1].sum().loc['outliers'].astype(int)
print(outliers_number, " outliers are found")
return data, data.iloc[outliers_number:], outliers_number
data_with_outliers, data_without_outliers, outliers_number = drop_outliers(data_with_outliers)
w = Writer(data_without_outliers, sheet='Sheet2', file='outliers.xlsx')
w.write_to_excel()
def get_data(self):
return self.data
class Reduction:
def __init__(self, n_components=2):
self.n_components = n_components
def pca(self, data):
compressor = PCA(self.n_components)
compressor.fit(data)
return compressor.transform(data), compressor.explained_variance_ratio_.sum()
def factor_analysis(self, data):
def ortho_rotation(lam, method='varimax', gamma=None, eps=1e-6, itermax=100):
            if gamma is None:
if (method == 'varimax'):
gamma = 1.0
nrow, ncol = lam.shape
R = np.eye(ncol)
var = 0
for i in range(itermax):
lam_rot = np.dot(lam, R)
tmp = np.diag(np.sum(lam_rot ** 2, axis=0)) / nrow * gamma
u, s, v = np.linalg.svd(np.dot(lam.T, lam_rot ** 3 - np.dot(lam_rot, tmp)))
R = np.dot(u, v)
var_new = np.sum(s)
if var_new < var * (1 + eps):
break
var = var_new
print(var)
print(R)
return R
transformer = FactorAnalysis(n_components=self.n_components, random_state=0)
transformed_data = transformer.fit_transform(data)
r = ortho_rotation(transformed_data)
transformed_data = np.matmul(r, np.transpose(transformed_data))
return transformed_data
class Clustering:
def __init__(self, data):
self.data = data
def mean_shift_clustering(self, plot=False, drop_small_clusters=True, threshold=4):
def shift(data):
bandwidth = estimate_bandwidth(data, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(data)
return ms.labels_, ms.cluster_centers_, pd.DataFrame(data)
labels, cluster_centers, labeled_data = shift(self.data)
old_id = labeled_data.index
iteration = 1
while drop_small_clusters is True and iteration<4:
dropped = 0
labeled_data['mean-shift'] = pd.Series(labels)
labeled_data['clusters_sorted'] = pd.Series(labels).value_counts()
to_drop = []
labeled_data.reset_index(drop=True)
for cluster in labeled_data.index:
if labeled_data.loc[cluster, 'clusters_sorted']< threshold:
for row in range(0, len(labeled_data['mean-shift'])):
if labeled_data.loc[row, 'mean-shift'] == cluster:
to_drop.append(row)
for i in to_drop:
labeled_data = labeled_data.drop(i)
dropped += 1
labeled_data = labeled_data.drop(['clusters_sorted', 'mean-shift'], axis=1)
labels, cluster_centers, labeled_data = shift(labeled_data)
iteration += 1
print("iteration: ", iteration, "clusters: ", max(labels)+1, "dropped: ", dropped)
labeled_data['mean-shift'] = pd.Series(labels)
labeled_data['clusters_sorted'] = pd.Series(labels).value_counts()
w = Writer(labeled_data, sheet='Sheet2', file='labeled.xlsx')
w.write_to_excel()
labeled_data = labeled_data.drop('mean-shift', axis=1)
labeled_data = labeled_data.drop('clusters_sorted', axis=1)
self.data = labeled_data
labels_unique = | np.unique(labels) | numpy.unique |
"""
Collection of quick and simple plotting functions
horizontal_map - driver to make two nice horizontal maps next to each other
depth_slice - same, but for contour plot of depth vs some coordinate
_nice_plot - underlying script for a single nice figure
"""
from copy import copy
import numpy as np
import matplotlib.pyplot as plt
import cmocean
import xarray as xr
from warnings import warn
try:
import ecco_v4_py as ecco
except ImportError:
print('You need to reorganize pych at some point')
from matplotlib.ticker import MultipleLocator
from .utils import get_cmap_rgb
def plot_logbin(xda,x=None, y=None,
nbins=3,bin_edges=None,
ax=None,
cmap='RdBu_r',
cbar_label=None,
**kwargs):
"""Make a plot, binning field by log10 of values
Parameters
----------
xda : xarray.DataArray
field to be plotted, must be 2D
x, y : array_like, optional
x and y coordinates for the plot
nbins : int, optional
number of colored bin (centers) positive and negative values
i.e. we get 2*nbins+1, bins. one is neutral (middle)
bin_edges : array-like, optional
exclusive with nbins, specify bin edges (positive only)
ax : matplotlib.axes, optional
to make plot at
cmap : str, optional
specifies colormap
cbar_label : str, optional
label for colorbar, default grabs units from DataArray
kwargs
passed to matpotlib.pyplot.contourf
Returns
-------
ax : matplotlib.axes
if one is not provided
"""
return_ax = False
if ax is None:
_,ax = plt.subplots()
return_ax=True
if nbins is not None and bin_edges is not None:
        raise TypeError('provide either nbins or bin_edges, not both')
log = np.log10(np.abs(xda))
log = log.where((~np.isnan(log)) & (~np.isinf(log)),0.)
if nbins is not None:
_,bin_edges = np.histogram(log,bins=nbins)
else:
nbins = len(bin_edges)-1
logbins=np.round(bin_edges)
# determine if colorbar will be extended
maxExtend = (xda>10**logbins[-1]).any().values
minExtend = (xda<-10**logbins[-1]).any().values
extend='neither'
if minExtend and maxExtend:
extend='both'
elif maxExtend:
extend='max'
elif minExtend:
extend='min'
# determine number of colors, adding one for each extension
# and always one extra, the middle color bin
ncolors=2*nbins+1
ncolors = ncolors+1 if maxExtend else ncolors
ncolors = ncolors+1 if minExtend else ncolors
# if only one end is extended,
# chop off the extreme value from the other end to fit
# in the middle (neutral) colorbin
if extend in ['min' ,'max']:
cmap = get_cmap_rgb(cmap,ncolors+1)
bot = np.arange(1,nbins+1) if extend=='max' else np.arange(0,nbins+1)
top = np.arange(ncolors-nbins,ncolors) if extend=='min' else np.arange(ncolors-nbins,ncolors+1)
index = list(bot)+[nbins+1]+list(top)
cmap = cmap[index,:]
else:
cmap=get_cmap_rgb(cmap,ncolors)
# levels and plot
levels=10**logbins
levels = np.concatenate([-levels[::-1],levels],axis=0)
if x is None or y is None:
im=ax.contourf(xda,levels=levels,colors=cmap,extend=extend,**kwargs)
else:
im=ax.contourf(x,y,xda,levels=levels,colors=cmap,extend=extend,**kwargs)
# label dem ticks
if cbar_label==None and 'units' in xda.attrs:
cbar_label=f'[{xda.attrs["units"]}]'
p=plt.colorbar(im,ax=ax,label=cbar_label)
ticklabels = [f'-10^{b:.0f}' for b in logbins[::-1]]
ticklabels += [f'10^{b:.0f}' for b in logbins]
p.set_ticklabels(ticklabels)
if return_ax:
return ax
else:
return im
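# A minimal usage sketch of plot_logbin with synthetic data; the values and the
# attribute name are illustrative assumptions. Defined as a function so importing
# the module has no side effects.
def _example_plot_logbin():
    values = np.random.randn(50, 50) * 10.0 ** np.random.randint(-3, 3, (50, 50))
    xda = xr.DataArray(values, attrs={'units': 'arbitrary'})
    # three positive and three negative log-spaced bins plus a neutral middle bin
    return plot_logbin(xda, nbins=3)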
def nice_inward_ticks(ax,
xminor_skip=None,yminor_skip=None):
"""Make nice inward pointing ticks
Parameters
----------
ax : matplotlib axis object
xminor_skip, yminor_skip : int, optional
interval of "minor" ticks, if None, then no minor ticks
"""
ax.tick_params(direction='in',which='major',length=8,
top=True,right=True,pad=6)
if xminor_skip is not None:
ax.xaxis.set_minor_locator(MultipleLocator(xminor_skip))
if yminor_skip is not None:
ax.yaxis.set_minor_locator(MultipleLocator(yminor_skip))
if xminor_skip is not None or yminor_skip is not None:
top = xminor_skip is not None
right = yminor_skip is not None
ax.tick_params(direction='in',which='minor',length=5,
top=top,right=right,pad=6)
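# A minimal usage sketch of nice_inward_ticks with synthetic data; the tick spacings
# are illustrative assumptions.
def _example_nice_inward_ticks():
    _, ax = plt.subplots()
    ax.plot(np.arange(10), np.arange(10) ** 2)
    # minor ticks every 1 unit in x and every 10 units in y
    nice_inward_ticks(ax, xminor_skip=1, yminor_skip=10)
    return ax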
def fill_between_std(x,ymean,ystd,
ax=None,fill_alpha=0.4,**kwargs):
"""A simple version of fill between to reduce typing"""
fill_kwargs = copy(kwargs)
if 'alpha' in kwargs:
warn(f'Resetting fill_alpha with provided alpha={kwargs["alpha"]}')
fill_kwargs['alpha'] = kwargs['alpha']
else:
fill_kwargs['alpha'] = fill_alpha
ax.plot(x,ymean,**kwargs) if ax is not None else plt.plot(x,ymean,**kwargs)
ax.fill_between(x,ymean-ystd,ymean+ystd,**fill_kwargs) if ax is not None else \
plt.fill_between(x,ymean-ystd,ymean+ystd,**fill_kwargs)
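# A minimal usage sketch of fill_between_std with synthetic data: a mean line with a
# +/- one standard deviation envelope on a provided axis.
def _example_fill_between_std():
    x = np.linspace(0, 2 * np.pi, 100)
    _, ax = plt.subplots()
    fill_between_std(x, np.sin(x), 0.1 * np.ones_like(x), ax=ax, color='C0', label='sin')
    return ax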
def plot_section(fld, left, right,
datasets, grids,
labels=None,
collapse_dim='x',
plot_diff0=False,
plot_sections_at_bottom=False,
single_plot=False,
nrows=None,
ncols=5,
fig=None,
xr_kwargs={}):
"""Plot a field in each dataset provided along a section in the domain
Parameters
----------
fld : str
string denoting field to grab in each dataset
left, right : pair of floats
denoting in longitude/latitude the coordinates of the left and rightmost
points to get a section of
datasets : list of xarray Datasets
containing all the data
grids : list of or a single xgcm Grid object(s)
this allows one to get a section of the data
use a single grid if all datasets have same grid information
labels : list of strings, optional
corresponding to the different datasets to label in figure
collapse_dim : str, optional
dimension along which to collapse
plot_diff0 : bool, optional
plot difference between first dataset and all others
plot_sections_at_bottom : bool, optional
if True, add a row at the bottom showing the section line
for each field
single_plot : bool, optional
        if True, plot all fields on one plot; the fields must be 1D
ncols : int, optional
changes the relative width of the quantity being plotted
and the rightmost plot showing the section
fig : matplotlib figure object, optional
for a different figure size
xr_kwargs : dict, optional
arguments to pass to xarray's plotting wrapper
Returns
-------
fig : matplotlib figure object
axs : matplotlib axis object(s)
"""
# setup the plot
if not single_plot:
nrows = len(datasets) if not plot_sections_at_bottom else len(datasets)+1
else:
nrows = 1 if not plot_sections_at_bottom else 2
ncols = ncols if not plot_sections_at_bottom else len(datasets)
fig = plt.figure(figsize=(18,6*nrows)) if fig is None else fig
axs = []
gs = fig.add_gridspec(nrows,ncols)
# handle list or single
datasets = [datasets] if not isinstance(datasets,list) else datasets
grids = [grids] if not isinstance(grids,list) else grids
labels = [labels] if not isinstance(labels,list) else labels
# assumption: same grid for all datasets if length 1
if len(grids)==1:
grids = grids*nrows
if len(labels)==1:
labels = labels*nrows
# set colormap for depth plot with section
cmap_deep = copy(plt.get_cmap('cmo.deep'))
cmap_deep.set_bad('gray')
if single_plot:
ax = fig.add_subplot(gs[0,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[0,:])
axs.append(ax)
for i,(ds,g,lbl) in enumerate(zip(datasets,grids,labels)):
# what to plot
plotme = ds[fld] - datasets[0][fld] if plot_diff0 and i!=0 else ds[fld]
# get the section as a mask
m={}
m['C'],m['W'],m['S'] = ecco.get_section_line_masks(left,right,ds,g)
# get coordinates for field
x,y,mask,sec_mask = _get_coords_and_mask(ds[fld].coords,m)
# replace collapse dim with actual name
rm_dim = x if collapse_dim == 'x' else y
# get mask and field
mask = mask.where(sec_mask,drop=True).mean(rm_dim)
plotme = plotme.where(sec_mask,drop=True).mean(rm_dim).where(mask)
# Plot the field
if len(plotme.dims)>1:
if single_plot:
                raise TypeError("Can't put multiple fields on a single plot")
ax = fig.add_subplot(gs[i,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[i,:])
axs.append(ax)
plotme.plot.contourf(y='Z',ax=ax,**xr_kwargs)
else:
if not single_plot:
ax = fig.add_subplot(gs[i,:-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[i,:])
axs.append(ax)
plot_dim = x if rm_dim==y else y
plotme.plot.line(x=plot_dim,ax=ax,label=lbl,**xr_kwargs)
ax.grid()
if lbl is not None:
if not single_plot:
if plot_diff0 and i!=0:
ax.set_title(f'{fld}({lbl}) - {fld}({labels[0]})')
else:
ax.set_title(f'{fld}({lbl})')
else:
ax.legend()
# Plot the section
axb = fig.add_subplot(gs[i,-1]) if not plot_sections_at_bottom else \
fig.add_subplot(gs[-1,i])
datasets[i].Depth.where(datasets[i].maskC.any('Z')).plot(
ax=axb,cmap=cmap_deep,add_colorbar=False)
m['C'].cumsum(dim=rm_dim[0]+'C').where(m['C']).plot(ax=axb,cmap='Greys',add_colorbar=False)
axb.set(title=f'',ylabel='',xlabel='')
axs.append(axb)
return fig,axs
def plot_zlev_with_max(xda,use_mask=True,ax=None,xr_kwargs={}):
"""Make a 2D plot at the vertical level where data array
has it's largest value in amplitude
Parameters
----------
xda : xarray DataArray
with the field to be plotted, function of (Z,Y,X)
use_mask : bool, optional
mask the field
ax : matplotlib axis object, optional
current plotting axis
xr_kwargs : dict, optional
additional arguments for xarray plotting method
Returns
-------
z : float
height of zlevel at maximum
"""
def _make_float(xarr):
"""useful for putting x,y,z of max val in plot title"""
if len(xarr)>1:
warn(f'{xarr.name} has more than one max location, picking first...')
xarr=xarr[0]
return float(xarr.values)
xda_max = np.abs(xda).max()
x,y,mask = _get_coords_and_mask(xda.coords)
# get X, Y, Z of max value
xda_maxloc = xda.where(xda==xda_max,drop=True)
if len(xda_maxloc)==0:
xda_maxloc = xda.where(xda==-xda_max,drop=True)
xsel = _make_float(xda_maxloc[x])
ysel = _make_float(xda_maxloc[y])
zsel = _make_float(xda_maxloc['Z'])
# grab the zlev
xda = xda.sel(Z=zsel)
# mask?
if use_mask:
xda = xda.where(mask.sel(Z=zsel))
if ax is not None:
xda.plot(ax=ax,**xr_kwargs)
ax.set_title(f'max loc (x,y,z) = ({xsel:.2f},{ysel:.2f},{zsel:.2f})')
else:
xda.plot(**xr_kwargs)
plt.title(f'max loc (x,y,z) = ({xsel:.2f},{ysel:.2f},{zsel:.2f})')
return zsel
def horizontal_map(x,y,fld1,fld2=None,
title1=None,title2=None,
depth=None,log_data=False,
mask1=None,mask2=None,
ncolors=None,
c_lim=None,c_lim1=None,c_lim2=None,
cmap=None,cmap1=None,cmap2=None):
"""
Make a figure with plots of fld1 and fld2 over x,y next to e/o
Parameters
----------
x,y: Grid information, giving lat/lon coordinates
fld1/2: 2D field as numpy array or xarray DataArray
fld2 optional, otherwise generate single figure
Optional Parameters
-------------------
title1/2: string for title above figure
depth: depth field as an xarray DataArray to be used as
plt.contour(depth.XC,depth.YC,depth.Depth)
log_data: plot log_10(fld)
mask1/2: mask field to with given mask array
ncolors: Number of colors for colormap
c_lim: two element array with colorbar limits
c_lim1/2: different colorbar limits for each plot
c_lim is used for both, c_lim1/2 are for left or right plot
cmap: string or colormap object
default for sequential data is 'YlGnBu_r'
default for diverging data is 'BuBG_r'
cmap1/2: similar logic for c_lim, c_lim1/2.
cmap is global, cmap1/2 are for individual plots
Returns
-------
fig : matplotlib.figure.Figure object
"""
# Test for c_lim or c_lim1/2
if c_lim is not None and (c_lim1 is not None or c_lim2 is not None):
raise ValueError('Can only provide c_lim or c_lim1/2, not all three')
if cmap is not None and (cmap1 is not None or cmap2 is not None):
raise ValueError('Can only provide cmap or cmap1/2, not all three')
if c_lim is not None:
c_lim1 = c_lim
c_lim2 = c_lim
if cmap is not None:
cmap1 = cmap
cmap2 = cmap
fig = plt.figure(figsize=(15,6))
plt.subplot(1,2,1)
_single_horizontal_map(x,y,fld1,title1,depth,log_data,mask1,ncolors,c_lim1,cmap1)
if fld2 is not None:
plt.subplot(1,2,2)
_single_horizontal_map(x,y,fld2,title2,depth,log_data,mask2,ncolors,c_lim2,cmap2)
plt.show()
return fig
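# A minimal usage sketch of horizontal_map; the lat/lon grid and fields are synthetic
# and purely illustrative.
def _example_horizontal_map():
    lon, lat = np.meshgrid(np.linspace(-180, 180, 90), np.linspace(-90, 90, 45))
    fld1 = np.cos(np.deg2rad(lat))
    fld2 = fld1 * np.sin(np.deg2rad(lon))
    return horizontal_map(lon, lat, fld1, fld2,
                          title1='cos(lat)', title2='cos(lat)*sin(lon)',
                          c_lim=[-1, 1])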
def depth_slice(x,z,fld1,fld2=None,
title1=None,title2=None,
depth=None,log_data=False,
mask1=None,mask2=None,
ncolors=None,
c_lim=None,c_lim1=None,c_lim2=None,
cmap=None,cmap1=None,cmap2=None):
"""
    Make a slice through depth with plots of fld1 and fld2 and depth on y axis next to each other
Parameters
----------
x,z: Grid information, x is some generic coordinate, z is depth
fld1/2: 2D field as numpy array or xarray DataArray
fld2 optional, otherwise generate single figure
Optional Parameters
-------------------
title1/2: string for title above figure
depth: depth field as an xarray DataArray to be used as
plt.contour(depth.XC,depth.YC,depth.Depth)
log_data: plot log_10(fld)
mask1/2: mask field to with given mask array
ncolors: Number of colors for colormap
c_lim: two element array with colorbar limits
c_lim1/2: different colorbar limits for each plot
c_lim is used for both, c_lim1/2 are for left or right plot
cmap: string or colormap object
default for sequential data is 'YlGnBu_r'
default for diverging data is 'BuBG_r'
cmap1/2: similar logic for c_lim, c_lim1/2.
cmap is global, cmap1/2 are for individual plots
Returns
-------
fig : matplotlib.figure.Figure object
"""
# Test for c_lim or c_lim1/2
if c_lim is not None and (c_lim1 is not None or c_lim2 is not None):
raise ValueError('Can only provide c_lim or c_lim1/2, not all three')
if cmap is not None and (cmap1 is not None or cmap2 is not None):
raise ValueError('Can only provide cmap or cmap1/2, not all three')
if c_lim is not None:
c_lim1 = c_lim
c_lim2 = c_lim
if cmap is not None:
cmap1 = cmap
cmap2 = cmap
fig = plt.figure(figsize=(15,6))
plt.subplot(1,2,1)
_single_depth_slice(x,z,fld1,title1,depth,log_data,mask1,ncolors,c_lim1,cmap1)
if fld2 is not None:
plt.subplot(1,2,2)
_single_depth_slice(x,z,fld2,title2,depth,log_data,mask2,ncolors,c_lim2,cmap2)
plt.show()
return fig
def _single_horizontal_map(x,y,fld,titleStr,depth,log_data,mask,ncolors,c_lim,cmap):
"""
Non-user facing function to distill horizontal data to numpy array for plotting
"""
if isinstance(fld, np.ndarray):
if len(np.shape(fld))==2:
fld_values = fld
fld_name = ''
elif len( | np.shape(fld) | numpy.shape |
from sklearn.metrics import roc_curve
from sklearn.cluster import KMeans
from tqdm import tqdm
import tensorflow as tf
import numpy as np
def custom_roc_curve(labels, distances):
'''
    custom roc curve, wrapper for roc_curve in sklearn.metrics
    Arg:
        labels : Numpy 1D array
            should be binary with 0 or 1
            0 - different images
            1 - same images
        distances : Numpy 1D array
            The closer two images are, the more likely they are to be the same image (positive),
            so the distances are negated before being used as scores
    Return:
        fpr, tpr, thr : false positive rates, true positive rates and thresholds (in distance units)
'''
fpr, tpr, thr = roc_curve(labels, -distances, pos_label=1)
return fpr, tpr, -thr
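# A minimal usage sketch of custom_roc_curve with synthetic inputs: pairs labelled 1
# ("same image") are given smaller distances.
def _example_custom_roc_curve():
    labels = np.array([1, 1, 0, 0, 1, 0])
    distances = np.array([0.2, 0.5, 1.3, 0.9, 0.1, 1.7])
    return custom_roc_curve(labels, distances)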
def k_mean_labeling(X_samples, n_clusters):
'''KMeansClustering wrapper
Args:
X_samples - Numpy 2D array
[n_sample, n_features]
n_clusters - int
'''
kmc = KMeansClustering(X_samples=X_samples, n_clusters=n_clusters, print_option=False)
return kmc.predict(predict_x=X_samples)
class KMeansClustering:
def __init__(self, X_samples, n_clusters, print_option=True):
'''
Args:
X_samples - Numpy 2D array
[n_sample, n_features]
n_clusters - int
print_option - bool
                defaults to True
'''
if print_option: print("Fitting X_samples starts")
self.nfeatures = X_samples.shape[1]
self.nclusters = n_clusters
self.manager = KMeans(n_clusters=self.nclusters, random_state=0).fit(X_samples)
if print_option: print("Fitting X_samples done")
@property
def centers(self):
return self.manager.cluster_centers_
def predict(self, predict_x):
'''
Args:
predict_x - Numpy 2D array
[n_predict, nfeatures]
Return:
label - Numpy 1D array
[n_predict, ], whose values is [0, self.clusters)
'''
assert predict_x.shape[1] == self.nfeatures, "x should have the same features %d but %d"%(self.nfeatures, predict_x.shape[1])
return self.manager.predict(predict_x)
def k_hash(self, predict_x, session):
'''
        accelerated with tensorflow
        "bigger is closer": similarity is implemented as the simple negative of the distance
Args:
predict_x - Numpy 2D array [npredict, nfeatures]
Return:
k_hash - Numpy 2D array [npredict, n_clusters]
'''
assert predict_x.shape[1] == self.nfeatures, "x should have the same features %d but %d"%(self.nfeatures, predict_x.shape[1])
npredict = predict_x.shape[0]
batch_size = 2
if npredict%batch_size!=0:
predict_x = np.concatenate([predict_x, | np.zeros([batch_size-npredict%batch_size, self.nfeatures]) | numpy.zeros |
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import stratx.partdep
def test_basic():
a = np.array([0,1,2])
b = | np.array([0,1,2]) | numpy.array |
import copy
import glob
import os
import time
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import utils
from PPO import PPO
from arguments import get_args
from envs import make_vec_envs
from model import Policy, SEVN
from storage import RolloutStorage
from evaluation import evaluate
from torch.utils.tensorboard import SummaryWriter
import datetime
current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
l_dir = "./logs/"+current_time
writer = SummaryWriter(l_dir)
def main():
args = get_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
if args.cuda and torch.cuda.is_available() and args.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
log_dir = os.path.expanduser(args.log_dir)
eval_log_dir = log_dir + "_eval"
utils.cleanup_log_dir(log_dir)
utils.cleanup_log_dir(eval_log_dir)
torch.set_num_threads(1)
device = torch.device("cuda:0" if args.cuda else "cpu")
envs = make_vec_envs(args.env_name, args.seed, args.num_processes,
args.gamma, args.log_dir, device, False, args.custom_gym)
base=SEVN
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
base_kwargs={'recurrent': args.recurrent_policy})
actor_critic.to(device)
if args.algo == 'ppo':
agent = PPO(
actor_critic,
args.clip_param,
args.ppo_epoch,
args.num_mini_batch,
args.value_loss_coef,
args.entropy_coef,
lr=args.lr,
eps=args.eps,
max_grad_norm=args.max_grad_norm)
rollouts = RolloutStorage(args.num_steps, args.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rewards = deque(maxlen=10)
episode_length = deque(maxlen=10)
episode_success_rate = deque(maxlen=100)
episode_total=0
start = time.time()
num_updates = int(
args.num_env_steps) // args.num_steps // args.num_processes
for j in range(num_updates):
if args.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
agent.optimizer, j, num_updates,args.lr)
for step in range(args.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
            # Observe reward and next obs
obs, reward, done, infos = envs.step(action)
for info in infos:
if 'episode' in info.keys():
episode_rewards.append(info['episode']['r'])
episode_length.append(info['episode']['l'])
episode_success_rate.append(info['was_successful_trajectory'])
episode_total+=1
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks)
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
rollouts.compute_returns(next_value, args.use_gae, args.gamma,
args.gae_lambda, args.use_proper_time_limits)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save for every interval-th episode or for the last epoch
if (j % args.save_interval == 0
or j == num_updates - 1) and args.save_dir != "":
save_path = os.path.join(args.save_dir, args.algo)
try:
os.makedirs(save_path)
except OSError:
pass
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
], os.path.join(save_path, args.env_name + ".pt"))
if j % args.log_interval == 0 and len(episode_rewards) > 1:
total_num_steps = (j + 1) * args.num_processes * args.num_steps
end = time.time()
writer.add_scalars('Train/Episode Reward', {"Reward Mean": | np.mean(episode_rewards) | numpy.mean |
import time
import numpy as np
from numba import njit, prange, numba
from numba.typed import List
from deepdrive_zero.constants import CACHE_NUMBA
@njit(cache=CACHE_NUMBA, nogil=True)
def get_lane_distance(p0, p1, ego_rect_pts: np.array, is_left_lane_line=True):
"""
Get the ego's distance from the lane segment p1-p0
by rotating p1 about p0 such that the segment is vertical.
Then rotate the ego points by the same angle and find the minimum x
coordinate of the ego, finally comparing it to the x coordinate of the
rotated line.
Note that direction of travel should be from p0 to p1.
:param p0: First lane line point
:param p1: Second lane line point
:param ego_rect_pts: Four points of ego rectangle
:param is_left_lane_line: Whether lane line should be to the left of ego, else right
:return: Max distance from lane if crossed, min distance if not. Basically
how bad is your position.
"""
# TODO:
# Handle this case (bad map):
# https://user-images.githubusercontent.com/181225/81328684-d74a1880-908c-11ea-920e-caf4c94d3d5c.jpg
# Perhaps check that the closest point to the infinite line
# made up by p0 and p1 is NOT closer than the closest point to the
# bounded line segment, as in this case you could have a problem with the
# map, where for example on a dog leg left turn, the ego point is before
# the turn and therefore can be legally be left of the infinite line
# but right of the lane segment before the turn (i.e. the lane segment
# that _should_ have been passed.
lane_adjacent = p1[0] - p0[0]
lane_opposite = p1[1] - p0[1]
if lane_adjacent == 0:
# Already parallel
angle_to_vert = 0
else:
# Not parallel
lane_angle = | np.arctan2(lane_opposite, lane_adjacent) | numpy.arctan2 |
from __future__ import division
import numpy as np
import scipy.special, scipy.stats
import ctypes
import logging
logger = logging.getLogger("pygmmis")
# set up multiprocessing
import multiprocessing
import parmap
def createShared(a, dtype=ctypes.c_double):
"""Create a shared array to be used for multiprocessing's processes.
Taken from http://stackoverflow.com/questions/5549190/
Works only for float, double, int, long types (e.g. no bool).
Args:
numpy array, arbitrary shape
Returns:
numpy array whose container is a multiprocessing.Array
"""
shared_array_base = multiprocessing.Array(dtype, a.size)
shared_array = np.ctypeslib.as_array(shared_array_base.get_obj())
shared_array[:] = a.flatten()
shared_array = shared_array.reshape(a.shape)
return shared_array
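# A minimal usage sketch of createShared with illustrative values: the returned array
# is backed by a multiprocessing.Array, so worker processes that receive it can
# modify it in place without copying.
def _example_create_shared():
    a = np.arange(12, dtype=float).reshape(3, 4)
    shared = createShared(a)
    shared[0, 0] = -1.0  # change is visible to all processes sharing the buffer
    return shared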
# this is to allow multiprocessing pools to operate on class methods:
# https://gist.github.com/bnyeggen/1086393
def _pickle_method(method):
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
if func_name.startswith('__') and not func_name.endswith('__'): #deal with mangled names
cls_name = cls.__name__.lstrip('_')
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
for cls in cls.__mro__:
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
import types
# python 2 -> 3 adjustments
try:
import copy_reg
except ImportError:
import copyreg as copy_reg
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
try:
xrange
except NameError:
xrange = range
# Blantant copy from <NAME>'s esutil
# https://github.com/esheldon/esutil/blob/master/esutil/numpy_util.py
def match1d(arr1input, arr2input, presorted=False):
"""
NAME:
match
CALLING SEQUENCE:
ind1,ind2 = match(arr1, arr2, presorted=False)
PURPOSE:
Match two numpy arrays. Return the indices of the matches or empty
arrays if no matches are found. This means arr1[ind1] == arr2[ind2] is
true for all corresponding pairs. arr1 must contain only unique
inputs, but arr2 may be non-unique.
If you know arr1 is sorted, set presorted=True and it will run
even faster
METHOD:
uses searchsorted with some sugar. Much faster than old version
based on IDL code.
REVISION HISTORY:
Created 2015, <NAME>, SLAC.
"""
# make sure 1D
arr1 = np.array(arr1input, ndmin=1, copy=False)
arr2 = np.array(arr2input, ndmin=1, copy=False)
# check for integer data...
if (not issubclass(arr1.dtype.type,np.integer) or
not issubclass(arr2.dtype.type,np.integer)) :
mess="Error: only works with integer types, got %s %s"
mess = mess % (arr1.dtype.type,arr2.dtype.type)
raise ValueError(mess)
if (arr1.size == 0) or (arr2.size == 0) :
mess="Error: arr1 and arr2 must each be non-zero length"
raise ValueError(mess)
# make sure that arr1 has unique values...
test=np.unique(arr1)
if test.size != arr1.size:
raise ValueError("Error: the arr1input must be unique")
# sort arr1 if not presorted
if not presorted:
st1 = np.argsort(arr1)
else:
st1 = None
# search the sorted array
sub1= | np.searchsorted(arr1,arr2,sorter=st1) | numpy.searchsorted |
import os, tqdm
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from muchbettermoments import quadratic_2d
from astropy.wcs import WCS, NoConvergence
from astropy.table import Table
from astropy.nddata import Cutout2D
from astropy.utils.data import download_file
import requests
from bs4 import BeautifulSoup
import warnings
import urllib
from .mast import tic_by_contamination
def load_pointing_model(sector, camera, chip):
""" Loads in pointing model from website.
"""
user_agent = 'eleanor 0.1.6'
values = {'name': 'eleanor',
'language': 'Python' }
headers = {'User-Agent': user_agent}
data = urllib.parse.urlencode(values)
data = data.encode('ascii')
guide_link = 'https://users.flatironinstitute.org/dforeman/public_www/tess/postcards_test/s{0:04d}/pointing_model/pointingModel_{0:04d}_{1}-{2}.txt'.format(sector, camera, chip)
req = urllib.request.Request(guide_link, data, headers)
with urllib.request.urlopen(req) as response:
pointing = response.read().decode('utf-8')
pointing = Table.read(pointing, format='ascii.basic') # guide to postcard locations
return pointing
def use_pointing_model(coords, pointing_model):
"""Applies pointing model to correct the position of star(s) on postcard.
Parameters
----------
coords : tuple
(`x`, `y`) position of star(s).
pointing_model : astropy.table.Table
pointing_model for ONE cadence.
Returns
-------
coords : tuple
Corrected position of star(s).
"""
pointing_model = np.reshape(list(pointing_model), (3,3))
A = np.column_stack([coords[0], coords[1], np.ones_like(coords[0])])
fhat = np.dot(A, pointing_model)
return fhat
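# A minimal usage sketch of use_pointing_model; the values are illustrative. A
# flattened identity matrix stands in for one row of the pointing-model table and
# leaves the coordinates unchanged.
def _example_use_pointing_model():
    identity_model = np.eye(3).flatten()
    return use_pointing_model(np.array([1024.0, 1024.0]), identity_model)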
def pm_quality(time, sector, camera, chip, pm=None):
""" Fits a line to the centroid motions using the pointing model.
A quality flag is set if the centroid is > 2*sigma away from
the majority of the centroids.
"""
def outliers(x, y, poly, mask):
dist = (y - poly[0]*x - poly[1])/np.sqrt(poly[0]**2+1**2)
std = np.std(dist)
ind = np.where((dist > 2*std) | (dist < -2*std))[0]
mask[ind] = 1
return mask
cen_x, cen_y = 1024, 1024 # Uses a point in the center of the FFI
cent_x,cent_y = [], []
if pm is None:
pm = load_pointing_model(sector, camera, chip)
# Applies centroids
for i in range(len(pm)):
new_coords = use_pointing_model(np.array([cen_x, cen_y]), pm[i])
cent_x.append(new_coords[0][0])
cent_y.append(new_coords[0][1])
cent_x = np.array(cent_x); cent_y = np.array(cent_y)
# Finds gap in orbits
t = np.diff(time)
brk = np.where( t > np.mean(t)+2*np.std(t))[0][0]
brk += 1
# Initiates lists for each orbit
x1 = cent_x[0:brk]; y1 = cent_y[0:brk]
x2 = cent_x[brk:len(cent_x)+1];y2 = cent_y[brk:len(cent_y)+1]
# Initiates masks
mask1 = np.zeros(len(x1)); mask2 = np.zeros(len(x2))
# Loops through and searches for points > 2 sigma away from distribution
for i in np.arange(0,10,1):
poly1 = np.polyfit(x1[mask1==0], y1[mask1==0], 1)
poly2 = np.polyfit(x2[mask2==0], y2[mask2==0], 1)
mask1 = outliers(x1, y1, poly1, mask1)
mask2 = outliers(x2, y2, poly2, mask2)
# Returns a total mask for each orbit
return np.append(mask1, mask2)
def set_quality_flags(ffi_start, ffi_stop, shortCad_fn, sector, camera, chip,
pm=None):
""" Uses the quality flags in a 2-minute target to create quality flags
in the postcards.
We create our own quality flag as well, using our pointing model.
"""
# Obtains information for 2-minute target
twoMin = fits.open(shortCad_fn)
twoMinTime = twoMin[1].data['TIME']-twoMin[1].data['TIMECORR']
finite = np.isfinite(twoMinTime)
twoMinQual = twoMin[1].data['QUALITY']
twoMinTime = twoMinTime[finite]
twoMinQual = twoMinQual[finite]
perFFIcad = []
for i in range(len(ffi_start)):
where = np.where( (twoMinTime > ffi_start[i]) &
(twoMinTime < ffi_stop[i]) )[0]
perFFIcad.append(where)
perFFIcad = np.array(perFFIcad)
# Binary string for values which apply to the FFIs
ffi_apply = int('100010101111', 2)
convolve_ffi = []
for cadences in perFFIcad:
v = np.bitwise_or.reduce(twoMinQual[cadences])
convolve_ffi.append(v)
convolve_ffi = np.array(convolve_ffi)
flags = np.bitwise_and(convolve_ffi, ffi_apply)
pm_flags = pm_quality(ffi_stop, sector, camera, chip, pm=pm) * 4096
pm_flags[ ((ffi_stop>1420.) & (ffi_stop < 1424.)) ] = 4096
return flags+pm_flags
class ffi:
"""This class allows the user to download all full-frame images for a given sector,
camera, and chip. It also allows the user to create their own pointing model
based on each cadence for a given combination of sector, camera, and chip.
No individual user should have to download all of the full-frame images because
stacked postcards will be available for the user to download from MAST.
Parameters
----------
sector : int, optional
camera : int, optional
chip : int, optional
"""
def __init__(self, sector=None, camera=None, chip=None):
self.sector = sector
self.camera = camera
self.chip = chip
self.ffiindex = None
def download_ffis(self, download_dir=None):
"""
Downloads entire sector of data into FFI download directory.
Parameters
----------
download_dir : str
Location where the data files will be stored.
Defaults to "~/.eleanor/sector_{}/ffis" if `None` is passed.
"""
def findAllFFIs():
nonlocal url
sub_paths = []
subsub_paths = []
calFiles, urlPaths = [], []
paths = BeautifulSoup(requests.get(url).text, "lxml").find_all('a')
for direct in paths:
subdirect = direct.get('href')
if ('2018' in subdirect) or ('2019' in subdirect):
sub_paths.append(os.path.join(url, subdirect))
for sp in sub_paths:
for fn in BeautifulSoup(requests.get(sp).text, "lxml").find_all('a'):
subsub = fn.get('href')
if (subsub[0] != '?') and (subsub[0] != '/'):
subsub_paths.append(os.path.join(sp, subsub))
subsub_paths = [os.path.join(i, '{}-{}/'.format(self.camera, self.chip)) for i in subsub_paths]
for sbp in subsub_paths:
for fn in BeautifulSoup(requests.get(sbp).text, "lxml").find_all('a'):
if 'ffic.fits' in fn.get('href'):
calFiles.append(fn.get('href'))
urlPaths.append(sbp)
return np.array(calFiles), np.array(urlPaths)
# This URL applies to ETE-6 simulated data ONLY
url = 'https://archive.stsci.edu/missions/tess/ffi/'
url = os.path.join(url, "s{0:04d}".format(self.sector))
files, urlPaths = findAllFFIs()
if download_dir is None:
# Creates hidden .eleanor FFI directory
ffi_dir = self._fetch_ffi_dir()
else:
ffi_dir = download_dir
files_in_dir = os.listdir(ffi_dir)
local_paths = []
for i in range(len(files)):
if files[i] not in files_in_dir:
os.system('cd {} && curl -O -L {}'.format(ffi_dir, urlPaths[i]+files[i]))
local_paths.append(ffi_dir+files[i])
self.local_paths = np.array(local_paths)
return
def _fetch_ffi_dir(self):
"""Returns the default path to the directory where FFIs will be downloaded.
By default, this method will return "~/.eleanor/sector_{}/ffis" and create
this directory if it does not exist. If the directory cannot be
access or created, then it returns the local directory (".").
Returns
-------
download_dir : str
Path to location of `ffi_dir` where FFIs will be downloaded
"""
download_dir = os.path.join(os.path.expanduser('~'), '.eleanor',
'sector_{}'.format(self.sector), 'ffis')
if os.path.isdir(download_dir):
return download_dir
else:
# if it doesn't exist, make a new cache directory
try:
os.makedirs(download_dir)
# downloads locally if OS error occurs
except OSError:
warnings.warn('Warning: unable to create {}. '
'Downloading FFIs to the current '
'working directory instead.'.format(download_dir))
download_dir = '.'
return download_dir
def sort_by_date(self):
"""Sorts FITS files by start date of observation."""
dates, time, index = [], [], []
for f in self.local_paths:
hdu = fits.open(f)
hdr = hdu[1].header
dates.append(hdr['DATE-OBS'])
if 'ffiindex' in hdu[0].header:
index.append(hdu[0].header['ffiindex'])
if len(index) == len(dates):
dates, index, fns = np.sort(np.array([dates, index, self.local_paths]))
self.ffiindex = index.astype(int)
else:
dates, fns = np.sort(np.array([dates, self.local_paths]))
self.local_paths = fns
self.dates = dates
return
def build_pointing_model(self, pos_predicted, pos_inferred, outlier_removal=False):
"""Builds an affine transformation to correct the positions of stars
from a possibly incorrect WCS.
Parameters
----------
pos_predicted : tuple
Positions taken straight from the WCS; [[x,y],[x,y],...] format.
pos_inferred : tuple
Positions taken using any centroiding method; [[x,y],[x,y],...] format.
outlier_removal : bool, optional
Whether to clip 1-sigma outlier frames. Default `False`.
Returns
-------
xhat : np.ndarray
(3, 3) affine transformation matrix between WCS positions
and inferred positions.
"""
A = np.column_stack([pos_predicted[:,0], pos_predicted[:,1], np.ones_like(pos_predicted[:,0])])
f = np.column_stack([pos_inferred[:,0], pos_inferred[:,1], np.ones_like(pos_inferred[:,0])])
if outlier_removal == True:
dist = np.sqrt(np.sum((A - f)**2, axis=1))
mean, std = np.nanmean(dist), np.nanstd(dist)
lim = 1.0
A = A[dist < mean + lim*std]
f = f[dist < mean + lim*std]
ATA = np.dot(A.T, A)
ATAinv = np.linalg.inv(ATA)
        ATf = np.dot(A.T, f)
        # Solve the normal equations for the affine transform promised by the
        # docstring: xhat = (A^T A)^-1 A^T f
        xhat = np.dot(ATAinv, ATf)
        return xhat
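
# Illustrative sketch (not part of eleanor): the pointing model above is an
# ordinary least-squares fit of an affine map in homogeneous pixel coordinates;
# the positions below are made up purely to show the shapes involved.
def _example_pointing_model_fit():
    wcs_xy = np.array([[10.0, 12.0], [45.2, 80.1], [200.4, 5.9], [7.7, 99.0]])
    measured_xy = wcs_xy + np.array([0.4, -0.2])    # pretend the WCS is offset
    A = np.column_stack([wcs_xy, np.ones(len(wcs_xy))])
    f = np.column_stack([measured_xy, np.ones(len(measured_xy))])
    xhat = np.linalg.solve(A.T @ A, A.T @ f)        # same normal equations as above
    return xhat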
import json
import tqdm
import torch
import itertools
import numpy as np
import pandas as pd
import torch.nn as nn
import matplotlib.pyplot as plt
from transformers import AutoTokenizer, BertTokenizer, RobertaTokenizer
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import recall_score, accuracy_score, precision_score, f1_score, confusion_matrix
HEADER_CONST = "# sent_id = "
TEXT_CONST = "# text = "
STOP_CONST = "\n"
WORD_OFFSET = 1
LABEL_OFFSET = 3
NUM_OFFSET = 0
def txt_to_dataframe(data_path):
    '''
    Read a Universal Dependencies (CoNLL-U) text file and convert it to a
    DataFrame with one row per token (text, word, word_offset, label, word_count).
    '''
with open(data_path, "r") as fp:
        df = pd.DataFrame(
            columns=[
                "text",
                "word",
                "label"
            ]
        )
for line in fp.readlines():
if TEXT_CONST in line:
words_list = []
labels_list = []
num_list = []
text = line.split(TEXT_CONST)[1]
# this is a new text, need to parse all the words in it
            elif line != STOP_CONST and HEADER_CONST not in line:
temp_list = line.split("\t")
num_list.append(temp_list[NUM_OFFSET])
words_list.append(temp_list[WORD_OFFSET])
labels_list.append(temp_list[LABEL_OFFSET])
if line == STOP_CONST:
# this is the end of the text, adding to df
cur_df = pd.DataFrame(
{
"text": len(words_list) * [text],
"word": words_list,
"word_offset": num_list,
"label": labels_list,
"word_count" : len(words_list)
}
)
df = pd.concat([df, cur_df])
return df
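
# Example of the CoNLL-U layout the parser above expects (illustrative snippet,
# not taken from a real treebank); columns are tab-separated:
#
#   # sent_id = example-0001
#   # text = I like it.
#   1   I      I      PRON  ...
#   2   like   like   VERB  ...
#   3   it     it     PRON  ...
#   4   .      .      PUNCT ...
#
# Index 0 of each split line is the token number (NUM_OFFSET), index 1 the
# surface form (WORD_OFFSET), and index 3 the UPOS tag (LABEL_OFFSET).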
def tokenize_word(sentence_ids, target_word, bert_tokenizer, word_offset, text, word_count):
word_mask = len(sentence_ids) * [0]
if isinstance(bert_tokenizer, RobertaTokenizer):
# adding the pesky character of the roberta BPE
sentence_tokens = set(bert_tokenizer.convert_ids_to_tokens(sentence_ids))
candidate_tokens_1 = bert_tokenizer.tokenize(f'Ġ{target_word}')[2:]
candidate_tokens_2 = bert_tokenizer.tokenize(f' {target_word}')
candidate_tokens_3 = bert_tokenizer.tokenize(f'{target_word}.')
candidate_tokens_4 = bert_tokenizer.tokenize(f'{target_word}".')
candidate_tokens_5 = bert_tokenizer.tokenize(f'"{target_word}.')
if set(candidate_tokens_1).issubset(sentence_tokens):
matching_tokens = candidate_tokens_1
elif set(candidate_tokens_3).issubset(sentence_tokens):
matching_tokens = candidate_tokens_3
elif set(candidate_tokens_4).issubset(sentence_tokens):
matching_tokens = candidate_tokens_4
elif set(candidate_tokens_5).issubset(sentence_tokens):
matching_tokens = candidate_tokens_5
elif set(candidate_tokens_2).issubset(sentence_tokens):
matching_tokens = candidate_tokens_2
else:
matching_tokens = bert_tokenizer.tokenize(target_word)
word_ids = bert_tokenizer.convert_tokens_to_ids(matching_tokens)
else:
word_ids = bert_tokenizer.convert_tokens_to_ids(bert_tokenizer.tokenize(target_word))
try:
word_ids_indexes_in_text = [sentence_ids.index(word) for word in word_ids]
for tok_idx in word_ids_indexes_in_text:
word_mask[tok_idx] = 1
except Exception as ex:
pass
#print(ex)
#print(f"target word: {target_word} , matching_tokens: {matching_tokens}")
#print()
return word_mask
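
# Intended behaviour of tokenize_word (illustrative, tokenizer-dependent): for
# the sentence "I like apples" with target_word "apples", the returned
# word_mask is 1 at every wordpiece position belonging to "apples" and 0
# elsewhere, e.g. [0, 0, 0, 1, 1, 0, ...] if "apples" splits into two pieces.
# The extra candidate spellings above work around RoBERTa's BPE, which encodes
# a leading space as the 'Ġ' character.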
def preprocess_text(x: str, tokenizer: AutoTokenizer, max_sequence_len: int):
cur_x = x
if isinstance(tokenizer, BertTokenizer):
cur_x = "[CLS] " + cur_x
cur_x = cur_x.replace("\n", "")
cur_x = cur_x.replace(" cannot ", " can not ")
cur_x = tokenizer.tokenize(cur_x)
cur_x = tokenizer.convert_tokens_to_ids(cur_x)
cur_x = cur_x[:max_sequence_len]
cur_x = cur_x + [0] * (max_sequence_len - len(cur_x))
return cur_x
def extract_attn_mask(x: list, max_sequence_len):
    # Attend to every real token and mask out the 0-padding; if the sequence
    # fills max_sequence_len exactly there is no padding to mask.
    if 0 not in x:
        return max_sequence_len * [1]
    first_0_token_idx = x.index(0)
    return first_0_token_idx * [1] + (max_sequence_len - first_0_token_idx) * [0]
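
# Example (illustrative token ids): with max_sequence_len = 6, the padded list
# [101, 2023, 2003, 0, 0, 0] yields the attention mask [1, 1, 1, 0, 0, 0].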
def text_to_dataloader(
sentences_df: pd.DataFrame,
device: torch.device,
inference_batch_size: int,
bert_tokenizer: AutoTokenizer,
max_sequence_len: int) -> DataLoader:
    '''
    Note: mutates the input dataframe!
    :param sentences_df: pd.DataFrame with "text", "word" and "label" columns
    :return: a torch.utils.data.DataLoader that iterates over the input data
    '''
assert isinstance(sentences_df, pd.DataFrame)
assert "text" in sentences_df.columns
assert "label" in sentences_df.columns
LABELS_TO_DROP = ["X", "_"]
df = sentences_df[~sentences_df["label"].isin(LABELS_TO_DROP)]
df["label"] = df["label"].astype("category")
with open("pos_to_label.json", "rb") as fp:
pos_to_label_dict = json.load(fp)
df["label_idx"] = df["label"].map(pos_to_label_dict)
df["text_ids"] = df["text"].apply(lambda x: preprocess_text(x, bert_tokenizer,max_sequence_len))
df["word_count"] = df["word_count"].astype(int)
df["attn_mask"] = df["text_ids"].apply(lambda x: extract_attn_mask(x, max_sequence_len))
df["query_mask"] = df.apply(lambda row: tokenize_word(row.text_ids, row.word, bert_tokenizer, int(row.word_offset), row.text, row.word_count), axis=1)
# drop failed target word mask extraction
df = df[df["query_mask"].apply(lambda x: sum(x) > 0)]
sentences_idx_tensor = torch.LongTensor(np.stack(df["text_ids"].values)).to(device)
    sentences_mask_tensor = torch.LongTensor(np.stack(df["attn_mask"].values)).to(device)
from gaptrain.configurations import ConfigurationSet, Configuration
from gaptrain.exceptions import LoadingFailed
from gaptrain.systems import System
from gaptrain.molecules import Molecule
from gaptrain.solvents import get_solvent
import gaptrain as gt
import numpy as np
import pytest
import ase
import os
here = os.path.abspath(os.path.dirname(__file__))
h2o = Molecule(os.path.join(here, 'data', 'h2o.xyz'))
methane = Molecule(os.path.join(here, 'data', 'methane.xyz'))
side_length = 7.0
system = System(box_size=[side_length, side_length, side_length])
system.add_molecules(h2o, n=3)
def test_print_exyz():
configs = ConfigurationSet(name='test')
for _ in range(5):
configs += system.random()
configs.save()
assert os.path.exists('test.xyz')
os.remove('test.xyz')
# If the energy and forces are set for all the configurations an exyz
# should be able to be printed
for config in configs:
config.energy = 1.0
config.forces = np.zeros(shape=(9, 3))
configs.save()
assert os.path.exists('test.xyz')
for line in open('test.xyz', 'r'):
items = line.split()
# Number of atoms in the configuration
if len(items) == 1:
assert int(items[0]) == 9
if len(items) == 7:
atomic_symbol, x, y, z, fx, fy, fz = items
# Atomic symbols should be letters
assert all(letter.isalpha() for letter in atomic_symbol)
# Positions should be float-able and inside the box
for component in (x, y, z):
assert 0.0 < float(component) < side_length
# Forces should be ~0, as they were set above
assert all(-1E-6 < float(fk) < 1E-6 for fk in (fx, fy, fz))
os.remove('test.xyz')
def test_wrap():
config_all_in_box = system.random()
coords = config_all_in_box.coordinates()
config_all_in_box.wrap()
    wrapped_coords = config_all_in_box.coordinates()
    # Wrapping should do nothing if all the atoms are already in the box
    assert np.linalg.norm(coords - wrapped_coords) < 1E-6
config = system.random(on_grid=True, min_dist_threshold=1)
for atom in config.atoms[:3]:
        atom.translate(vec=np.array([10.0, 0, 0]))
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import logging
from typing import Mapping
import numpy as np
from graph.types import LSTMParameters, RNNParameters
from graph.types.rnn import GRUParameters
from quantization.kernels.kernel_base import KernelBase, params_type, qrec_type
from quantization.new_qrec import QRec
from quantization.qtype import QType
from utils.at_norm import at_norm
from utils.diag_collector import DiagCollector
from utils.sigmoid_tanh_lut import sigmoid_lut, tanh_lut
LOG = logging.getLogger("nntool." + __name__)
# Another TANH and SIGMOID approx -> less precise
# def exp_taylor_quant(x, qtype, order='third'):
# ONE_OVER_3 = qtype.quantize(np.array([1.0 / 3.0]))
# ONE = qtype.quantize(np.array([1]))
# x2 = (x.astype(np.int32)*x) >> qtype.q
# x3 = (x2*x) >> qtype.q
# if order == 'third':
# x3_over_6_plus_x2_over_2 = (((x3 * ONE_OVER_3) >> qtype.q) + x2) >> 1
# return ONE + ((ONE * (x + x3_over_6_plus_x2_over_2)) >> qtype.q)
# x4 = (x3*x) >> qtype.q
# if order == 'fourth':
# x4_over_4 = x4>>2
# x4_over_24_plus_x3_over_6_plus_x2_over_2 = ((((x4_over_4 + x3) * ONE_OVER_3) >> qtype.q) + x2) >> 1
# return ONE + ((ONE * (x + x4_over_24_plus_x3_over_6_plus_x2_over_2)) >> qtype.q)
# def quant_tanh(x, qtype, k=3):
# K = qtype.quantize(np.array([k])).astype(np.int32)
# ONE = qtype.quantize(np.array([1])).astype(np.int32)
# result_neg = ((ONE-exp_taylor_quant(-2*x, qtype).astype(np.int32)).astype(np.int32)<<qtype.q)//(ONE+exp_taylor_quant(-2*x, qtype))
# result_pos = ((ONE-exp_taylor_quant(2*x, qtype).astype(np.int32)).astype(np.int32)<<qtype.q)//(ONE+exp_taylor_quant(2*x, qtype))
# return np.where(x<(-K), -ONE, np.where(x>K, ONE, np.where(x<0, result_neg, -result_pos)))
# def quant_sigmoid(x, qtype):
# ONE = qtype.quantize(np.array([1])).astype(np.int32)
# return np.where(x>0, (exp_taylor_quant(x, qtype) << qtype.q) // (ONE + exp_taylor_quant(x, qtype)),
# (ONE << qtype.q) // (ONE + exp_taylor_quant(-x, qtype)))
def abs_clip(arr: np.ndarray, abs_limit):
return np.clip(arr, -abs_limit, abs_limit)
def relu(x, qtype):
    del qtype
    return np.maximum(x, 0)
def sigmoid(x, qtype):
x = qtype.dequantize(x)
pos_mask = (x >= 0)
neg_mask = (x < 0)
z = np.zeros_like(x)
z[pos_mask] = np.exp(-x[pos_mask])
z[neg_mask] = np.exp(x[neg_mask])
top = np.ones_like(x)
top[neg_mask] = z[neg_mask]
return qtype.quantize(top / (1 + z))
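
# (The branching above is the numerically stable sigmoid: for x >= 0 it
# evaluates 1 / (1 + exp(-x)), for x < 0 it evaluates exp(x) / (1 + exp(x)),
# so exp() is never called with a large positive argument.)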
def hsigmoid(x, qtype):
x = x.astype(np.int32)
relued = np.maximum(0,
np.minimum(qtype.quantize(np.array([3])) + x,
qtype.quantize(np.array([6]))))
relued *= qtype.quantize(np.array(1/6))
relued += (1 << (qtype.q - 1))
relued >>= qtype.q
return relued
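
# Float reference for the fixed-point hsigmoid above (illustrative sketch, not
# part of nntool): hard-sigmoid(x) = clip((x + 3) / 6, 0, 1).
def _hsigmoid_float_reference(x):
    return np.minimum(np.maximum(x + 3.0, 0.0), 6.0) / 6.0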
def mean_stddev_normalization(arr: np.ndarray):
    # Layer-norm style normalization to zero mean and approximately unit variance
    mean = np.mean(arr)
    variance = np.sum(np.square(arr - mean)) / arr.size
    stddev_inv = 1.0 / np.sqrt(variance + 1e-8)
    return (arr - mean) * stddev_inv
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
from pyscf import lib
from pyscf import scf
from pyscf.lib import logger
from pyscf.cc import ccsd
from pyscf.cc import uccsd
from pyscf.cc import eom_rccsd
from pyscf.cc import eom_gccsd
from pyscf.cc import addons
########################################
# EOM-IP-CCSD
########################################
class EOMIP(eom_gccsd.EOMIP):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMIP.__init__(self, gcc)
########################################
# EOM-EA-CCSD
########################################
class EOMEA(eom_gccsd.EOMEA):
def __init__(self, cc):
gcc = addons.convert_to_gccsd(cc)
eom_gccsd.EOMEA.__init__(self, gcc)
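
# Illustrative usage sketch (not from this file; the molecule, basis and nroots
# are made up, following the usual PySCF workflow):
#
#   from pyscf import gto, scf, cc
#   mol = gto.M(atom='O 0 0 0; H 0 0 0.96; H 0.93 0 -0.24', basis='631g')
#   mf = scf.UHF(mol).run()
#   mycc = cc.UCCSD(mf).run()
#   e_ip, _ = EOMIP(mycc).kernel(nroots=2)   # ionization potentials
#   e_ea, _ = EOMEA(mycc).kernel(nroots=2)   # electron affinities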
########################################
# EOM-EE-CCSD
########################################
def eeccsd(eom, nroots=1, koopmans=False, guess=None, eris=None, imds=None):
'''Calculate N-electron neutral excitations via EOM-EE-CCSD.
Kwargs:
nroots : int
Number of roots (eigenvalues) requested
koopmans : bool
Calculate Koopmans'-like (1p1h) excitations only, targeting via
overlap.
guess : list of ndarray
List of guess vectors to use for targeting via overlap.
'''
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
spinvec_size = eom.vector_size()
nroots = min(nroots, spinvec_size)
diag_ee, diag_sf = eom.get_diag(imds)
guess_ee = []
guess_sf = []
if guess and guess[0].size == spinvec_size:
raise NotImplementedError
#TODO: initial guess from GCCSD EOM amplitudes
#orbspin = scf.addons.get_ghf_orbspin(eris.mo_coeff)
#nmo = np.sum(eom.nmo)
#nocc = np.sum(eom.nocc)
#for g in guess:
# r1, r2 = eom_gccsd.vector_to_amplitudes_ee(g, nmo, nocc)
# r1aa = r1[orbspin==0][:,orbspin==0]
# r1ab = r1[orbspin==0][:,orbspin==1]
# if abs(r1aa).max() > 1e-7:
# r1 = addons.spin2spatial(r1, orbspin)
# r2 = addons.spin2spatial(r2, orbspin)
# guess_ee.append(eom.amplitudes_to_vector(r1, r2))
# else:
# r1 = spin2spatial_eomsf(r1, orbspin)
# r2 = spin2spatial_eomsf(r2, orbspin)
# guess_sf.append(amplitudes_to_vector_eomsf(r1, r2))
# r1 = r2 = r1aa = r1ab = g = None
#nroots_ee = len(guess_ee)
#nroots_sf = len(guess_sf)
elif guess:
for g in guess:
if g.size == diag_ee.size:
guess_ee.append(g)
else:
guess_sf.append(g)
nroots_ee = len(guess_ee)
nroots_sf = len(guess_sf)
else:
dee = np.sort(diag_ee)[:nroots]
dsf = np.sort(diag_sf)[:nroots]
dmax = np.sort(np.hstack([dee,dsf]))[nroots-1]
nroots_ee = np.count_nonzero(dee <= dmax)
nroots_sf = np.count_nonzero(dsf <= dmax)
guess_ee = guess_sf = None
def eomee_sub(cls, nroots, guess, diag):
ee_sub = cls(eom._cc)
ee_sub.__dict__.update(eom.__dict__)
e, v = ee_sub.kernel(nroots, koopmans, guess, eris, imds, diag=diag)
if nroots == 1:
e, v = [e], [v]
ee_sub.converged = [ee_sub.converged]
return list(ee_sub.converged), list(e), list(v)
e0 = e1 = []
v0 = v1 = []
conv0 = conv1 = []
if nroots_ee > 0:
conv0, e0, v0 = eomee_sub(EOMEESpinKeep, nroots_ee, guess_ee, diag_ee)
if nroots_sf > 0:
conv1, e1, v1 = eomee_sub(EOMEESpinFlip, nroots_sf, guess_sf, diag_sf)
e = np.hstack([e0,e1])
idx = e.argsort()
e = e[idx]
conv = conv0 + conv1
conv = [conv[x] for x in idx]
v = v0 + v1
v = [v[x] for x in idx]
if nroots == 1:
conv = conv[0]
e = e[0]
v = v[0]
eom.converged = conv
eom.e = e
eom.v = v
return eom.e, eom.v
def eomee_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
if eris is None: eris = eom._cc.ao2mo()
if imds is None: imds = eom.make_imds(eris)
eom.converged, eom.e, eom.v \
= eom_rccsd.kernel(eom, nroots, koopmans, guess, imds=imds, diag=diag)
return eom.e, eom.v
def eomsf_ccsd(eom, nroots=1, koopmans=False, guess=None,
eris=None, imds=None, diag=None):
'''Spin flip EOM-EE-CCSD
'''
return eomee_ccsd(eom, nroots, koopmans, guess, eris, imds, diag)
amplitudes_to_vector_ee = uccsd.amplitudes_to_vector
vector_to_amplitudes_ee = uccsd.vector_to_amplitudes
def amplitudes_to_vector_eomsf(t1, t2, out=None):
t1ab, t1ba = t1
t2baaa, t2aaba, t2abbb, t2bbab = t2
nocca, nvirb = t1ab.shape
noccb, nvira = t1ba.shape
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
baaa = np.take(t2baaa.reshape(noccb*nocca,nvira*nvira),
vtrila[0]*nvira+vtrila[1], axis=1)
abbb = np.take(t2abbb.reshape(nocca*noccb,nvirb*nvirb),
vtrilb[0]*nvirb+vtrilb[1], axis=1)
vector = np.hstack((t1ab.ravel(), t1ba.ravel(),
baaa.ravel(), t2aaba[otrila].ravel(),
abbb.ravel(), t2bbab[otrilb].ravel()))
return vector
def vector_to_amplitudes_eomsf(vector, nmo, nocc):
nocca, noccb = nocc
nmoa, nmob = nmo
nvira, nvirb = nmoa-nocca, nmob-noccb
t1ab = vector[:nocca*nvirb].reshape(nocca,nvirb).copy()
t1ba = vector[nocca*nvirb:nocca*nvirb+noccb*nvira].reshape(noccb,nvira).copy()
pvec = vector[t1ab.size+t1ba.size:]
nbaaa = noccb*nocca*nvira*(nvira-1)//2
naaba = nocca*(nocca-1)//2*nvirb*nvira
nabbb = nocca*noccb*nvirb*(nvirb-1)//2
nbbab = noccb*(noccb-1)//2*nvira*nvirb
t2baaa = np.zeros((noccb*nocca,nvira*nvira), dtype=vector.dtype)
t2aaba = np.zeros((nocca*nocca,nvirb*nvira), dtype=vector.dtype)
t2abbb = np.zeros((nocca*noccb,nvirb*nvirb), dtype=vector.dtype)
t2bbab = np.zeros((noccb*noccb,nvira*nvirb), dtype=vector.dtype)
otrila = np.tril_indices(nocca, k=-1)
otrilb = np.tril_indices(noccb, k=-1)
vtrila = np.tril_indices(nvira, k=-1)
vtrilb = np.tril_indices(nvirb, k=-1)
oidxab = np.arange(nocca*noccb, dtype=np.int32)
vidxab = np.arange(nvira*nvirb, dtype=np.int32)
v = pvec[:nbaaa].reshape(noccb*nocca,-1)
lib.takebak_2d(t2baaa, v, oidxab, vtrila[0]*nvira+vtrila[1])
lib.takebak_2d(t2baaa,-v, oidxab, vtrila[1]*nvira+vtrila[0])
v = pvec[nbaaa:nbaaa+naaba].reshape(-1,nvirb*nvira)
lib.takebak_2d(t2aaba, v, otrila[0]*nocca+otrila[1], vidxab)
lib.takebak_2d(t2aaba,-v, otrila[1]*nocca+otrila[0], vidxab)
v = pvec[nbaaa+naaba:nbaaa+naaba+nabbb].reshape(nocca*noccb,-1)
lib.takebak_2d(t2abbb, v, oidxab, vtrilb[0]*nvirb+vtrilb[1])
lib.takebak_2d(t2abbb,-v, oidxab, vtrilb[1]*nvirb+vtrilb[0])
v = pvec[nbaaa+naaba+nabbb:].reshape(-1,nvira*nvirb)
lib.takebak_2d(t2bbab, v, otrilb[0]*noccb+otrilb[1], vidxab)
lib.takebak_2d(t2bbab,-v, otrilb[1]*noccb+otrilb[0], vidxab)
t2baaa = t2baaa.reshape(noccb,nocca,nvira,nvira)
t2aaba = t2aaba.reshape(nocca,nocca,nvirb,nvira)
t2abbb = t2abbb.reshape(nocca,noccb,nvirb,nvirb)
t2bbab = t2bbab.reshape(noccb,noccb,nvira,nvirb)
return (t1ab,t1ba), (t2baaa, t2aaba, t2abbb, t2bbab)
def spatial2spin_eomsf(rx, orbspin):
'''Convert EOM spatial R1,R2 to spin-orbital R1,R2'''
if len(rx) == 2: # r1
r1ab, r1ba = rx
nocca, nvirb = r1ab.shape
noccb, nvira = r1ba.shape
else:
r2baaa,r2aaba,r2abbb,r2bbab = rx
noccb, nocca, nvira = r2baaa.shape[:3]
nvirb = r2aaba.shape[2]
nocc = nocca + noccb
nvir = nvira + nvirb
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
if len(rx) == 2: # r1
r1 = np.zeros((nocc,nvir), dtype=r1ab.dtype)
lib.takebak_2d(r1, r1ab, idxoa, idxvb)
lib.takebak_2d(r1, r1ba, idxob, idxva)
return r1
else:
r2 = np.zeros((nocc**2,nvir**2), dtype=r2aaba.dtype)
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2baaa = r2baaa.reshape(noccb*nocca,nvira*nvira)
r2aaba = r2aaba.reshape(nocca*nocca,nvirb*nvira)
r2abbb = r2abbb.reshape(nocca*noccb,nvirb*nvirb)
r2bbab = r2bbab.reshape(noccb*noccb,nvira*nvirb)
lib.takebak_2d(r2, r2baaa, idxoba.ravel(), idxvaa.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.ravel(), idxvba.ravel())
lib.takebak_2d(r2, r2abbb, idxoab.ravel(), idxvbb.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.ravel(), idxvab.ravel())
lib.takebak_2d(r2, r2baaa, idxoab.T.ravel(), idxvaa.T.ravel())
lib.takebak_2d(r2, r2aaba, idxoaa.T.ravel(), idxvab.T.ravel())
lib.takebak_2d(r2, r2abbb, idxoba.T.ravel(), idxvbb.T.ravel())
lib.takebak_2d(r2, r2bbab, idxobb.T.ravel(), idxvba.T.ravel())
return r2.reshape(nocc,nocc,nvir,nvir)
def spin2spatial_eomsf(rx, orbspin):
'''Convert EOM spin-orbital R1,R2 to spatial R1,R2'''
if rx.ndim == 2: # r1
nocc, nvir = rx.shape
else:
nocc, nvir = rx.shape[1:3]
idxoa = np.where(orbspin[:nocc] == 0)[0]
idxob = np.where(orbspin[:nocc] == 1)[0]
idxva = np.where(orbspin[nocc:] == 0)[0]
idxvb = np.where(orbspin[nocc:] == 1)[0]
nocca = len(idxoa)
noccb = len(idxob)
nvira = len(idxva)
nvirb = len(idxvb)
if rx.ndim == 2:
r1ab = lib.take_2d(rx, idxoa, idxvb)
r1ba = lib.take_2d(rx, idxob, idxva)
return r1ab, r1ba
else:
idxoaa = idxoa[:,None] * nocc + idxoa
idxoab = idxoa[:,None] * nocc + idxob
idxoba = idxob[:,None] * nocc + idxoa
idxobb = idxob[:,None] * nocc + idxob
idxvaa = idxva[:,None] * nvir + idxva
idxvab = idxva[:,None] * nvir + idxvb
idxvba = idxvb[:,None] * nvir + idxva
idxvbb = idxvb[:,None] * nvir + idxvb
r2 = rx.reshape(nocc**2,nvir**2)
r2baaa = lib.take_2d(r2, idxoba.ravel(), idxvaa.ravel())
r2aaba = lib.take_2d(r2, idxoaa.ravel(), idxvba.ravel())
r2abbb = lib.take_2d(r2, idxoab.ravel(), idxvbb.ravel())
r2bbab = lib.take_2d(r2, idxobb.ravel(), idxvab.ravel())
r2baaa = r2baaa.reshape(noccb,nocca,nvira,nvira)
r2aaba = r2aaba.reshape(nocca,nocca,nvirb,nvira)
r2abbb = r2abbb.reshape(nocca,noccb,nvirb,nvirb)
r2bbab = r2bbab.reshape(noccb,noccb,nvira,nvirb)
return r2baaa,r2aaba,r2abbb,r2bbab
# Ref: <NAME>, and <NAME>. Chem. Theory Comput. 10, 5567 (2014) Eqs.(9)-(10)
# Note: Last line in Eq. (10) is superfluous.
# See, e.g. Gwaltney, Nooijen, and Barlett, Chem. Phys. Lett. 248, 189 (1996)
def eomee_ccsd_matvec(eom, vector, imds=None):
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_ee(vector, (nmoa,nmob), (nocca,noccb))
r1a, r1b = r1
r2aa, r2ab, r2bb = r2
#:eris_vvvv = ao2mo.restore(1, np.asarray(eris.vvvv), nvirb)
#:eris_VVVV = ao2mo.restore(1, np.asarray(eris.VVVV), nvirb)
#:eris_vvVV = _restore(np.asarray(eris.vvVV), nvira, nvirb)
#:Hr2aa += lib.einsum('ijef,aebf->ijab', tau2aa, eris_vvvv) * .5
#:Hr2bb += lib.einsum('ijef,aebf->ijab', tau2bb, eris_VVVV) * .5
#:Hr2ab += lib.einsum('iJeF,aeBF->iJaB', tau2ab, eris_vvVV)
tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
Hr2aa, Hr2ab, Hr2bb = eom._cc._add_vvvv(None, (tau2aa,tau2ab,tau2bb), eris)
Hr2aa *= .5
Hr2bb *= .5
tau2aa = tau2ab = tau2bb = None
Hr1a = lib.einsum('ae,ie->ia', imds.Fvva, r1a)
Hr1a -= lib.einsum('mi,ma->ia', imds.Fooa, r1a)
Hr1a += np.einsum('me,imae->ia',imds.Fova, r2aa)
Hr1a += np.einsum('ME,iMaE->ia',imds.Fovb, r2ab)
Hr1b = lib.einsum('ae,ie->ia', imds.Fvvb, r1b)
Hr1b -= lib.einsum('mi,ma->ia', imds.Foob, r1b)
Hr1b += np.einsum('me,imae->ia',imds.Fovb, r2bb)
Hr1b += np.einsum('me,mIeA->IA',imds.Fova, r2ab)
Hr2aa += lib.einsum('mnij,mnab->ijab', imds.woooo, r2aa) * .25
Hr2bb += lib.einsum('mnij,mnab->ijab', imds.wOOOO, r2bb) * .25
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', imds.woOoO, r2ab)
Hr2aa += lib.einsum('be,ijae->ijab', imds.Fvva, r2aa)
Hr2bb += lib.einsum('be,ijae->ijab', imds.Fvvb, r2bb)
Hr2ab += lib.einsum('BE,iJaE->iJaB', imds.Fvvb, r2ab)
Hr2ab += lib.einsum('be,iJeA->iJbA', imds.Fvva, r2ab)
Hr2aa -= lib.einsum('mj,imab->ijab', imds.Fooa, r2aa)
Hr2bb -= lib.einsum('mj,imab->ijab', imds.Foob, r2bb)
Hr2ab -= lib.einsum('MJ,iMaB->iJaB', imds.Foob, r2ab)
Hr2ab -= lib.einsum('mj,mIaB->jIaB', imds.Fooa, r2ab)
#:tau2aa, tau2ab, tau2bb = uccsd.make_tau(r2, r1, t1, 2)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1a += lib.einsum('mfae,imef->ia', eris_ovvv, r2aa)
#:tmpaa = lib.einsum('meaf,ijef->maij', eris_ovvv, tau2aa)
#:Hr2aa+= lib.einsum('mb,maij->ijab', t1a, tmpaa)
#:tmpa = lib.einsum('mfae,me->af', eris_ovvv, r1a)
#:tmpa-= lib.einsum('meaf,me->af', eris_ovvv, r1a)
#:Hr1b += lib.einsum('mfae,imef->ia', eris_OVVV, r2bb)
#:tmpbb = lib.einsum('meaf,ijef->maij', eris_OVVV, tau2bb)
#:Hr2bb+= lib.einsum('mb,maij->ijab', t1b, tmpbb)
#:tmpb = lib.einsum('mfae,me->af', eris_OVVV, r1b)
#:tmpb-= lib.einsum('meaf,me->af', eris_OVVV, r1b)
#:Hr1b += lib.einsum('mfAE,mIfE->IA', eris_ovVV, r2ab)
#:tmpab = lib.einsum('meAF,iJeF->mAiJ', eris_ovVV, tau2ab)
#:Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a, tmpab)
#:tmpb-= lib.einsum('meAF,me->AF', eris_ovVV, r1a)
#:Hr1a += lib.einsum('MFae,iMeF->ia', eris_OVvv, r2ab)
#:tmpba =-lib.einsum('MEaf,iJfE->MaiJ', eris_OVvv, tau2ab)
#:Hr2ab+= lib.einsum('MB,MaiJ->iJaB', t1b, tmpba)
#:tmpa-= lib.einsum('MEaf,ME->af', eris_OVvv, r1b)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
tmpa = np.zeros((nvira,nvira))
tmpb = np.zeros((nvirb,nvirb))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1a += lib.einsum('mfae,imef->ia', ovvv, r2aa[:,p0:p1])
tmpaa = lib.einsum('meaf,ijef->maij', ovvv, tau2aa)
Hr2aa+= lib.einsum('mb,maij->ijab', t1a[p0:p1], tmpaa)
tmpa+= lib.einsum('mfae,me->af', ovvv, r1a[p0:p1])
tmpa-= lib.einsum('meaf,me->af', ovvv, r1a[p0:p1])
ovvv = tmpaa = None
tau2aa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1b += lib.einsum('mfae,imef->ia', OVVV, r2bb[:,p0:p1])
tmpbb = lib.einsum('meaf,ijef->maij', OVVV, tau2bb)
Hr2bb+= lib.einsum('mb,maij->ijab', t1b[p0:p1], tmpbb)
tmpb+= lib.einsum('mfae,me->af', OVVV, r1b[p0:p1])
tmpb-= lib.einsum('meaf,me->af', OVVV, r1b[p0:p1])
OVVV = tmpbb = None
tau2bb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0, p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1b += lib.einsum('mfAE,mIfE->IA', ovVV, r2ab[p0:p1])
tmpab = lib.einsum('meAF,iJeF->mAiJ', ovVV, tau2ab)
Hr2ab-= lib.einsum('mb,mAiJ->iJbA', t1a[p0:p1], tmpab)
tmpb-= lib.einsum('meAF,me->AF', ovVV, r1a[p0:p1])
ovVV = tmpab = None
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1a += lib.einsum('MFae,iMeF->ia', OVvv, r2ab[:,p0:p1])
tmpba = lib.einsum('MEaf,iJfE->MaiJ', OVvv, tau2ab)
Hr2ab-= lib.einsum('MB,MaiJ->iJaB', t1b[p0:p1], tmpba)
tmpa-= lib.einsum('MEaf,ME->af', OVvv, r1b[p0:p1])
OVvv = tmpba = None
tau2ab = None
Hr2aa-= lib.einsum('af,ijfb->ijab', tmpa, t2aa)
Hr2bb-= lib.einsum('af,ijfb->ijab', tmpb, t2bb)
Hr2ab-= lib.einsum('af,iJfB->iJaB', tmpa, t2ab)
Hr2ab-= lib.einsum('AF,iJbF->iJbA', tmpb, t2ab)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tau2aa = uccsd.make_tau_aa(r2aa, r1a, t1a, 2)
tauaa = uccsd.make_tau_aa(t2aa, t1a, t1a)
tmpaa = lib.einsum('menf,ijef->mnij', eris_ovov, tau2aa)
Hr2aa += lib.einsum('mnij,mnab->ijab', tmpaa, tauaa) * 0.25
tau2aa = tauaa = None
tau2bb = uccsd.make_tau_aa(r2bb, r1b, t1b, 2)
taubb = uccsd.make_tau_aa(t2bb, t1b, t1b)
tmpbb = lib.einsum('menf,ijef->mnij', eris_OVOV, tau2bb)
Hr2bb += lib.einsum('mnij,mnab->ijab', tmpbb, taubb) * 0.25
tau2bb = taubb = None
tau2ab = uccsd.make_tau_ab(r2ab, r1 , t1 , 2)
tauab = uccsd.make_tau_ab(t2ab, t1 , t1)
tmpab = lib.einsum('meNF,iJeF->mNiJ', eris_ovOV, tau2ab)
Hr2ab += lib.einsum('mNiJ,mNaB->iJaB', tmpab, tauab)
tau2ab = tauab = None
tmpa = lib.einsum('menf,imef->ni', eris_ovov, r2aa)
tmpa-= lib.einsum('neMF,iMeF->ni', eris_ovOV, r2ab)
tmpb = lib.einsum('menf,imef->ni', eris_OVOV, r2bb)
tmpb-= lib.einsum('mfNE,mIfE->NI', eris_ovOV, r2ab)
Hr1a += lib.einsum('na,ni->ia', t1a, tmpa)
Hr1b += lib.einsum('na,ni->ia', t1b, tmpb)
Hr2aa+= lib.einsum('mj,imab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('mj,imab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('MJ,iMaB->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('mj,mIaB->jIaB', tmpa, t2ab)
tmp1a = np.einsum('menf,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('mfne,mf->en', eris_ovov, r1a)
tmp1a-= np.einsum('neMF,MF->en', eris_ovOV, r1b)
tmp1b = np.einsum('menf,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfne,mf->en', eris_OVOV, r1b)
tmp1b-= np.einsum('mfNE,mf->EN', eris_ovOV, r1a)
tmpa = np.einsum('en,nb->eb', tmp1a, t1a)
tmpa+= lib.einsum('menf,mnfb->eb', eris_ovov, r2aa)
tmpa-= lib.einsum('meNF,mNbF->eb', eris_ovOV, r2ab)
tmpb = np.einsum('en,nb->eb', tmp1b, t1b)
tmpb+= lib.einsum('menf,mnfb->eb', eris_OVOV, r2bb)
tmpb-= lib.einsum('nfME,nMfB->EB', eris_ovOV, r2ab)
Hr2aa+= lib.einsum('eb,ijae->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('eb,ijae->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('EB,iJaE->iJaB', tmpb, t2ab)
Hr2ab+= lib.einsum('eb,iJeA->iJbA', tmpa, t2ab)
    eris_ovov = eris_ovOV = eris_OVOV = None
Hr2aa-= lib.einsum('mbij,ma->ijab', imds.wovoo, r1a)
Hr2bb-= lib.einsum('mbij,ma->ijab', imds.wOVOO, r1b)
Hr2ab-= lib.einsum('mBiJ,ma->iJaB', imds.woVoO, r1a)
Hr2ab-= lib.einsum('MbJi,MA->iJbA', imds.wOvOo, r1b)
Hr1a-= 0.5*lib.einsum('mnie,mnae->ia', imds.wooov, r2aa)
Hr1a-= lib.einsum('mNiE,mNaE->ia', imds.woOoV, r2ab)
Hr1b-= 0.5*lib.einsum('mnie,mnae->ia', imds.wOOOV, r2bb)
Hr1b-= lib.einsum('MnIe,nMeA->IA', imds.wOoOv, r2ab)
tmpa = lib.einsum('mnie,me->ni', imds.wooov, r1a)
tmpa-= lib.einsum('nMiE,ME->ni', imds.woOoV, r1b)
tmpb = lib.einsum('mnie,me->ni', imds.wOOOV, r1b)
tmpb-= lib.einsum('NmIe,me->NI', imds.wOoOv, r1a)
Hr2aa+= lib.einsum('ni,njab->ijab', tmpa, t2aa)
Hr2bb+= lib.einsum('ni,njab->ijab', tmpb, t2bb)
Hr2ab+= lib.einsum('ni,nJaB->iJaB', tmpa, t2ab)
Hr2ab+= lib.einsum('NI,jNaB->jIaB', tmpb, t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2aa+= lib.einsum('ejab,ie->ijab', imds.wvovv[p0:p1], r1a[:,p0:p1])
Hr2ab+= lib.einsum('eJaB,ie->iJaB', imds.wvOvV[p0:p1], r1a[:,p0:p1])
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2bb+= lib.einsum('ejab,ie->ijab', imds.wVOVV[p0:p1], r1b[:,p0:p1])
Hr2ab+= lib.einsum('EjBa,IE->jIaB', imds.wVoVv[p0:p1], r1b[:,p0:p1])
Hr1a += np.einsum('maei,me->ia',imds.wovvo,r1a)
Hr1a += np.einsum('MaEi,ME->ia',imds.wOvVo,r1b)
Hr1b += np.einsum('maei,me->ia',imds.wOVVO,r1b)
Hr1b += np.einsum('mAeI,me->IA',imds.woVvO,r1a)
Hr2aa+= lib.einsum('mbej,imae->ijab', imds.wovvo, r2aa) * 2
Hr2aa+= lib.einsum('MbEj,iMaE->ijab', imds.wOvVo, r2ab) * 2
Hr2bb+= lib.einsum('mbej,imae->ijab', imds.wOVVO, r2bb) * 2
Hr2bb+= lib.einsum('mBeJ,mIeA->IJAB', imds.woVvO, r2ab) * 2
Hr2ab+= lib.einsum('mBeJ,imae->iJaB', imds.woVvO, r2aa)
Hr2ab+= lib.einsum('MBEJ,iMaE->iJaB', imds.wOVVO, r2ab)
Hr2ab+= lib.einsum('mBEj,mIaE->jIaB', imds.woVVo, r2ab)
Hr2ab+= lib.einsum('mbej,mIeA->jIbA', imds.wovvo, r2ab)
Hr2ab+= lib.einsum('MbEj,IMAE->jIbA', imds.wOvVo, r2bb)
Hr2ab+= lib.einsum('MbeJ,iMeA->iJbA', imds.wOvvO, r2ab)
Hr2aa *= .5
Hr2bb *= .5
Hr2aa = Hr2aa - Hr2aa.transpose(0,1,3,2)
Hr2aa = Hr2aa - Hr2aa.transpose(1,0,2,3)
Hr2bb = Hr2bb - Hr2bb.transpose(0,1,3,2)
Hr2bb = Hr2bb - Hr2bb.transpose(1,0,2,3)
vector = amplitudes_to_vector_ee((Hr1a,Hr1b), (Hr2aa,Hr2ab,Hr2bb))
return vector
def eomsf_ccsd_matvec(eom, vector, imds=None):
'''Spin flip EOM-CCSD'''
if imds is None: imds = eom.make_imds()
t1, t2, eris = imds.t1, imds.t2, imds.eris
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
nmoa, nmob = nocca+nvira, noccb+nvirb
r1, r2 = vector_to_amplitudes_eomsf(vector, (nmoa,nmob), (nocca,noccb))
r1ab, r1ba = r1
r2baaa, r2aaba, r2abbb, r2bbab = r2
Hr1ab = np.einsum('ae,ie->ia', imds.Fvvb, r1ab)
Hr1ab -= np.einsum('mi,ma->ia', imds.Fooa, r1ab)
Hr1ab += np.einsum('me,imae->ia', imds.Fovb, r2abbb)
Hr1ab += np.einsum('me,imae->ia', imds.Fova, r2aaba)
Hr1ba = np.einsum('ae,ie->ia', imds.Fvva, r1ba)
Hr1ba -= np.einsum('mi,ma->ia', imds.Foob, r1ba)
Hr1ba += np.einsum('me,imae->ia', imds.Fova, r2baaa)
Hr1ba += np.einsum('me,imae->ia', imds.Fovb, r2bbab)
Hr2baaa = .5 *lib.einsum('nMjI,Mnab->Ijab', imds.woOoO, r2baaa)
Hr2aaba = .25*lib.einsum('mnij,mnAb->ijAb', imds.woooo, r2aaba)
Hr2abbb = .5 *lib.einsum('mNiJ,mNAB->iJAB', imds.woOoO, r2abbb)
Hr2bbab = .25*lib.einsum('MNIJ,MNaB->IJaB', imds.wOOOO, r2bbab)
Hr2baaa += lib.einsum('be,Ijae->Ijab', imds.Fvva , r2baaa)
Hr2baaa -= lib.einsum('mj,imab->ijab', imds.Fooa*.5, r2baaa)
Hr2baaa -= lib.einsum('MJ,Miab->Jiab', imds.Foob*.5, r2baaa)
Hr2bbab -= lib.einsum('mj,imab->ijab', imds.Foob , r2bbab)
Hr2bbab += lib.einsum('BE,IJaE->IJaB', imds.Fvvb*.5, r2bbab)
Hr2bbab += lib.einsum('be,IJeA->IJbA', imds.Fvva*.5, r2bbab)
Hr2aaba -= lib.einsum('mj,imab->ijab', imds.Fooa , r2aaba)
Hr2aaba += lib.einsum('be,ijAe->ijAb', imds.Fvva*.5, r2aaba)
Hr2aaba += lib.einsum('BE,ijEa->ijBa', imds.Fvvb*.5, r2aaba)
Hr2abbb += lib.einsum('BE,iJAE->iJAB', imds.Fvvb , r2abbb)
Hr2abbb -= lib.einsum('mj,imab->ijab', imds.Foob*.5, r2abbb)
Hr2abbb -= lib.einsum('mj,mIAB->jIAB', imds.Fooa*.5, r2abbb)
tau2baaa = np.einsum('ia,jb->ijab', r1ba, t1a)
tau2baaa = tau2baaa - tau2baaa.transpose(0,1,3,2)
tau2abbb = np.einsum('ia,jb->ijab', r1ab, t1b)
tau2abbb = tau2abbb - tau2abbb.transpose(0,1,3,2)
tau2aaba = np.einsum('ia,jb->ijab', r1ab, t1a)
tau2aaba = tau2aaba - tau2aaba.transpose(1,0,2,3)
tau2bbab = np.einsum('ia,jb->ijab', r1ba, t1b)
tau2bbab = tau2bbab - tau2bbab.transpose(1,0,2,3)
tau2baaa += r2baaa
tau2bbab += r2bbab
tau2abbb += r2abbb
tau2aaba += r2aaba
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:Hr1ba += lib.einsum('mfae,Imef->Ia', eris_ovvv, r2baaa)
#:tmp1aaba = lib.einsum('meaf,Ijef->maIj', eris_ovvv, tau2baaa)
#:Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a , tmp1aaba)
mem_now = lib.current_memory()[0]
max_memory = max(0, eom.max_memory - mem_now)
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
Hr1ba += lib.einsum('mfae,Imef->Ia', ovvv, r2baaa[:,p0:p1])
tmp1aaba = lib.einsum('meaf,Ijef->maIj', ovvv, tau2baaa)
Hr2baaa += lib.einsum('mb,maIj->Ijab', t1a[p0:p1], tmp1aaba)
ovvv = tmp1aaba = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:Hr1ab += lib.einsum('MFAE,iMEF->iA', eris_OVVV, r2abbb)
#:tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', eris_OVVV, tau2abbb)
#:Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b , tmp1bbab)
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
Hr1ab += lib.einsum('MFAE,iMEF->iA', OVVV, r2abbb[:,p0:p1])
tmp1bbab = lib.einsum('MEAF,iJEF->MAiJ', OVVV, tau2abbb)
Hr2abbb += lib.einsum('MB,MAiJ->iJAB', t1b[p0:p1], tmp1bbab)
OVVV = tmp1bbab = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:Hr1ab += lib.einsum('mfAE,imEf->iA', eris_ovVV, r2aaba)
#:tmp1abaa = lib.einsum('meAF,ijFe->mAij', eris_ovVV, tau2aaba)
#:tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', eris_ovVV, tau2bbab)
#:tmp1ba = lib.einsum('mfAE,mE->Af', eris_ovVV, r1ab)
#:Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a*.5, tmp1abbb)
#:Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a*.5, tmp1abaa)
tmp1ba = np.zeros((nvirb,nvira))
blksize = min(nocca, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Hr1ab += lib.einsum('mfAE,imEf->iA', ovVV, r2aaba[:,p0:p1])
tmp1abaa = lib.einsum('meAF,ijFe->mAij', ovVV, tau2aaba)
tmp1abbb = lib.einsum('meAF,IJeF->mAIJ', ovVV, tau2bbab)
tmp1ba += lib.einsum('mfAE,mE->Af', ovVV, r1ab[p0:p1])
Hr2bbab -= lib.einsum('mb,mAIJ->IJbA', t1a[p0:p1]*.5, tmp1abbb)
Hr2aaba -= lib.einsum('mb,mAij->ijAb', t1a[p0:p1]*.5, tmp1abaa)
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:Hr1ba += lib.einsum('MFae,IMeF->Ia', eris_OVvv, r2bbab)
#:tmp1baaa = lib.einsum('MEaf,ijEf->Maij', eris_OVvv, tau2aaba)
#:tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', eris_OVvv, tau2bbab)
#:tmp1ab = lib.einsum('MFae,Me->aF', eris_OVvv, r1ba)
#:Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b*.5, tmp1baaa)
#:Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b*.5, tmp1babb)
tmp1ab = np.zeros((nvira,nvirb))
blksize = min(noccb, max(ccsd.BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Hr1ba += lib.einsum('MFae,IMeF->Ia', OVvv, r2bbab[:,p0:p1])
tmp1baaa = lib.einsum('MEaf,ijEf->Maij', OVvv, tau2aaba)
tmp1babb = lib.einsum('MEaf,IJfE->MaIJ', OVvv, tau2bbab)
tmp1ab+= lib.einsum('MFae,Me->aF', OVvv, r1ba[p0:p1])
Hr2aaba -= lib.einsum('MB,Maij->ijBa', t1b[p0:p1]*.5, tmp1baaa)
Hr2bbab -= lib.einsum('MB,MaIJ->IJaB', t1b[p0:p1]*.5, tmp1babb)
Hr2baaa += lib.einsum('aF,jIbF->Ijba', tmp1ab , t2ab)
Hr2bbab -= lib.einsum('aF,IJFB->IJaB', tmp1ab*.5, t2bb)
Hr2abbb += lib.einsum('Af,iJfB->iJBA', tmp1ba , t2ab)
Hr2aaba -= lib.einsum('Af,ijfb->ijAb', tmp1ba*.5, t2aa)
Hr2baaa -= lib.einsum('MbIj,Ma->Ijab', imds.wOvOo, r1ba )
Hr2bbab -= lib.einsum('MBIJ,Ma->IJaB', imds.wOVOO, r1ba*.5)
Hr2abbb -= lib.einsum('mBiJ,mA->iJAB', imds.woVoO, r1ab )
Hr2aaba -= lib.einsum('mbij,mA->ijAb', imds.wovoo, r1ab*.5)
Hr1ab -= 0.5*lib.einsum('mnie,mnAe->iA', imds.wooov, r2aaba)
Hr1ab -= lib.einsum('mNiE,mNAE->iA', imds.woOoV, r2abbb)
Hr1ba -= 0.5*lib.einsum('MNIE,MNaE->Ia', imds.wOOOV, r2bbab)
Hr1ba -= lib.einsum('MnIe,Mnae->Ia', imds.wOoOv, r2baaa)
tmp1ab = lib.einsum('MnIe,Me->nI', imds.wOoOv, r1ba)
tmp1ba = lib.einsum('mNiE,mE->Ni', imds.woOoV, r1ab)
Hr2baaa += lib.einsum('nI,njab->Ijab', tmp1ab*.5, t2aa)
Hr2bbab += lib.einsum('nI,nJaB->IJaB', tmp1ab , t2ab)
Hr2abbb += lib.einsum('Ni,NJAB->iJAB', tmp1ba*.5, t2bb)
Hr2aaba += lib.einsum('Ni,jNbA->ijAb', tmp1ba , t2ab)
for p0, p1 in lib.prange(0, nvira, nocca):
Hr2baaa += lib.einsum('ejab,Ie->Ijab', imds.wvovv[p0:p1], r1ba[:,p0:p1]*.5)
Hr2bbab += lib.einsum('eJaB,Ie->IJaB', imds.wvOvV[p0:p1], r1ba[:,p0:p1] )
for p0, p1 in lib.prange(0, nvirb, noccb):
Hr2abbb += lib.einsum('EJAB,iE->iJAB', imds.wVOVV[p0:p1], r1ab[:,p0:p1]*.5)
Hr2aaba += lib.einsum('EjAb,iE->ijAb', imds.wVoVv[p0:p1], r1ab[:,p0:p1] )
Hr1ab += np.einsum('mAEi,mE->iA', imds.woVVo, r1ab)
Hr1ba += np.einsum('MaeI,Me->Ia', imds.wOvvO, r1ba)
Hr2baaa += lib.einsum('mbej,Imae->Ijab', imds.wovvo, r2baaa)
Hr2baaa += lib.einsum('MbeJ,Miae->Jiab', imds.wOvvO, r2baaa)
Hr2baaa += lib.einsum('MbEj,IMaE->Ijab', imds.wOvVo, r2bbab)
Hr2bbab += lib.einsum('MBEJ,IMaE->IJaB', imds.wOVVO, r2bbab)
Hr2bbab += lib.einsum('MbeJ,IMeA->IJbA', imds.wOvvO, r2bbab)
Hr2bbab += lib.einsum('mBeJ,Imae->IJaB', imds.woVvO, r2baaa)
Hr2aaba += lib.einsum('mbej,imAe->ijAb', imds.wovvo, r2aaba)
Hr2aaba += lib.einsum('mBEj,imEa->ijBa', imds.woVVo, r2aaba)
Hr2aaba += lib.einsum('MbEj,iMAE->ijAb', imds.wOvVo, r2abbb)
Hr2abbb += lib.einsum('MBEJ,iMAE->iJAB', imds.wOVVO, r2abbb)
Hr2abbb += lib.einsum('mBEj,mIAE->jIAB', imds.woVVo, r2abbb)
Hr2abbb += lib.einsum('mBeJ,imAe->iJAB', imds.woVvO, r2aaba)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
    eris_ovOV = np.asarray(eris.ovOV)
import numpy
import tensorflow as tf
from sklearn.metrics import confusion_matrix, average_precision_score
import Constants as Constants
from Log import log
def create_confusion_matrix(pred, targets, n_classes):
result = None
targets = targets.reshape((targets.shape[0], -1))
pred = pred.reshape((pred.shape[0], -1))
for i in range(pred.shape[0]):
        conf_matrix = confusion_matrix(targets[i],
                                       pred[i],
                                       labels=list(range(n_classes)))
conf_matrix = conf_matrix[numpy.newaxis, :, :]
if result is None:
result = conf_matrix
else:
result = numpy.append(result, conf_matrix, axis=0)
return result
def get_average_precision(targets, outputs, conf_matrix):
targets = targets.reshape(targets.shape[0], -1)
outputs = outputs[:, :, :, :, 1]
outputs = outputs.reshape(outputs.shape[1], -1)
ap = numpy.empty(outputs.shape[0], numpy.float64)
# ap_interpolated = numpy.empty(outputs.shape[0], numpy.float64)
for i in range(outputs.shape[0]):
# precision, recall, thresholds = precision_recall_curve(targets[i], outputs[i])
ap[i] = average_precision_score(targets[i].flatten(), outputs[i].flatten())
# result = eng.get_ap(matlab.double(outputs[i].tolist()), matlab.double(targets[i].tolist()))
# ap_interpolated[i] = result
ap = numpy.nan_to_num(ap)
# ap_interpolated = numpy.nan_to_num(ap_interpolated)
return ap
def compute_binary_ious_tf(targets, outputs):
binary_ious = [compute_iou_for_binary_segmentation(target, output) for target, output in
zip(targets, outputs)]
return numpy.sum(binary_ious, dtype="float32")
def compute_iou_for_binary_segmentation(y_argmax, target):
I = numpy.logical_and(y_argmax == 1, target == 1).sum()
U = numpy.logical_or(y_argmax == 1, target == 1).sum()
if U == 0:
IOU = 1.0
else:
IOU = float(I) / U
return IOU
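
# Tiny worked example (illustrative): two 2x2 masks overlapping in one pixel
# give I = 1 and U = 3, so the IoU is 1/3:
#
#   pred = numpy.array([[1, 1], [0, 0]])
#   gt = numpy.array([[1, 0], [1, 0]])
#   compute_iou_for_binary_segmentation(pred, gt)   # -> 0.333...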
def compute_measures_for_binary_segmentation(prediction, target):
T = target.sum()
P = prediction.sum()
I = numpy.logical_and(prediction == 1, target == 1).sum()
U = numpy.logical_or(prediction == 1, target == 1).sum()
if U == 0:
recall = 1.0
precision = 1.0
iou = 1.0
else:
if T == 0:
recall = 1.0
else:
recall = float(I) / T
if P == 0:
precision = 1.0
else:
precision = float(I) / P
iou = float(I) / U
measures = {"recall": recall, "precision": precision, "iou": iou}
return measures
def average_measures(measures_dicts):
keys = list(measures_dicts[0].keys())
averaged_measures = {}
for k in keys:
vals = [m[k] for m in measures_dicts]
val = numpy.mean(vals)
averaged_measures[k] = val
return averaged_measures
def compute_iou_from_logits(preds, labels, num_labels):
"""
Computes the intersection over union (IoU) score for given logit tensor and target labels
:param logits: 4D tensor of shape [batch_size, height, width, num_classes]
:param labels: 3D tensor of shape [batch_size, height, width] and type int32 or int64
:param num_labels: tensor with the number of labels
:return: 1D tensor of shape [num_classes] with intersection over union for each class, averaged over batch
"""
with tf.variable_scope("IoU"):
# compute predictions
# probs = softmax(logits, axis=-1)
# preds = tf.arg_max(probs, dimension=3)
# num_labels = preds.get_shape().as_list()[-1];
IoUs = []
for label in range(num_labels):
# find pixels with given label
P = tf.equal(preds, label)
L = tf.equal(labels, label)
# Union
U = tf.logical_or(P, L)
U = tf.reduce_sum(tf.cast(U, tf.float32))
# intersection
I = tf.logical_and(P, L)
I = tf.reduce_sum(tf.cast(I, tf.float32))
IOU = tf.cast(I, tf.float32) / tf.cast(U, tf.float32)
# U might be 0!
            IOU = tf.where(tf.equal(U, 0), tf.ones_like(IOU), IOU)
IOU = tf.Print(IOU, [IOU], "iou" + repr(label))
IoUs.append(IOU)
return tf.reshape(tf.stack(IoUs), (num_labels,))
def calc_measures_avg(measures, n_imgs, ignore_classes, for_final_result):
measures_result = {}
# these measures can just be averaged
for measure in [Constants.ERRORS, Constants.IOU, Constants.BINARY_IOU, Constants.AP, Constants.MOTA, Constants.MOTP,
Constants.AP_INTERPOLATED, Constants.FALSE_POSITIVES, Constants.FALSE_NEGATIVES,
Constants.ID_SWITCHES]:
if measure in measures:
measures_result[measure] = numpy.sum(measures[measure]) / n_imgs
# TODO: This has to be added as IOU instead of conf matrix.
if Constants.CONFUSION_MATRIX in measures:
measures_result[Constants.IOU] = calc_iou(measures, n_imgs, ignore_classes)
if Constants.CLICKS in measures:
clicks = [int(x.rsplit(':', 1)[-1]) for x in measures[Constants.CLICKS]]
measures_result[Constants.CLICKS] = float(numpy.sum(clicks)) / n_imgs
if for_final_result and Constants.DETECTION_AP in measures:
from object_detection.utils.object_detection_evaluation import ObjectDetectionEvaluation
if isinstance(measures[Constants.DETECTION_AP], ObjectDetectionEvaluation):
evaluator = measures[Constants.DETECTION_AP]
else:
n_classes = measures[Constants.DETECTION_AP][-2]
evaluator = ObjectDetectionEvaluation(n_classes, matching_iou_threshold=0.5)
evaluator.next_image_key = 0 # add a new field which we will use
_add_aps(evaluator, measures[Constants.DETECTION_AP])
aps, mAP, _, _, _, _ = evaluator.evaluate()
measures_result[Constants.DETECTION_APS] = aps
measures_result[Constants.DETECTION_AP] = mAP
if for_final_result and Constants.CLUSTER_IDS in measures and Constants.ORIGINAL_LABELS in measures:
from sklearn.metrics import adjusted_mutual_info_score, homogeneity_score, completeness_score
labels_true = numpy.reshape(numpy.array(measures[Constants.ORIGINAL_LABELS], dtype=numpy.int32), [-1])
labels_pred = numpy.reshape(numpy.array(measures[Constants.CLUSTER_IDS], dtype=numpy.int32), [-1])
ami = adjusted_mutual_info_score(labels_true, labels_pred)
measures_result[Constants.ADJUSTED_MUTUAL_INFORMATION] = ami
homogeneity = homogeneity_score(labels_true, labels_pred)
measures_result[Constants.HOMOGENEITY] = homogeneity
completeness = completeness_score(labels_true, labels_pred)
measures_result[Constants.COMPLETENESS] = completeness
NO_EVAL = False
if not NO_EVAL:
if for_final_result and Constants.ORIGINAL_LABELS in measures and Constants.EMBEDDING in measures:
from sklearn import mixture
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_mutual_info_score, homogeneity_score, completeness_score
embeddings = numpy.array(measures[Constants.EMBEDDING], dtype=numpy.int32)
embeddings = numpy.reshape(embeddings,[-1,embeddings.shape[-1]])
labels_true = numpy.reshape(numpy.array(measures[Constants.ORIGINAL_LABELS], dtype=numpy.int32), [-1])
# n_components = 80
# n_components = 400
# n_components = 1000
n_components = 3000
import time
# start = time.time()
# gmm = mixture.GaussianMixture(n_components=n_components, covariance_type='full')
# gmm.fit(embeddings)
# labels_pred= gmm.predict(embeddings)
# print "gmm took ", time.time()-start
start = time.time()
kmeans = KMeans(n_clusters=n_components, n_jobs=-1)
labels_pred = kmeans.fit_predict(embeddings)
print("km took ", time.time() - start)
ami = adjusted_mutual_info_score(labels_true, labels_pred)
measures_result[Constants.ADJUSTED_MUTUAL_INFORMATION] = ami
homogeneity = homogeneity_score(labels_true, labels_pred)
measures_result[Constants.HOMOGENEITY] = homogeneity
completeness = completeness_score(labels_true, labels_pred)
measures_result[Constants.COMPLETENESS] = completeness
return measures_result
def calc_iou(measures, n_imgs, ignore_classes):
assert Constants.CONFUSION_MATRIX in measures
conf_matrix = measures[Constants.CONFUSION_MATRIX]
assert conf_matrix.shape[0] == n_imgs # not sure, if/why we need these n_imgs
ious = get_ious_per_image(measures, ignore_classes)
IOU_avg = numpy.mean(ious)
return IOU_avg
def get_ious_per_image(measures, ignore_classes):
assert Constants.CONFUSION_MATRIX in measures
conf_matrix = measures[Constants.CONFUSION_MATRIX]
I = (numpy.diagonal(conf_matrix, axis1=1, axis2=2)).astype("float32")
sum_predictions = numpy.sum(conf_matrix, axis=1)
sum_labels = numpy.sum(conf_matrix, axis=2)
U = sum_predictions + sum_labels - I
n_classes = conf_matrix.shape[-1]
class_mask = numpy.ones((n_classes,))
# Temporary fix to avoid index out of bounds when there is a void label in the list of classes to be ignored.
    ignore_classes = numpy.array(ignore_classes)
"""Orbita theoretical model."""
from typing import Tuple
import numpy as np
from pyquaternion import Quaternion
from numpy import linalg as LA
from scipy.spatial.transform import Rotation as R
def rot(axis, deg):
"""Compute 3D rotation matrix given euler rotation."""
return R.from_euler(axis, np.deg2rad(deg)).as_matrix()
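
# Quick sanity check (illustrative): a 90 degree rotation about z maps the x
# axis onto the y axis.
#
#   >>> np.allclose(rot('z', 90) @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
#   True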
class Actuator(object):
"""
Orbita theoretical model.
This actuator is composed of three disks, linked to three arms and a
platform in the end. The goal is to orientate the platform, so the disks do
a rotation following a circle called "proximal circle".
Then, these disks make the arm rotate around the platform's center on a
circle called "distal circle".
Three parameters need to be set : The distal radius R and the 3D
coordinates of the centers of the distal circle and the proximal circle.
The mathematical explanation can be found in the spherical_symbolic.ipynb
notebook
"""
def __init__(self,
Pc_z: Tuple[float, float, float] = (0, 0, 89.4),
Cp_z: Tuple[float, float, float] = (0, 0, 64.227),
R: float = 39.162,
R0: np.ndarray = np.dot(rot('z', 60), rot('y', 10))):
"""Create a new actuator with the given disks configuration."""
self.Pc_z = np.array(Pc_z)
self.Cp_z = np.array(Cp_z)
self.R = R
self.x0, self.y0, self.z0 = np.array(R0)
self.x0_quat = Quaternion(0, self.x0[0], self.x0[1], self.x0[2])
self.y0_quat = Quaternion(0, self.y0[0], self.y0[1], self.y0[2])
self.z0_quat = Quaternion(0, self.z0[0], self.z0[1], self.z0[2])
self.last_angles = np.array([0, 2 * np.pi / 3, -2 * np.pi / 3])
self.offset = np.array([0, 0, 0])
def get_new_frame_from_vector(self, vector: np.ndarray, angle: float = 0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Compute the coordinates of the vectors of a new frame whose Z axis is the chosen vector.
Parameters
----------
vector : array_like
Vector used to orientate the platform
angle : float
The desired angle of rotation of the platform on its Z axis
in degrees
Returns
-------
X : array_like
New X vector of the platform's frame
Y : array_like
New Y vector of the platform's frame
Z : array_like
New Z vector of the platform's frame
"""
beta = np.deg2rad(angle)
# GOAL VECTOR (the desired Z axis)
goal = vector
goal_norm = [
i / LA.norm(goal) for i in goal
]
alpha = np.arccos(np.vdot(self.z0, goal_norm)) # Angle of rotation
if alpha == 0:
v = Quaternion(0.0, 0.0, 0.0, 1.0)
else: # Vector of rotation as a quaternion
# VECTOR AND ANGLE OF ROTATION
vec = np.cross(self.z0, goal_norm)
vector_norm = [
i / LA.norm(vec) for i in vec
]
v = Quaternion(0.0, vector_norm[0], vector_norm[1], vector_norm[2])
# QUATERNION OF ROTATION ###
w1 = np.cos(alpha / 2.0)
x1 = np.sin(alpha / 2.0) * v.x
y1 = np.sin(alpha / 2.0) * v.y
z1 = np.sin(alpha / 2.0) * v.z
q1 = Quaternion(w1, x1, y1, z1) # 1st rotation quaternion
z_prime = q1 * self.z0_quat * q1.inverse
w2 = np.cos(beta / 2.0)
x2 = np.sin(beta / 2.0) * z_prime.x
        y2 = np.sin(beta / 2.0) * z_prime.y
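        # (For reference: a rotation by angle a about a unit axis v corresponds
        # to the quaternion q = [cos(a/2), sin(a/2) * v]. q1 above tilts z0 onto
        # the requested vector, and this second quaternion spins the platform by
        # beta about the new Z axis z'.)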