prompt (string, 15 – 655k chars) | completion (string, 3 – 32.4k chars) | api (string, 8 – 52 chars)
---|---|---|
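Each row pairs a code `prompt` that stops right before an API call, the `completion` that fills it in, and the fully qualified `api` name. A minimal sketch of how one such row could be represented and consumed; the field names come from the header above, while the file name and loading code are purely hypothetical assumptions:

```python
import json

# One row of the table, keyed by the three columns in the header above.
row = {
    "prompt": "latency_mean = ",        # code truncated right before the call
    "completion": "np.mean(results)",   # the call that completes the prompt
    "api": "numpy.mean",                # fully qualified API name
}

# e.g. reading rows from a hypothetical JSON-lines dump of the table:
# with open("rows.jsonl") as fh:
#     rows = [json.loads(line) for line in fh]
```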
"""Runners for the different benchmark scenarios."""
# pylint: disable=broad-except
import abc
import json
import time
from typing import Any, Dict, List
import numpy as np
from perfkitbenchmarker.scripts.messaging_service_scripts.common import client
GET_TIME_IN_MILLISECONDS = lambda: time.time() * 1000
UNIT_OF_TIME = 'milliseconds'
class BaseRunner(metaclass=abc.ABCMeta):
"""Base Runner class.
This class is the ancestor of all the runner classes. Concrete subclasses must
implement the run_phase method which actually contains the code for a given
scenario.
The actual cloud connection logic is delegated to the
BaseMessagingServiceClient instance at self.client.
"""
STARTUP_RUN = False
@classmethod
def run_class_startup(cls):
"""Requests to run the class startup code if it hasn't run yet.
Do not override this. Instead override the on_startup method.
"""
if not cls.STARTUP_RUN:
cls.on_startup()
cls.STARTUP_RUN = True
@classmethod
def on_startup(cls):
"""Executes code before creating the 1st instance with the factories utils.
Optional override.
"""
pass
def __init__(self, client_: client.BaseMessagingServiceClient):
self.client = client_
def _get_summary_statistics(self, scenario: str, results: List[float],
number_of_messages: int,
failure_counter: int) -> Dict[str, Any]:
"""Getting statistics based on results from the benchmark."""
metrics_data = {}
common_metadata = {}
latency_mean =
|
np.mean(results)
|
numpy.mean
|
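The first row's prompt stops at `latency_mean =` and its completion is `np.mean(results)` (api `numpy.mean`). A minimal sketch of how such per-message latency summaries are typically aggregated with numpy; the extra percentile metrics and names below are illustrative assumptions, not the PerfKitBenchmarker source:

```python
import numpy as np

def summarize_latencies(results, unit='milliseconds'):
    """Illustrative only: aggregate per-message latencies into summary metrics."""
    results = np.asarray(results, dtype=float)
    return {
        'mean_latency': (float(np.mean(results)), unit),
        'p50_latency': (float(np.percentile(results, 50)), unit),
        'p99_latency': (float(np.percentile(results, 99)), unit),
        'stddev_latency': (float(np.std(results)), unit),
    }
```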
from numba.pycc import CC
from photon_circular import *
import numpy as np
from numba import typed, types
import numba as nb
from numba import cuda  # needed for the cuda.pinned(...) calls below
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from plot import *
from numpy import genfromtxt
from numba.cuda.random import init_xoroshiro128p_states
from mpl_toolkits.mplot3d import Axes3D
import os
from matplotlib import cm
from scipy import optimize
from tqdm.notebook import trange, tqdm
def fit_func(x, const, a, b, c, d):
return np.power(x, 3) * d + np.power(x, 2) * c + np.power(x, 2) * b + x * a + const
gridDim = 1
blockDim = 1
dim = gridDim * blockDim
all_adj_dist = []
all_collected = []
all_amps = []
all_modes = []
mode = 1.0
iters = 3 * 16
iters = 512
NNxy = 50
NNr = int(NNxy / 2)
NNz = int(NNxy / 2)
degree_divs = 64
n_steps = 100
np.random.seed(1)
s1 = genfromtxt(f's1_pm_{degree_divs}.txt', delimiter=',')
s2 = genfromtxt(f's2_pm_{degree_divs}.txt', delimiter=',')
m11 = genfromtxt(f'm11_pm_{degree_divs}.txt', delimiter=',')
m12 = genfromtxt(f'm12_pm_{degree_divs}.txt', delimiter=',')
temp = 1j * s1[:, 2]
temp += s1[:, 1]
s1 = temp
temp = 1j * s2[:, 2]
temp += s2[:, 1]
s2 = temp
m11 = m11[:, 1]
m12 = m12[:, 1]
s1 = np.ascontiguousarray(s1)
s2 = np.ascontiguousarray(s2)
m11 = np.ascontiguousarray(m11)
m12 = np.ascontiguousarray(m12)
# note: numba's cuda.pinned(...) is a context manager; these bare calls do not
# actually pin the arrays (see the sketch after this row).
cuda.pinned(s1)
cuda.pinned(s2)
cuda.pinned(m11)
cuda.pinned(m12)
co_xy_all = np.zeros((dim, NNxy, NNxy), dtype=np.float32)
co_rz_all = np.zeros((dim, NNr, NNz), dtype=np.float32)
co_rz_trad_all = np.zeros((dim, NNr, NNz), dtype=np.float32)
incoh_cross_xy_all = np.zeros((dim, NNxy, NNxy), dtype=np.float32)
incoh_cross_rz_all = np.zeros((dim, NNr, NNz), dtype=np.float32)
cross_xy_all =
|
np.zeros((dim, NNxy, NNxy), dtype=np.float32)
|
numpy.zeros
|
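The second row's prompt invokes `cuda.pinned(s1)` and friends as bare statements; in numba, `cuda.pinned` is documented as a context manager, so the page-locking only takes effect inside a `with` block. A minimal sketch of that usage (the array name is a placeholder, not one of the variables above):

```python
import numpy as np
from numba import cuda

a = np.zeros(1024, dtype=np.float32)
with cuda.pinned(a):          # page-lock `a` while the block is active
    d_a = cuda.to_device(a)   # host-to-device copies are faster from pinned memory
    d_a.copy_to_host(a)
```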
#! /usr/bin/env python
"""Grid element mappers that are specific to raster grids.
Mapping functions unique to raster grids
++++++++++++++++++++++++++++++++++++++++
.. autosummary::
~landlab.grid.raster_mappers.map_sum_of_inlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_inlinks_to_node
~landlab.grid.raster_mappers.map_max_of_inlinks_to_node
~landlab.grid.raster_mappers.map_min_of_inlinks_to_node
~landlab.grid.raster_mappers.map_sum_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_outlinks_to_node
~landlab.grid.raster_mappers.map_max_of_outlinks_to_node
~landlab.grid.raster_mappers.map_min_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_active_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_active_links_to_node
"""
import numpy as np
def _node_out_link_ids(shape):
"""Links leaving each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.raster_mappers import _node_out_link_ids
>>> (vert, horiz) = _node_out_link_ids((3, 4))
>>> vert
array([[ 3, 4, 5, 6],
[10, 11, 12, 13],
[-1, -1, -1, -1]])
>>> horiz
array([[ 0, 1, 2, -1],
[ 7, 8, 9, -1],
[14, 15, 16, -1]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
node_horizontal_link_ids = np.empty(shape, int)
node_horizontal_link_ids[:, :-1] = layout.horizontal_links.reshape(
(shape[0], shape[1] - 1)
)
node_horizontal_link_ids[:, -1] = -1
node_vertical_link_ids = np.empty(shape, int)
node_vertical_link_ids[:-1, :] = layout.vertical_links.reshape(
(shape[0] - 1, shape[1])
)
node_vertical_link_ids[-1, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def _node_in_link_ids(shape):
"""Links entering each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
tuple :
Tuple of array of link IDs as (vertical_links, horizontal_links).
Examples
--------
>>> from landlab.grid.raster_mappers import _node_in_link_ids
>>> (vert, horiz) = _node_in_link_ids((3, 4))
>>> vert
array([[-1, -1, -1, -1],
[ 3, 4, 5, 6],
[10, 11, 12, 13]])
>>> horiz
array([[-1, 0, 1, 2],
[-1, 7, 8, 9],
[-1, 14, 15, 16]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
node_horizontal_link_ids = np.empty(shape, int)
node_horizontal_link_ids[:, 1:] = layout.horizontal_links.reshape(
(shape[0], shape[1] - 1)
)
node_horizontal_link_ids[:, 0] = -1
node_vertical_link_ids = np.empty(shape, int)
node_vertical_link_ids[1:, :] = layout.vertical_links.reshape(
(shape[0] - 1, shape[1])
)
node_vertical_link_ids[0, :] = -1
return node_vertical_link_ids, node_horizontal_link_ids
def _number_of_links_per_node(shape):
"""Number of links touching each node.
Parameters
----------
shape : tuple of int
Shape of grid of nodes.
Returns
-------
ndarray :
Array of number of links per node.
Examples
--------
>>> from landlab.grid.raster_mappers import _number_of_links_per_node
>>> _number_of_links_per_node((3, 4))
array([[2, 3, 3, 2],
[3, 4, 4, 3],
[2, 3, 3, 2]])
"""
from ..graph.structured_quad.structured_quad import StructuredQuadGraphTopology
layout = StructuredQuadGraphTopology(shape)
n_links_at_node = np.full(shape[0] * shape[1], 4, int)
n_links_at_node[layout.perimeter_nodes] = 3
n_links_at_node[layout.corner_nodes] = 2
return n_links_at_node.reshape(shape)
def map_sum_of_inlinks_to_node(grid, var_name, out=None):
"""Map the sum of links entering a node to the node.
map_sum_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It sums the inlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_sum_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2., 3., 11., 13., 15., 10., 25., 27.,
29.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = values_at_links[south] + values_at_links[west]
return out
def map_mean_of_inlinks_to_node(grid, var_name, out=None):
"""Map the mean of links entering a node to the node.
map_mean_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the average of
the inlinks and returns values at the nodes.
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_mean_of_inlinks_to_node(rmg, 'z')
array([ 0. , 0. , 0.5, 1. , 1.5, 5.5, 6.5, 7.5, 5. ,
12.5, 13.5, 14.5])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = 0.5 * (values_at_links[south] + values_at_links[west])
return out
def map_max_of_inlinks_to_node(grid, var_name, out=None):
"""Map the maximum of links entering a node to the node.
map_max_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the maximum value of
the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_max_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_max_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2.,
3., 7., 8., 9.,
10., 14., 15., 16.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.maximum(values_at_links[south], values_at_links[west])
return out
def map_min_of_inlinks_to_node(grid, var_name, out=None):
"""Map the minimum of links entering a node to the node.
map_min_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the minimum value of
the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_min_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_min_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 0., 0., 0., 4., 5., 6., 0., 11., 12.,
13.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = _node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.minimum(values_at_links[south], values_at_links[west])
return out
def map_sum_of_outlinks_to_node(grid, var_name, out=None):
"""Map the sum of links leaving a node to the node.
map_sum_of_outlinks_to_node takes an array *at the links* and finds the
outlink values for each node in the grid. It sums the outlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field("z", np.arange(17.), at="link")
>>> map_sum_of_outlinks_to_node(rmg, 'z')
array([ 3., 5., 7., 6., 17., 19., 21., 13., 14., 15., 16.,
0.])
LLCATS: NINF LINF MAP
"""
if out is None:
out = grid.empty(centering="node")
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links =
|
np.append(values_at_links, 0)
|
numpy.append
|
import numpy as np
import pylab as pl
def plotGyroError(cself, iidx, fno=1, clearFigure=True, noShow=False):
errors = np.array([np.dot(re.error(), re.error()) for re in cself.ImuList[iidx].gyroErrors])
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle("imu{0}: angular velocities error".format(iidx))
pl.subplot(2, 1, 1)
pl.plot(errors)
pl.xlabel('error index')
pl.ylabel('error (rad/sec) squared')
pl.grid('on')
#only plot till 5*sigma (output formatting...)
sigma=np.std(errors)
errors = errors[ errors < 5*sigma ]
pl.subplot(2, 1, 2)
pl.hist(errors, len(errors)/100)
pl.xlabel('error ($rad/s$) squared')
pl.ylabel('error index')
pl.grid('on')
def plotGyroErrorPerAxis(cself, iidx, fno=1, clearFigure=True, noShow=False):
errors = np.array([re.error() for re in cself.ImuList[iidx].gyroErrors])
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle("imu{0}: angular velocities error".format(iidx))
for i in xrange(3):
pl.subplot(3, 1, i+1)
pl.plot(errors[:,i])
pl.xlabel('error index')
pl.ylabel('error ($rad/s$)')
pl.grid('on')
sigma = cself.ImuList[iidx].getImuConfig().getGyroStatistics()[0]
pl.plot(np.array([0., errors.shape[0]]), sigma * 3.* np.ones(2), 'r--')
pl.plot(np.array([0., errors.shape[0]]), -sigma * 3.* np.ones(2), 'r--')
pl.xlim([0., errors.shape[0]])
def plotAccelError(cself, iidx, fno=1, clearFigure=True, noShow=False):
errors = np.array([np.dot(re.error(), re.error()) for re in cself.ImuList[iidx].accelErrors])
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle("imu{0}: acceleration error".format(iidx))
pl.subplot(2, 1, 1)
pl.plot(errors)
pl.xlabel('error index')
pl.ylabel('(m/sec*sec) squared')
pl.grid('on')
#only plot till 5*sigma (output formatting...)
sigma=np.std(errors)
errors = errors[ errors < 5*sigma ]
pl.subplot(2, 1, 2)
pl.hist(errors, len(errors)/100)
pl.xlabel('($m/s^2$) squared')
pl.ylabel('Error Number')
pl.grid('on')
def plotAccelErrorPerAxis(cself, iidx, fno=1, clearFigure=True, noShow=False):
errors = np.array([re.error() for re in cself.ImuList[iidx].accelErrors])
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle("imu{0}: acceleration error".format(iidx))
for i in xrange(3):
pl.subplot(3, 1, i+1)
pl.plot(errors[:,i])
pl.xlabel('error index')
pl.ylabel('error ($m/s^2$)')
pl.grid('on')
sigma = cself.ImuList[iidx].getImuConfig().getAccelerometerStatistics()[0]
pl.plot(np.array([0, errors.shape[0]]), sigma * 3.* np.ones(2), 'r--')
pl.plot(np.array([0, errors.shape[0]]), -sigma * 3.* np.ones(2), 'r--')
pl.xlim([0., errors.shape[0]])
def plotAccelBias(cself, imu_idx, fno=1, clearFigure=True, noShow=False):
imu = cself.ImuList[imu_idx]
bias = imu.accelBiasDv.spline()
times = np.array([im.stamp.toSec() for im in imu.imuData if im.stamp.toSec() > bias.t_min() \
and im.stamp.toSec() < bias.t_max() ])
acc_bias_spline = np.array([bias.evalD(t,0) for t in times]).T
times = times - times[0] #remove time offset
plotVectorOverTime(times, acc_bias_spline,
title="imu{0}: estimated accelerometer bias (imu frame)".format(imu_idx),
ylabel="bias ($m/s^2$)",
fno=fno, clearFigure=clearFigure, noShow=noShow)
sigma_rw = cself.ImuList[imu_idx].getImuConfig().getAccelerometerStatistics()[1]
bounds = 3. * sigma_rw * np.sqrt(times)
for i in xrange(3):
pl.subplot(3, 1, i+1)
pl.plot(times, acc_bias_spline[i,0] + bounds, 'r--')
pl.plot(times, acc_bias_spline[i,0] - bounds, 'r--')
def plotAngularVelocityBias(cself, imu_idx, fno=1, clearFigure=True, noShow=False):
imu = cself.ImuList[imu_idx]
bias = imu.gyroBiasDv.spline()
times = np.array([im.stamp.toSec() for im in imu.imuData if im.stamp.toSec() > bias.t_min() \
and im.stamp.toSec() < bias.t_max() ])
gyro_bias_spline = np.array([bias.evalD(t,0) for t in times]).T
times = times - times[0] #remove time offset
plotVectorOverTime(times, gyro_bias_spline,
title="imu{0}: estimated gyro bias (imu frame)".format(imu_idx),
ylabel="bias ($rad/s$)",
fno=fno, clearFigure=clearFigure, noShow=noShow)
sigma_rw = cself.ImuList[imu_idx].getImuConfig().getGyroStatistics()[1]
bounds = 3. * sigma_rw * np.sqrt(times)
for i in xrange(3):
pl.subplot(3, 1, i+1)
pl.plot(times, gyro_bias_spline[i,0] + bounds, 'r--')
pl.plot(times, gyro_bias_spline[i,0] - bounds, 'r--')
#plots angular velocity of the body fixed spline versus all imu measurements
def plotAngularVelocities(cself, iidx, fno=1, clearFigure=True, noShow=False):
#predicted (over the time of the imu)
imu = cself.ImuList[iidx]
bodyspline = cself.poseDv.spline()
times = np.array([im.stamp.toSec() + imu.timeOffset for im in imu.imuData \
if im.stamp.toSec() + imu.timeOffset > bodyspline.t_min() \
and im.stamp.toSec() + imu.timeOffset < bodyspline.t_max() ])
predictedAng_body = np.array([err.getPredictedMeasurement() for err in imu.gyroErrors]).T
#transform the measurements to the body frame
#not necessary for imu0 as it is aligned with the spline
measuredAng_body = np.array([err.getMeasurement() for err in imu.gyroErrors]).T
#remove time offset
times = times - times[0]
#plot the predicted measurements
plotVectorOverTime(times, predictedAng_body,
title="Comparison of predicted and measured angular velocities (body frame)",
ylabel="ang. velocity ($rad/s$)",
label="est. bodyspline",
fno=fno, clearFigure=clearFigure, noShow=noShow, lw=3)
#plot measurements
for r in range(0,3):
ax=pl.subplot(3, 1, r+1)
pl.plot(times, measuredAng_body[r,:], 'x', lw=1, label="imu{0}".format(iidx))
pl.legend()
def plotAccelerations(cself, iidx, fno=1, clearFigure=True, noShow=False):
#predicted
imu = cself.ImuList[iidx]
bodyspline = cself.poseDv.spline()
times = np.array([im.stamp.toSec() + imu.timeOffset for im in imu.imuData \
if im.stamp.toSec() + imu.timeOffset > bodyspline.t_min() \
and im.stamp.toSec() + imu.timeOffset < bodyspline.t_max() ])
predicetedAccel_body = np.array([err.getPredictedMeasurement() for err in imu.accelErrors]).T
#transform accelerations from imu to body frame (on fixed body and geometry was estimated...)
#works for imu0 as it is aligned with the spline
#TODO(schneith): implement the fixed-body acceleration transformation
measuredAccel_imu = np.array([err.getMeasurement() for err in imu.accelErrors]).T
measuredAccel_body = measuredAccel_imu
#remove time offset
times = times - times[0]
#plot the predicted measurements
plotVectorOverTime(times, predicetedAccel_body,
title="Comparison of predicted and measured specific force (imu0 frame)",
ylabel="specific force ($m/s^2$)",
label="est. bodyspline",
fno=fno, clearFigure=clearFigure, noShow=noShow, lw=3)
#plot the measurements
for r in range(0,3):
ax=pl.subplot(3, 1, r+1)
pl.plot(times, measuredAccel_body[r,:], 'x', lw=1, label="imu{0}".format(iidx))
pl.legend()
def plotVectorOverTime(times, values, title="", ylabel="", label="", fno=1, clearFigure=True, noShow=False, lw=3):
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle(title)
for r in range(0,3):
pl.subplot(3, 1, r+1)
pl.plot(times, values[r,:], 'b-', lw=lw, label=label)
pl.grid('on')
pl.xlabel("time (s)")
pl.ylabel(ylabel)
if label is not "":
pl.legend()
def plotReprojectionScatter(cself, cam_id, fno=1, clearFigure=True, noShow=False, title=""):
cam = cself.CameraChain.camList[cam_id]
#create figure
f = pl.figure(fno)
if clearFigure:
f.clf()
f.suptitle(title)
numImages = len(cam.allReprojectionErrors)
values = np.arange(numImages)/np.double(numImages)
cmap = pl.cm.jet(values,alpha=0.5)
#reprojection errors scatter plot
for image_id, rerrs_image in enumerate(cam.allReprojectionErrors):
color = cmap[image_id,:]
rerrs = np.array([rerr.error() for rerr in rerrs_image])
pl.plot(rerrs[:,0], rerrs[:,1], 'x', lw=3, mew=3, color=color)
#A red uncertainty bound would be more consistent, but it is less well visible.
uncertainty_bound = pl.Circle((0,0), 3. * cam.cornerUncertainty, color='k', linestyle='dashed', \
fill=False, lw=2, zorder=len(cam.allReprojectionErrors))
f.gca().add_artist(uncertainty_bound)
pl.axis('equal')
pl.grid('on')
pl.xlabel('error x ($pix$)')
pl.ylabel('error y ($pix$)')
SM = pl.cm.ScalarMappable(pl.cm.colors.Normalize(0.0,numImages), pl.cm.jet)
SM.set_array(np.arange(numImages));
cb = pl.colorbar(SM)
cb.set_label('image index')
if not noShow:
pl.show()
class CameraPlot:
def __init__(self, fig, targetPoints, camSize):
self.initialized = False
#get the data
self.targetPoints = targetPoints
self.camSize = camSize
self.fig = fig
#setup the figure
self.setupFigure()
self.plot3Dgrid()
#initialize camerea
T = np.eye(4,4)
self.plot3DCamera(T)
def setupFigure(self):
#interactive mode
pl.ion()
self.ax = self.fig.add_subplot(111, projection='3d')  # 3D axes used below; assumed missing from this excerpt
#hack to enforce axis equal (matplotlib doesn't support that)
self.ax.set_aspect('equal')
MAX = 1
for direction in (-1, 1):
for point in np.diag(direction * MAX * np.array([1,1,1])):
self.ax.plot([point[0]], [point[1]], [point[2]], 'w')
self.fig.show()
def plot3Dgrid(self):
#draw target corners
for i in range(0, len(self.targetPoints) ):
self.ax.scatter(self.targetPoints[i,0], self.targetPoints[i,1], self.targetPoints[i,2],color="g",s=1)
self.ax.plot([0,self.targetPoints[-1,0]],[0,0],[0,0], color="r")
self.ax.plot([0,0],[0,self.targetPoints[-1,1]],[0,0], color="g")
self.ax.plot([0,0],[0,0],[0,self.targetPoints[-1,0]], color="b")
def plot3DCamera(self, T):
#transform affine
ori = T * np.matrix([[0],[0],[0],[1]])
v1 = T * np.matrix([[self.camSize],[0],[0],[1]])
v2 = T * np.matrix([[0],[self.camSize],[0],[1]])
v3 = T *
|
np.matrix([[0],[0],[self.camSize],[1]])
|
numpy.matrix
|
# coding=utf-8
import copy
import numpy as np
import numpy.matlib  # needed for the np.matlib.repmat(...) calls below
from numpy import sin, cos
from scipy.io import savemat, loadmat
from petsc4py import PETSc
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from src.support_class import *
import abc
from scipy.special import hyp2f1
from scipy import interpolate, integrate, optimize, sparse
from itertools import compress
from scipy.spatial.transform import Rotation as spR
from src.support_class import *
__all__ = ['base_geo', 'sphere_geo', 'ellipse_base_geo', 'ellipse_3d_geo',
'geoComposit',
'tunnel_geo', 'pipe_cover_geo', 'supHelix', 'FatHelix',
'lineOnFatHelix', 'ThickLine_base_geo',
'SelfRepeat_body_geo', 'SelfRepeat_FatHelix',
'infgeo_1d', 'infHelix', 'infPipe',
'slb_geo', 'slb_helix', 'Johnson_helix', 'expJohnson_helix', 'sphereEnd_helix',
'regularizeDisk', 'helicoid',
'_revolve_geo', 'revolve_pipe', 'revolve_ellipse',
'region', 'set_axes_equal']
class base_geo():
def __init__(self):
self._nodes = np.array([])
self._elems = np.array([])
self._elemtype = ' '
self._normal = np.array([]) # norm of surface at each point.
self._geo_norm = np.array((0, 0, 1)) # describing the aspect of the geo.
self._origin = np.array((0, 0, 0))
self._u = np.array([])
self._deltaLength = 0
self._dmda = None # dof management
self._stencil_width = 0 # --->>>if change in further version, deal with combine method.
self._glbIdx = np.array([]) # global indices
self._glbIdx_all = np.array([]) # global indices for all process.
self._selfIdx = np.array([]) # indices of _glbIdx in _glbIdx_all
self._dof = 3 # degrees of freedom per node.
self._type = 'general_geo' # geo type
def __str__(self):
return "%s(%r)" % (self.get_type(), id(self))
def mat_nodes(self, filename: str = '..',
mat_handle: str = 'nodes'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
filename = check_file_extension(filename, '.mat')
mat_contents = loadmat(filename)
nodes = mat_contents[mat_handle].astype(float, order='F')
err_msg = 'nodes is a n*3 numpy array containing x, y and z coordinates. '
assert nodes.shape[1] == 3, err_msg
self._nodes = nodes
self._u = np.zeros(self._nodes.size)
self.set_dmda()
return True
def mat_elmes(self, filename: str = '..',
mat_handle: str = 'elmes',
elemtype: str = ' '):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
elems = mat_contents[mat_handle].astype(int, order='F')
elems = elems - elems.min()
self._elems = elems
self._elemtype = elemtype
return True
def text_nodes(self, filename: str = '..'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
nodes = np.loadtxt(filename)
err_msg = 'nodes is a n*3 numpy array containing x, y and z coordinates. '
assert nodes.shape[1] == 3, err_msg
self._nodes = np.asfortranarray(nodes)
self._u = np.zeros(self._nodes.size)
self.set_dmda()
return True
def mat_origin(self, filename: str = '..',
mat_handle: str = 'origin'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
self._origin = mat_contents[mat_handle].astype(float)
return True
def mat_velocity(self, filename: str = '..',
mat_handle: str = 'U'):
err_msg = 'wrong mat file name. '
assert filename != '..', err_msg
mat_contents = loadmat(filename)
self._u = mat_contents[mat_handle].flatten()
return True
def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
rotM = get_rot_matrix(norm, theta)
return self.node_rotM(rotM=rotM, rotation_origin=rotation_origin)
def node_rotM(self, rotM, rotation_origin=None):
# The rotation is counterclockwise
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
self._nodes = np.dot(rotM, (self._nodes - rotation_origin).T).T + \
rotation_origin # The rotation is counterclockwise
self._origin = np.dot(rotM, (self._origin - rotation_origin)) + rotation_origin
self._geo_norm = np.dot(rotM, self._geo_norm) / np.linalg.norm(self._geo_norm)
return True
def coord_rotation(self, norm=np.array([0, 0, 1]), theta=0):
# TODO: check the direction.
assert 1 == 2
# theta = -theta # The rotation is counterclockwise
rotation = get_rot_matrix(norm, theta)
temp_u = self._u.reshape((3, -1), order='F')
self._u = rotation.dot(temp_u).T.flatten()
self._nodes = np.dot(rotation, self._nodes.T).T
self._origin = 000
self._geo_norm = 000
return True
def node_zoom(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes = (self._nodes - zoom_origin) * factor + zoom_origin
return True
def node_zoom_x(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 0] = (self._nodes[:, 0] - zoom_origin[0]) * factor + zoom_origin[0]
return True
def node_zoom_y(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 1] = (self._nodes[:, 1] - zoom_origin[1]) * factor + zoom_origin[1]
return True
def node_zoom_z(self, factor, zoom_origin=None):
if zoom_origin is None:
zoom_origin = self.get_origin()
self._nodes[:, 2] = (self._nodes[:, 2] - zoom_origin[2]) * factor + zoom_origin[2]
return True
def move(self, displacement: np.array):
displacement = np.array(displacement).reshape((3,))
self.set_nodes(self.get_nodes() + displacement, self.get_deltaLength())
self.set_origin(self.get_origin() + displacement)
return True
def mirrorImage(self, norm=np.array([0, 0, 1]), rotation_origin=None):
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
norm = norm / np.linalg.norm(norm)
nodes = self.get_nodes()
dist = nodes - rotation_origin
parallel = np.einsum('i,j', np.einsum('ij,j', dist, norm), norm)
perpendicular = dist - parallel
dist2 = perpendicular + (-1 * parallel)
nodes2 = dist2 + rotation_origin
self.set_nodes(nodes2, self.get_deltaLength())
return True
def combine(self, geo_list, deltaLength=None, origin=None, geo_norm=None):
if len(geo_list) == 0:
return False
for geo1 in geo_list:
err_msg = 'some objects in geo_list are not geo object. %s' % str(type(geo1))
assert isinstance(geo1, base_geo), err_msg
err_msg = 'one or more objects have not been created yet. '
assert geo1.get_n_nodes() != 0, err_msg
if deltaLength is None:
deltaLength = geo_list[0].get_deltaLength()
if origin is None:
origin = geo_list[0].get_origin()
if geo_norm is None:
geo_norm = geo_list[0].get_geo_norm()
geo1 = geo_list.pop(0)
self.set_nodes(geo1.get_nodes(), deltalength=deltaLength)
self.set_velocity(geo1.get_velocity())
for geo1 in geo_list:
self.set_nodes(np.vstack((self.get_nodes(), geo1.get_nodes())), deltalength=deltaLength)
self.set_velocity(np.hstack((self.get_velocity(), geo1.get_velocity())))
self.set_dmda()
self._geo_norm = geo_norm
self.set_origin(origin)
return True
def get_nodes(self):
return self._nodes
def get_nodes_petsc(self):
nodes_petsc = self.get_dmda().createGlobalVector()
nodes_petsc[:] = self._nodes.reshape((3, -1))[:]
nodes_petsc.assemble()
return nodes_petsc
def set_nodes(self, nodes, deltalength, resetVelocity=False):
nodes = np.array(nodes).reshape((-1, 3), order='F')
self._nodes = nodes
self._deltaLength = deltalength
self.set_dmda()
if resetVelocity:
self._u = np.zeros(self._nodes.size)
return True
def get_nodes_x(self):
return self._nodes[:, 0]
def get_nodes_y(self):
return self._nodes[:, 1]
def get_nodes_z(self):
return self._nodes[:, 2]
def get_nodes_x_petsc(self):
x_petsc = self.get_dmda().createGlobalVector()
t_x = np.matlib.repmat(self._nodes[:, 0].reshape((-1, 1)), 1, 3).flatten()
x_petsc[:] = t_x[:]
x_petsc.assemble()
return x_petsc
def get_nodes_y_petsc(self):
y_petsc = self.get_dmda().createGlobalVector()
t_y = np.matlib.repmat(self._nodes[:, 1].reshape((-1, 1)), 1, 3).flatten()
y_petsc[:] = t_y[:]
y_petsc.assemble()
return y_petsc
def get_nodes_z_petsc(self):
z_petsc = self.get_dmda().createGlobalVector()
t_z = np.matlib.repmat(self._nodes[:, 2].reshape((-1, 1)), 1, 3).flatten()
z_petsc[:] = t_z[:]
z_petsc.assemble()
return z_petsc
def get_n_nodes(self):
return self._nodes.shape[0]
def get_n_velocity(self):
return self._u.size
def get_velocity(self):
return self._u.flatten()
def set_velocity(self, velocity):
err_msg = 'set nodes first. '
assert self._nodes.size != 0, err_msg
err_msg = 'velocity is a numpy array having a similar size of nodes. '
assert velocity.size == self._nodes.size, err_msg
self._u = velocity.flatten()
return True
def set_rigid_velocity(self, U=np.zeros(6), center=None):
"""
:type U: np.array
:param U: [u1, u2, u3, w1, w2, w3], velocity and angular velocity.
:type center: np.array
:param center: rotation center.
"""
if center is None:
center = self._origin
center = np.array(center)
err_msg = 'center is a np.array containing 3 scales. '
assert center.size == 3, err_msg
r = self._nodes - center
self._u = np.zeros(self._nodes.size)
self._u[0::3] = U[0] + U[4] * r[:, 2] - U[5] * r[:, 1]
self._u[1::3] = U[1] + U[5] * r[:, 0] - U[3] * r[:, 2]
self._u[2::3] = U[2] + U[3] * r[:, 1] - U[4] * r[:, 0]
return True
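The slicing assignments in set_rigid_velocity compute, for every node, u = [u1, u2, u3] + ω × (x − center) with ω = [w1, w2, w3]. A minimal vectorized check of the same formula using np.cross; the values are arbitrary and purely illustrative:

```python
import numpy as np

U = np.array([0.1, 0.0, 0.0, 0.0, 0.0, 1.0])  # [u1, u2, u3, w1, w2, w3]
nodes = np.random.rand(5, 3)                   # stand-in for self._nodes
center = np.zeros(3)
r = nodes - center
u = U[:3] + np.cross(U[3:], r)                 # row i matches the component-wise result above
```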
def get_velocity_x(self):
return self._u[0::3].flatten()
def get_velocity_y(self):
return self._u[1::3].flatten()
def get_velocity_z(self):
return self._u[2::3].flatten()
def get_polar_coord(self):
phi = np.arctan2(self.get_nodes_y(), self.get_nodes_x())
rho = np.sqrt(self.get_nodes_x() ** 2 + self.get_nodes_y() ** 2)
z = self.get_nodes_z()
return phi, rho, z
def get_normal(self):
return self._normal
def set_normal(self, normal):
self._normal = normal
return True
def get_geo_norm(self):
return self._geo_norm
def set_geo_norm(self, geo_norm):
geo_norm = np.array(geo_norm).ravel()
assert geo_norm.size == 3
self._geo_norm = geo_norm
return True
def get_origin(self):
return self._origin
def get_center(self):
return self.get_origin()
def set_origin(self, origin):
self._origin = np.array(origin).ravel()
assert self._origin.size == 3
return True
def set_center(self, origin):
return self.set_origin(origin=origin)
def get_deltaLength(self):
return self._deltaLength
def set_deltaLength(self, deltaLength):
self._deltaLength = deltaLength
return True
def copy(self) -> 'base_geo':
self.destroy_dmda()
geo2 = copy.deepcopy(self)
self.set_dmda()
geo2.set_dmda()
return geo2
def save_nodes(self, filename):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
filename = check_file_extension(filename, extension='.mat')
if rank == 0:
savemat(filename,
{'nodes': self.get_nodes()},
oned_as='column')
return True
def _show_velocity(self, length_factor=1, show_nodes=True):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
# Be careful. the axis using in matplotlib is a left-handed coordinate system
if show_nodes:
ax.plot(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
linestyle='None', c='b',
marker='o')
INDEX = np.zeros_like(self.get_nodes_z(), dtype=bool)
INDEX[:] = True
length = 1 / np.mean(self._deltaLength) * length_factor
ax.quiver(self.get_nodes_x()[INDEX], self.get_nodes_y()[INDEX],
self.get_nodes_z()[INDEX],
self.get_velocity_x()[INDEX], self.get_velocity_y()[INDEX],
self.get_velocity_z()[INDEX],
color='r', length=length)
# ax.quiver(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
# 0, 0, self.get_nodes_z(), length=self._deltaLength * 2)
X = np.hstack((self.get_nodes_x()))
Y = np.hstack((self.get_nodes_y()))
Z = np.hstack((self.get_nodes_z()))
max_range = np.array(
[X.max() - X.min(), Y.max() - Y.min(), Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
else:
fig = None
return fig
def show_velocity(self, length_factor=1, show_nodes=True):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self._show_velocity(length_factor=length_factor, show_nodes=show_nodes)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def core_show_nodes(self, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
ax.plot(self.get_nodes_x(), self.get_nodes_y(), self.get_nodes_z(),
linestyle=linestyle,
color='b',
marker=marker)
X = np.hstack((self.get_nodes_x()))
Y = np.hstack((self.get_nodes_y()))
Z = np.hstack((self.get_nodes_z()))
max_range = np.array([X.max() - X.min(),
Y.max() - Y.min(),
Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
ax.set_xlim(mid_x - max_range, mid_x + max_range)
ax.set_ylim(mid_y - max_range, mid_y + max_range)
ax.set_zlim(mid_z - max_range, mid_z + max_range)
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
else:
fig = None
return fig
def show_nodes(self, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def png_nodes(self, finename, linestyle='-', marker='.'):
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
finename = check_file_extension(finename, '.png')
fig = self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
fig.set_size_inches(18.5, 10.5)
fig.savefig(finename, dpi=100)
plt.close()
return True
def get_mesh(self):
return self._elems, self._elemtype
def get_dmda(self):
return self._dmda
def set_dmda(self):
if self.get_dmda() is not None:
self._dmda.destroy()
if not hasattr(self, '_dof'):
self._dof = 3
self._dmda = PETSc.DMDA().create(sizes=(self.get_n_nodes(),), dof=self._dof,
stencil_width=self._stencil_width, comm=PETSc.COMM_WORLD)
self._dmda.setFromOptions()
self._dmda.setUp()
# self._dmda.createGlobalVector()
return True
def destroy_dmda(self):
self._dmda.destroy()
self._dmda = None
return True
def get_dof(self):
return self._dof
def set_dof(self, dof):
self._dof = dof
return True
def set_glbIdx(self, indices):
comm = PETSc.COMM_WORLD.tompi4py()
self._glbIdx = indices
self._glbIdx_all = np.hstack(comm.allgather(indices))
self._selfIdx = np.searchsorted(self._glbIdx_all, self._glbIdx)
return True
def set_glbIdx_all(self, indices):
self._glbIdx = []
self._selfIdx = []
self._glbIdx_all = indices
return True
def get_glbIdx(self):
return self._glbIdx, self._glbIdx_all
def get_selfIdx(self):
return self._selfIdx
# def _heaviside(self, n, factor):
# f = lambda x: 1 / (1 + np.exp(-factor * x))
# x = np.linspace(-0.5, 0.5, n)
# return (f(x) - f(-0.5)) / (f(0.5) - f(-0.5))
def get_type(self):
return self._type
def print_info(self):
PETSc.Sys.Print(' %s: norm %s, center %s' %
(str(self), str(self.get_geo_norm()), str(self.get_center())))
return True
def pickmyself_prepare(self):
if not self._dmda is None:
self.destroy_dmda()
return True
class geoComposit(uniqueList):
def __init__(self, liste=[]):
acceptType = base_geo
super().__init__(acceptType=acceptType)
liste = list(tube_flatten((liste,)))
for geoi in liste:
self.append(geoi)
def core_show_nodes(self, linestyle='-', marker='.'):
color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k', ]
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
if len(self) == 0:
return False
if rank == 0:
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.set_aspect('equal')
xlim_list = np.zeros((len(self), 2))
ylim_list = np.zeros((len(self), 2))
zlim_list = np.zeros((len(self), 2))
for i0, geo0 in enumerate(self):
if geo0.get_n_nodes() > 0:
ax.plot(geo0.get_nodes_x(), geo0.get_nodes_y(), geo0.get_nodes_z(),
linestyle=linestyle,
color=color_list[i0 % len(color_list)],
marker=marker)
X = np.hstack((geo0.get_nodes_x()))
Y = np.hstack((geo0.get_nodes_y()))
Z = np.hstack((geo0.get_nodes_z()))
max_range = np.array([X.max() - X.min(),
Y.max() - Y.min(),
Z.max() - Z.min()]).max() / 2.0
mid_x = (X.max() + X.min()) * 0.5
mid_y = (Y.max() + Y.min()) * 0.5
mid_z = (Z.max() + Z.min()) * 0.5
xlim_list[i0] = (mid_x - max_range, mid_x + max_range)
ylim_list[i0] = (mid_y - max_range, mid_y + max_range)
zlim_list[i0] = (mid_z - max_range, mid_z + max_range)
else:
xlim_list[i0] = (np.nan, np.nan)
ylim_list[i0] = (np.nan, np.nan)
zlim_list[i0] = (np.nan, np.nan)
ax.set_xlim(np.nanmin(xlim_list), np.nanmax(xlim_list))
ax.set_ylim(np.nanmin(ylim_list), np.nanmax(ylim_list))
ax.set_zlim(np.nanmin(zlim_list), np.nanmax(zlim_list))
ax.set_xlabel('$x_1$', size='xx-large')
ax.set_ylabel('$x_2$', size='xx-large')
ax.set_zlabel('$x_3$', size='xx-large')
set_axes_equal(ax)
else:
fig = None
return fig
def show_nodes(self, linestyle='-', marker='.'):
if len(self) == 0:
return False
comm = PETSc.COMM_WORLD.tompi4py()
rank = comm.Get_rank()
self.core_show_nodes(linestyle=linestyle, marker=marker)
if rank == 0:
plt.grid()
# plt.get_current_fig_manager().window.showMaximized()
plt.show()
return True
def move(self, displacement: np.array):
if len(self) == 0:
return False
else:
for sub_geo in self:
sub_geo.move(displacement=displacement)
return True
class ThickLine_base_geo(base_geo):
def __init__(self):
super().__init__()
self._r = 0 # radius of the thick line itself; the cross-section is a circle.
self._dth = 0 # angular spacing between nodes in a circle.
self._axisNodes = np.array([]).reshape((-1, 3))
self._frenetFrame = (np.array([]).reshape((-1, 3)),
np.array([]).reshape((-1, 3)),
np.array([]).reshape((-1, 3)))
self._iscover = [] # start: -1, body: 0, end: 1
self._with_cover = 0
self._factor = 1e-5
self._left_hand = False
self._check_epsilon = True
self._type = '_ThickLine_geo' # geo type
self._cover_strat_idx = np.array([])
self._body_idx_list = []
self._cover_end_idx = np.array([])
self._local_rot = True # special parameter for selfrepeat_geo
self._node_axisNode_idx = []
def set_check_epsilon(self, check_epsilon):
self._check_epsilon = check_epsilon
return True
def get_check_epsilon(self):
return self._check_epsilon
def _get_theta(self):
def eqr(dth, ds, r):
return (ds / (2 * r)) ** 2 + np.sin(dth / 4) ** 2 - np.sin(dth / 2) ** 2
from scipy import optimize as sop
self._dth = sop.brentq(eqr, -1e-3 * np.pi, np.pi, args=(self.get_deltaLength(), self._r))
return self._dth
def _get_deltalength(self):
# dl = 2 * self._r * np.sqrt(np.sin(self._dth / 2) ** 2 - np.sin(self._dth / 4) ** 2)
dl = 2 * self._r * np.sin(self._dth / 2)
self.set_deltaLength(dl)
return dl
@abc.abstractmethod
def _get_axis(self):
return
@abc.abstractmethod
def _get_fgeo_axis(self, epsilon):
return
@abc.abstractmethod
def _body_pretreatment(self, nodes, **kwargs):
return
@abc.abstractmethod
def _strat_pretreatment(self, nodes, **kwargs):
return
@abc.abstractmethod
def _end_pretreatment(self, nodes, **kwargs):
return
def _create_deltatheta(self, dth: float, # delta theta of the cycle for the mesh
radius: float, # radius of the cycle
epsilon=0, with_cover=0, local_rot=True):
# the tunnel is along z axis
err_msg = 'dth must be less than pi'
assert dth < np.pi, err_msg
self._dth = dth
self._r = radius
self._with_cover = with_cover
deltalength = self._get_deltalength()
nc = np.ceil(2 * np.pi / dth).astype(int)
angleCycle = np.linspace(0, 2 * np.pi, nc, endpoint=False)
axisNodes, T_frame, N_frame, B_frame = self._get_axis()
fgeo_axisNodes, fgeo_T_frame, fgeo_N_frame, fgeo_B_frame = self._get_fgeo_axis(epsilon)
iscover = []
vgeo_nodes = []
fgeo_nodes = []
epsilon = (radius + epsilon * deltalength) / radius
if self.get_check_epsilon():
err_msg = 'epsilon > %f. ' % (-radius / deltalength)
assert epsilon > 0, err_msg
ai_para = 0
t_node_idx = 0
local_rot = self._local_rot
self._node_axisNode_idx = []
self._body_idx_list = []
# cover at start
if with_cover == 1:
# old version, cover is a plate.
nc = np.ceil((radius - deltalength) / deltalength).astype(int)
ri = np.linspace(deltalength / 2, radius, nc, endpoint=False)
# self
tidx = 0
for i0 in np.arange(0, nc):
ai_para = ai_para + 1 if local_rot else 0
ni = np.ceil(2 * np.pi * ri[i0] / deltalength).astype(int)
ai = np.linspace(0, 2 * np.pi, ni, endpoint=False) + (-1) ** ai_para * dth / 4
iscover.append(np.ones_like(ai) * -1)
nodes_cycle = np.vstack(
(np.cos(ai) * ri[i0], np.sin(ai) * ri[i0], np.zeros_like(ai))).T
t_nodes = axisNodes[0] + np.dot(nodes_cycle,
np.vstack((N_frame[0], B_frame[0],
np.zeros_like(T_frame[0]))))
vgeo_nodes.append(t_nodes)
tidx = tidx + t_nodes.shape[0]
tf_nodes = fgeo_axisNodes[0] + np.dot(nodes_cycle * epsilon,
np.vstack((N_frame[0], B_frame[0],
np.zeros_like(T_frame[0]))))
fgeo_nodes.append(tf_nodes)
self._strat_pretreatment(t_nodes)
self._cover_strat_idx = np.arange(len(vgeo_nodes))
t_node_idx = self._cover_strat_idx[-1] + 1 if self._cover_strat_idx.size > 0 else 0
self._node_axisNode_idx.append(np.zeros(tidx))
elif with_cover == 2:
# 20170929, new version, cover is a hemisphere
vhsgeo = sphere_geo()
vhsgeo.create_half_delta(deltalength, radius)
vhsgeo.node_rotation((1, 0, 0), np.pi / 2 + ai_para)
t_nodes = axisNodes[0] + np.dot(vhsgeo.get_nodes(),
np.vstack((-T_frame[0], N_frame[0], B_frame[0])))
vgeo_nodes.append(t_nodes)
self._cover_strat_idx = np.arange(t_nodes.shape[0]) + t_node_idx
t_node_idx = self._cover_strat_idx[-1] + 1
fhsgeo = vhsgeo.copy()
# fhsgeo.show_nodes()
fhsgeo.node_zoom(epsilon)
# fhsgeo.show_nodes()
tf_nodes = fgeo_axisNodes[0] + np.dot(fhsgeo.get_nodes(),
np.vstack((-T_frame[0], N_frame[0], B_frame[0])))
fgeo_nodes.append(tf_nodes)
self._strat_pretreatment(t_nodes)
iscover.append(np.ones(vhsgeo.get_n_nodes()) * -1)
self._node_axisNode_idx.append(np.zeros(vhsgeo.get_n_nodes()))
# body
for i0, nodei_line in enumerate(axisNodes):
ai_para = ai_para + 1 if local_rot else 0
ai = angleCycle + (-1) ** ai_para * dth / 4
nodes_cycle = np.vstack((np.cos(ai) * radius, np.sin(ai) * radius, np.zeros_like(ai))).T
t_nodes = nodei_line + np.dot(nodes_cycle,
np.vstack((N_frame[i0], B_frame[i0],
np.zeros_like(T_frame[i0]))))
vgeo_nodes.append(t_nodes)
self._body_idx_list.append(np.arange(t_nodes.shape[0]) + t_node_idx)
t_node_idx = self._body_idx_list[-1][-1] + 1
iscover.append(np.zeros_like(ai))
nodes_cycle = np.vstack(
(np.cos(ai) * radius, np.sin(ai) * radius, np.zeros_like(ai))).T * epsilon
tf_nodes = fgeo_axisNodes[i0] + np.dot(nodes_cycle, np.vstack(
(fgeo_N_frame[i0], fgeo_B_frame[i0], np.zeros_like(fgeo_T_frame[i0]))))
fgeo_nodes.append(tf_nodes)
self._body_pretreatment(t_nodes)
self._node_axisNode_idx.append(np.ones(ai.size) * i0)
self._body_idx_list = np.array(self._body_idx_list)
# cover at end
if with_cover == 1:
# old version, cover is a plate.
nc = np.ceil((radius - deltalength) / deltalength).astype(int)
ri = np.linspace(deltalength / 2, radius, nc, endpoint=False)[-1::-1]
tidx = 0
for i0 in np.arange(0, nc):
ai_para = ai_para + 1 if local_rot else 0
ni = np.ceil(2 * np.pi * ri[i0] / deltalength).astype(int)
ai = np.linspace(0, 2 * np.pi, ni, endpoint=False) + (-1) ** ai_para * dth / 4
iscover.append(np.ones_like(ai))
nodes_cycle = np.vstack(
(np.cos(ai) * ri[i0], np.sin(ai) * ri[i0], np.zeros_like(ai))).T
t_nodes = axisNodes[-1] + np.dot(nodes_cycle,
np.vstack((N_frame[-1], B_frame[-1],
np.zeros_like(T_frame[-1]))))
vgeo_nodes.append(t_nodes)
tidx = tidx + t_nodes.shape[0]
tf_nodes = fgeo_axisNodes[-1] + np.dot(nodes_cycle * epsilon, np.vstack(
(fgeo_N_frame[-1], fgeo_B_frame[-1], np.zeros_like(fgeo_T_frame[-1]))))
fgeo_nodes.append(tf_nodes)
self._end_pretreatment(t_nodes)
self._cover_end_idx = np.arange(len(vgeo_nodes) - t_node_idx) + t_node_idx
self._node_axisNode_idx.append(np.ones(tidx) * (axisNodes.shape[0] - 1))
elif with_cover == 2:
# 20170929, new version, cover is a hemisphere
vhsgeo = sphere_geo()
vhsgeo.create_half_delta(deltalength, radius)
vhsgeo.node_rotation((1, 0, 0), -np.pi / 2 - ai_para)
t_nodes = axisNodes[-1] + np.dot(vhsgeo.get_nodes(),
np.vstack((T_frame[-1], N_frame[-1], B_frame[-1])))
vgeo_nodes.append(np.flipud(t_nodes))
self._cover_end_idx = np.arange(t_nodes.shape[0]) + t_node_idx
fhsgeo = vhsgeo.copy()
fhsgeo.node_zoom(epsilon)
tf_nodes = fgeo_axisNodes[-1] + np.dot(fhsgeo.get_nodes(),
np.vstack(
(T_frame[-1], N_frame[-1], B_frame[-1])))
fgeo_nodes.append(np.flipud(tf_nodes))
self._end_pretreatment(t_nodes)
iscover.append(np.ones(vhsgeo.get_n_nodes()))
self._node_axisNode_idx.append(np.ones(vhsgeo.get_n_nodes()) * (axisNodes.shape[0] - 1))
self._iscover = np.hstack(iscover)
self._nodes = np.asfortranarray(np.vstack(vgeo_nodes))
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._node_axisNode_idx = np.hstack(self._node_axisNode_idx).astype('int')
fgeo = self.copy()
# fgeo.set_dof(self.get_dof())
fgeo.set_nodes(np.asfortranarray(np.vstack(fgeo_nodes)), deltalength=deltalength * epsilon,
resetVelocity=True)
return fgeo
def get_iscover(self):
return self._iscover
def _factor_fun(self, n, factor):
err_msg = 'factor must be positive'
assert factor > 0, err_msg
if np.abs(factor - 1) < 0.01:
y = np.linspace(0, 1, n)
else:
f1 = lambda x: (np.exp(x * factor) - 1) / (2 * (np.exp(0.5 * factor) - 1))
f2 = lambda x: np.log(2 * (np.exp(0.5 / factor) - 1) * x + 1) * factor
x = np.linspace(-0.5, 0.5, n)
y1 = np.sign(x) * f1(np.abs(x)) + 0.5
y2 = np.sign(x) * f2(np.abs(x)) + 0.5
y = (y1 * factor + y2 / factor) / (y1[-1] * factor + y2[-1] / factor)
return y
@property
def axisNodes(self):
return self._axisNodes
@property
def frenetFrame(self):
return self._frenetFrame
@property
def cover_strat_idx(self):
return self._cover_strat_idx
@property
def body_idx_list(self):
return self._body_idx_list
@property
def cover_end_idx(self):
return self._cover_end_idx
@property
def with_cover(self):
return self._with_cover
@property
def cover_start_nodes(self):
return self.get_nodes()[self.cover_strat_idx]
@property
def body_nodes_list(self):
return [self.get_nodes()[tidx] for tidx in self.body_idx_list]
@property
def cover_end_nodes(self):
return self.get_nodes()[self.cover_end_idx]
@property
def node_axisNode_idx(self):
return self._node_axisNode_idx
@property
def left_hand(self):
return self._left_hand
# def node_rotation(self, norm=np.array([0, 0, 1]), theta=0, rotation_origin=None):
# # The rotation is counterclockwise
# super().node_rotation(norm, theta, rotation_origin)
#
# if rotation_origin is None:
# rotation_origin = self.get_origin()
# else:
# rotation_origin = np.array(rotation_origin).reshape((3,))
#
# rotation = get_rot_matrix(norm, theta)
# t_axisNodes = self._axisNodes
# self._axisNodes = np.dot(rotation, (self._axisNodes - rotation_origin).T).T + \
# rotation_origin # The rotation is counterclockwise
# t0 = []
# for i0 in range(3):
# t1 = []
# for t2, taxis0, taxis in zip(self._frenetFrame[i0], t_axisNodes, self._axisNodes):
# t2 = np.dot(rotation, (t2 + taxis0 - rotation_origin)) \
# + rotation_origin - taxis
# t2 = t2 / np.linalg.norm(t2)
# t1.append(t2)
# t0.append(np.vstack(t1))
# self._frenetFrame = t0
# return True
def node_rotM(self, rotM, rotation_origin=None):
# The rotation is counterclockwise
super().node_rotM(rotM, rotation_origin)
if rotation_origin is None:
rotation_origin = self.get_origin()
else:
rotation_origin = np.array(rotation_origin).reshape((3,))
t_axisNodes = self._axisNodes
self._axisNodes = np.dot(rotM, (self._axisNodes - rotation_origin).T).T + \
rotation_origin # The rotation is counterclockwise
t0 = []
for i0 in range(3):
t1 = []
for t2, taxis0, taxis in zip(self._frenetFrame[i0], t_axisNodes, self._axisNodes):
t2 = np.dot(rotM, (t2 + taxis0 - rotation_origin)) \
+ rotation_origin - taxis
t2 = t2 / np.linalg.norm(t2)
t1.append(t2)
t0.append(np.vstack(t1))
self._frenetFrame = t0
return True
def move(self, displacement: np.array):
super().move(displacement)
displacement = np.array(displacement).reshape((3,))
self._axisNodes = self._axisNodes + displacement
return True
def nodes_local_coord(self, nodes, axis_idx):
tnode_line = self.axisNodes[axis_idx]
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
tfnodes_local = np.dot((nodes - tnode_line), np.vstack((tN, tB, tT)).T)
return tfnodes_local
def selfnodes_local_coord(self, axis_idx):
nodes = self.get_nodes()[self.body_idx_list[axis_idx]]
return self.nodes_local_coord(nodes, axis_idx)
def force_local_coord(self, force, axis_idx):
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
tfi_local = np.dot(force, np.vstack((tN, tB, tT)).T)
return tfi_local
def frenetFrame_local(self, axis_idx):
tT = self.frenetFrame[0][axis_idx]
tN = self.frenetFrame[1][axis_idx]
tB = self.frenetFrame[2][axis_idx]
return tT, tN, tB
class ellipse_base_geo(base_geo):
def __init__(self):
super().__init__()
self._type = 'ellipse_geo' # geo type
def create_n(self, n: int, # number of nodes.
headA: float, # major axis = 2*headA
headC: float): # minor axis = 2*headC
err_msg = 'both major and minor axes should be positive. '
assert headA > 0 and headC > 0, err_msg
jj = np.arange(n)
xlocH = -1 + 2 * jj / (n - 1)
numf = 0.5
prefac = 3.6 * np.sqrt(headC / headA)
spherePhi = np.ones(n)
for i0 in range(0, n):
if i0 == 0 or i0 == n - 1:
spherePhi[i0] = 0
else:
tr = np.sqrt(1 - xlocH[i0] ** 2)
wgt = prefac * (1 - numf * (1 - tr)) / tr
spherePhi[i0] = (spherePhi[i0 - 1] + wgt / np.sqrt(n)) % (2 * np.pi)
tsin = np.sqrt(1 - xlocH ** 2)
self._nodes = np.zeros((n, 3), order='F')
self._nodes[:, 0] = headC * xlocH
self._nodes[:, 1] = headA * tsin * np.cos(spherePhi)
self._nodes[:, 2] = headA * tsin * np.sin(spherePhi)
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((n, 2), order='F')
return True
def create_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
# fit arc length as a function F of theta using a 2-degree polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# # dbg
# plt.plot(theta, arcl, '.')
# plt.plot(theta, func(theta, popt[0], popt[1]))
# plt.show()
# assert 1 == 2
# divided arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# do something to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of ellipse is uncertain. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_theta = np.hstack((t_theta0, np.pi / 2, np.pi - t_theta0[::-1]))
t_x = a * np.cos(t_theta)
t_y = b * np.sin(t_theta)
# generate nodes.
x = []
y = []
z = []
ai_para = 0
for xi, yi in zip(t_x, t_y):
ai_para = ai_para + 1
ni = np.ceil(2 * np.pi * yi / ds).astype(int)
ai, da = np.linspace(0, 2 * np.pi, ni, endpoint=False, retstep=True)
ai = ai + (-1) ** ai_para * da / 4 + np.sign(xi) * np.pi / 2
x.append(xi * np.ones_like(ai))
y.append(np.sign(xi) * yi * np.cos(ai))
z.append(np.sign(xi) * yi * np.sin(ai))
self._nodes = np.vstack((np.hstack(x), np.hstack(y), np.hstack(z))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
def create_half_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
# fit arc length as a function F of theta using a 2-degree polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# # dbg
# plt.plot(theta, arcl, '.')
# plt.plot(theta, func(theta, popt[0], popt[1]))
# plt.show()
# assert 1 == 2
# divided arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# do something to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of ellipse is uncertain. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_x = a * np.cos(t_theta0)
t_y = b * np.sin(t_theta0)
# generate nodes.
x = []
y = []
z = []
ai_para = 0
for xi, yi in zip(t_x, t_y):
ai_para = ai_para + 1
ni = np.ceil(2 * np.pi * yi / ds).astype(int)
ai, da = np.linspace(0, 2 * np.pi, ni, endpoint=False, retstep=True)
ai = ai + (-1) ** ai_para * da / 4 + np.sign(xi) * np.pi / 2
x.append(xi * np.ones_like(ai))
y.append(np.sign(xi) * yi * np.cos(ai))
z.append(np.sign(xi) * yi * np.sin(ai))
self._nodes = np.vstack((np.hstack(x), np.hstack(y), np.hstack(z))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
return True
class ellipse_3d_geo(base_geo):
def __init__(self):
super().__init__()
self._type = 'ellipse_3d_geo' # geo type
def create_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b1: float, b2: float): # axis2 = 2*b
tgeo = ellipse_base_geo()
tgeo.create_delta(ds, a, b1)
tnode = tgeo.get_nodes()
tnode[:, 2] = tnode[:, 2] / b1 * b2
self._deltaLength = ds
self._nodes = tnode
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
class sphere_geo(ellipse_base_geo):
def __init__(self):
super().__init__()
self._type = 'sphere_geo' # geo type
def create_n(self, n: int, # number of nodes.
radius: float, *args): # radius
        err_msg = 'additional parameters are not used. '
assert not args, err_msg
self._deltaLength = np.sqrt(4 * np.pi * radius * radius / n)
return super().create_n(n, radius, radius)
def create_delta(self, deltaLength: float, # length of the mesh
radius: float, *args): # radius
        err_msg = 'additional parameters are not used. '
assert not args, err_msg
return super().create_delta(deltaLength, radius, radius)
def create_half_delta(self, ds: float, # length of the mesh
a: float, *args):
        err_msg = 'additional parameters are not used. '
assert not args, err_msg
return super().create_half_delta(ds, a, a)
def normal(self):
self._normal = np.zeros((self._nodes.shape[0],
2)) # {Sin[a] Sin[b], -Cos[a] Sin[b], Cos[b]} = {n1, n2, n3} is the normal vector
normal_vector = self._nodes / np.sqrt(
self._nodes[:, 0] ** 2 + self._nodes[:, 1] ** 2 + self._nodes[:, 2] ** 2).reshape(
self._nodes.shape[0],
1)
self._normal[:, 1] = np.arccos(normal_vector[:, 2]) # b
self._normal[:, 0] = np.arcsin(normal_vector[:, 0] / np.sin(self._normal[:, 1])) # a
return True
# noinspection PyUnresolvedReferences
class tunnel_geo(ThickLine_base_geo):
def __init__(self):
super().__init__()
self._length = 0
self._cover_strat_list = []
self._cover_end_list = []
self._type = 'tunnel_geo' # geo type
def create_n(self, n: int, # number of nodes.
length: float, # length of the tunnel
radius: float): # radius of the tunnel
deltaLength = np.sqrt(2 * np.pi * radius * length / n)
self._deltaLength = deltaLength
deltaTheta = deltaLength / radius
# the geo is symmetrical
if n % 2: # if n is odd
n_half = int((n - 1) / 2)
theta = np.arange(-n_half, n_half + 1) * deltaTheta
else: # if n is even
n_half = int(n / 2)
theta = np.arange(-n_half, n_half) * deltaTheta + deltaTheta / 2
self._nodes = np.zeros((n, 3), order='F')
self._nodes[:, 0] = deltaLength * theta / 2 / np.pi
self._nodes[:, 1] = radius * np.sin(theta)
self._nodes[:, 2] = radius * np.cos(theta)
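        # the nodes trace a single helix around the x axis: one full turn in
        # theta advances x by exactly one mesh spacing (deltaLength).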
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((n, 2), order='F')
self._geo_norm = np.array((1, 0, 0))
return True
def create_deltalength(self, deltaLength: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
# the tunnel is along z axis
self._deltaLength = deltaLength
a = np.arange(0, 2 * np.pi - deltaLength / radius / 2, deltaLength / radius)
x, y = np.cos(a) * radius, np.sin(a) * radius
z = np.linspace(-length / 2, length / 2, num=np.ceil((length / deltaLength)).astype(int))
n_a, n_z = a.size, z.size
self._nodes = np.zeros((n_a * n_z, 3), order='F')
self._nodes[:, 0] = np.tile(z, n_a).reshape(n_a, -1).flatten(order='F')
self._nodes[:, 1] = np.tile(x, (n_z, 1)).reshape(-1, 1).flatten(order='F')
self._nodes[:, 2] = np.tile(y, (n_z, 1)).reshape(-1, 1).flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
return True
def create_deltatheta(self, dth: float, # delta theta of the cycle for the mesh
radius: float,
length: float,
epsilon=0,
with_cover=0,
factor=1,
left_hand=False):
self._length = length
self._factor = factor
self._left_hand = left_hand
self._geo_norm = np.array((0, 0, 1))
return self._create_deltatheta(dth, radius, epsilon, with_cover)
def _get_axis(self):
length = self._length
factor = self._factor
left_hand = self._left_hand
ds = self.get_deltaLength()
nl = np.ceil(length / ds).astype(int)
z = self._factor_fun(nl, factor) * length - length / 2
self._axisNodes = np.vstack((np.zeros_like(z), np.zeros_like(z), z)).T
if left_hand:
T_frame = np.vstack((np.zeros(nl), np.zeros(nl), np.ones(nl))).T # (0, 0, 1)
N_frame = np.vstack((np.ones(nl), np.zeros(nl), np.zeros(nl))).T # (1, 0, 0)
B_frame = np.vstack((np.zeros(nl), np.ones(nl), np.zeros(nl))).T # (0, 1, 0)
else:
T_frame = np.vstack((np.zeros(nl), np.zeros(nl), np.ones(nl))).T # (0, 0, 1)
N_frame = np.vstack((np.zeros(nl), np.ones(nl), np.zeros(nl))).T # (0, 1, 0)
B_frame = np.vstack((np.ones(nl), np.zeros(nl), np.zeros(nl))).T # (1, 0, 0)
self._frenetFrame = (T_frame, N_frame, B_frame)
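        # for a straight axis the Frenet frame is constant: T points along the
        # tunnel and N, B span the cross-section; left_hand only swaps N and B,
        # i.e. it flips the handedness of the frame.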
return self._axisNodes, self._frenetFrame[0], self._frenetFrame[1], self._frenetFrame[2]
def _get_fgeo_axis(self, epsilon):
length = self._length
factor = self._factor
nl = self._axisNodes.shape[0]
ds = -self.get_deltaLength() * epsilon / 4
z = self._factor_fun(nl, factor) * (length - ds * 2) - length / 2 + ds
axisNodes = np.vstack((np.zeros_like(z), np.zeros_like(z), z)).T
return axisNodes, self._frenetFrame[0], self._frenetFrame[1], self._frenetFrame[2]
def _strat_pretreatment(self, nodes, **kwargs):
def cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return rho, phi
r, ai = cart2pol(nodes[:, 0], nodes[:, 1])
self._cover_strat_list.append((np.mean(r), ai, np.mean(nodes[:, 2])))
return True
def _end_pretreatment(self, nodes, **kwargs):
def cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return (rho, phi)
r, ai = cart2pol(nodes[:, 0], nodes[:, 1])
self._cover_end_list.append((np.mean(r), ai, np.mean(nodes[:, 2])))
return True
def get_cover_start_list(self):
return self._cover_strat_list
def get_cover_end_list(self):
return self._cover_end_list
def normal(self):
self._normal = np.zeros((self._nodes.shape[0],
2)) # {Sin[a] Sin[b], -Cos[a] Sin[b], Cos[b]} = {n1, n2, n3} is the normal vector
normal_vector = -1 * self._nodes / np.sqrt(
self._nodes[:, 1] ** 2 + self._nodes[:, 2] ** 2).reshape(
self._nodes.shape[0], 1) # -1 means swap direction
self._normal[:, 1] = np.arccos(normal_vector[:, 2]) # b
self._normal[:, 0] = 0 # a
return True
def node_zoom_radius(self, factor):
def cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x)
return rho, phi
def pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
        # zoom the geometry along the tunnel radius while keeping the longitudinal axis unchanged.
# 1. copy
temp_geo = base_geo()
temp_nodes = self.get_nodes() - self.get_origin()
temp_geo.set_nodes(temp_nodes, self.get_deltaLength())
# temp_geo.show_nodes()
# 2. rotation, tunnel center line along x axis.
temp_norm = self._geo_norm
rotation_norm = np.cross(temp_norm, [1, 0, 0])
temp_theta = -np.arccos(temp_norm[0] / np.linalg.norm(temp_norm))
doRotation = (not np.array_equal(rotation_norm, np.array((0, 0, 0)))) and temp_theta != 0.
if doRotation:
temp_geo.node_rotation(rotation_norm, temp_theta)
# 3. zooming
temp_nodes = temp_geo.get_nodes()
temp_R, temp_phi = cart2pol(temp_nodes[:, 1], temp_nodes[:, 2])
temp_R = temp_R * factor
X1 = np.min(temp_nodes[:, 0])
X2 = np.max(temp_nodes[:, 0])
factor = (factor - 1) / 2 + 1
temp_nodes[:, 0] = (temp_nodes[:, 0] - (X1 + X2) / 2) * factor + (X1 + X2) / 2
temp_nodes[:, 1], temp_nodes[:, 2] = pol2cart(temp_R, temp_phi)
temp_geo.set_nodes(temp_nodes, self.get_deltaLength())
# 4. rotation back
if doRotation:
temp_geo.node_rotation(rotation_norm, -temp_theta)
# 5. set
# temp_geo.show_nodes()
self.set_nodes(temp_geo.get_nodes() + self.get_origin(), self.get_deltaLength())
return True
class _revolve_geo(base_geo):
def __init__(self):
super().__init__()
def create_full_geo(self, n_c):
        # rotate about the z axis
def rot_nodes(nodes):
r = nodes[:, 0]
z = nodes[:, 2]
theta = np.linspace(0, 2 * np.pi, n_c, endpoint=False)
x = np.outer(r, np.cos(theta)).flatten()
y = np.outer(r, np.sin(theta)).flatten()
z = np.outer(z, np.ones_like(theta)).flatten()
nodes = np.vstack((x, y, z)).T
return nodes
self.set_nodes(rot_nodes(self.get_nodes()), self.get_deltaLength(), resetVelocity=True)
return True
class revolve_ellipse(_revolve_geo):
def __init__(self):
super().__init__()
self._length = 0
self._radius = 0
self._type = 'revolve_ellipse'
def create_deltaz(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
epsilon1 = 1 / 3
epsilon2 = 0.3
        err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
n_2 = np.ceil(a / 2 / ds).astype(int)
dz = a / n_2
z0 = np.linspace(a - dz / 2, dz / 2, n_2)
z1 = np.hstack([z0, np.flipud(z0) * -1])
x1 = np.sqrt(b ** 2 * (1 - (z1 / a) ** 2))
# generate nodes.
self._nodes = np.zeros((x1.size, 3), order='F')
self._nodes[:, 0] = x1.flatten(order='F')
self._nodes[:, 2] = z1.flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# associated force geo
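        # (the force nodes below are the surface nodes pulled slightly inward:
        # by epsilon1*dz along z towards the mid-plane and by epsilon2-scaled
        # steps radially; presumably so the force points sit just inside the
        # physical surface)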
move_delta = dz * epsilon1
z0 = z0 - move_delta
z1 = np.hstack([z0, np.flipud(z0) * -1])
dx = x1 / b * dz * epsilon2
x1 = x1 - dx
f_geo = self.copy()
fnodes = np.vstack((x1, np.zeros_like(x1), z1)).T
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_half_deltaz(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float): # axis2 = 2*b
epsilon1 = 1 / 3
epsilon2 = 0.3
        err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
n_2 = np.ceil(a / 2 / ds).astype(int)
dz = a / n_2
z1 = np.linspace(a - dz / 2, dz / 2, n_2)
x1 = np.sqrt(b ** 2 * (1 - (z1 / a) ** 2))
# generate nodes.
self._nodes = np.zeros((x1.size, 3), order='F')
self._nodes[:, 0] = x1.flatten(order='F')
self._nodes[:, 2] = z1.flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# associated force geo
move_delta = dz * epsilon1
z1 = z1 - move_delta
dx = x1 / b * dz * epsilon2
x1 = x1 - dx
f_geo = self.copy()
fnodes = np.vstack((x1, np.zeros_like(x1), z1)).T
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float, # axis2 = 2*b
epsilon):
        err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
        # fit arc length as a function F of theta using a 2nd-degree polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# divided arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# do something to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of ellipse is uncertain. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_theta = np.hstack((t_theta0, np.pi / 2, np.pi - t_theta0[::-1]))
t_x = a * np.cos(t_theta)
t_y = b * np.sin(t_theta)
self._nodes = np.vstack((t_y, np.zeros_like(t_y), np.hstack(t_x))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# force geo
tfct = (a + epsilon * ds) / a
t_x = a * tfct * np.cos(t_theta)
t_y = b * tfct * np.sin(t_theta)
fnodes = np.vstack((t_y, np.zeros_like(t_y), np.hstack(t_x))).T
f_geo = self.copy()
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_half_delta(self, ds: float, # length of the mesh
a: float, # axis1 = 2*a
b: float, # axis2 = 2*b
epsilon):
        err_msg = 'both major and minor axes should be positive. '
assert a > 0 and b > 0, err_msg
self._deltaLength = ds
        # fit arc length as a function F of theta using a 2nd-degree polynomial
from scipy.special import ellipeinc
from scipy.optimize import curve_fit
func = lambda theta, a, b: a * theta ** 2 + b * theta
theta = np.linspace(0, np.pi / 2, 100)
arcl = b * ellipeinc(theta, 1 - (a / b) ** 2)
popt, _ = curve_fit(func, theta, arcl)
# divided arc length equally, and get theta using F^-1.
n = np.ceil(arcl[-1] / ds).astype(int)
t_arcl = np.linspace(0, arcl[-1], n, endpoint=False) + ds / 2
# do something to correct the fitting error.
while t_arcl[-1] > arcl[-1]:
t_arcl = t_arcl[:-1]
t_theta1 = (-popt[1] + np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
t_theta2 = (-popt[1] - np.sqrt(popt[1] ** 2 + 4 * popt[0] * t_arcl)) / (2 * popt[0])
b_theta1 = [a and b for a, b in zip(t_theta1 > 0, t_theta1 < np.pi / 2)]
b_theta2 = [a and b for a, b in zip(t_theta2 > 0, t_theta2 < np.pi / 2)]
err_msg = 'something is wrong, theta of ellipse is uncertain. '
assert all([a != b for a, b in zip(b_theta1, b_theta2)]), err_msg
t_theta0 = t_theta1 * b_theta1 + t_theta2 * b_theta2
t_x = a * np.cos(t_theta0)
t_y = b * np.sin(t_theta0)
self._nodes = np.vstack((t_y, np.zeros_like(t_y), np.hstack(t_x))).T
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# force geo
tfct = (a + epsilon * ds) / a
t_x = a * tfct * np.cos(t_theta0)
t_y = b * tfct * np.sin(t_theta0)
fnodes = np.vstack((t_y, np.zeros_like(t_y), np.hstack(t_x))).T
f_geo = self.copy()
f_geo.set_nodes(fnodes, 1)
return f_geo
class revolve_pipe(_revolve_geo):
def __init__(self):
super().__init__()
self._length = 0
self._radius = 0
self._type = 'revolve_pipe'
def create_deltaz(self, ds: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
epsilon_x = 1 / 2
epsilon_z = 1 / 3
cover_fct = 2
self._deltaLength = ds
self._length = length
self._radius = radius
# the tunnel is along z axis
        # due to the symmetry of the pipe, generate the first half and mirror it to obtain the other half.
z0 = np.linspace((length - ds) / 2, 0,
num=np.ceil((length / ds / 2)).astype(int))[1:]
z0 = z0 + ds / 2
x0 = np.ones_like(z0) * radius
# cover 1
x1 = np.linspace(0, radius, num=cover_fct * np.ceil((radius / ds)).astype(int))
z1 = np.ones_like(x1) * length / 2
        # half part
xi = np.hstack((x1, x0))
zi = np.hstack((z1, z0))
# all
x = np.hstack((xi, np.flipud(xi)))
z = np.hstack((zi, np.flipud(zi) * -1))
self._nodes = np.zeros((x.size, 3), order='F')
self._nodes[:, 0] = x.flatten(order='F')
self._nodes[:, 1] = np.zeros_like(x).flatten(order='F')
self._nodes[:, 2] = z.flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# associated force geo
f_geo = self.copy()
epsilon_x = epsilon_x / cover_fct
a = (radius - ds * epsilon_x * 2) / radius
b = ds * epsilon_x
z0 = z0 - ds * epsilon_z
x0 = a * x0 + b
x1 = a * x1 + b
z1 = np.ones_like(x1) * length / 2 - ds * epsilon_z
        # half part
xi = np.hstack((x1, x0))
zi = np.hstack((z1, z0))
# all
x = np.hstack((xi, np.flipud(xi)))
z = np.hstack((zi, np.flipud(zi) * -1))
fnodes = np.vstack((x, np.zeros_like(x), z)).T
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_half_deltaz(self, ds: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
epsilon_x = 1 / 2
epsilon_z = 1 / 2
cover_fct = 1.5
self._deltaLength = ds
self._length = length
self._radius = radius
# the tunnel is along z axis
z0 = np.linspace(length / 2, ds / 2, num=np.ceil(length / ds / 2).astype(int))[1:]
x0 = np.ones_like(z0) * radius
# cover
x1 = np.linspace(0, radius, num=np.ceil(cover_fct * radius / ds).astype(int))
z1 = np.ones_like(x1) * length / 2
# half part
xi = np.hstack((x1, x0))
zi = np.hstack((z1, z0))
self._nodes = np.zeros((xi.size, 3), order='F')
self._nodes[:, 0] = xi.flatten(order='F')
self._nodes[:, 1] = np.zeros_like(xi).flatten(order='F')
self._nodes[:, 2] = zi.flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# associated force geo
f_geo = self.copy()
epsilon_x = epsilon_x / cover_fct
a = (radius - ds * epsilon_x * 2) / radius
b = ds * epsilon_x
z0 = z0 - ds * epsilon_z
x0 = a * x0 + b
x1 = a * x1 + b
z1 = np.ones_like(x1) * length / 2 - ds * epsilon_z
# half part
xi = np.hstack((x1, x0))
zi = np.hstack((z1, z0))
fnodes = np.vstack((xi, np.zeros_like(xi), zi)).T
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_half_deltaz_v2(self, ds: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
epsilon_x = 1 / 2
epsilon_z = 1 / 2
        epsilon_3 = 1 / 5  # ratio between radii of tangent curve and pipe.
cover_fct = 1
tc_fct = 5
self._deltaLength = ds
self._length = length
self._radius = radius
# the tunnel is along z axis
z0 = np.linspace(length / 2, ds / 2, num=np.ceil(length / ds / 2).astype(int))[1:]
x0 = np.ones_like(z0) * radius
# Tangent curve
tnz = np.ceil(epsilon_3 * radius / ds).astype(int)
r_cv = ds * tnz
z1 = np.flipud(np.arange(tnz) * ds + length / 2)
x1 = (r_cv ** tc_fct - (z1 - length / 2) ** tc_fct) ** (1 / tc_fct) + radius - r_cv
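        # the corner between the pipe wall and the end cover is rounded with a
        # superellipse of exponent tc_fct and radius r_cv; tc_fct = 2 gives a
        # circular fillet, larger exponents give a flatter, sharper corner.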
# cover
num = np.ceil(cover_fct * x1[0] / ds).astype(int)
x2 = np.linspace(0, x1[0], num=num)[:np.ceil(-2 * cover_fct).astype(int)]
z2 = np.ones_like(x2) * z1[0]
# half part
xi = np.hstack((x2, x1, x0))
zi = np.hstack((z2, z1, z0))
self._nodes = np.zeros((xi.size, 3), order='F')
self._nodes[:, 0] = xi.flatten(order='F')
self._nodes[:, 1] = np.zeros_like(xi).flatten(order='F')
self._nodes[:, 2] = zi.flatten(order='F')
self.set_dmda()
self._u = np.zeros(self._nodes.size)
self._normal = np.zeros((self._nodes.shape[0], 2), order='F')
self._geo_norm = np.array((0, 0, 1))
# associated force geo
f_geo = self.copy()
epsilon_x = epsilon_x / cover_fct
a = (radius - ds * epsilon_x * 2) / radius
b = ds * epsilon_x
x0 = a * x0 + b
z0 = z0 - ds * epsilon_z
x1 = a * x1 + b
z1 = z1 - ds * epsilon_z
x2 = a * x2 + b
z2 = np.ones_like(x2) * length / 2 - ds * epsilon_z + r_cv
# half part
xi = np.hstack((x2, x1, x0))
zi = np.hstack((z2, z1, z0))
fnodes = np.vstack((xi, np.zeros_like(xi), zi)).T
f_geo.set_nodes(fnodes, 1)
return f_geo
def create_half_deltaz_v3(self, ds: float, # length of the mesh
length: float, # length of the tunnel
radius: float): # radius of the tunnel
epsilon_x = 1 / 2
epsilon_z = 1 / 2
        epsilon_3 = 1 / 1  # ratio between radii of tangent curve and pipe.
cover_fct = 1.5
tc_fct = 2
self._deltaLength = ds
self._length = length
self._radius = radius
# the tunnel is along z axis
z0 = np.linspace(length / 2, ds / 2, num=np.ceil(length / ds / 2).astype(int))[1:]
x0 = np.ones_like(z0) * radius
# Tangent curve
tnz = np.ceil(epsilon_3 * radius / ds).astype(int)
r_cv = ds * tnz
z1 = np.flipud(np.arange(tnz) * ds + length / 2)
x1 = (r_cv ** tc_fct - (z1 - length / 2) ** tc_fct) ** (1 / tc_fct) + radius - r_cv
# cover
num = np.ceil(cover_fct * x1[0] / ds).astype(int)
x2 = np.linspace(0, x1[0], num=num)[:np.ceil(-2 * cover_fct).astype(int)]
z2 = np.ones_like(x2) * length / 2 + r_cv
# half part
xi = np.hstack((x2, x1, x0))
zi = np.hstack((z2, z1, z0))
self._nodes = np.zeros((xi.size, 3), order='F')
self._nodes[:, 0] = xi.flatten(order='F')
self._nodes[:, 1] = np.zeros_like(xi).flatten(order='F')
self._nodes[:, 2] = zi.flatten(order='F')
self.set_dmda()
self._u =
|
np.zeros(self._nodes.size)
|
numpy.zeros
|
''' This module implements the perception module of AVs. '''
import numpy as np
__author__ = "<NAME>"
def percept(i, V_e_x_seq, V_e_y_seq, V_e_theta_seq, V_e_v_long_seq, V_e_v_lat_seq, V_e_a_seq, V_e_omega_r_seq,
V_e_wheel_anlge_seq, V_x_seq, V_y_seq, r_seed=False):
    ''' Implement the perception module of AVs. '''
# Define the random seed.
np.random.seed(r_seed)
# Define the perception time delay.
Time_Perc = 10
Time_Perc_temp = np.min([Time_Perc, i])
# At the time step (i), obtain the surrounding vehicle's dynamics at the time step (i-Time_Perc_temp).
v_e_x0, v_e_y0 = V_e_x_seq[i - Time_Perc_temp], V_e_y_seq[i - Time_Perc_temp]
# Perception error depends on the relative distance.
Perc_err_x_std = 0.0005 * np.abs(v_e_x0 - V_x_seq[i - Time_Perc_temp]) + 0.005
Perc_err_x = np.random.normal(0, Perc_err_x_std, 1)[0]
Perc_err_y_std = 0.0005 * np.abs(v_e_y0 - V_y_seq[i - Time_Perc_temp]) + 0.005
Perc_err_y = np.random.normal(0, Perc_err_y_std, 1)[0]
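    # the perceived position is the delayed true position plus zero-mean Gaussian
    # noise whose standard deviation grows linearly with the relative distance
    # (a 0.005 floor plus 0.05% of the separation along each axis).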
# Estimate the surrounding vehicle's dynamics at the time step (i), using the constant acceleration model.
x, y = v_e_x0 + Perc_err_x, v_e_y0 + Perc_err_y
    theta, v_long, v_lat, v_long_dot, omega_r, wheel_anlge = (
        V_e_theta_seq[i - Time_Perc_temp], V_e_v_long_seq[i - Time_Perc_temp],
        V_e_v_lat_seq[i - Time_Perc_temp], V_e_a_seq[i - Time_Perc_temp],
        V_e_omega_r_seq[i - Time_Perc_temp], V_e_wheel_anlge_seq[i - Time_Perc_temp])
# Define the vehicle parameters.
(l_1, l_2, m, I_z, h, r_wheel) = (1.421, 1.434, 2270, 4600, 0.647, 0.351)
(C_alhpa, F_x_max, F_y_1_max, F_y_2_max, mu_max, T_max, F_x_Tmax) = (
100000, 20000, 10400, 10600, 0.88, 3000, 3000 / 0.351)
x_list, y_list, theta_list, v_long_list, v_lat_list, v_long_dot_list, omega_r_list, wheel_anlge_list = [], [], [], [], [], [], [], []
x_list.append(x)
y_list.append(y)
theta_list.append(theta)
v_long_list.append(v_long)
v_lat_list.append(v_lat)
v_long_dot_list.append(v_long_dot)
omega_r_list.append(omega_r)
wheel_anlge_list.append(wheel_anlge)
    # Update vehicle dynamics using the planar bicycle model defined above.
time_step = 0.01
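    # propagate the delayed measurement forward with the bicycle model: the loop
    # below integrates roughly 2 s ahead, plus Time_Perc_temp extra steps
    # (presumably to make up for the perception delay), recording the predicted
    # state at each step.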
for time_i in range(int(1.99 / time_step) + Time_Perc_temp):
if v_long_dot < 0.1 and v_long < 0.02:
x_list.append(x)
y_list.append(y)
theta_list.append(theta)
v_long_list.append(v_long)
v_lat_list.append(v_lat)
v_long_dot_list.append(v_long_dot)
omega_r_list.append(omega_r)
wheel_anlge_list.append(wheel_anlge)
continue
beta = np.arctan(v_lat / v_long)
alpha_1 = - (beta + l_1 * omega_r / v_long - wheel_anlge)
        alpha_2 = - (beta - l_2 * omega_r / v_long)  # rear slip angle uses the CG-to-rear-axle distance l_2
# Define the simplified linear vehicle tire model with saturation.
F_y_1 = np.min([C_alhpa * np.abs(alpha_1), C_alhpa *
|
np.deg2rad(8)
|
numpy.deg2rad
|
import numpy as np
from pandas import read_csv
from astropy import constants
from pkg_resources import resource_filename
from .likelihoods import lnL_TP, lnL_EB, lnL_EB_twin
from .priors import *
from .funcs import stellar_relations, flux_relation
np.seterr(divide='ignore')
Msun = constants.M_sun.cgs.value
Rsun = constants.R_sun.cgs.value
Rearth = constants.R_earth.cgs.value
G = constants.G.cgs.value
au = constants.au.cgs.value
pi = np.pi
ln2pi = np.log(2*pi)
# load limb darkening coefficients
LDC_FILE = resource_filename('triceratops', 'data/ldc.tsv')
ldc = read_csv(
LDC_FILE, sep='\t', skiprows=48, usecols=[0, 1, 2, 3, 4, 5]
)[2:]
ldc = ldc[(
(np.array(ldc.Teff, dtype=int) <= 10000)
& (np.array(ldc.logg, dtype=float) >= 3.5)
& (np.array(ldc.xi, dtype=float) == 2.0)
)]
ldc_Zs = np.array(ldc.Z, dtype=float)
ldc_Teffs = np.array(ldc.Teff, dtype=int)
ldc_loggs = np.array(ldc.logg, dtype=float)
ldc_u1s = np.array(ldc.aLSM, dtype=float)
ldc_u2s = np.array(ldc.bLSM, dtype=float)
def lnZ_TTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, N: int = 1000000):
"""
Calculates the marginal likelihood of the TTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from R_p and inc prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
# calculate transit probability for each instance
Ptra = (rps*Rearth + R_s*Rsun)/a
lnL = np.full(N, -np.inf)
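    # geometric transit condition: a transit is only possible when
    # cos(inc) <= (R_p + R_s)/a, i.e. inc >= arccos(Ptra); draws that cannot
    # transit keep lnL = -inf and contribute zero to the average below.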
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if incs[i] >= inc_min:
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2
)
idx = lnL.argmax()
Z = np.mean(np.exp(lnL + lnprior_Mstar + lnprior_Porb))
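    # Monte Carlo estimate of the marginal likelihood: R_p and inc were drawn
    # from their priors (so those integrals are implicit), while the mass and
    # period priors enter explicitly in log space; Z is the average of exp(.)
    # over the N draws.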
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': 0, 'lnZ': lnZ
}
return res
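# A minimal usage sketch of the scenario above (hypothetical values; in practice
# time/flux/sigma come from a detrended, phase-folded light curve and the stellar
# parameters from a catalogue):
#     res = lnZ_TTP(time, flux, sigma, P_orb=3.5, M_s=1.0, R_s=1.0,
#                   Teff=5800.0, Z=0.0, N=100000)
#     res['lnZ'], res['R_p']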
def lnZ_TEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, N: int = 1000000):
"""
Calculates the marginal likelihood of the TEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
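    # the "twin" branch treats near-equal-mass binaries (q >= 0.95) at twice the
    # detected period: such systems show two nearly identical eclipses per orbit
    # and can masquerade as a single-eclipse signal at half the true period.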
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] >= 0.95):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(np.exp(lnL + lnprior_Mstar + lnprior_Porb))
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': 0, 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(np.exp(lnL_twin + lnprior_Mstar + lnprior_Porb))
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': 0, 'lnZ': lnZ
}
return res, res_twin
def lnZ_PTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
N: int = 1000000):
"""
Calculates the marginal likelihood of the PTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from q prior distributions
qs_comp = sample_q_companion(np.random.rand(N))
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
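    # magnitude difference implied by the drawn flux ratio
    # (delta_mag = 2.5*log10(F_comp/F_target)); the companion prior below rejects
    # companions brighter than the target and, when a contrast curve is supplied,
    # those that the imaging would already have revealed.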
if contrast_curve_file is None:
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from R_p and inc prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
# calculate transit probability for each instance
Ptra = (rps*Rearth + R_s*Rsun)/a
lnL = np.full(N, -np.inf)
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if incs[i] >= inc_min:
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2,
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res
def lnZ_PEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
N: int = 1000000):
"""
Calculates the marginal likelihood of the PEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N))
qs_comp = sample_q_companion(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
if contrast_curve_file is None:
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2,
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] >= 0.95):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2,
companion_fluxratio=fluxratios_comp[i],
companion_is_host=False
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb)
)
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res, res_twin
def lnZ_STP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float, Z: float,
plx: float, contrast_curve_file: str = None,
N: int = 1000000):
"""
Calculates the marginal likelihood of the STP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
        contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# sample from q prior distribution
qs_comp = sample_q_companion(np.random.rand(N))
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
loggs_comp = np.log10(G*(masses_comp*Msun)/(radii_comp*Rsun)**2)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
    # calculate limb darkening coefficients for companions
ldc_at_Z = ldc[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.aLSM, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.bLSM, dtype=float)
rounded_loggs_comp = np.round(loggs_comp/0.5) * 0.5
rounded_loggs_comp[rounded_loggs_comp < 3.5] = 3.5
rounded_loggs_comp[rounded_loggs_comp > 5.0] = 5.0
rounded_Teffs_comp = np.round(Teffs_comp/250) * 250
rounded_Teffs_comp[rounded_Teffs_comp < 3500] = 3500
rounded_Teffs_comp[rounded_Teffs_comp > 10000] = 10000
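    # the companion logg/Teff values are rounded onto the limb darkening grid
    # (0.5 dex in logg, 250 K in Teff) and clipped to its range before the
    # coefficient lookup below.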
u1s_comp, u2s_comp = np.zeros(N), np.zeros(N)
for i, (comp_Teff, comp_logg) in enumerate(
zip(rounded_Teffs_comp, rounded_loggs_comp)
):
mask = (Teffs_at_Z == comp_Teff) & (loggs_at_Z == comp_logg)
u1s_comp[i], u2s_comp[i] = u1s_at_Z[mask], u2s_at_Z[mask]
# calculate priors for companions
delta_mags = 2.5*np.log10(fluxratios_comp/(1-fluxratios_comp))
if contrast_curve_file is None:
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for stars
# with masses masses_comp
lnprior_Mstar = lnprior_Mstar_planet(masses_comp)
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from R_p and inc prior distributions
rps = sample_rp(np.random.rand(N), masses_comp)
incs = sample_inc(np.random.rand(N))
# calculate transit probability for each instance
a = ((G*masses_comp*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (rps*Rearth + radii_comp*Rsun)/a
lnL = np.full(N, -np.inf)
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if incs[i] >= inc_min:
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res
def lnZ_SEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, plx: float, contrast_curve_file: str = None,
N: int = 1000000):
"""
Calculates the marginal likelihood of the SEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
plx (float): Target star parallax [mas].
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N))
qs_comp = sample_q_companion(np.random.rand(N))
# calculate properties of the drawn companions
masses_comp = qs_comp*M_s
radii_comp, Teffs_comp = stellar_relations(
masses_comp, np.full(N, R_s), np.full(N, Teff)
)
loggs_comp = np.log10(G*(masses_comp*Msun)/(radii_comp*Rsun)**2)
# calculate flux ratios in the TESS band
fluxratios_comp = (
flux_relation(masses_comp)
/ (flux_relation(masses_comp) + flux_relation(np.array([M_s])))
)
    # calculate limb darkening coefficients for companions
ldc_at_Z = ldc[(ldc_Zs == ldc_Zs[np.abs(ldc_Zs - Z).argmin()])]
Teffs_at_Z = np.array(ldc_at_Z.Teff, dtype=int)
loggs_at_Z = np.array(ldc_at_Z.logg, dtype=float)
u1s_at_Z = np.array(ldc_at_Z.aLSM, dtype=float)
u2s_at_Z = np.array(ldc_at_Z.bLSM, dtype=float)
rounded_loggs_comp = np.round(loggs_comp/0.5) * 0.5
rounded_loggs_comp[rounded_loggs_comp < 3.5] = 3.5
rounded_loggs_comp[rounded_loggs_comp > 5.0] = 5.0
rounded_Teffs_comp = np.round(Teffs_comp/250) * 250
rounded_Teffs_comp[rounded_Teffs_comp < 3500] = 3500
rounded_Teffs_comp[rounded_Teffs_comp > 13000] = 13000
u1s_comp, u2s_comp = np.zeros(N), np.zeros(N)
for i, (comp_Teff, comp_logg) in enumerate(
zip(rounded_Teffs_comp, rounded_loggs_comp)
):
mask = (Teffs_at_Z == comp_Teff) & (loggs_at_Z == comp_logg)
u1s_comp[i], u2s_comp[i] = u1s_at_Z[mask], u2s_at_Z[mask]
# calculate properties of the drawn EBs
masses = qs*masses_comp
radii, Teffs = stellar_relations(masses, radii_comp, Teffs_comp)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# calculate priors for companions
delta_mags = 2.5*np.log10(
(fluxratios_comp/(1-fluxratios_comp))
+ (fluxratios/(1-fluxratios))
)
if contrast_curve_file is None:
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags),
np.array([2.2]), np.array([1.0])
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_bound(
M_s, plx, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for stars
# with masses masses_comp
lnprior_Mstar = lnprior_Mstar_binary(masses_comp)
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
a = (
(G*(masses_comp+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2
)**(1/3)
Ptra = (radii*Rsun + radii_comp*Rsun)/a
a_twin = (
(G*(masses_comp+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2
)**(1/3)
Ptra_twin = (radii*Rsun + radii_comp*Rsun)/a_twin
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] >= 0.95):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], radii_comp[i],
u1s_comp[i], u2s_comp[i],
companion_fluxratio=fluxratios_comp[i],
companion_is_host=True
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb)
)
lnZ = np.log(Z)
res_twin = {
'M_s': masses_comp[idx], 'R_s': radii_comp[idx],
'u1': u1s_comp[idx], 'u2': u2s_comp[idx],
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idx], 'lnZ': lnZ
}
return res, res_twin
def lnZ_DTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, Tmag: float, output_url: str,
contrast_curve_file: str = None, N: int = 1000000):
"""
Calculates the marginal likelihood of the DTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
Tmag (float): Target star TESS magnitude.
output_url (string): Link to trilegal query results.
        contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
a = ((G*M_s*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# determine background star population properties
Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp = (
trilegal_results(output_url, Tmag)
)
delta_mags = Tmag - Tmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
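    # convert the simulated stars' TESS magnitude offsets into fractional flux
    # contributions: f = 10**(dm/2.5) / (1 + 10**(dm/2.5)) with
    # dm = Tmag_target - Tmag_comp.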
N_comp = Tmags_comp.shape[0]
# draw random sample of background stars
idxs = np.random.randint(0, N_comp-1, N)
# calculate priors for companions
delta_mags = 2.5*np.log10(
fluxratios_comp[idxs]/(1-fluxratios_comp[idxs])
)
if contrast_curve_file is None:
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period planet prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_planet(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from R_p and inc prior distributions
    rps = sample_rp(np.random.rand(N), np.full(N, M_s))
incs = sample_inc(np.random.rand(N))
# calculate transit probability for each instance
Ptra = (rps*Rearth + R_s*Rsun)/a
lnL = np.full(N, -np.inf)
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if incs[i] >= inc_min:
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a, R_s, u1, u2,
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res
def lnZ_DEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Z: float, Tmag: float, output_url: str,
contrast_curve_file: str = None, N: int = 1000000):
"""
Calculates the marginal likelihood of the DEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Z (float): Target star metallicity [dex].
Tmag (float): Target star TESS magnitude.
output_url (string): Link to trilegal query results.
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
logg = np.log10(G*(M_s*Msun)/(R_s*Rsun)**2)
# determine target star limb darkening coefficients
this_Z = ldc_Zs[np.argmin(np.abs(ldc_Zs-Z))]
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teff))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-logg))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1, u2 = ldc_u1s[mask], ldc_u2s[mask]
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N))
# calculate properties of the drawn EBs
masses = qs*M_s
radii, Teffs = stellar_relations(
masses, np.full(N, R_s), np.full(N, Teff)
)
# calculate flux ratios in the TESS band
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
)
# determine background star population properties
Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp = (
trilegal_results(output_url, Tmag)
)
delta_mags = Tmag - Tmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
N_comp = Tmags_comp.shape[0]
# draw random sample of background stars
idxs = np.random.randint(0, N_comp-1, N)
# calculate priors for companions
delta_mags = 2.5*np.log10(
fluxratios_comp[idxs]/(1-fluxratios_comp[idxs])
)
if contrast_curve_file is None:
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for star of mass M_s
lnprior_Mstar = lnprior_Mstar_binary(np.array([M_s]))
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
a = ((G*(M_s+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
Ptra = (radii*Rsun + R_s*Rsun)/a
a_twin = ((G*(M_s+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2)**(1/3)
Ptra_twin = (radii*Rsun + R_s*Rsun)/a_twin
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] < 0.95):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], R_s, u1, u2,
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if (incs[i] >= inc_min) & (qs[i] >= 0.95):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], R_s, u1, u2,
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=False
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb)
)
lnZ = np.log(Z)
res_twin = {
'M_s': M_s, 'R_s': R_s, 'u1': u1, 'u2': u2,
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res, res_twin
def lnZ_BTP(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Tmag: float, output_url: str,
contrast_curve_file: str = None, N: int = 1000000):
"""
Calculates the marginal likelihood of the BTP scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Tmag (float): Target star TESS magnitude.
output_url (string): Link to trilegal query results.
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# determine background star population properties
Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp = (
trilegal_results(output_url, Tmag)
)
radii_comp = np.sqrt(G*masses_comp*Msun / 10**loggs_comp) / Rsun
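    # background-star radii follow from the surface gravity reported by the
    # TRILEGAL query: g = G*M/R**2, so R = sqrt(G*M/g), converted to solar radii.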
delta_mags = Tmag - Tmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
N_comp = Tmags_comp.shape[0]
# determine limb darkening coefficients of background stars
u1s_comp, u2s_comp = np.zeros(N_comp), np.zeros(N_comp)
for i in range(N_comp):
this_Teff = ldc_Teffs[np.argmin(np.abs(ldc_Teffs-Teffs_comp[i]))]
this_logg = ldc_loggs[np.argmin(np.abs(ldc_loggs-loggs_comp[i]))]
mask1 = (ldc_Teffs == this_Teff) & (ldc_loggs == this_logg)
these_Zs = ldc_Zs[mask1]
this_Z = these_Zs[np.argmin(np.abs(these_Zs-Zs_comp[i]))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1s_comp[i], u2s_comp[i] = ldc_u1s[mask], ldc_u2s[mask]
# draw random sample of background stars
idxs = np.random.randint(0, N_comp, N)
# calculate priors for companions
delta_mags = 2.5*np.log10(
fluxratios_comp[idxs]/(1-fluxratios_comp[idxs])
)
if contrast_curve_file is None:
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
    # calculate short-period planet prior for stars of masses masses_comp
lnprior_Mstar = lnprior_Mstar_planet(masses_comp[idxs])
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_planet(P_orb)
# sample from inc and R_p prior distributions
rps = sample_rp(np.random.rand(N), masses_comp[idxs])
incs = sample_inc(np.random.rand(N))
# calculate transit probability for each instance
a = ((G*masses_comp[idxs]*Msun)/(4*pi**2)*(P_orb*86400)**2)**(1/3)
    Ptra = (rps*Rearth + radii_comp[idxs]*Rsun)/a
lnL = np.full(N, -np.inf)
for i in range(N):
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (loggs_comp[idxs[i]] >= 3.5)
& (Teffs_comp[idxs[i]] <= 10000)):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_TP(
time, flux, sigma, rps[i],
P_orb, incs[i], a[i], radii_comp[idxs[i]],
u1s_comp[idxs[i]], u2s_comp[idxs[i]],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=True
)
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idxs[idx]], 'R_s': radii_comp[idxs[idx]],
'u1': u1s_comp[idxs[idx]], 'u2': u2s_comp[idxs[idx]],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': rps[idx],
'M_EB': 0, 'R_EB': 0, 'fluxratio_EB': 0,
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res
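# A minimal sketch (not part of the original module) of the Monte Carlo
# marginalization used above: the marginal likelihood is approximated by the
# prior-weighted average of the per-draw likelihoods,
#     Z ~ (1/N) * sum_i exp(lnL_i + lnprior_i),
# e.g. with toy values:
#   >>> import numpy as np
#   >>> lnL = np.array([-10.0, -12.0, -np.inf])
#   >>> lnprior = np.array([-1.0, -0.5, 0.0])
#   >>> lnZ = np.log(np.mean(np.exp(lnL + lnprior)))
# Draws rejected above (e.g. non-transiting geometries) keep lnL = -inf and so
# contribute zero weight to the average.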
def lnZ_BEB(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, M_s: float, R_s: float, Teff: float,
Tmag: float, output_url: str,
contrast_curve_file: str = None, N: int = 1000000):
"""
Calculates the marginal likelihood of the BEB scenario.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
M_s (float): Target star mass [Solar masses].
R_s (float): Target star radius [Solar radii].
Teff (float): Target star effective temperature [K].
Tmag (float): Target star TESS magnitude.
output_url (string): Link to trilegal query results.
contrast_curve_file (string): Path to contrast curve file.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
res_twin (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# sample from inc and q prior distributions
incs = sample_inc(np.random.rand(N))
qs = sample_q(np.random.rand(N))
qs_comp = sample_q_companion(np.random.rand(N))
# determine background star population properties
Tmags_comp, masses_comp, loggs_comp, Teffs_comp, Zs_comp = (
trilegal_results(output_url, Tmag)
)
radii_comp = np.sqrt(G*masses_comp*Msun / 10**loggs_comp) / Rsun
delta_mags = Tmag - Tmags_comp
fluxratios_comp = 10**(delta_mags/2.5) / (1 + 10**(delta_mags/2.5))
N_comp = Tmags_comp.shape[0]
# determine limb darkening coefficients of background stars
u1s_comp, u2s_comp = np.zeros(N_comp), np.zeros(N_comp)
for i in range(N_comp):
this_Teff = ldc_Teffs[np.argmin(
np.abs(ldc_Teffs-Teffs_comp[i])
)]
this_logg = ldc_loggs[np.argmin(
np.abs(ldc_loggs-loggs_comp[i])
)]
mask1 = (ldc_Teffs == this_Teff) & (ldc_loggs == this_logg)
these_Zs = ldc_Zs[mask1]
this_Z = these_Zs[np.argmin(np.abs(these_Zs-Zs_comp[i]))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1s_comp[i], u2s_comp[i] = ldc_u1s[mask], ldc_u2s[mask]
# draw random sample of background stars
idxs = np.random.randint(0, N_comp, N)
# calculate properties of the drawn EBs
masses = qs*masses_comp[idxs]
radii, Teffs = stellar_relations(
masses, radii_comp[idxs], Teffs_comp[idxs]
)
# calculate flux ratios in the TESS band
fluxratios_comp_bound = (
flux_relation(masses_comp[idxs])
/ (
flux_relation(masses_comp[idxs])
+ flux_relation(np.array([M_s]))
)
)
distance_correction = fluxratios_comp[idxs]/fluxratios_comp_bound
fluxratios = (
flux_relation(masses)
/ (flux_relation(masses) + flux_relation(np.array([M_s])))
* distance_correction
)
# calculate priors for companions
delta_mags = 2.5*np.log10(
(fluxratios_comp[idxs]/(1-fluxratios_comp[idxs]))
+ (fluxratios/(1-fluxratios))
)
if contrast_curve_file is None:
lnprior_companion = np.full(
N, np.log10((N_comp/0.1) * (1/3600)**2 * 2.2**2)
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
else:
separations, contrasts = file_to_contrast_curve(
contrast_curve_file
)
lnprior_companion = lnprior_background(
N_comp, np.abs(delta_mags), separations, contrasts
)
lnprior_companion[lnprior_companion > 0.0] = 0.0
lnprior_companion[delta_mags > 0.0] = -np.inf
# calculate short-period binary prior for stars
# with masses masses_comp
lnprior_Mstar = lnprior_Mstar_binary(masses_comp[idxs])
# calculate orbital period prior
lnprior_Porb = lnprior_Porb_binary(P_orb)
# calculate transit probability for each instance
a = (
(G*(masses_comp[idxs]+masses)*Msun)/(4*pi**2)*(P_orb*86400)**2
)**(1/3)
Ptra = (radii*Rsun + radii_comp[idxs]*Rsun)/a
a_twin = (
(G*(masses_comp[idxs]+masses)*Msun)/(4*pi**2)*(2*P_orb*86400)**2
)**(1/3)
Ptra_twin = (radii*Rsun + radii_comp[idxs]*Rsun)/a_twin
lnL = np.full(N, -np.inf)
lnL_twin = np.full(N, -np.inf)
for i in range(N):
# q < 0.95
if Ptra[i] <= 1:
inc_min = np.arccos(Ptra[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (qs[i] < 0.95)
& (loggs_comp[idxs[i]] >= 3.5)
& (Teffs_comp[idxs[i]] <= 10000)):
lnL[i] = -0.5*ln2pi - lnsigma - lnL_EB(
time, flux, sigma, radii[i], fluxratios[i],
P_orb, incs[i], a[i], radii_comp[idxs[i]],
u1s_comp[idxs[i]], u2s_comp[idxs[i]],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=True
)
# q >= 0.95 and 2xP_orb
if Ptra_twin[i] <= 1:
inc_min = np.arccos(Ptra_twin[i]) * 180/pi
else:
continue
if ((incs[i] >= inc_min) & (qs[i] >= 0.95)
& (loggs_comp[idxs[i]] >= 3.5)
& (Teffs_comp[idxs[i]] <= 10000)):
lnL_twin[i] = -0.5*ln2pi - lnsigma - lnL_EB_twin(
time, flux, sigma, radii[i], fluxratios[i],
2*P_orb, incs[i], a_twin[i], radii_comp[idxs[i]],
u1s_comp[idxs[i]], u2s_comp[idxs[i]],
companion_fluxratio=fluxratios_comp[idxs[i]],
companion_is_host=True
)
# results for q < 0.95
idx = lnL.argmax()
Z = np.mean(
np.exp(lnL + lnprior_companion + lnprior_Mstar + lnprior_Porb)
)
lnZ = np.log(Z)
res = {
'M_s': masses_comp[idxs[idx]], 'R_s': radii_comp[idxs[idx]],
'u1': u1s_comp[idxs[idx]], 'u2': u2s_comp[idxs[idx]],
'P_orb': P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
# results for q >= 0.95 and 2xP_orb
idx = lnL_twin.argmax()
Z = np.mean(
np.exp(lnL_twin+lnprior_companion+lnprior_Mstar+lnprior_Porb)
)
lnZ = np.log(Z)
res_twin = {
'M_s': masses_comp[idxs[idx]], 'R_s': radii_comp[idxs[idx]],
'u1': u1s_comp[idxs[idx]], 'u2': u2s_comp[idxs[idx]],
'P_orb': 2*P_orb, 'inc': incs[idx], 'R_p': 0,
'M_EB': masses[idx], 'R_EB': radii[idx],
'fluxratio_EB': fluxratios[idx],
'fluxratio_comp': fluxratios_comp[idxs[idx]], 'lnZ': lnZ
}
return res, res_twin
def lnZ_NTP_unknown(time: np.ndarray, flux: np.ndarray, sigma: float,
P_orb: float, Tmag: float, output_url: str,
N: int = 1000000):
"""
Calculates the marginal likelihood of the NTP scenario for
a star of unknown properties.
Args:
time (numpy array): Time of each data point
[days from transit midpoint].
flux (numpy array): Normalized flux of each data point.
sigma (float): Normalized flux uncertainty.
P_orb (float): Orbital period [days].
Tmag (float): Target star TESS magnitude.
output_url (string): Link to trilegal query results.
N (int): Number of draws for MC.
Returns:
res (dict): Best-fit properties and marginal likelihood.
"""
lnsigma = np.log(sigma)
# determine properties of possible stars
Tmags_nearby, masses_nearby, loggs_nearby, Teffs_nearby, Zs_nearby = (
trilegal_results(output_url, Tmag)
)
mask = (Tmag-1 < Tmags_nearby) & (Tmags_nearby < Tmag+1)
Tmags_possible = Tmags_nearby[mask]
masses_possible = masses_nearby[mask]
loggs_possible = loggs_nearby[mask]
Teffs_possible = Teffs_nearby[mask]
Zs_possible = Zs_nearby[mask]
radii_possible = np.sqrt(
G*masses_possible*Msun / 10**loggs_possible
) / Rsun
N_possible = Tmags_possible.shape[0]
# determine limb darkening coefficients of background stars
u1s_possible = np.zeros(N_possible)
u2s_possible = np.zeros(N_possible)
for i in range(N_possible):
this_Teff = ldc_Teffs[np.argmin(
np.abs(ldc_Teffs-Teffs_possible[i])
)]
this_logg = ldc_loggs[np.argmin(
np.abs(ldc_loggs-loggs_possible[i])
)]
mask1 = (ldc_Teffs == this_Teff) & (ldc_loggs == this_logg)
these_Zs = ldc_Zs[mask1]
this_Z = these_Zs[np.argmin(np.abs(these_Zs-Zs_possible[i]))]
mask = (
(ldc_Zs == this_Z)
& (ldc_Teffs == this_Teff)
& (ldc_loggs == this_logg)
)
u1s_possible[i], u2s_possible[i] = ldc_u1s[mask], ldc_u2s[mask]
# draw random sample of background stars
if N_possible > 0:
idxs =
|
np.random.randint(0, N_possible, N)
|
numpy.random.randint
|
'''
measure/prepare.py
TODO:
- data fitting
- data evaluation/interpolation
'''
import os
import sys
import time
import numpy as np
import scipy.linalg as linalg
import matplotlib.pyplot as plt
def weighted_average_filter(a, w, count=1,
overwrite_a=False, overwrite_w=False):
'''Weighted mean filter along first dimension.
Returns
-------
a : numpy.ndarray (nD)
The array after the filtering has been applied `count` times.
'''
a = np.array(a, float, ndmin=1, copy=not overwrite_a)
w = np.array(w, float, ndmin=1, copy=not overwrite_w)
if len(w) % 2 != 1:
raise ValueError('Number of weights (`len(w)`) must be an odd number.')
a_tmp = []
w /= w.sum()
i0 = (len(w)-1)//2
for _ in range(count):
for i in range(i0, len(a)-i0):
a_tmp.append(w.dot(a[i-i0:i+i0+1]))
a[i0:len(a)-i0] = a_tmp; a_tmp.clear()
return a
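# Usage sketch (assumed example, not from the original file): a 3-point
# weighted moving average applied twice to a short signal.
#   >>> import numpy as np
#   >>> a = np.array([0.0, 1.0, 4.0, 3.0, 4.0, 5.0])
#   >>> w = np.array([0.25, 0.5, 0.25])
#   >>> smoothed = weighted_average_filter(a.copy(), w.copy(), count=2)
# The first and last (len(w)-1)//2 samples are left untouched because the
# window cannot be centred on them.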
class ForceMeasurement:
def __init__(self, uk, fk):
'''
Parameters
----------
uk : 1D or 2D array of floats
Displacement vector (row) of a point at each time.
fk : 1D or 2D array of floats
            Corresponding force vector (row) for each time.
Returns
-------
None
'''
uk = np.asarray(uk, float)
fk = np.asarray(fk, float)
if uk.ndim > 2:
raise TypeError('Expected `uk` to be array-like '
                            'with a maximum dimension of 2.')
if fk.ndim > 2:
raise TypeError('Expected `fk` to be array-like '
                            'with a maximum dimension of 2.')
if uk.ndim == 1:
uk = uk[:,None]
elif uk.ndim == 0:
uk = uk[None,None]
if fk.ndim == 1:
fk = fk[:,None]
elif fk.ndim == 0:
fk = fk[None,None]
if len(uk) != len(fk):
            raise TypeError('Expected the same number of time points, i.e. '
'the same size of the first dimension of `uk` and `fk`.')
self.uk = uk
self.fk = fk
@classmethod
def load_from_files(cls, filepath_uk, filepath_fk, delimiter=None):
'''Load arrays of displacements and of the corresponding forces.'''
uk = np.loadtxt(filepath_uk, dtype=float, delimiter=delimiter, ndmin=1)
fk = np.loadtxt(filepath_fk, dtype=float, delimiter=delimiter, ndmin=1)
if len(uk) != len(fk):
            raise TypeError('Expected the same number of time points, i.e. '
'the same size of the first dimension of `uk` and `fk`.')
return cls(uk, fk)
@staticmethod
def get_filtered_values(a, w=None, count=1):
'''Weighted average filtering in the time dimension.'''
if w is None: w = np.array((0.25,0.50,0.25))
else: w = np.array(w, ndmin=1, copy=False)
a = weighted_average_filter(a, w, count)
return a
def view_displacements(self):
'''return copy'''
return self.uk.copy()
def view_forces(self):
'''return copy'''
return self.fk.copy()
def rescale_forces(self, scale):
self.fk *= scale
def filter_displacements(self, w=None, count=1):
'''Weighted average filtering in the time dimension.'''
self.uk[:] = self.get_filtered_values(self.uk, w, count)
def filter_forces(self, w=None, count=1):
'''Weighted average filtering in the time dimension.'''
self.fk[:] = self.get_filtered_values(self.fk, w, count)
def remove_data_points(self, uk, fk, atol_u, atol_f, dryrun=False):
'''Remove points that are within tolerance of specified values.'''
atol_u **= 2
atol_f **= 2
mask_keep = np.ones(len(self.uk), dtype=bool)
for uk_i, fk_i in zip(uk, fk):
mask_match = np.logical_and(
np.sum((self.uk-uk_i)**2, axis=1) < atol_u,
np.sum((self.fk-fk_i)**2, axis=1) < atol_f)
mask_keep[mask_match] = False
mask_remove = np.logical_not(mask_keep)
ids_remove = np.argwhere(mask_remove)
uk_remove = self.uk[ids_remove].copy()
fk_remove = self.fk[ids_remove].copy()
        if not dryrun and ids_remove.size:
self.uk = self.uk[mask_keep]
self.fk = self.fk[mask_keep]
if not self.uk.flags.owndata:
self.uk = np.array(self.uk)
if not self.fk.flags.owndata:
self.fk = np.array(self.fk)
return uk_remove, fk_remove, ids_remove
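# Usage sketch for ForceMeasurement (hypothetical file names, not part of the
# original module):
#   >>> fm = ForceMeasurement.load_from_files('uk.txt', 'fk.txt', delimiter=',')
#   >>> fm.filter_forces(count=3)    # default weights are (0.25, 0.5, 0.25)
#   >>> fm.rescale_forces(1e-3)      # e.g. convert N to kN
#   >>> uk, fk = fm.view_displacements(), fm.view_forces()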
class DisplacementMeasurement:
def __init__(self, xk, uk, values_relative=False):
if not isinstance(xk, np.ndarray):
raise TypeError('Expected `xk` to be a `numpy.ndarray`.')
if not isinstance(uk, list) or not all(isinstance(uk_t, np.ndarray) for uk_t in uk):
raise TypeError('Expected `uk` to be a `list` of `numpy.ndarray`s.')
if not all(xk.shape == uk_t.shape for uk_t in uk):
raise TypeError('Expected items in `uk` to have the same shape as `xk`.')
self.values_relative = bool(values_relative)
self.xk = xk
self.uk = uk
@classmethod
def load_from_files(cls,
filepath_xk, filepath_yk, filepath_uk, filepath_vk,
delimiter=None, values_relative=False):
'''Firstly, load each position array (1D) and each (transient) value array
(2D). Secondly, combine the 1D position arrays into a single 2D array and
combine the 2D value arrays into a list of 2D arrays. Finally, return 2D
position array and the list of 2D values arrays.'''
xk = np.loadtxt(filepath_xk, dtype=float, delimiter=delimiter, ndmin=1)
yk = np.loadtxt(filepath_yk, dtype=float, delimiter=delimiter, ndmin=1)
uk = np.loadtxt(filepath_uk, dtype=float, delimiter=delimiter, ndmin=1)
vk = np.loadtxt(filepath_vk, dtype=float, delimiter=delimiter, ndmin=1)
if xk.shape != yk.shape:
raise TypeError('Expected same shapes of `xk` and `yk`.')
if uk.shape != vk.shape:
raise TypeError('Expected same shapes of `uk` and `vk`.')
# print(len(xk))
# print(len(uk))
# input("press to continue ...")
if len(xk) != len(uk):
raise TypeError('Expected the same number of points, i.e. '
'the same size of the first dimension of `xk` and `uk`.')
if uk.ndim == 1:
uk = uk[:,None] # new axis
vk = vk[:,None]
nt = uk.shape[1]
# print(nt)
# input("press to continue ...")
uk = np.split(uk, nt, axis=1)
vk = np.split(vk, nt, axis=1)
xk = np.stack((xk, yk), axis=1)
uk = [np.concatenate(uk_t, axis=1)
for uk_t in zip(uk, vk)]
return cls(xk, uk, values_relative)
def __add__(self, other):
cls = self.__class__
if not isinstance(other, cls):
raise TypeError
if other.values_relative != self.values_relative:
raise TypeError
if other.xk.shape[1] != self.xk.shape[1]:
raise TypeError
if len(other.uk) != len(self.uk):
raise TypeError
xk = np.concatenate((self.xk, other.xk), axis=0)
uk = [np.concatenate((self_uk_t, other_uk_t), axis=0)
for self_uk_t, other_uk_t in zip(self.uk, other.uk)]
return cls(xk, uk, self.values_relative)
def set_values_relative(self):
if not self.values_relative:
self.values_relative = True
for uk_t in self.uk:
uk_t -= self.xk
def set_values_total(self):
if self.values_relative:
self.values_relative = False
for uk_t in self.uk:
uk_t += self.xk
def view_coords(self):
'''return copy'''
return self.xk.copy()
def view_values(self):
'''return copy'''
return [uk_t.copy() for uk_t in self.uk]
def view_displacements(self):
uk = self.view_values()
if not self.values_relative:
for uk_t in uk:
uk_t -= self.xk
return uk
def view_positions(self):
uk = self.view_values()
if self.values_relative:
for uk_t in uk:
uk_t += self.xk
return uk
def compute_mean_coords(self):
'''Compute mean values for each time.'''
return self.xk.mean(axis=0)
def compute_mean_values(self):
'''Compute mean values for each time.'''
um = []
for uk_t in self.uk:
um.append(uk_t.mean(axis=0))
return np.stack(um, axis=0)
def compute_mean_displacements(self):
'''Compute mean values for each time.'''
um = []
for uk_t in self.view_displacements():
um.append(uk_t.mean(axis=0))
return np.stack(um, axis=0)
def compute_mean_positions(self):
'''Compute mean values for each time.'''
um = []
for uk_t in self.view_positions():
um.append(uk_t.mean(axis=0))
return np.stack(um, axis=0)
def compute_mean_rotations(self):
'''Compute mean values for each time.'''
uk = self.view_positions()
um = self.compute_mean_positions()
rk_t0 = (uk[0] - um[0]).T
rk_t0 /= np.sqrt((rk_t0**2).sum(axis=0))
th = [0.0] # incremental mean rotations
for uk_t1, um_t1 in zip(uk[1:], um[1:]):
rk_t1 = (uk_t1 - um_t1).T
rk_t1 /= np.sqrt((rk_t1**2).sum(axis=0))
s = np.cross(rk_t0, rk_t1, axis=0)
c = np.sum(rk_t0*rk_t1, axis=0)
th.append(np.arctan2(s,c).mean())
rk_t0 = rk_t1
return np.array(th).cumsum()
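    # The incremental rotation between two time steps is recovered above from
    # the unit radial vectors via atan2(cross, dot); a minimal standalone
    # check (assumed example):
    #   >>> import numpy as np
    #   >>> r0 = np.array([1.0, 0.0]); r1 = np.array([0.0, 1.0])
    #   >>> np.degrees(np.arctan2(np.cross(r0, r1), np.dot(r0, r1)))   # 90.0
    # compute_mean_rotations() averages this angle over all markers at each
    # step and accumulates the increments with cumsum().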
def offset_coords(self, x, i=slice(0,2), operator='+'):
if operator == '+':
self.xk[:,i] += x
elif operator == '-':
self.xk[:,i] -= x
else:
raise TypeError('`operator`: "+" or "-" ?')
def offset_values(self, u, i=slice(0,2), operator='+'):
if len(u) != len(self.uk):
raise TypeError('Number of time points.')
if operator == '+':
for self_uk_t, u_t in zip(self.uk, u):
self_uk_t[:,i] = self_uk_t[:,i] + u_t[i]
elif operator == '-':
for self_uk_t, u_t in zip(self.uk, u):
self_uk_t[:,i] = self_uk_t[:,i] - u_t[i]
else:
raise TypeError('`operator`: "+" or "-" ?')
def _rotate_axis(self, th):
raise NotImplementedError
if not isinstance(th, (float,int)):
th = float(th) # try any way
c = np.cos(th); s = np.sin(th)
RT = np.array([[c,s],[-s,c]])
xk = self.xk
uk = self.uk
if self.values_relative:
for uk_t in uk:
uk_t *= uk_t.dot(RT)
xk[:] = xk.dot(RT)
for uk_t in uk:
uk_t[:] -= xk
else:
for uk_t in uk:
uk_t[:] = uk_t.dot(RT)
self.xk[:] = self.xk.dot(RT)
self.xk[:] = self.xk.dot(np.array([[c,s],[-s,c]]))
def _rotate_coord_axis(self, th, x0=None):
# the opposite of rotating value
if not isinstance(th, (float,int)):
th = float(th) # try any way
if x0 is None:
x0 =
|
np.array([0.0,0.0])
|
numpy.array
|
# Copyright 2018 <NAME>. All rights reserved.
#
# Licensed under the MIT license
"""
Script for panels of Figure 1 (Zebrafish model training, evolution and navigation)
"""
import core as c
import analysis as a
from global_defs import GlobalDefs
import os
import seaborn as sns
import matplotlib as mpl
import matplotlib.pyplot as pl
import numpy as np
import h5py
from data_stores import SimulationStore
from mo_types import MoTypes
from pandas import DataFrame
from scipy.stats import wilcoxon
# file definitions
base_path = "./model_data/Adam_1e-4/sepInput_mixTrain/"
paths_1024 = [f+'/' for f in os.listdir(base_path) if "_3m1024_" in f]
paths_512 = [f+'/' for f in os.listdir(base_path) if "_3m512_" in f]
paths_256 = [f+'/' for f in os.listdir(base_path) if "_3m256_" in f]
def test_loss(path):
fname = base_path + path + "losses.hdf5"
lossfile = h5py.File(fname, "r")
test_losses = np.array(lossfile["test_losses"])
rank_errors = np.array(lossfile["test_rank_errors"])
timepoints = np.array(lossfile["test_eval"])
return timepoints, test_losses, rank_errors
def ev_path(path):
return base_path + path + "evolve/"
def mpath(path):
return base_path + path[:-1] # need to remove trailing slash
def get_bout_starts(pos: np.ndarray) -> np.ndarray:
"""
Extract bout starts from network position trace
:param pos: nx3 trace of x, y, angle at each timepoint
    :return: Binary array of length n in which 1 marks a bout start frame
"""
spd = np.r_[0, np.sqrt(np.sum(np.diff(pos[:, :2], axis=0) ** 2, 1))] # speed
bs = np.r_[0, np.diff(spd) > 0.00098] # bout starts
return bs
def get_bout_da(pos: np.ndarray, starts: np.ndarray) -> np.ndarray:
"""
For each bout indicated by starts get the angle turned
:param pos: nx3 trace of x, y, angle at each timepoint
:param starts: Array of indices corresponding to bout starts
:return: For each bout in starts the turning angle
"""
starts = np.arange(pos.shape[0])[starts.astype(bool)]
ix_pre = starts - 10
ix_pre[ix_pre < 0] = 0
ix_post = starts + 10
ix_post[ix_post >= pos.shape[0]] = pos.shape[0]-1
da = pos[ix_post, 2] - pos[ix_pre, 2]
return da
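# Sketch of the intended use (assumed example): bouts are detected from the
# speed trace and the turn angle is the heading change from 10 frames before
# to 10 frames after each bout start.
#   >>> bs = get_bout_starts(pos)   # pos is an n x 3 (x, y, angle) trace
#   >>> da = get_bout_da(pos, bs)   # turn angle per bout, units of pos[:, 2]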
def compute_gradient_bout_frequency(model_path, drop_list=None):
def bout_freq(pos: np.ndarray):
r = np.sqrt(np.sum(pos[:, :2]**2, 1)) # radial position
bs = get_bout_starts(pos) # bout starts
bins = np.linspace(0, GlobalDefs.circle_sim_params["radius"], 6)
bcenters = bins[:-1] + np.diff(bins)/2
cnt_r = np.histogram(r, bins)[0]
cnt_r_bs = np.histogram(r[bs > 0.1], bins)[0]
bfreq = cnt_r_bs / cnt_r * GlobalDefs.frame_rate
return bfreq, bcenters
with SimulationStore("sim_store.hdf5", std, MoTypes(False)) as sim_store:
pos_fixed = sim_store.get_sim_pos(model_path, "r", "trained", drop_list)
pos_part = sim_store.get_sim_pos(model_path, "r", "partevolve", drop_list)
pos_var = sim_store.get_sim_pos(model_path, "r", "bfevolve", drop_list)
bf_fixed, bc = bout_freq(pos_fixed)
bf_p, bc = bout_freq(pos_part)
bf_var, bc = bout_freq(pos_var)
return bc, bf_fixed, bf_p, bf_var
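# Note on the estimate above: within each radial bin the bout frequency is
# (frames flagged as bout starts) / (total frames in the bin) * frame_rate,
# i.e. bouts per second as a function of radial position in the arena.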
def run_flat_gradient(model_path, drop_list=None):
mdata = c.ModelData(model_path)
gpn = MoTypes(False).network_model()
gpn.load(mdata.ModelDefinition, mdata.LastCheckpoint)
flt_params = GlobalDefs.circle_sim_params.copy()
flt_params["t_max"] = flt_params["t_min"]
sim = MoTypes(False).rad_sim(gpn, std, **flt_params)
sim.t_max = sim.t_min # reset gradient to be flat
sim.remove = drop_list
evo_path = model_path + '/evolve/generation_weights.npy'
evo_weights = np.load(evo_path)
w = np.mean(evo_weights[-1, :, :], 0)
sim.bf_weights = w
return sim.run_simulation(GlobalDefs.n_steps, False)
def compute_da_modulation(model_path, drop_list=None):
with SimulationStore("sim_store.hdf5", std, MoTypes(False)) as sim_store:
pos_ev = sim_store.get_sim_pos(model_path, "r", "bfevolve", drop_list)
pos_flt = run_flat_gradient(model_path, drop_list)
bs_ev = get_bout_starts(pos_ev)
bs_flt = get_bout_starts(pos_flt)
# get delta angle of each bout
da_ev = get_bout_da(pos_ev, bs_ev)
da_flt = get_bout_da(pos_flt, bs_flt)
# get temperature at each bout start
temp_ev = a.temp_convert(np.sqrt(np.sum(pos_ev[bs_ev.astype(bool), :2]**2, 1)), 'r')
temp_flt = a.temp_convert(np.sqrt(np.sum(pos_flt[bs_flt.astype(bool), :2] ** 2, 1)), 'r')
# get delta-temperature effected by each previous bout
dt_ev = np.r_[0, np.diff(temp_ev)]
dt_flt = np.r_[0, np.diff(temp_flt)]
# only consider data above T_Preferred and away from the edge
valid_ev = np.logical_and(temp_ev > GlobalDefs.tPreferred, temp_ev < GlobalDefs.circle_sim_params["t_max"]-1)
valid_flt = np.logical_and(temp_flt > GlobalDefs.tPreferred, temp_flt < GlobalDefs.circle_sim_params["t_max"] - 1)
da_ev = da_ev[valid_ev]
da_flt = da_flt[valid_flt]
dt_ev = dt_ev[valid_ev]
dt_flt = dt_flt[valid_flt]
# get turn magnitude for up and down gradient
up_grad_ev = np.mean(np.abs(da_ev[dt_ev > 0.5]))
dn_grad_ev = np.mean(np.abs(da_ev[dt_ev < -0.5]))
up_grad_flt = np.mean(np.abs(da_flt[dt_flt > 0.5]))
dn_grad_flt = np.mean(np.abs(da_flt[dt_flt < -0.5]))
up_change = up_grad_ev / up_grad_flt
dn_change = dn_grad_ev / dn_grad_flt
return dn_change, up_change
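# The two returned values are the ratios of the mean |turn angle| in the
# evolved-gradient simulation to the flat-gradient control, split by whether
# the previous bout moved the fish down (dT < -0.5) or up (dT > 0.5) the
# gradient; values > 1 indicate turn magnitudes enhanced relative to control.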
if __name__ == "__main__":
save_folder = "./DataFigures/Figure1/"
if not os.path.exists(save_folder):
os.makedirs(save_folder)
sns.reset_orig()
mpl.rcParams['pdf.fonttype'] = 42
std = c.GradientData.load_standards("gd_training_data.hdf5")
# first panel - log squared error progression over training
test_time = test_loss(paths_512[0])[0]
test_256 = np.vstack([test_loss(lp)[1] for lp in paths_256])
test_512 = np.vstack([test_loss(lp)[1] for lp in paths_512])
test_1024 = np.vstack([test_loss(lp)[1] for lp in paths_1024])
fig, ax = pl.subplots()
sns.tsplot(np.log10(test_256), test_time, ax=ax, color="C2", n_boot=1000, condition="256 HU")
sns.tsplot(np.log10(test_512), test_time, ax=ax, color="C1", n_boot=1000, condition="512 HU")
sns.tsplot(
|
np.log10(test_1024)
|
numpy.log10
|
#Copyright 2010 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains tools and utilities for computing ephemerides and physical
locations of solar system objects, as well as proper motion calculations for
extrasolar objects.
.. seealso::
`Pyephem <http://rhodesmill.org/pyephem/>`_
A Pythonic implementation of the
`xephem <http://www.clearskyinstitute.com/xephem/>`_ ephemerides
algorithms.
`<NAME>. "Astronomical Algorithms" ISBN 0943396352 <http://www.willbell.com/MATH/mc1.htm>`_
An authoritative reference on coordinates, ephemerides, and related
transforms in astronomy.
`JPL Solar System Dynamics Group <http://ssd.jpl.nasa.gov/>`_
The standard source for solar system dynamics and ephemerides. Source
of DE200 and DE405 solar system models, and HORIZON ephemerides service.
.. todo:: Tutorials
Classes and Inheritance Structure
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. inheritance-diagram:: astropysics.coords.ephems
:parts: 1
Module API
^^^^^^^^^^
"""
#TODO: JPL Ephemeris option
#useful references:
#*http://www.astro.rug.nl/software/kapteyn/index.html
#*"Astronomical Algorithms" by <NAME>
#*"The IAU Resolutions on Astronomical Reference Systems,Time Scales, and Earth
# Rotation Models": http://aa.usno.navy.mil/publications/docs/Circular_179.pdf
from __future__ import division,with_statement
from ..constants import pi
import numpy as np
_twopi = 2*pi
try:
#requires Python 2.6
from abc import ABCMeta
from abc import abstractmethod
from abc import abstractproperty
from collections import Sequence,MutableSequence
except ImportError: #support for earlier versions
abstractmethod = lambda x:x
abstractproperty = property
ABCMeta = type
class MutableSequence(object):
__slots__=('__weakref__',) #support for weakrefs as necessary
class Sequence(object):
__slots__=('__weakref__',) #support for weakrefs as necessary
class EphemerisAccuracyWarning(Warning):
"""
Class for warnings due to Ephemeris accuracy issues
"""
#<--------------------Define general classes----------------------------------->
class EphemerisObject(object):
"""
This class is the superclass of objects that change sky coordinates over
time.
**Subclassing**
* Subclasses should implement the :meth:`_getCoordObj` abstract method.
This should return a :class:`astropysics.coords.coordsys.CoordinateSystem`
object of the coordinates for the current value of :attr:`jd`.
* Subclasses may implement a :meth:`_jdhook` method to perform an action
whenever the jd is changed. It must have a signature f(oldjd,newjd).
* Subclasses may implement the :meth:`getVelocity` method to return
instantaneous velocities for the coordinate at the current value of
:attr:`jd`. If this is not implemented, calling it will raise a
:exc:`NotImplementedError`.
"""
__metaclass__ = ABCMeta
name = '' #put here so it ends up in autogenerated documentation
'The name of the object.'
def __init__(self,name,validjdrange=None):
"""
        :param name: The name of the object.
        :param validjdrange:
            Sets the jd range over which this method is valid as (minjd,maxjd).
            Trying to get something outside will result in an
            :exc:`EphemerisAccuracyWarning` warning. `minjd` or `maxjd` can be
None to indicate no bound.
"""
from ..obstools import jd2000
self._jd = jd2000
self._jdhook(jd2000,jd2000)
self.name = name
self._setValidjdrange(validjdrange)
def _getJd(self):
return self._jd
def _setJd(self,val):
from operator import isSequenceType
from ..obstools import calendar_to_jd
from datetime import datetime
if val == 'now':
jd = calendar_to_jd(datetime.utcnow(),tz=None)
elif hasattr(val,'year') or isSequenceType(val):
jd = calendar_to_jd(val)
else:
jd = val
if self._validrange is not None:
from warnings import warn
if self._validrange[0] is not None and jd < self._validrange[0]:
warn('JD {0} is below the valid range for this EphemerisObject'.format(jd),EphemerisAccuracyWarning)
elif self._validrange[1] is not None and jd > self._validrange[1]:
warn('JD {0} is above the valid range for this EphemerisObject'.format(jd),EphemerisAccuracyWarning)
self._jdhook(self._jd,jd)
self._jd = jd
jd = property(_getJd,_setJd,doc="""
Julian Date at which to calculate the orbital elements. Can be set either as
a scalar JD, 'now', :class:`datetime.datetime` object or a compatible tuple.
""")
def _jdhook(self,oldjd,newjd):
"""
Override in subclasses to perform an action when the jd is changed
(although before self._jd is updated).
"""
pass
@property
def validjdrange(self):
"""
The range of jds over which these ephemerides are valid. Returns a
2-tuple (minjd,maxjd), either of which can be None to indicate no bound.
"""
return self._validrange
def _setValidjdrange(self,val):
"""
Sets the jd range over which this method is valid. Trying to get
        something outside will result in an :exc:`EphemerisAccuracyWarning`.
Intended for use in __init__.
:param val:
The range as (minjd,maxjd), can be None to indicate no bound. If set
to None, the result will be (None,None).
"""
if val is None:
self._validrange = (None,None)
else:
v1,v2 = val
if v1 is None and v2 is None:
self._validrange = (None,None)
else:
from operator import isSequenceType
from ..obstools import calendar_to_jd
from datetime import datetime
vs = []
for v in (v1,v2):
if v is None:
vs.append(None)
elif v == 'now':
vs.append(calendar_to_jd(datetime.utcnow(),tz=None))
elif hasattr(v,'year') or isSequenceType(v):
vs.append(calendar_to_jd(v))
else:
vs.append(v)
self._validrange = tuple(vs)
def __call__(self,jds=None,coordsys=None):
"""
Computes the coordinates of this object at the specified time(s).
:param jds:
A sequence of julian dates at which to compute the coordinates, a
scalar JD, or None to use the :attr:`jd` attribute's current value.
:param coordsys:
            A :class:`astropysics.coords.coordsys.CoordinateSystem` class that
specifies the type of the output coordinates, or None to use the
default coordinate type.
:returns:
A list of objects with the coordinates in the same order as `jds`,
or a single object if `jds` is None or a scalar. Outputs are
            :class:`astropysics.coords.coordsys.CoordinateSystem` subclasses,
and their type is either `coordsys` or the default type if
`coordsys` is None.
"""
single = False #return an object instead of a sequence of objects
if jds is None:
single = True
res = (self._getCoordObj(),)
else:
if isinstance(jds,np.ndarray):
if jds.shape == ():
single = True
jds = jds.ravel()
elif np.isscalar(jds):
single = True
jds = (jds,)
jd0 = self.jd
try:
res = []
for jd in jds:
self.jd = jd
res.append(self._getCoordObj())
finally:
self.jd = jd0
if coordsys is not None:
res = [c.convert(coordsys) for c in res]
if single:
return res[0]
else:
return res
@abstractmethod
def _getCoordObj(self):
"""
Computes and returns the coordinate location of the object at the time
given by the :attr:`jd` attribute's value.
:returns:
The current coordinates for the object. The exact output coordinate
system is not specified, other than that it must be a subclass of
            :class:`astropysics.coords.coordsys.CoordinateSystem`.
"""
raise NotImplementedError
def getVelocity(self):
"""
Computes and returns the instantaneous velocity for this object at the
time given by the :attr:`jd` attribute's value.
:returns:
The current velocity of this object. The exact type is not
specified, but it should be something that can be added to the
coordinates that are returned when the object is called.
:raises NotImplementedError:
If velocities are not implemented for this class.
"""
raise NotImplementedError
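# Subclassing sketch (illustrative only; the ICRSCoordinates constructor
# signature is assumed): a minimal EphemerisObject that returns a fixed
# position for any jd, exercising the __call__ machinery above.
#
#   class FixedObject(EphemerisObject):
#       def _getCoordObj(self):
#           from .coordsys import ICRSCoordinates
#           return ICRSCoordinates(10.68, 41.27)
#
#   obj = FixedObject('M31-like')
#   obj.jd = 'now'     # jd accepts a scalar JD, 'now', or a datetime
#   c = obj()          # coordinates at the current jd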
class ProperMotionObject(EphemerisObject):
"""
An object with linear proper motion relative to a specified epoch.
"""
def __init__(self,name,ra0,dec0,dra=0,ddec=0,epoch0=2000,distpc0=None,rv=0,
coordclass=None):
"""
:param str name: Name of the object.
:param float ra0: RA in degrees at the starting epoch.
:param float dec0: Dec in degrees at the starting epoch.
:param float dra: Change in RA, arcsec/yr.
:param float ddec: Proper motion in Dec, arcsec/yr.
        :param epoch0: Epoch for which `ra0`, `dec0`, and `distpc0` are valid.
:param distpc0: Distance in pc at the starting epoch.
:param float rv: Radial velocity in km/s
:param coordclass:
The type of output coordinates. If None, defaults to
:class:`~astropysics.coords.coordsys.ICRSCoordinates`.
:type coordclass:
:class:`astropysics.coords.coordsys.EpochalLatLongCoordinates`
"""
from ..obstools import epoch_to_jd
from .coordsys import ICRSCoordinates
self.ra0 = ra0
self.dec0 = dec0
self.dra = dra
self.ddec = ddec
self.epoch0 = epoch0
self._jd0 = epoch_to_jd(epoch0)
self.distpc0 = distpc0
self.rv = rv
if coordclass is None:
self.coordclass = ICRSCoordinates
else:
self.coordclass = coordclass
EphemerisObject.__init__(self,name)
self.jd = self._jd0
@property
def drastar(self):
"""
        Proper motion in right ascension in arcseconds. This is distinguished
from `dra` in that it is multiplied by ``cos(dec0)`` to produce a true
proper motion component.
"""
from math import cos, radians
return self.dra * cos(radians(self.dec0))
@drastar.setter
def drastar(self, val):
from math import cos, radians
self.dra = val / cos(radians(self.dec0))
@property
def ra(self):
"""
RA at the current :attr:`jd` in degrees.
"""
from math import degrees
from ..constants import asecperrad
return self.ra0 + degrees(self._dyr*self.dra/asecperrad)
@property
def dec(self):
"""
Declination at the current :attr:`jd` in degrees.
"""
from math import degrees
from ..constants import asecperrad
return self.dec0 + degrees(self._dyr*self.ddec/asecperrad)
@property
def distancepc(self):
"""
Distance at the current :attr:`jd` in parsecs.
"""
from ..constants import cmperpc,secperyr
if self.distpc0 is None:
return None
else:
return self.distpc0 + self._dyr*self.rv*secperyr*1e5/cmperpc
def _jdhook(self,oldjd,newjd):
self._dyr = (newjd - self._jd0)/365.25
def _getCoordObj(self):
from ..obstools import jd_to_epoch
if self.distancepc is None:
return self.coordclass(self.ra,self.dec,epoch=jd_to_epoch(self.jd))
else:
return self.coordclass(self.ra,self.dec,distancepc=self.distancepc,
epoch=jd_to_epoch(self.jd))
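# Worked sketch (assumed numbers, accessing the private _jd0 attribute for
# brevity): a star at dec0 = 60 deg with a true proper-motion component
# mu_alpha* = 0.5 arcsec/yr has dra = mu_alpha*/cos(dec0) = 1.0 arcsec/yr, so
# after 10 yr the `ra` property moves by 10/3600 deg ~ 2.8e-3 deg.
#   >>> pm = ProperMotionObject('test', ra0=150.0, dec0=60.0, dra=1.0)
#   >>> pm.jd = pm._jd0 + 10 * 365.25
#   >>> pm.ra - pm.ra0      # ~ 2.78e-3 deg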
class KeplerianObject(EphemerisObject):
"""
An object with Keplerian orbital elements.
The following orbital elements are available for the current value of
:attr:`jd` as read-only properties:
* :attr:`a`
* :attr:`e`
* :attr:`i`
* :attr:`Lan`
* :attr:`L`
* :attr:`Lp`
* :attr:`M`
* :attr:`ap`
* :attr:`nu`
Additional read only properties derived from the orbital elements include:
* :attr:`P`
* :attr:`d`
* :attr:`dapo`
* :attr:`dperi`
"""
Etol = None #default set in constructor
    r""" Desired accuracy for iterative calculation of eccentric anomaly (or
true anomaly) from mean anomaly. If None, default tolerance is used
(1.5e-8), or if 0, an analytic approximation will be used (:math:`E \approx
M + e (1 + e \cos M ) \sin M`). This approximation can be 10x faster to
compute but fails for e close to 1.
"""
def __init__(self,**kwargs):
"""
Keywords should be the names of orbital elements. Orbital elements can
be a callable f(T) that returns the orbital element, where T is the
Julian century from J2000. Otherwise, it must be a sequence giving
polynomial coefficients of T, specified in increasing power (i.e.
constant term first).
The orbital elements :attr:`a`,:attr:`e`, :attr:`i`, and :attr:`Lan`
must be specified, as must either :attr:`L` and :attr:`Lp` or :attr:`ap`
and :attr:`M`
Three additional keywords can be supplied:
        :param outcoords:
The coordinate class that should be used as the output. Should be a
:class:`astropysics.coords.coordsys.CartesianCoordinates` subclass,
and the origin will be taken as the center of the Keplerian orbit.
Defaults to
:class:`astropysics.coords.coordsys.RectangularCoordinates`.
        :param outtransfunc:
A function that maps the coordinates from the standard coordinate
system (where the x-y plane is the plane of reference) to a system
matching the type specified by `outcoords`. The function should have
the signature f(x,y,z,jd) and return (xp,yp,zp). It can also be None
            to perform no transformation. Defaults to None.
        :param Etol:
Tolerance for eccentric anomaly (see :attr:`Etol`). Defaults to
None.
Any other keywords will be passed into the constructor for
:class:`EphemerisObject`.
:except TypeError: If necessary orbital elements are missing.
"""
from .coordsys import RectangularCoordinates
self.outcoords = kwargs.pop('outcoords',RectangularCoordinates)
self.outtransfunc = kwargs.pop('outtransfunc',None)
        self.Etol = kwargs.pop('Etol',None)
kwnms = ('a','e','i','Lan','L','Lp','ap','M')
a,e,i,Lan,L,Lp,ap,M = [kwargs.pop(n,None) for n in kwnms]
EphemerisObject.__init__(self,**kwargs)
if a is None:
raise TypeError('Need to provide orbital element `a`')
if e is None:
raise TypeError('Need to provide orbital element `e`')
if i is None:
raise TypeError('Need to provide orbital element `i`')
if Lan is None:
raise TypeError('Need to provide orbital element `Lan`')
self._a = self._makeElement(a)
self._e = self._makeElement(e)
self._i = self._makeElement(i)
self._Lan = self._makeElement(Lan)
if L is not None and Lp is not None:
if ap is not None or M is not None:
raise TypeError('Cannot specify both `L`/`Lp` and `ap`/`M`')
self._L = self._makeElement(L)
self._Lp = self._makeElement(Lp)
elif ap is not None and M is not None:
if L is not None or Lp is not None:
raise TypeError('Cannot specify both `L`/`Lp` and `ap`/`M`')
self._ap = self._makeElement(ap)
self._M = self._makeElement(M)
def _jdhook(self,oldjd,newjd):
from ..obstools import jd2000
self._t = (newjd - jd2000)/36525.
def _makeElement(self,val):
from operator import isSequenceType
if callable(val):
return val
elif isSequenceType(val):
if len(val)==2:
a,b = val
return lambda T:a+b*T
else:
coeffs = tuple(reversed(val))
return lambda T:np.polyval(coeffs,T)
else:
raise TypeError('invalid value for orbit element %s'%val)
@property
def a(self):
"""
The semi-major axis.
"""
return self._a(self._t)
@property
def e(self):
"""
        The orbital eccentricity (dimensionless).
"""
return self._e(self._t)
@property
def i(self):
"""
The orbital inclination in degrees.
"""
return self._i(self._t)
@property
def Lan(self):
"""
The longitude of the ascending node in degrees.
"""
return self._Lan(self._t)
@property
def L(self):
"""
The mean longitude in degrees.
"""
if hasattr(self,'_L'):
return self._L(self._t)
else:
return self.M + self.Lp
@property
def Lp(self):
"""
The longitude of the pericenter in degrees.
"""
if hasattr(self,'_Lp'):
return self._Lp(self._t)
else:
return self.ap + self.Lan
@property
def M(self):
"""
The mean anomaly in degrees.
"""
if hasattr(self,'_M'):
return self._M(self._t)
elif hasattr(self,'_bcsf'): #special hidden correction used for 3000BCE-3000CE
from math import sin,cos
b,c,s,f = self._bcsf
T = self._t
return self.L - self.Lp + b*T*T + c*cos(f*T) + s*sin(f*T)
else:
return self.L - self.Lp
@property
def ap(self):
"""
The argument of the pericenter in degrees.
"""
if hasattr(self,'_ap'):
return self._ap(self._t)
else:
return self.Lp - self.Lan
@staticmethod
def _keplerEq(E,M,e):
r"""
Kepler's equation :math:`-M + E - e \sin(E)` - used for finding E from M
"""
from math import sin
return E-M-e*sin(E)
@property
def E(self):
"""
        Eccentric anomaly in degrees - calculated from mean anomaly with
accuracy given by :attr:`Etol`.
"""
from math import radians,sin,cos,degrees,pi
from scipy.optimize import fsolve
M = radians((self.M + 180)%360 - 180)
        e = self.e  # eccentricity (dimensionless)
E0 = M + e*sin(M)*(1.0 + e*cos(M))
if self.Etol==0:
Er = E0
elif self.Etol is None:
Er = fsolve(KeplerianObject._keplerEq,E0,args=(M,e))[0]
else:
Er = fsolve(KeplerianObject._keplerEq,E0,args=(M,e),xtol=self.Etol)[0]
return degrees(Er)%360
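    # The eccentric anomaly solves Kepler's equation E - e*sin(E) = M; a
    # minimal standalone check of the starting guess and fsolve refinement
    # used above (assumed values):
    #   >>> import numpy as np
    #   >>> from scipy.optimize import fsolve
    #   >>> M, e = np.radians(30.0), 0.2
    #   >>> E0 = M + e*np.sin(M)*(1.0 + e*np.cos(M))
    #   >>> E = fsolve(lambda E: E - M - e*np.sin(E), E0)[0]
    #   >>> np.degrees(E)       # ~ 36.9 deg for these values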
@property
def nu(self):
r"""
        True anomaly in degrees (:math:`-180 < \nu < 180`) - calculated from
        eccentric anomaly with accuracy given by :attr:`Etol`.
"""
from math import radians,sin,cos,atan2,sqrt,degrees
E = radians(self.E)
e = self.e
xv = cos(E) - e
yv = sqrt(1.0 - e*e) * sin(E)
return degrees(atan2(yv,xv))
@property
def d(self):
"""
Current distance from focus of attraction to object.
"""
from math import radians,cos
a = self.a
e = self.e
nu = radians(self.nu)
return a*(1-e*e)/(1+e*cos(nu))
@property
def dperi(self):
"""
Distance from focus of attraction at pericenter.
"""
return self.a*(1 - self.e)
@property
def dapo(self):
"""
Distance from focus of attraction at apocenter.
"""
return self.a*(1 + self.e)
@property
def P(self):
"""
Orbital period (if gravitational parameter GM=1).
"""
return self.a**1.5
def _getCoordObj(self):
from ..obstools import jd_to_epoch
from math import sin,cos,sqrt,radians
#orbital plane coordinates
a = self.a
E = radians(self.E)
e = self.e
xp = a*(cos(E)-e)
yp = a*sqrt(1-e*e)*sin(E)
w = radians(self.ap)
o = radians(self.Lan)
i = radians(self.i)
cw,sw = cos(w),sin(w)
co,so = cos(o),sin(o)
ci,si = cos(i),sin(i)
x = (cw*co-sw*so*ci)*xp + (-sw*co - cw*so*ci)*yp
y = (cw*so+sw*co*ci)*xp + (-sw*so + cw*co*ci)*yp
z = (sw*si)*xp + (cw*si)*yp
if self.outtransfunc:
x,y,z = self.outtransfunc(x,y,z,self._jd)
res = self.outcoords(x,y,z)
#adjust units to AU if the coordinate system has units
if hasattr(res,'unit'):
res.unit = None #convention is that None implies not to do conversions
res.unit = 'au'
#add epoch info if coordinates have an epoch
if hasattr(res,'epoch'):
res.epoch = jd_to_epoch(self.jd)
return res
def getPhase(self,viewobj='Earth',illumobj='Sun'):
"""
        Computes the phase of this object. The phase is computed as viewed
        from `viewobj` as illuminated by `illumobj`.
:param viewobj:
The object viewing this object. Either a string with the name of a
solar system object (from the list in
:func:`list_solar_system_objects`), or a :class:`EphemerisObject`
object. If None, it is taken to be the coordinate origin.
        :param illumobj:
The object illuminating this object. Either a string with the name
of a solar system object (from the list in
:func:`list_solar_system_objects`), or a :class:`EphemerisObject`
object. If None, it is taken to be the coordinate origin.
:returns:
The phase of the object as a float where 0 is new and 1 is full.
"""
from math import sqrt
c = self()
coordcls = c.__class__
x,y,z = c.x,c.y,c.z
if illumobj is None:
xi = yi = zi = 0
else:
if isinstance(illumobj,basestring):
cillum = get_solar_system_ephems(illumobj,self.jd)
else:
cillum = illumobj(self.jd)
if not isinstance(cillum,coordcls):
cillum = cillum.convert(coordcls)
xi,yi,zi = cillum.x,cillum.y,cillum.z
if viewobj is None:
xv = yv = zv = 0
else:
if isinstance(viewobj,basestring):
cview = get_solar_system_ephems(viewobj,self.jd)
else:
cview = viewobj(self.jd)
if not isinstance(cview,coordcls):
cview = cview.convert(coordcls)
xv,yv,zv = cview.x,cview.y,cview.z
xR,yR,zR = x-xv,y-yv,z-zv #view -> self vector
xs,ys,zs = xv-xi,yv-yi,zv-zi #illum -> view vector
xr,yr,zr = x-xi,y-yi,z-zi #illum -> self vector
r = sqrt(xr*xr+yr*yr+zr*zr)
R = sqrt(xR*xR+yR*yR+zR*zR)
s = sqrt(xs*xs+ys*ys+zs*zs)
return (1+(r*r + R*R - s*s)/(2*r*R))/2
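# The value returned by getPhase is the illuminated fraction
# (1 + cos(alpha))/2, where alpha is the phase angle at the object and
# cos(alpha) follows from the law of cosines on the illuminator/viewer/object
# triangle: cos(alpha) = (r**2 + R**2 - s**2)/(2*r*R). Quick limiting cases:
# s = r + R (object between illuminator and viewer) gives cos(alpha) = -1 and
# phase 0 ("new"); r = R + s (viewer between illuminator and object) gives
# cos(alpha) = +1 and phase 1 ("full").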
def get_solar_system_ephems(objname,jds=None,coordsys=None):
"""
Retrieves an :class:`EphemerisObject` object or computes the coordinates for
a solar system object.
:param str objname:
The (case-sensitive) name of the object (see
:func:`list_solar_system_objects` for valid names)
:param jds:
The jds at which to compute the coordinates or None to return
:class:`EphemerisObject` instances.
:param coordsys:
Specifies the coordinate system class of the returned ephemerides. See
:meth:`EphemerisObject.__call__` for the details. Ignored if `jds` is
None.
:returns:
A subclass of :class:`EphemerisObject` if `jds` is None, or the
appropriate coordinate object (or objects) as detailed in
:meth:`EphemerisObject.__call__` if `jds` is not None.
"""
from inspect import isclass
from copy import copy
    #entries in _ss_ephems may be classes or objects, so do the appropriate action.
eobj = _ss_ephems[objname]
if jds is None:
if isclass(eobj):
return eobj()
else:
return copy(eobj)
else:
if isclass(eobj):
return eobj()(jds,coordsys)
else:
return eobj(jds,coordsys)
def list_solar_system_objects():
"""
Returns a list of objects that can be returned by
:func:`get_solar_system_object`.
"""
return _ss_ephems.keys()
_ss_ephems = {}
def set_solar_system_ephem_method(meth=None):
"""
Sets the type of ephemerides to use. Must be 'keplerian' for now.
"""
global _ss_ephems
if meth is None:
meth = 'keplerian'
if meth=='keplerian':
_ss_ephems = _keplerian_ephems()
else:
        raise ValueError('Solar System ephemerides method %s not available'%meth)
#Add in Simon 94 Moon and SOFA earth pv if needed
if 'Moon' not in _ss_ephems:
_ss_ephems['Moon'] = Moon
if 'Earth' not in _ss_ephems:
_ss_ephems['Earth'] = Earth
#<--------------Moon location from Simon 94------------------------------------>
def _ecl_to_icrs(x,y,z,jd):
"""
tilt from ecliptic to ICRS orientation
"""
#from math import sin,cos,radians
#obliquity = radians(23.43928)
#seps = sin(obliquity)
#ceps = cos(obliquity)
seps = 0.39777697800876388
ceps = 0.91748213920828747
yp = ceps*y - seps*z
zp = seps*y + ceps*z
return x,yp,zp
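# The hard-coded constants above are sin and cos of the mean obliquity used
# elsewhere in this module (23.43928 deg); a quick consistency check:
#   >>> import numpy as np
#   >>> eps = np.radians(23.43928)
#   >>> np.sin(eps), np.cos(eps)    # ~ (0.3977770, 0.9174821)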
class Moon(KeplerianObject):
"""
A :class:`KeplerianObject` for Earth's moon based on the model of Simon et
al. 1994. The osculating elements for the "1992 values" are used to compute
the location. The output coordinates are
:class:`astropysics.coords.coordsys.RectangularGCRSCoordinates`.
"""
def __init__(self):
from math import degrees
from .coordsys import RectangularGCRSCoordinates
from ..obstools import calendar_to_jd
from ..constants import aupercm,asecperrad
auperkm = aupercm*1e5
degperasec = degrees(1./asecperrad)
jd4000b = calendar_to_jd((-3999,1,1))
jd8000 = calendar_to_jd((8000,1,1))
kw = {'name':'Moon','validjdrange':(jd4000b,jd8000),
'outcoords':RectangularGCRSCoordinates, #origin already geocentric
'outtransfunc':_ecl_to_icrs}
kw['a'] = (383397.7725*auperkm,.0040*auperkm) #Simon values are in km
kw['e'] = (.055545526,-1.6e-8)
kw['i'] = (5.15668983,
-.00008*degperasec,
.02966*degperasec,
-.000042*degperasec,
-.00000013*degperasec)
kw['Lp'] = (83.35324312,
14643420.2669*degperasec,
-38.2702*degperasec,
-.045047*degperasec,
.00021301*degperasec)
kw['Lan'] = (125.04455501,
-6967919.3631*degperasec,
6.3602*degperasec,
.007625*degperasec,
.00003586*degperasec)
kw['L'] = (218.31664563,
1732559343.48470*degperasec,
-6.3910*degperasec,
.006588*degperasec,
-.00003169*degperasec)
KeplerianObject.__init__(self,**kw)
def getPhase(self,viewobj=None,illumobj=None):
"""
Computes the phase of the Moon. This is computed as viewed from the
Earth and illuminated by the Sun if `viewobj` and `illumobj` are None -
otherwise, see :meth:`KeplerianObject.getPhase` for the meaning of the
parameters.
:returns: A float where 1 is full and 0 is new.
"""
if viewobj is None and illumobj is None:
from math import sqrt
c = self()
xg,yg,zg = c.x,c.y,c.z #relative to earth
xe,ye,ze = earth_pos_vel(self.jd,False)[0] #heliocentric
xm = xe + xg
ym = ye + yg
zm = ze + zg
r = sqrt(xm*xm+ym*ym+zm*zm)
R = sqrt(xg*xg+yg*yg+zg*zg)
s = sqrt(xe*xe+ye*ye+ze*ze)
return (1+(r*r + R*R - s*s)/(2*r*R))/2
else:
return KeplerianObject.getPhase(self,viewobj,illumobj)
#<--------------Earth location and velocity, based on SOFA epv00--------------->
# SIMPLIFIED SOLUTION from the planetary theory VSOP2000
# <NAME>, <NAME>, 2001,
# Celes. Mechanics & Dyn. Astron., 80, 3/4, 205-213
def _load_earth_series(datafn='earth_series.tab'):
"""
Load series terms from VSOP2000 simplified solution
"""
from ..utils.io import get_package_data
from numpy import array,matrix
from math import sqrt
lines = [l for l in get_package_data(datafn).split('\n') if not l.startswith('#') if not l=='']
lst = None
lsts = {}
for l in lines:
if l.endswith(':'):
#new variable
lsts[l[:-1]] = lst = []
else:
ls = l.split(',')
if ls[-1]=='':
lst.extend(ls[:-1])
else:
lst.extend(ls)
res = {}
#first add all matricies
for k,v in lsts.items():
if k.endswith('mat'):
mat = matrix(v,dtype=float)
            res[k] = mat.reshape(int(sqrt(mat.size)), int(sqrt(mat.size)))
#now construct all the x,y,z combination series'
coeffnms = set([k[:-1] for k in lsts.keys() if not k.endswith('mat')])
    #pad any coeff sets where x,y, and z don't match so that the missing entries are 0
for cnm in coeffnms:
xs = lsts[cnm+'x']
ys = lsts[cnm+'y']
zs = lsts[cnm+'z']
mx = max(max(len(xs),len(ys)),len(zs))
if len(xs)<mx:
for i in range(mx-len(xs)):
xs.append(0)
if len(ys)<mx:
for i in range(mx-len(ys)):
ys.append(0)
if len(zs)<mx:
for i in range(mx-len(zs)):
zs.append(0)
res[cnm+'coeffs'] = array([xs,ys,zs],dtype=float)
return res
_earth_series_coeffs = _load_earth_series()
def _compute_earth_series(t,coeffs0,coeffs1,coeffs2):
"""
Internal function to computes Earth location/velocity components from series
coefficients.
:param t: T = JD - JD_J2000
:param coeffs0: constant term
:param coeffs1: T^1 term
:param coeffs2: T^2 term
:returns: pos,vel
"""
#T^0 terms
acs = coeffs0[:,0::3]
bcs = coeffs0[:,1::3]
ccs = coeffs0[:,2::3]
ps = bcs + ccs*t
pos = np.sum(acs*np.cos(ps),axis=1)
vel = np.sum(-acs*ccs*np.sin(ps),axis=1)
#T^1 terms
acs = coeffs1[:,0::3]
bcs = coeffs1[:,1::3]
ccs = coeffs1[:,2::3]
cts = ccs*t
ps = bcs + cts
cps = np.cos(ps)
pos +=
|
np.sum(acs*t*cps,axis=1)
|
numpy.sum
|
"""
Module for training and evaluating position-estimating neural net
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from matplotlib import cm
import plotly.plotly as py
import plotly.tools as tls
import numpy as np
import tensorflow as tf
from tensorflow import keras
import generator
def split_data(data):
"""
    Split data into target and distance arrays.
"""
targets = []
distances = []
for data_set in data:
targets.append(data_set[0])
distances.append(data_set[1])
return np.array(targets, dtype=float), np.array(distances, dtype=float)
def build_model(dimension_count, sensor_count):
"""
    Configure and compile the TensorFlow model.
"""
model = keras.Sequential([
keras.layers.Dense(16 * int(
|
np.math.sqrt(dimension_count * sensor_count)
|
numpy.math.sqrt
|
import numpy as np
import utilCV
import pylab
import cv2
import CVml
from smooth import ROF_denose
from mpl_toolkits.mplot3d import axes3d
import features
import transformation
import photography
import camera
import segmentation
def test_pca_():
imgs_str = "imgs/data/a_thumbs/"
namesamples = utilCV.get_dir_files(imgs_str, ".jpg")
img_ = cv2.imread(namesamples[0], cv2.IMREAD_UNCHANGED)
h, w = img_.shape
l_imgs = np.array([cv2.imread(img_name, cv2.IMREAD_UNCHANGED) for img_name in namesamples])
Mproj, var, mean = CVml.principal_component_analisys(l_imgs)
import pickle
with open('"imgs/data/a_thumbs/font_pca_modes.pkl', 'wb') as f:
pickle.dump(mean, f)
pickle.dump(Mproj, f)
pylab.figure()
pylab.gray()
pylab.subplot(2, 4, 1)
pylab.imshow(mean.reshape(h, w))
# seven images with larger variance (mode statistics)
for i in range(7):
pylab.subplot(2, 4, i+2)
pylab.imshow(Mproj[i].reshape(h, w))
pylab.show()
return
def test_rof_():
img_str = "imgs/test.jpg"
img = cv2.imread(img_str, cv2.IMREAD_GRAYSCALE)
h, w = img.shape
noise_img = utilCV.add_noise_r(img, sigma=10)
U, T = ROF_denose(noise_img, noise_img, TV_weight=100)
simg_U = "denoise_img"
simg_T = "diff_img"
utilCV.show_compare_r([img, noise_img, U, T], ['original', 'noise', simg_U, simg_T], 2, 2)
return
def test_harris_():
img_str = "imgs/test.jpg"
img = cv2.imread(img_str, cv2.IMREAD_GRAYSCALE)
corns = features.corner_detector(img, sigma=3, min_dist=7, threshold=0.05)
pylab.figure()
pylab.gray()
pylab.imshow(img)
pylab.plot([p[1] for p in corns], [p[0] for p in corns], 'o', markersize=2)
pylab.axis('off')
pylab.show()
return
def test_matches_Harris_():
img_str1 = "imgs/data/crans_1_small.jpg"
img_str2 = "imgs/data/crans_2_small.jpg"
img1 = cv2.imread(img_str1, cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread(img_str2, cv2.IMREAD_GRAYSCALE)
px_width = 5
Harris_coord_img1 = features.corner_detector(img1, sigma=5, min_dist=px_width+2, threshold=0.15)
Harris_coord_img2 = features.corner_detector(img2, sigma=5, min_dist=px_width+2, threshold=0.15)
desc_img1 = features.calc_desciptors(img1, Harris_coord_img1, px_width)
desc_img2 = features.calc_desciptors(img2, Harris_coord_img2, px_width)
# slow operation
best_matches = features.find_best_matches(desc_img1, desc_img2)
pylab.figure()
pylab.gray()
res_img = utilCV.concat_imgs(img1, img2)
pylab.imshow(res_img)
offset_cols_ = img1.shape[1]
for i, j in enumerate(best_matches):
if (j > 0):
pylab.plot([Harris_coord_img1[i][1], Harris_coord_img2[j][1] + offset_cols_],[Harris_coord_img1[i][0], Harris_coord_img2[j][0]], 'c')
pylab.axis('off')
pylab.show()
return
def test_triangle_():
x, y = np.array(np.random.standard_normal((2, 100)))
tri = utilCV.triangulate_points(x, y)
pylab.figure()
for t in tri:
t_ext = [t[0], t[1], t[2], t[0]]
pylab.plot(x[t_ext], y[t_ext], 'r')
pylab.plot(x, y, '*')
pylab.axis('off')
pylab.show()
return
def test_warp_tri_():
img_str1 = "imgs/data/sunset_tree.jpg"
img_str2 = "imgs/data/turningtorso1.jpg"
img1 = cv2.imread(img_str1, cv2.IMREAD_UNCHANGED)
img2 = cv2.imread(img_str2, cv2.IMREAD_UNCHANGED)
x_, y_ = np.meshgrid(range(5), range(6))
x = (img1.shape[1]/4) * x_.flatten()
y = (img1.shape[0]/5) * y_.flatten()
triangles = utilCV.triangulate_points(x, y)
temp_pts = np.loadtxt('imgs/data/turningtorso1_points.txt', np.int32)
src_pts = np.vstack((y, x, np.ones((1, len(x)))))
proj_pts = np.int32(np.vstack((temp_pts[:,1], temp_pts[:,0], np.ones((1, len(temp_pts))))))
#res = transformation.partial_warp_affine(img1, img2, src_pts, proj_pts, triangles)
pylab.figure()
pylab.imshow(img2)
for tri_idx in triangles:
t_ext = [tri_idx[0], tri_idx[1], tri_idx[2], tri_idx[0]]
pylab.plot(proj_pts[1][t_ext], proj_pts[0][t_ext],'g')
pylab.axis('off')
pylab.show()
return
def test_imgreg_():
filename = 'imgs/data/jkfaces.xml'
    pts = utilCV.rd_pts_faces_xml(filename, 'face')  # read face marker points (eyes and mouth)
_ = photography.rigid_alignment(pts, 'imgs/data/jkfaces/')
return
def test_cameraobj_():
pts = np.loadtxt('imgs/data/house.p3d').T
pts = np.vstack((pts, np.ones(pts.shape[1])))
# camera parameter
P = np.hstack((np.eye(3), np.array([[0], [0], [-10]])))
cam_ = camera.Camera(P)
x = cam_.project(pts)
# rotate camera around random axis
r = 0.05 * np.random.rand(3)
rot = cam_.rotation_matrix(r)
test_it = 20
pylab.figure()
for i in range(test_it):
cam_.P = np.dot(cam_.P, rot)
x = cam_.project(pts)
pylab.plot(x[0], x[1], 'k.')
pylab.show()
K = np.array([[1000, 0, 500], [0, 1000, 300], [0, 0, 1]])
tmp = cam_.rotation_matrix([0, 0, 1])[:3, :3]
Rt = np.hstack((tmp, np.array([[50], [40], [30]])))
cam_2 = camera.Camera(np.dot(K, Rt))
print(K)
print(Rt)
print(cam_2.get_cam_param())
return
def prepare_mview_(num_view=3):
# load 2d points from dataset
points2D = [np.loadtxt('imgs/data/merton/00' + str(i+1) + '.corners').T for i in range(num_view)]
# load 3d points from dataset
points3D = np.loadtxt('imgs/data/merton/p3d').T
# load correspondence
corr_ = np.genfromtxt('imgs/data/merton/nview-corners', dtype=np.int32, missing_values='*')
# load camera matrix
P = [camera.Camera(np.loadtxt('imgs/data/merton/00' + str(i+1) + '.P')) for i in range(num_view)]
return points2D, points3D, corr_, P
def test_3d_():
fig = pylab.figure()
ax = fig.gca(projection='3d')
X,Y,Z = axes3d.get_test_data(0.25)
ax.plot(X.flatten(), Y.flatten(), Z.flatten(), 'o')
pylab.show()
return
def test_epipolar_pts_():
NUM_VIEW = 3
img1 = cv2.imread('imgs/data/merton/001.jpg', cv2.IMREAD_UNCHANGED)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.imread('imgs/data/merton/002.jpg', cv2.IMREAD_UNCHANGED)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
points2D, points3D, corr, P = prepare_mview_()
pts = np.vstack((points3D, np.ones(points3D.shape[1])))
p = P[0].project(pts)
pylab.figure()
pylab.imshow(img1)
pylab.plot(points2D[0][0], points2D[0][1], 'o', markersize=3)
pylab.axis('off')
pylab.figure()
pylab.imshow(img1)
pylab.plot(p[0], p[1], 'r.')
pylab.axis('off')
fig = pylab.figure()
ax = fig.gca(projection='3d')
ax.plot(points3D[0], points3D[1], points3D[2], 'k.')
pylab.show()
return
def test_epipole_():
EPIPOLE_NUM = 5
img1 = cv2.imread('imgs/data/merton/001.jpg', cv2.IMREAD_UNCHANGED)
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.imread('imgs/data/merton/002.jpg', cv2.IMREAD_UNCHANGED)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
points2D, points3D, corr, P = prepare_mview_()
idx = (corr[:, 0] >= 0) & (corr[:, 1] > 0)
pts1 = utilCV.homogeneous_transfrom(points2D[0][:, corr[idx, 0]])
pts2 = utilCV.homogeneous_transfrom(points2D[1][:, corr[idx, 1]])
# fundamental matrix
F = transformation.find_fundamental_matrix(pts1, pts2)
# epipole
e = transformation.find_epipole(F)
# draw epipole
pylab.figure()
pylab.imshow(img1)
h, w, _ = img1.shape
for i in range(EPIPOLE_NUM):
line = np.dot(F, pts2[:, i])
param_line = np.linspace(0, w, 100)
line_val = np.array([(line[2] + line[0]*pl)/(-line[1]) for pl in param_line])
# points of straight line inside a image
idx = (line_val >= 0) & (line_val < h)
pylab.plot(param_line[idx], line_val[idx], linewidth=2)
# show epipole
#pylab.plot(e[0]/e[2], e[1]/e[2], 'r*')
pylab.axis('off')
pylab.figure()
pylab.imshow(img2)
for i in range(EPIPOLE_NUM):
pylab.plot(pts2[0, i], pts2[1, i], 'o')
pylab.axis('off')
pylab.show()
return
def test_stereo_depth_():
img1 = cv2.imread('imgs/data/tsukuba/scene1.row3.col3.ppm', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('imgs/data/tsukuba/scene1.row3.col4.ppm', cv2.IMREAD_GRAYSCALE)
steps = 12
start = 3
# width of block norm cross-correlation
width = 9
res_img = photography.plane_sweep_ncc(img1, img2, start, steps, width)
res_img_g = photography.plane_sweep_ncc_gauss(img1, img2, start, steps, width=3)
fig, axes = pylab.subplots(nrows=1, ncols=2)
pylab.gray()
pylab.axis('off')
axes[0].imshow(res_img)
axes[1].imshow(res_img_g)
pylab.show()
return
def test_cluster_():
import scipy.cluster.vq
class1 = 1.5 * np.random.randn(100, 2)
class2 = np.random.randn(100, 2) + np.array([5, 5])
feature = np.vstack((class1, class2))
centr, var = scipy.cluster.vq.kmeans(feature, 2)
code, dist = scipy.cluster.vq.vq(feature, centr)
pylab.figure()
idx = np.where(code == 0)[0]
pylab.plot(feature[idx, 0], feature[idx, 1], '*')
idx = np.where(code == 1)[0]
pylab.plot(feature[idx, 0], feature[idx, 1], 'r.')
pylab.plot(centr[:, 0], centr[:, 1], 'go')
pylab.show()
return
def test_cluster_font_():
K = 4
imgs_str = "imgs/data/a_selected_thumbs/"
namesamples = utilCV.get_dir_files(imgs_str, ".jpg")
img_len = len(namesamples)
'''
import pickle
with open('imgs/data/a_selected_thumbs/a_pca_modes.pkl', 'rb') as f:
imgmean_ = pickle.load(f)
V = pickle.load(f)
'''
ll_imgs = np.array([cv2.imread(img_name, cv2.IMREAD_UNCHANGED) for img_name in namesamples])
V, var, imgmean_ = CVml.principal_component_analisys(ll_imgs)
imgmean = imgmean_.flatten()
# linear images
ll_imgs = np.array([img.flatten() for img in ll_imgs], np.float32)
# project to first 40 component
projected_img = np.array([np.dot(V[:40], ll_imgs[i] - imgmean) for i in range(img_len)])
import scipy.cluster.vq
projected = scipy.cluster.vq.whiten(projected_img) # normalize feature variance to 1
centroid, distortion = scipy.cluster.vq.kmeans(projected, K)
code, distance = scipy.cluster.vq.vq(projected, centroid)
# draw result
for k in range(K):
idx = np.where(code == k)[0]
pylab.figure()
pylab.gray()
for i in range(np.minimum(len(idx), 40)):
pylab.subplot(K, 10, i+1)
pylab.imshow(ll_imgs[idx[i]].reshape((25, 25)))
pylab.axis('off')
pylab.show()
return
def test_cluster_pixel_():
steps = 50 # step block
K = 3 # r g b
img_ = cv2.imread('imgs/data/empire.jpg', cv2.IMREAD_UNCHANGED)
img = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
# divide image on area
dx_ = int(img.shape[0] / steps)
dy_ = int(img.shape[1] / steps)
import scipy.cluster.vq
import skimage.transform
# calc for color features for area
features = []
for x in range(steps):
for y in range(steps):
R = np.mean(img[x*dx_:(x+1)*dx_, y*dy_:(y+1)*dy_, 0])
G = np.mean(img[x*dx_:(x+1)*dx_, y*dy_:(y+1)*dy_, 1])
B = np.mean(img[x*dx_:(x+1)*dx_, y*dy_:(y+1)*dy_, 2])
features.append([R,G,B])
features = np.array(features, np.float32)
centroid, distortion = scipy.cluster.vq.kmeans(features, K)
code, distance = scipy.cluster.vq.vq(features, centroid)
codeimg_ = code.reshape(steps, steps)
#codeimg = cv2.resize(codeimg_, img.shape[:2], interpolation=cv2.INTER_NEAREST) #scipy.misc.imresize(codeimg_, img.shape[:2], interp='nearest')
codeimg = skimage.transform.resize(codeimg_, img.shape[:2], order=0)
pylab.figure()
pylab.imshow(codeimg)
pylab.show()
return
def test_hierarchical_cluster_():
import scipy.cluster.vq
class1 = 1.5 * np.random.randn(100, 2)
class2 = np.random.randn(100, 2) + np.array([5, 5])
features = np.vstack((class1, class2))
tree = CVml.hierarchical_cluster(features)
clusters = tree.extract_cluster(5)
print('Number of clusters: ' + str(len(clusters)))
for cl in clusters:
print(cl.get_cluster_elements())
return
def test_spectral_cluster_():
imgs_str = "imgs/data/a_selected_thumbs/"
namesamples = utilCV.get_dir_files(imgs_str, ".jpg")
img_len = len(namesamples)
l_imgs = np.array([cv2.imread(img_name, cv2.IMREAD_UNCHANGED) for img_name in namesamples])
# first k's eigenvectors
k = 5
_, _, code, _ = CVml.spectral_cluster(l_imgs, img_len, k)
for cl in range(k):
idx = np.where(code == cl)[0]
pylab.figure()
for i in range(np.minimum(len(idx), 39)):
img_ = cv2.imread(namesamples[idx[i]], cv2.IMREAD_UNCHANGED)
img = cv2.cvtColor(img_, cv2.COLOR_BGR2RGB)
pylab.subplot(4, 10, i+1)
pylab.imshow(img)
pylab.axis('equal')
pylab.axis('off')
pylab.show()
return
def test_knn_generate_data_():
N = 200
# normal distribution
class_1 = 0.6 * np.random.randn(N, 2)
class_2 = 1.2 * np.random.randn(N, 2) + np.array([5, 1])
labels = np.hstack((np.ones(N), -np.ones(N)))
import pickle
with open('imgs/data/points_normal.pkl', 'wb') as f:
pickle.dump(class_1, f)
pickle.dump(class_2, f)
pickle.dump(labels, f)
# normal distribution and ring around them
class_1 = 0.6 * np.random.randn(N, 2)
r = 0.8 * np.random.randn(N, 1) + 5
    angle = 2 * np.pi * np.random.randn(N, 1)
class_2 = np.hstack((r * np.cos(angle), r * np.sin(angle)))
labels = np.hstack((np.ones(N), -np.ones(N)))
with open('imgs/data/points_ring.pkl', 'wb') as f:
pickle.dump(class_1, f)
pickle.dump(class_2, f)
pickle.dump(labels, f)
return
def test_knn_():
import pickle
with open('imgs/data/points_normal.pkl', 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
knnmodel = CVml.KnnClassifier(labels, np.vstack((class_1, class_2)))
with open('imgs/data/points_normal_test.pkl', 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
utilCV.plot2D_boundary([-6, 6, -6, 6], [class_1, class_2], knnmodel, [1, -1])
pylab.show()
return
def prepare_gesture_data(gesture_dir, type_dir, pathname="imgs/data/hog_data/", template_size=(50, 50)):
size_d = len(gesture_dir)
features_ = []
labels = []
# prepare train data set
for i in range(size_d):
sub_dirs = utilCV.get_dir_files(pathname+gesture_dir[i]+type_dir, ".ppm")
for str_img in sub_dirs:
img = cv2.resize(cv2.cvtColor(cv2.imread(str_img, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB), template_size)
desc = features.hog_descs(img)
features_.append(desc.flatten())
labels.append(str_img.split('/')[-1][0])
features_ = np.array(features_)
labels = np.array(labels)
return features_, labels
def test_hog_knn_classify_():
dir_ = "imgs/data/hog_data/"
gesture_dir = ['A/', 'B/', 'C/', 'Five/', 'Point/', 'V/']
template_size = (50, 50)
size_d = len(gesture_dir)
# prepare train data set
type_dir = 'train/'
features_, labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
# prepare test data set
type_dir = 'test/'
test_features_, test_labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
# classify
classnames = np.unique(labels)
nbr_classes = len(classnames)
knn_classifier = CVml.KnnClassifier(labels, features_)
K = 1
res = np.array([knn_classifier.classify(test_features_[i], K) for i in range(len(test_labels))])
    # accuracy
    acc = np.sum(1.0 * (res == test_labels)) / len(test_labels)
    print("Accuracy = %f" % acc)
class_ind = dict([(classnames[i], i) for i in range(nbr_classes)])
confuse = np.zeros((nbr_classes, nbr_classes))
for i in range(len(test_labels)):
confuse[class_ind[res[i]], class_ind[test_labels[i]]] += 1
    print('Confusion matrix:')
print(classnames)
print(confuse)
return
def test_bayes_():
import pickle
with open('imgs/data/points_normal.pkl', 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
bsmodel = CVml.BayesClassifier()
bsmodel.train([class_1, class_2], [1, -1])
with open('imgs/data/points_normal_test.pkl', 'rb') as f:
class_1 = pickle.load(f)
class_2 = pickle.load(f)
labels = pickle.load(f)
print(bsmodel.classify(class_1[:10])[0])
utilCV.plot2D_boundary([-6, 6, -6, 6], [class_1, class_2], bsmodel, [1, -1])
pylab.show()
return
def test_bayes_classifier_():
dir_ = "imgs/data/hog_data/"
gesture_dir = ['A/', 'B/', 'C/', 'Five/', 'Point/', 'V/']
template_size = (50, 50)
size_d = len(gesture_dir)
# prepare train data set
type_dir = 'train/'
features_, labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
type_dir = 'test/'
test_features_, test_labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
V_, _, mean = CVml.principal_component_analisys(features_)
    # FIXME: currently broken -- the covariance matrix yields NaN eigenvalues/eigenvectors
    V = V_[:50]
features_ = np.array([np.dot(V, feature - mean) for feature in features_])
test_features = np.array([np.dot(V, feature - mean) for feature in test_features_])
# classify
classnames = np.unique(labels)
nbr_classes = len(classnames)
bsmodel = CVml.BayesClassifier()
bslist = [features_[np.where(labels==cl)[0]] for cl in classnames]
bsmodel.train(bslist, classnames)
res = bsmodel.classify(test_features)[0]
    # compute accuracy
    acc = np.sum(1.0 * (res == test_labels)) / len(test_labels)
    print("Accuracy = %f" % acc)
class_ind = dict([(classnames[i], i) for i in range(nbr_classes)])
confuse = np.zeros((nbr_classes, nbr_classes))
for i in range(len(test_labels)):
confuse[class_ind[res[i]], class_ind[test_labels[i]]] += 1
    print('Confusion matrix:')
print(classnames)
print(confuse)
return
def test_svm_():
import pickle
import libsvm.svmutil as svmutil
with open('imgs/data/points_normal.pkl', 'rb') as f:
class_1 = np.array(pickle.load(f))
class_2 = np.array(pickle.load(f))
labels = pickle.load(f)
samples = class_1.tolist() + class_2.tolist()
# make svm classifier
prob = svmutil.svm_problem(labels, samples)
param = svmutil.svm_parameter('-t 2')
# train svm-classifier
m_ = svmutil.svm_train(prob, param)
res = svmutil.svm_predict(labels, samples, m_)
with open('imgs/data/points_normal_test.pkl', 'rb') as f:
class_1 = np.array(pickle.load(f))
class_2 = np.array(pickle.load(f))
labels = pickle.load(f)
class Predict(object):
def __init__(self):
pass
def classify_2d(self, x, y, model=m_):
pack = list(zip(x, y))
return np.array(svmutil.svm_predict([0]*len(x), pack, model)[0])
utilCV.plot2D_boundary([-6, 6, -6, 6], [class_1, class_2], Predict(), [-1, 1])
pylab.show()
return
def test_svm_classifier_():
dir_ = "imgs/data/hog_data/"
gesture_dir = ['A/', 'B/', 'C/', 'Five/', 'Point/', 'V/']
template_size = (50, 50)
size_d = len(gesture_dir)
# prepare train data set
type_dir = 'train/'
features_, labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
features_ = features_.tolist()
type_dir = 'test/'
test_features_, test_labels = prepare_gesture_data(gesture_dir, type_dir, dir_, template_size)
test_features_ = test_features_.tolist()
import libsvm.svmutil as svmutil
# classify
classnames = np.unique(labels)
nbr_classes = len(classnames)
# function for label transformation
transl = {}
for i, cl in enumerate(classnames):
transl[cl], transl[i] = i, cl
# make svm classifier
def convert_labels(labels, transl):
# Convert between strings and numbers.
return [transl[l] for l in labels]
prob = svmutil.svm_problem(convert_labels(labels, transl), features_)
param = svmutil.svm_parameter('-t 0')
# train svm-classifier
svmmodel = svmutil.svm_train(prob, param)
res = svmutil.svm_predict(convert_labels(labels, transl), features_, svmmodel)
# check test set
res = svmutil.svm_predict(convert_labels(test_labels, transl), test_features_, svmmodel)[0]
res = convert_labels(res, transl)
    # compute accuracy
    acc = np.sum(1.0 * (np.array(res) == test_labels)) / len(test_labels)
    print("Accuracy = %f" % acc)
class_ind = dict([(classnames[i], i) for i in range(nbr_classes)])
confuse =
|
np.zeros((nbr_classes, nbr_classes))
|
numpy.zeros
|
import os
import os.path as op
try:
import re, json
import numpy as np
from tqdm import tqdm
from pandas import read_csv, DataFrame, isnull
except:
raise ImportError('Unable to import core tools (pandas, re, json, ' +
'tqdm)... must install to continue')
try:
import matplotlib.pyplot as plt
except:
raise ImportError('Unable to import matplotlib, install to continue.')
class MEEGbuddy:
'''
    Takes in one or more .fif files for a subject that are combined together
    into an MNE raw object, along with one behavior csv file (assumed to be
    combined across runs).
    Bad channels are auto-marked, ICA components are auto-marked and plots are
    presented for you to confirm, epochs are made, and autoreject is applied.
    All data is saved automatically in a BIDS-inspired format.
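
    A minimal usage sketch (file names and argument values here are
    hypothetical, shown only to illustrate the intended call order):

        data = MEEGbuddy(subject='S01', task='mytask',
                         fdata='S01_mytask_raw.fif',
                         behavior='S01_mytask_behavior.csv',
                         behavior_description=description,
                         eeg=True, subjects_dir='./derivatives')
        data.preprocess()  # autoMarkBads, findICA, makeEpochs, markAutoReject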
'''
def __init__(self, subject=None, session=None, run=None, task=None,
fdata=None, behavior=None, behavior_description=None,
eeg=False, meg=False, ecog=False, seeg=False,
tbuffer=1, subjects_dir=None, epochs=None, event=None,
fs_subjects_dir=None, bemf=None, srcf=None, transf=None,
preload=True, seed=551832, file=None):
'''
subject : str
the name/id of the subject
session : str
the name/id of the session (optional)
run : str
the name/id of the run (optional)
task : str
the name of the task (optional)
fdata : str|list of str
one or more .fif files with event triggers and EEG data
behavior : str
a csv file with variable names indexing a list of attributes
        behavior_description : dict
            dictionary describing the behavior attributes; every key must match a
            column in the behavior csv, and a 'Trial' column is required.
            If a 'Baseline' sub-key is present under 'Trial', epochs will be
            baselined using its 'Channel', 'Time Min' and 'Time Max' sub-keys,
            which are required.
            Events should be included under 'Events' with a key for each event;
            each event's value is a dictionary with 'Channel', 'Time Min' and
            'Time Max'.
            Responses should be included under 'Responses' in the same way as
            'Events', but with an additional 'No Response' sub-key for each
            response.
            Trials to be excluded should be listed under 'Exclude Trials'.
            All other columns can be used to condition the data, such as comparing
            epochs by 'Stimulus Type' (see the hypothetical example at the end of
            this docstring).
eeg : bool
whether eeg data is present
meg : bool
whether meg data is present
ecog : bool
whether ecog data is present
seeg : bool
whether seeg data is present
        tbuffer : int
amount of time to buffer epochs for time frequency analyses
subjects_dir : str
directory where derivative data should be stored
epochs : mne.Epochs object
an epochs object to save in order to start with epochs instead of raw data
event : str
the name of the event for the epochs if starting with epochs
fs_subjects_dir : str
the filepath of the freesurfer reconstruction directory
bemf : str
the filepath of the boundary element model file
srcf : str
the filepath of the source file
transf : str
the filepath of the coordinate transform file (from mne_analyze)
preload : bool or str
            Use True to load data into RAM, or supply a filename string to use memory mapping for large files
seed : int
The number to seed random analyses
file : None or str
This is used to load MEEGbuddy from a json sidecar file with the descriptors already given
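
        Example: a hypothetical behavior_description (channel names, times
        and trial indices are placeholders; every top-level key must be a
        column in the behavior csv):

            behavior_description = {
                'Trial': {
                    'Baseline': {'Channel': 'STI001',
                                 'Time Min': -0.5, 'Time Max': 0.0},
                    'Events': {
                        'Stimulus': {'Channel': 'STI002',
                                     'Time Min': -0.5, 'Time Max': 1.0}},
                    'Responses': {
                        'Response': {'Channel': 'STI003',
                                     'Time Min': -1.0, 'Time Max': 1.0,
                                     'No Response': [3, 17]}},
                    'Exclude Trials': [0]},
                'Stimulus Type': 'free-form description of this column'}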
'''
from mne import set_log_level
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
set_log_level("ERROR")
if file is None:
meta_data = {}
if subjects_dir is None:
subjects_dir = os.getcwd()
                print('No subjects_dir supplied, defaulting to the current working directory')
meta_data['Subjects Directory'] = subjects_dir
name = get_bids_basename(subject, session, run, task, meg, eeg, ecog, seeg)
file = op.join(subjects_dir, 'meta_data', name + '.json')
if not op.isdir(os.path.dirname(file)):
os.makedirs(os.path.dirname(file))
if fdata is None:
raise ValueError('Please supply raw file or list of files to combine')
if not isinstance(fdata, list):
fdata = [fdata]
meta_data['Functional Data'] = fdata
if not any([meg, eeg, ecog, seeg]):
raise ValueError('All modalities are False, at least one must be changed to True')
meta_data['MEG'] = meg
meta_data['EEG'] = eeg
meta_data['ECOG'] = ecog
meta_data['SEEG'] = seeg
if subject is None:
raise ValueError('Subject name is required to differentiate subjects')
meta_data['Subject'] = subject
meta_data['Session'] = session
meta_data['Run'] = run
meta_data['Task'] = task
processes = ['meta_data', 'raw', 'epochs', 'source_estimates', 'plots',
'analyses', 'behavior', 'preprocessing']
meta_data['Process Directories'] = {}
for process in processes:
meta_data['Process Directories'][process] = \
op.join(subjects_dir, process)
try:
df = read_csv(behavior)
except:
raise ValueError('Behavior must be the path to a csv file')
            if 'Trial' not in df.columns:
                raise ValueError('A Trial column is needed in the behavior file. ' +
                                 'MEEGbuddy is for trial-based experiments; ' +
                                 'if this does not describe your analysis, you ' +
                                 'may well be better off with another tool')
if not all([des in list(df.columns) for des in behavior_description]):
raise ValueError('All behavior columns are not described in ' +
'behavior description dictionary')
meta_data['Behavior'] = behavior
meta_data['Behavior Description'] = behavior_description
meta_data['Exclude Trials'] = \
(behavior_description['Trial']['Exclude Trials'] if
'Exclude Trials' in behavior_description['Trial'] else [])
if 'Events' in behavior_description['Trial']:
meta_data['Events'] = {
stim: [behavior_description['Trial']['Events'][stim]['Channel'],
behavior_description['Trial']['Events'][stim]['Time Min'],
behavior_description['Trial']['Events'][stim]['Time Max']]
for stim in behavior_description['Trial']['Events']}
else:
meta_data['Events'] = {}
if 'Responses' in behavior_description['Trial']:
meta_data['Responses'] = {
response: [behavior_description['Trial']['Responses'][response]['Channel'],
behavior_description['Trial']['Responses'][response]['Time Min'],
behavior_description['Trial']['Responses'][response]['Time Max']]
for response in behavior_description['Trial']['Responses']}
meta_data['No Response'] = {
resp: behavior_description['Trial']['Responses'][resp]['No Response']
for resp in behavior_description['Trial']['Responses'] if
'No Response' in behavior_description['Trial']['Responses'][resp]
}
else:
meta_data['Responses'], meta_data['No Response'] = {}, {}
meta_data['Baseline'] = \
[behavior_description['Trial']['Baseline']['Channel'],
behavior_description['Trial']['Baseline']['Time Min'],
behavior_description['Trial']['Baseline']['Time Max']]
meta_data['Time Buffer'] = tbuffer
meta_data['Preload'] = preload
meta_data['Seed'] = seed
if fs_subjects_dir is None:
print('Please provide the SUBJECTS_DIR specified to freesurfer ' +
'if you want to do source estimation. These files are not ' +
'copied over to save space and to preserve the original ' +
                      'file identities for clarity and troubleshooting. Pass ' +
                      'fs_subjects_dir=False to suppress this warning')
else:
if fs_subjects_dir and not os.path.isdir(fs_subjects_dir):
raise ValueError('fs_subjects_dir not a directory')
meta_data['Freesufer SUBJECTS_DIR'] = fs_subjects_dir
if bemf is None:
if fs_subjects_dir:
print('Please provide the file for a boundary element model if ' +
                          'you want source estimation; this can be done from a ' +
                          'FLASH or T1 scan using MNE make_flash_bem or ' +
                          'make_watershed_bem, respectively')
meta_data['Boundary Element Model'] = bemf
if srcf is None:
if fs_subjects_dir:
print('Please provide the file for a source space if ' +
                          'you want source estimation; this can be done using MNE ' +
'setup_source_space')
meta_data['Source Space'] = srcf
if transf is None:
if fs_subjects_dir:
print('Please provide the file for a coordinate transformation if ' +
                          'you want source estimation; this can be done with MNE by ' +
                          'setting the SUBJECTS_DIR and SUBJECT environment variables, ' +
                          'running \'mne_analyze\', loading the subject\'s surface from ' +
                          'the recon-all files and the digitization data from the raw file, ' +
                          'and then manually adjusting until the coordinate frames match. ' +
                          'This can then be saved out as a coordinate transform file.')
meta_data['Coordinate Transform'] = transf
with open(file, 'w') as f:
json.dump(meta_data, f)
with open(file, 'r') as f:
meta_data = json.load(f)
self.subject = meta_data['Subject']
self.session = meta_data['Session']
self.run = meta_data['Run']
self.fdata = meta_data['Functional Data']
self.behavior = meta_data['Behavior']
self.behavior_description = meta_data['Behavior Description']
self.baseline = meta_data['Baseline']
self.events = meta_data['Events']
self.responses = meta_data['Responses']
self.eeg = meta_data['EEG']
self.meg = meta_data['MEG']
self.ecog = meta_data['ECOG']
self.seeg = meta_data['SEEG']
self.task = meta_data['Task']
self.no_response = meta_data['No Response']
self.exclude_trials = meta_data['Exclude Trials']
self.tbuffer = meta_data['Time Buffer']
self.process_dirs = meta_data['Process Directories']
self.subjects_dir = meta_data['Subjects Directory']
self.fs_subjects_dir = meta_data['Freesufer SUBJECTS_DIR']
self.bemf = meta_data['Boundary Element Model']
self.srcf = meta_data['Source Space']
self.transf = meta_data['Coordinate Transform']
self.preload = meta_data['Preload']
self.seed = meta_data['Seed']
self.file = file
behf = self._fname('behavior', 'beh', 'csv') # save behavior somewhere original beh won't be overwritten
df = read_csv(self.behavior)
if op.isfile(behf):
df2 = read_csv(behf)
if (len(df) != len(df2) or len(df.columns) != len(df2.columns) or
any(df.columns != df2.columns) or not
all([(isnull(df.loc[i, col]) or
df.loc[i, col] == df2.loc[i, col] or
abs(df.loc[i, col] - df2.loc[i, col]) < 0.001)
for i in df.index for col in df.columns])):
                print('Warning: overwriting previous behavior file')
df.to_csv(behf, index=False)
if epochs is not None and event is not None:
epochs.save(self._fname('epochs', 'epo', 'fif', event))
def _add_meta_data(self, meta_dict, overwrite=False):
with open(self.file, 'r') as f:
meta_data = json.load(f)
for meta_key in meta_dict:
if meta_key in meta_data and not overwrite:
self._overwrite_error('Meta Data', meta_key)
meta_data[meta_key] = meta_dict[meta_key]
with open(self.file, 'w') as f:
json.dump(meta_data, f)
def _fname(self, process_dir, keyword, ftype, *tags):
# must give process dir, any tags
if process_dir == 'plots':
dirname = op.join(self.process_dirs[process_dir], keyword)
else:
dirname = (op.join(self.process_dirs[process_dir], keyword, self.subject)
if process_dir == 'analyses' else
op.join(self.process_dirs[process_dir], self.subject))
if self.session is not None:
dirname = op.join(dirname, self.session)
if self.run is not None:
dirname = op.join(dirname, self.run)
if not op.isdir(dirname):
os.makedirs(dirname)
fname = self.subject
if self.session:
fname += '_' + str(self.session)
if self.run:
fname += '_' + str(self.run)
if self.task:
fname += '_' + str(self.task)
if self.eeg:
fname += '_eeg'
if self.meg:
fname += '_meg'
if self.ecog:
fname += '_ecog'
if self.seeg:
fname += '_seeg'
for tag in tags:
if tag:
fname += '_' + str(tag).replace(' ', '_')
if not process_dir == 'plots':
fname += '-' + keyword
if ftype:
fname += '.' + str(ftype)
return op.join(dirname, fname)
def save2BIDS(self, bids_dir, overwrite=False):
try:
from mne_bids import write_raw_bids, write_anat
except:
raise ImportError('mne-bids not installed, please install to continue '+
' (pip install mne-bids)')
from pandas import read_csv
from subprocess import call
from mne.io import Raw
from mne import read_trans
if not op.isdir(bids_dir):
os.makedirs(bids_dir)
readmef = op.join(bids_dir, 'README')
if not op.isfile(readmef):
with open(readmef, 'w') as f:
f.write('Welcome to the ____ dataset, published in _____, ' +
'funded by _____, please cite ______\n\n' +
'Edit this file to describe your project')
if isinstance(self.fdata, list):
raw = self._load_raw()
self._save_raw(raw, keyword='bids')
raw = self._load_raw(keyword='bids')
else:
raw = self._load_raw()
raw = Raw(raw.filenames[0], preload=False)
session = '01' if self.session is None else self.session
run = '01' if self.run is None else self.run
bids_basename = ('sub-%s_ses-%s_task-%s_run-%s'
% (self.subject, session, self.task, run))
write_raw_bids(raw, bids_basename, output_path=bids_dir, overwrite=overwrite)
behf = op.join(bids_dir, 'sub-%s' % self.subject, 'ses-%s' % session,
'beh', bids_basename + '_beh.tsv')
if not op.isdir(op.dirname(behf)):
os.makedirs(op.dirname(behf))
df = read_csv(self.behavior)
df.to_csv(behf, sep='\t', index=False, na_rep='n/a')
behavior_descriptionf = op.join(bids_dir, 'task-%s_beh.json' % self.task)
self._check_and_save_shared_metadata(behavior_descriptionf, self.behavior_description)
if self.fs_subjects_dir:
t1f = op.join(self.fs_subjects_dir, self.subject, 'mri', 'T1.mgz')
write_anat(bids_dir, self.subject, t1f, session=session,
raw=raw, trans=read_trans(self.transf), overwrite=overwrite)
def _check_and_save_shared_metadata(self, fname, meta_data):
if op.isfile(fname):
with open(fname, 'r') as f:
meta_data2 = json.load(f)
if not self._check_dict_equal(meta_data, meta_data2):
if not self._check_dict_in_dict(meta_data, meta_data2):
raise ValueError('Mismatching information for %s ' % fname +
'information provided does not match ' +
'previous information already saved.')
else:
with open(fname, 'w') as f:
json.dump(meta_data, f)
def _check_dict_equal(self, dict0, dict1):
return (self._check_dict_in_dict(dict0, dict1) and
self._check_dict_in_dict(dict1, dict0))
def _check_dict_in_dict(self, dict0, dict1):
for k, i in dict0.items():
if k not in dict1:
return False
            if isinstance(i, dict):
                if not self._check_dict_equal(i, dict1[k]):
                    return False
            elif dict1[k] != i:
                # non-dict values must match as well, not just the keys
                return False
        return True
def _has_raw(self, keyword=None):
return op.isfile(self._fname('raw', 'raw', 'fif', keyword))
def _load_raw(self, keyword=None):
from mne.io import Raw
if keyword is None:
preload = self.preload if self.preload else op.join(self.subjects_dir,
'workfile')
f = self.fdata[0]
print('Loading Raw file(s)')
print(f)
raw = Raw(f, preload=preload, verbose=False)
raw.info['bads'] = []
for f in self.fdata[1:]:
print(f)
r = Raw(f, preload=preload, verbose=False)
r.info['bads'] = []
raw.append(r)
if self.eeg:
raw = raw.set_eeg_reference(ref_channels=[], projection=False)
raw = raw.pick_types(meg=self.meg, eeg=self.eeg, stim=True,
eog=True, ecg=True, emg=True)
elif op.isfile(self._fname('raw', 'raw', 'fif', keyword)):
raw = Raw(self._fname('raw', 'raw', 'fif', keyword),
verbose=False, preload=True)
self._file_loaded('Raw', keyword=keyword)
else:
self._no_file_error('Raw', keyword=keyword)
return self._exclude_unpicked_types(raw)
def _save_raw(self, raw, keyword=None):
if keyword is None:
            raise ValueError('Keyword required for raw data; ' +
                             'we don\'t want to save over the original data')
self._file_saved('Raw', keyword=keyword)
raw.save(self._fname('raw', 'raw', 'fif', keyword),
verbose=False, overwrite=True)
def _has_ICA(self, event=None, keyword=None):
return all([op.isfile(self._fname('preprocessing', 'ica', 'fif',
data_type, keyword, event))
for data_type in self._get_data_types()])
def _load_ICA(self, event=None, keyword=None, data_type=None):
from mne.preprocessing import read_ica
fname = self._fname('preprocessing', 'ica', 'fif',
data_type, keyword, event)
if op.isfile(fname):
ica = read_ica(fname)
self._file_loaded('ICA', event=event, data_type=data_type,
keyword=keyword)
return ica
else:
self._no_file_error('ICA', event=event, data_type=data_type,
keyword=keyword)
def _save_ICA(self, ica, event=None, keyword=None, data_type=None):
self._file_saved('ICA', event=event, data_type=data_type,
keyword=keyword)
ica.save(self._fname('preprocessing', 'ica', 'fif', data_type,
keyword, event))
def _has_epochs(self, event, keyword=None):
return op.isfile(self._fname('epochs', 'epo', 'fif', event, keyword))
def _load_epochs(self, event, keyword=None):
from mne import read_epochs
if not self._has_epochs(event, keyword=keyword):
self._no_file_error('Epochs', event=event, keyword=keyword)
epochs = read_epochs(self._fname('epochs', 'epo', 'fif', event, keyword),
verbose=False, preload=True)
self._file_loaded('Epochs', event=event, keyword=keyword)
epochs._data = epochs._data.astype('float64') # mne bug work-around
return self._exclude_unpicked_types(epochs)
def _save_epochs(self, epochs, event, keyword=None):
if keyword is None:
            raise ValueError('Keyword required for epochs data; ' +
                             'we don\'t want to save over the original data')
self._file_saved('Epochs', event=event, keyword=keyword)
epochs.save(self._fname('epochs', 'epo', 'fif', event, keyword))
def _has_evoked(self, event, data_type=None, keyword=None):
return op.isfile(self._fname('preprocessing', 'ave', 'fif',
event, data_type, keyword))
def _load_evoked(self, event, data_type=None, keyword=None):
from mne import read_evokeds
if not self._has_evoked(event, data_type=data_type, keyword=keyword):
self._no_file_error('Evoked', event=event, data_type=data_type,
keyword=keyword)
evoked = read_evokeds(self._fname('preprocessing', 'ave', 'fif', event,
data_type, keyword),
verbose=False)
self._file_loaded('Evoked', event=event, data_type=data_type,
keyword=keyword)
return evoked[0]
def _save_evoked(self, evoked, event, data_type=None, keyword=None):
self._file_saved('Evoked', event=event, data_type=data_type,
keyword=keyword)
evoked.save(self._fname('preprocessing', 'ave', 'fif', event,
data_type, keyword))
def _has_autoreject(self, event, keyword=None):
return op.isfile(self._fname('preprocessing', 'ar', 'npz',
event, keyword))
def _load_autoreject(self, event, keyword=None):
if self._has_autoreject(event, keyword=keyword):
f = np.load(self._fname('preprocessing', 'ar', 'npz',
event, keyword))
return f['ar'].item(), f['reject_log'].item()
else:
print('Autoreject must be run for ' + event)
def _save_autoreject(self, ar, reject_log, event, keyword=None):
np.savez_compressed(self._fname('preprocessing', 'ar', 'npz',
event, keyword),
ar=ar, reject_log=reject_log)
def _has_TFR(self, event, condition, value, power_type,
data_type=None, keyword=None):
fname = self._fname('analyses', 'tfr', 'npy',
event, condition, value, power_type,
data_type, keyword)
fname1b = self._fname('analyses', 'tfr_params', 'npz',
event, condition, value, power_type,
data_type, keyword)
fname2 = self._fname('analyses', 'tfr', 'npz',
event, condition, value, power_type,
data_type, keyword)
return ((op.isfile(fname) and op.isfile(fname1b)) or
op.isfile(fname2))
def _load_TFR(self, event, condition, value, power_type,
data_type=None, keyword=None):
fname = self._fname('analyses', 'tfr', 'npy',
event, condition, value, power_type,
data_type, keyword)
fname1b = self._fname('analyses', 'tfr_params', 'npz',
event, condition, value, power_type,
data_type, keyword)
fname2 = self._fname('analyses', 'tfr', 'npz',
event, condition, value, power_type,
data_type, keyword)
if op.isfile(fname) and op.isfile(fname1b):
tfr = np.load(fname)
f = np.load(fname1b)
frequencies, n_cycles = f['frequencies'], f['n_cycles']
elif op.isfile(fname2):
f = np.load(fname2)
tfr, frequencies, n_cycles = f['tfr'], f['frequencies'], f['n_cycles']
else:
self._no_file_error('TFR', power_type, event=event,
condition=condition, value=value,
data_type=data_type, keyword=keyword)
self._file_loaded('TFR', power_type, event=event,
condition=condition, value=value,
data_type=data_type, keyword=keyword)
return tfr, frequencies, n_cycles
def _save_TFR(self, tfr, frequencies, n_cycles,
event, condition, value, power_type,
data_type=None, keyword=None, compressed=True):
self._file_saved('TFR', power_type, event=event,
condition=condition, value=value,
data_type=data_type, keyword=keyword)
fname1 = self._fname('analyses', 'tfr', 'npy', event, condition,
value, power_type, data_type, keyword)
fname1b = self._fname('analyses', 'tfr_params', 'npz', event,
condition, value, power_type, data_type,
keyword)
fname2 = self._fname('analyses', 'tfr', 'npz', event,
condition, value, power_type, data_type,
keyword)
if compressed:
np.savez_compressed(fname2, tfr=tfr, frequencies=frequencies,
n_cycles=n_cycles)
if op.isfile(fname1) and op.isfile(fname1b):
os.remove(fname1)
os.remove(fname1b)
else:
np.save(fname1, tfr)
np.savez_compressed(fname1b, frequencies=frequencies,
n_cycles=n_cycles)
if op.isfile(fname2):
os.remove(fname2)
def _has_CPT(self, event, condition, value, band=None,
data_type=None, keyword=None):
return op.isfile(self._fname('analyses', 'CPT', 'npz',
event, condition, value,
band, data_type, keyword))
def _load_CPT(self, event, condition, value, band=None,
data_type=None, keyword=None):
if self._has_CPT(event, condition, value, band=band,
data_type=data_type, keyword=keyword):
f = np.load(self._fname('analyses', 'CPT', 'npz',
event, condition, value, band,
data_type, keyword))
self._file_loaded('Cluster Permutation Test', event=event,
condition=condition, value=value,
data_type=data_type, keyword=keyword)
return (f['clusters'], f['cluster_p_values'], f['times'],
f['ch_dict'].item(), f['frequencies'], f['band'])
else:
self._no_file_error('Cluster Permutation Test', band,
event=event, condition=condition, value=value,
data_type=data_type, keyword=keyword)
def _save_CPT(self, clusters, cluster_p_values, times, ch_dict, frequencies,
band, event, condition, value, data_type=None, keyword=None):
self._file_saved('Cluster Permutation Test', band,
event=event, condition=condition, value=value,
data_type=data_type, keyword=keyword)
fname = self._fname('analyses', 'CPT', 'npz', event, condition, value,
band, data_type, keyword)
np.savez_compressed(fname, clusters=clusters,
ch_dict=ch_dict, times=times,
frequencies=(frequencies if
frequencies is not None else []),
cluster_p_values=cluster_p_values,
band=band if band is not None else [])
def _has_inverse(self, event, condition, value, keyword=None):
fname = self._fname('source_estimates', 'inv', 'fif', keyword,
event, condition, value)
fname2 = self._fname('source_estimates', 'inverse_params', 'npz',
keyword, event, condition, value)
return op.isfile(fname) and op.isfile(fname2)
def _load_inverse(self, event, condition, value, keyword=None):
from mne.minimum_norm import read_inverse_operator
if self._has_inverse(event, condition, value, keyword=keyword):
fname = self._fname('source_estimates', 'inv', 'fif', keyword,
event, condition, value)
fname2 = self._fname('source_estimates', 'inverse_params', 'npz',
keyword, event, condition, value)
f = np.load(fname2)
return (read_inverse_operator(fname), f['lambda2'].item(),
f['method'].item(), f['pick_ori'].item())
else:
self._no_file_error('Inverse', event=event, condition=condition,
value=value, keyword=keyword)
def _save_inverse(self, inv, lambda2, method, pick_ori,
event, condition, value, keyword=None):
from mne.minimum_norm import write_inverse_operator
self._file_saved('Inverse', event=event, condition=condition,
value=value, keyword=keyword)
write_inverse_operator(self._fname('source_estimates', 'inv', 'fif',
keyword, event, condition, value),
inv, verbose=False)
np.savez_compressed(self._fname('source_estimates', 'inverse_params', 'npz',
keyword, event, condition, value),
lambda2=lambda2, method=method, pick_ori=pick_ori)
def _has_source(self, event, condition, value, keyword=None):
fname = self._fname('source_estimates', 'source-lh', 'stc', keyword,
event, condition, value)
return op.isfile(fname)
def _load_source(self, event, condition, value, keyword=None):
from mne import read_source_estimate
fname = self._fname('source_estimates', 'source-lh', 'stc', keyword,
event, condition, value)
if self._has_source(event, condition, value, keyword=keyword):
self._file_loaded('Source Estimate', event=event, condition=condition,
value=value, keyword=keyword)
return read_source_estimate(fname)
else:
print('Source not found for %s %s %s' % (event, condition, value))
def _save_source(self, stc, event, condition, value, keyword=None):
self._file_saved('Source Estimate', event=event, condition=condition,
value=value, keyword=keyword)
stc.save(self._fname('source_estimates', 'source', None, keyword,
event, condition, value),
ftype='stc')
def _has_PSD(self, keyword):
return op.isfile(self._fname('analyses', 'psd', 'npz', keyword))
def _load_PSD(self, keyword):
if self._has_PSD(keyword):
fname = self._fname('analyses', 'psd', 'npz', keyword)
self._file_loaded('Power Spectral Density', keyword=keyword)
return np.load(fname)['image']
else:
return None
def _save_PSD(self, image, keyword):
self._file_saved('Power Spectral Density', keyword=keyword)
np.savez_compressed(self._fname('analyses', 'psd', 'npz', keyword),
image=image)
    def _file_message(self, action, ftype, *tags, event=None,
condition=None, value=None,
data_type=None, keyword=None):
return ('%s for %s' % (action, ftype) +
(' %s' % event)*(event is not None) +
(' %s' % condition)*(condition is not None) +
(' %s' % value)*(value is not None) +
(' %s' % data_type)*(data_type is not None) +
(' %s' % keyword)*(keyword is not None) +
(' %s' % ' '.join([str(tag) for tag in tags if
tag and tag is not None])))
    def _file_loaded(self, ftype, *tags, event=None, condition=None,
                     value=None, data_type=None, keyword=None):
        print(self._file_message('File loaded', ftype, *tags, event=event,
                                 condition=condition,
                                 value=value, data_type=data_type,
                                 keyword=keyword))
    def _file_saved(self, ftype, *tags, event=None, condition=None, value=None,
                    data_type=None, keyword=None):
        print(self._file_message('File saved', ftype, *tags, event=event,
                                 condition=condition,
                                 value=value, data_type=data_type,
                                 keyword=keyword))
    def _no_file_error(self, ftype, *tags, event=None, condition=None,
                       value=None, data_type=None, keyword=None):
        raise ValueError(self._file_message('No file found',
                                            ftype, *tags, event=event,
                                            condition=condition,
                                            value=value, data_type=data_type,
                                            keyword=keyword))
def remove(self, event=None, keyword=None):
dir_name = 'raw' if event is None else 'epochs'
suffix = 'epo' if event else 'raw'
fname = self._fname(dir_name, suffix, 'fif', event, keyword)
if op.isfile(fname):
print('Deleting %s' % fname)
os.remove(fname)
    def _overwrite_error(self, ftype, *tags, event=None, condition=None,
values=None, keyword=None):
error_msg = ('%s already exists for %s' % (ftype, event) +
(' %s' % ' '.join([str(tag) for tag in tags])) +
(' %s' % keyword)*(keyword is not None) +
(' %s' % condition)*(condition is not None) +
(' %s' % (' '.join([str(v) for v in values]))
if values is not None else '') +
', use \'overwrite=True\' to overwrite')
raise ValueError(error_msg)
def _get_data_types(self):
return (['grad', 'mag']*self.meg + ['eeg']*self.eeg +
['ecog']*self.ecog + ['seeg']*self.seeg)
def preprocess(self, event=None):
# preprocessing
self.autoMarkBads()
self.findICA()
for event in self.events:
self.makeEpochs(event)
self.markAutoReject(event)
def getEvents(self, baseline=True):
return (list(self.events.keys()) + list(self.responses.keys()) +
['Baseline']*(self.baseline is not None and baseline))
def _default_aux(self, inst, eogs=None, ecgs=None):
from mne import pick_types
if eogs is None:
inds = pick_types(inst.info, meg=False, eog=True)
eogs = [inst.ch_names[ind] for ind in inds]
print('Using ' + (' '.join(eogs) if eogs else 'no channels') + ' as eogs')
if ecgs is None:
inds = pick_types(inst.info, meg=False, ecg=True)
ecgs = [inst.ch_names[ind] for ind in inds]
print('Using ' + (' '.join(ecgs) if ecgs else 'no channels') + ' as ecgs')
return eogs, ecgs
def _combine_insts(self, insts):
from mne import EpochsArray
from mne.io import RawArray, BaseRaw
if len(insts) < 1:
raise ValueError('Nothing to combine')
inst_data = insts[0]._data
inst_info = insts[0].info
for inst in insts[1:]:
inst_data = np.concatenate([inst_data, inst._data], axis=-2)
inst_info['ch_names'] += inst.info['ch_names']
inst_info['chs'] += inst.info['chs']
inst_info['nchan'] += inst.info['nchan']
inst_info['bads'] += inst.info['bads']
if isinstance(insts[0], BaseRaw):
return RawArray(inst_data, inst_info).set_annotations(insts[0].annotations)
else:
return EpochsArray(inst_data, inst_info, events=insts[0].events, tmin=insts[0].tmin)
def findICA(self, event=None, keyword_in=None, keyword_out=None,
eogs=None, ecgs=None, n_components=None, l_freq=None, h_freq=40,
detrend=1, component_optimization_n=3, tmin=None, tmax=None,
vis_tmin=None, vis_tmax=None, seed=11, overwrite=False,
overwrite_ica=False):
# keyword_out functionality was added so that ICA can be computed on
# one raw data and applied to another
# note: filter only filters evoked
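        # Rough flow: for each data type (grad/mag/eeg/ecog/seeg), fit ICA,
        # build EOG/ECG epochs to score and mark artifact components, apply
        # the ICA, then recombine the cleaned channel groups (plus any
        # eog/ecg/stim channels) and save the result under keyword_out.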
from mne.io import BaseRaw
from mne.preprocessing import ICA
keyword_out = keyword_in if keyword_out is None else keyword_out
data_types = self._get_data_types()
if self._has_ICA(event=event, keyword=keyword_out) and not overwrite_ica:
raise ValueError('Use \'overwrite_ica=True\' to overwrite')
if event is None:
if self._has_raw(keyword=keyword_out) and not overwrite:
self._overwrite_error('Raw', keyword=keyword_out)
inst = self._load_raw(keyword=keyword_in)
else:
if self._has_epochs(event, keyword=keyword_out) and not overwrite:
self._overwrite_error('Epochs', event=event, keyword=keyword_out)
inst = self._load_epochs(event, keyword=keyword_in)
tmin, tmax = self._default_t(event, tmin, tmax)
inst = inst.crop(tmin=tmin, tmax=tmax)
eogs, ecgs = self._default_aux(inst, eogs, ecgs)
if not all([ch in inst.ch_names for ch in eogs + ecgs]):
            raise ValueError('Auxiliary channels not in channel list.')
ica_insts = []
for dt in data_types:
print(dt)
inst2 = self._pick_types(inst, dt)
ica = ICA(method='fastica', n_components=n_components,
random_state=seed)
ica.fit(inst2)
fig = ica.plot_components(picks=np.arange(ica.get_components().shape[1]),
show=False)
fig.savefig(self._fname('plots', 'components', 'jpg', dt, keyword_out))
plt.close(fig)
#
if isinstance(inst, BaseRaw):
raw = self._pick_types(inst, dt, aux=True)
all_scores = self._make_ICA_components(raw, ica, eogs, ecgs, detrend,
l_freq, h_freq, dt, keyword_out,
vis_tmin, vis_tmax)
'''if component_optimization_n:
ica = self._optimize_components(raw, ica, all_scores,
component_optimization_n,
keyword_in, kw)'''
inst2 = ica.apply(inst2, exclude=ica.exclude)
self._save_ICA(ica, event=event, data_type=dt, keyword=keyword_out)
ica_insts.append(inst2)
try:
ica_insts.append(inst.copy().pick_types(meg=False, eeg=False, eog=True,
ecg=True, stim=True, exclude=[]))
except Exception as e:
print('No eog, ecg or stim channels', e)
inst = self._combine_insts(ica_insts)
if isinstance(inst, BaseRaw):
self._save_raw(inst, keyword=keyword_out)
else:
self._save_epochs(inst, event, keyword=keyword_out)
def _optimize_components(self, raw, ica, all_scores,
component_optimization_n, keyword, data_type):
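        # Brute-force search: take the top component_optimization_n components
        # per artifact channel (by correlation score), enumerate every subset
        # of them (2**n combinations via int2bin below) and keep the subset
        # whose cleaned artifact-locked evoked has the smallest residual score.
        # Note the search grows exponentially with the number of candidates.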
# get component_optimization_n of components
components = []
for ch in all_scores:
current_scores = list(all_scores[ch])
for n in range(component_optimization_n):
component = current_scores.index(max(current_scores))
components.append(component)
current_scores.pop(component)
# get ways to combine the components
def int2bin(n, i):
b = []
for j in range(n-1,-1,-1):
if i/(2**j):
i -= 2**j
b.append(1)
else:
b.append(0)
return list(reversed(b))
combinations = [int2bin(len(components), i) for i in range(2**len(components))]
min_score = None
evokeds = {}
for ch in all_scores:
evokeds[ch] = self._load_evoked('ica_%s' % ch, data_type=data_type,
keyword=keyword)
print('Testing ICA component combinations for minimum correlation to artifact epochs')
for combo in tqdm(combinations):
score = 0
ica.exclude = [component for i, component in enumerate(components) if combo[i]]
for ch in all_scores:
evoked = ica.apply(evokeds[ch].copy(), exclude=ica.exclude)
sfreq = int(evoked.info['sfreq'])
                evoked_data = evoked.data[:, sfreq // 10:-sfreq // 10]
for i in range(evoked_data.shape[0]):
evoked_data[i] -= np.median(evoked_data[i])
score += abs(evoked_data).sum()*evoked_data.std(axis=0).sum()
if min_score is None or score < min_score:
best_combo = combo
min_score = score
ica.exclude = [component for i, component in enumerate(components) if best_combo[i]]
return ica
def _make_ICA_components(self, raw, ica, eogs, ecgs, detrend, l_freq, h_freq,
data_type, keyword, vis_tmin, vis_tmax):
from mne.preprocessing import create_eog_epochs, create_ecg_epochs
if vis_tmin is not None:
raw = raw.copy().crop(tmin=vis_tmin)
if vis_tmax is not None:
raw = raw.copy().crop(tmax=vis_tmax)
all_scores = {}
for ch in eogs:
try:
epochs = create_eog_epochs(raw, ch_name=ch, h_freq=8)
indices, scores = ica.find_bads_eog(epochs, ch_name=ch)
all_scores[ch] = scores
except:
print('EOG %s dead' % ch)
continue
if l_freq is not None or h_freq is not None:
epochs = epochs.filter(l_freq=l_freq, h_freq=h_freq)
evoked = epochs.average()
if detrend is not None:
evoked = evoked.detrend(detrend)
self._save_evoked(evoked, 'ica_%s' % ch,
data_type=data_type, keyword=keyword)
self._exclude_ICA_components(ica, ch, indices, scores)
for ecg in ecgs:
try:
epochs = create_ecg_epochs(raw, ch_name=ecg)
indices, scores = ica.find_bads_ecg(epochs)
all_scores[ecg] = scores
except:
print('ECG %s dead' % ecg)
continue
if l_freq is not None or h_freq is not None:
epochs = epochs.filter(l_freq=l_freq, h_freq=h_freq)
evoked = epochs.average()
if detrend is not None:
evoked = evoked.detrend(detrend)
self._save_evoked(evoked, 'ica_%s' % ecg,
data_type=data_type, keyword=keyword)
self._exclude_ICA_components(ica, ecg, indices, scores)
return all_scores
def _exclude_ICA_components(self, ica, ch, indices, scores):
for ind in indices:
if ind not in ica.exclude:
ica.exclude.append(ind)
print('Components removed for %s: ' % ch +
' '.join([str(i) for i in indices]))
fig = ica.plot_scores(scores, exclude=indices, show=False)
fig.savefig(self._fname('plots', 'source_scores', 'jpg', ch))
plt.close(fig)
def plotICA(self, event=None, keyword_in=None, keyword_out=None,
eogs=None, ecgs=None, tmin=None, tmax=None,
ylim=dict(eeg=[-40, 40], grad=[-400, 400], mag=[-1000, 1000]),
plot_properties=False, show=True):
from mne.io import BaseRaw
keyword_out = keyword_in if keyword_out is None else keyword_out
if event is None:
inst = self._load_raw(keyword=keyword_in)
else:
inst = self._load_epochs(event, keyword=keyword_in)
tmin, tmax = self._default_t(event, tmin, tmax)
inst = inst.crop(tmin=tmin, tmax=tmax)
eogs, ecgs = self._default_aux(inst, eogs, ecgs)
data_types = self._get_data_types()
ica_insts = []
for dt in data_types:
inst1b = self._pick_types(inst, dt)
inst2 = self._pick_types(inst, dt)
ica = self._load_ICA(event=event, data_type=dt,
keyword=keyword_out)
if isinstance(inst, BaseRaw):
for ch in eogs:
try:
evoked = self._load_evoked('ica_%s' % ch,
data_type=dt,
keyword=keyword_out)
self._plot_ICA_sources(ica, evoked, ch, show)
except:
print('%s dead/not working' % ch)
for ecg in ecgs:
try:
evoked = self._load_evoked('ica_%s' % ecg,
data_type=dt,
keyword=keyword_out)
self._plot_ICA_sources(ica, evoked, ecg, show)
except:
print('%s dead/not working' % ecg)
fig = ica.plot_components(picks=np.arange(ica.get_components().shape[1]),
show=False)
fig.show()
ica.plot_sources(inst2, block=show, show=show, title=self.subject)
inst2 = ica.apply(inst2, exclude=ica.exclude)
if isinstance(inst, BaseRaw):
for ch in eogs:
try:
evoked = self._load_evoked('ica_%s' % ch,
data_type=dt,
keyword=keyword_out)
self._plot_ICA_overlay(ica, evoked, ch, show)
except:
print('%s dead/not working' % ch)
for ecg in ecgs:
try:
evoked = self._load_evoked('ica_%s' % ecg,
data_type=dt,
keyword=keyword_out)
self._plot_ICA_overlay(ica, evoked, ecg, show)
except:
print('%s dead/not working' % ecg)
else:
fig = inst1b.average().plot(show=False, ylim=ylim,
window_title='Before ICA')
self._show_fig(fig, show)
fig2 = inst2.average().plot(show=False, ylim=ylim,
window_title='After ICA')
self._show_fig(fig2, show)
if plot_properties:
self._plot_ICA_properties(inst1b, ica, ica.exclude, show)
plt.show()
ica_insts.append(inst2)
self._save_ICA(ica, data_type=dt, keyword=keyword_out)
try:
ica_insts.append(inst.copy().pick_types(meg=False, eeg=False, eog=True,
ecg=True, stim=True, exclude=[]))
except Exception as e:
print('No eog, ecg or stim channels', e)
inst = self._combine_insts(ica_insts)
if isinstance(inst, BaseRaw):
self._save_raw(inst, keyword=keyword_out)
else:
self._save_epochs(inst, event, keyword=keyword_out)
def _plot_ICA_overlay(self, ica, evoked, ch, show):
evoked = evoked.detrend(1)
fig = ica.plot_overlay(evoked, show=False)
fig.suptitle('%s %s' % (self.subject, ch))
fig.savefig(self._fname('plots', 'ica_overlay', 'jpg', ch))
self._show_fig(fig, show)
def _plot_ICA_sources(self, ica, evoked, ch, show):
fig = ica.plot_sources(evoked, exclude=ica.exclude, show=False)
fig.suptitle('%s %s' % (self.subject, ch))
fig.savefig(self._fname('plots', 'ica_time_course', 'jpg', ch))
self._show_fig(fig, show)
def _plot_ICA_properties(self, inst, ica, picks, show):
figs = ica.plot_properties(inst, picks=picks, show=False)
for i, fig in enumerate(figs):
fig.suptitle('%s' % self.subject)
fig.savefig(self._fname('plots',
                                    'ica_properties',
'jpg', picks[i]))
self._show_fig(fig, show)
def autoMarkBads(self, keyword_in=None, keyword_out=None,
flat=dict(grad=1e-11, # T / m (gradiometers)
mag=5e-13, # T (magnetometers)
eeg=2e-5, # V (EEG channels)
),
reject=dict(grad=5e-10, # T / m (gradiometers)
mag=1e-11, # T (magnetometers)
eeg=5e-4, # V (EEG channels)
),
bad_seeds=0.25, seeds=1000, datalen=1000,
overwrite=False):
# now we will use seeding to remove bad channels
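        # Heuristic: for each channel, draw `seeds` random segments of
        # `datalen` samples and compute the peak-to-peak amplitude of each.
        # The channel is marked bad if more than `bad_seeds` (a fraction,
        # e.g. 0.25 of 1000 seeds = 250 segments) of the segments are flat
        # (range below `flat`) or noisy (range above `reject`).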
        keyword_out = keyword_in if keyword_out is None else keyword_out
if (os.path.isfile(self._fname('raw', 'raw', 'fif', keyword_out)) and
not overwrite):
print('Raw data already marked for bads, use \'overwrite=True\'' +
' to recalculate.')
return
raw = self._load_raw(keyword=keyword_in)
raw.info['bads'] = []
data_types = self._get_data_types()
rawlen = len(raw._data[0])
for dt in data_types:
print(dt)
raw2 = self._pick_types(raw, dt)
for i in range(len(raw2.ch_names)):
flat_count, reject_count = 0, 0
for j in range(seeds):
start = np.random.randint(0, rawlen-datalen)
seed = raw2._data[i, start:start+datalen]
min_c = seed.min()
max_c = seed.max()
diff_c = max_c - min_c
if diff_c < flat[dt]:
flat_count += 1
if diff_c > reject[dt]:
reject_count += 1
if flat_count > (seeds * bad_seeds):
raw.info['bads'].append(raw2.ch_names[i])
print(raw2.ch_names[i] + ' removed: flat')
elif reject_count > (seeds * bad_seeds):
raw.info['bads'].append(raw2.ch_names[i])
print(raw2.ch_names[i] + ' removed: reject')
self._save_raw(raw, keyword=keyword_out)
def closePlots(self):
plt.close('all')
def plotRaw(self, n_per_screen=20, scalings=None, keyword_in=None,
keyword_out=None, l_freq=0.5, h_freq=40, overwrite=False):
from mne import pick_types
keyword_out = keyword_in if keyword_out is None else keyword_out
if (os.path.isfile(self._fname('raw', 'raw', 'fif', keyword_out))
and not overwrite):
print('Use \'overwrite = True\' to overwrite')
return
raw = self._load_raw(keyword=keyword_in)
bads_ind = [raw.info['ch_names'].index(ch) for ch in raw.info['bads']]
this_chs_ind = list(pick_types(raw.info, meg=self.meg, eeg=self.eeg)) + bads_ind
aux_chs_ind = list(pick_types(raw.info, meg=False, eog=True, ecg=True))
order = []
n = n_per_screen-len(aux_chs_ind)
for i in range(len(this_chs_ind)//n+1):
order.append(this_chs_ind[i*n:min([len(this_chs_ind),(i+1)*n])])
order.append(aux_chs_ind)
order = np.array(np.concatenate(order), dtype=int)
if self.eeg:
raw.set_eeg_reference(ref_channels=[], projection=False)
elif self.meg:
order = None
if l_freq is not None or h_freq is not None:
raw2 = raw.copy().filter(l_freq=l_freq, h_freq=h_freq)
else:
raw2 = raw.copy()
raw2.plot(show=True, block=True, color=dict(eog='steelblue'),
title="%s Bad Channel Selection" % self.subject, order=order,
scalings=scalings)
raw.info['bads'] = raw2.info['bads']
self._save_raw(raw, keyword=keyword_out)
def interpolateBads(self, event=None, keyword_in=None, keyword_out=None):
keyword_out = keyword_in if keyword_out is None else keyword_out
if event is None:
raw = self._load_raw(keyword=keyword_in)
raw = raw.interpolate_bads(reset_bads=True)
self._save_raw(raw, keyword=keyword_out)
else:
epo = self._load_epochs(event, keyword=keyword_in)
epo = epo.interpolate_bads(reset_bads=True)
            self._save_epochs(epo, event, keyword=keyword_out)
def downsample(self, event=None, keyword_in=None, keyword_out=None,
new_sfreq=200, npad='auto', window='boxcar', n_jobs=1,
overwrite=False):
keyword_out = keyword_in if keyword_out is None else keyword_out
if event is None:
if self._has_raw(keyword=keyword_out) and not overwrite:
self._overwrite_error('Raw', keyword=keyword_out)
raw = self._load_raw(keyword=keyword_in)
raw = raw.resample(new_sfreq, npad=npad, window=window, n_jobs=n_jobs)
self._save_raw(raw, keyword=keyword_out)
else:
if self._has_epochs(event, keyword=keyword_out) and not overwrite:
self._overwrite_error('Epochs', event=event, keyword=keyword_out)
epochs = self._load_epochs(event, keyword=keyword_in)
epochs = epochs.resample(new_sfreq, npad=npad, window=window,
n_jobs=n_jobs)
self._save_epochs(epochs, event, keyword=keyword_out)
def makeEpochs(self, keyword_in=None, keyword_out=None, detrend=0,
normalized=True, overwrite=False):
keyword_out = keyword_in if keyword_out is None else keyword_out
if (all([self._has_epochs(event, keyword_out) for event in self.getEvents()])
and not overwrite):
self._overwrite_error('Epochs', keyword=keyword_out)
raw = self._load_raw(keyword=keyword_in)
n_events = None
if self.baseline:
ch, tmin, tmax = self.baseline
n_events = self._make_epochs(raw, 'Baseline', ch, tmin, tmax, detrend,
keyword_out, n_events=n_events)
if normalized:
bl_epochs = self._load_epochs('Baseline', keyword=keyword_out)
baseline_data = bl_epochs.crop(tmin=tmin, tmax=tmax).get_data()
baseline_arr = baseline_data.mean(axis=2)
for event in self.events:
ch, tmin, tmax = self.events[event]
n_events = self._make_epochs(raw, event, ch, tmin, tmax, detrend,
keyword_out, n_events=n_events)
if normalized:
self._normalize_epochs(event, keyword_out, baseline_arr, n_events)
for event in self.responses:
ch, tmin, tmax = self.responses[event]
self._make_epochs(raw, event, ch, tmin, tmax, detrend, keyword_out,
n_events=n_events, response=True)
if normalized:
self._normalize_epochs(event, keyword_out, baseline_arr,
n_events, bl_events=bl_epochs.events[:, 2])
def _normalize_epochs(self, event, keyword, baseline_arr, n_events,
bl_events=None):
from mne import EpochsArray
print('Normalizing epochs based on baseline')
epochs = self._load_epochs(event, keyword=keyword)
epochs_data = epochs.get_data()
if bl_events is not None:
include = [i for i, e in enumerate(bl_events) if
e not in self.no_response[event]]
baseline_arr = baseline_arr[include]
epochs_demeaned_data = np.array([arr - baseline_arr.T
for arr in epochs_data.T]).T
epochs = EpochsArray(epochs_demeaned_data, epochs.info,
events=epochs.events, verbose=False,
proj=False, tmin=epochs.tmin)
self._save_epochs(epochs, event, keyword=keyword)
def _find_events(self, raw, ch): #backward compatible
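        # Try stim-channel triggers first (mne.find_events); if that fails
        # (e.g. no such stim channel in the raw file), fall back to
        # annotation-based events via events_from_annotations. `ch` may also
        # be a (channel, event_id) tuple to select a single trigger value.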
from mne import find_events, events_from_annotations
if isinstance(ch, tuple):
ch, event_id = ch
else:
event_id = None
try:
events = find_events(raw, stim_channel=ch, output="onset", verbose=False)
if event_id is not None:
events = events[np.where(events[:, 2]==event_id)[0]]
except ValueError as e:
events, event_id2 = events_from_annotations(raw)
if event_id is None:
events = events[np.where(events[:, 2]==event_id2[ch])[0]]
else:
events = events[np.where(events[:, 2]==event_id)[0]]
return events
def _make_epochs(self, raw, event, ch, tmin, tmax, detrend, keyword_out, n_events=None,
response=False):
from mne import Epochs
try:
events = self._find_events(raw, ch)
except:
raise ValueError('%s channel not found in raw' % event +
', maybe you meant to use normalized=False' * (event == 'Baseline'))
n_events2 = len(events)
print('%s events found: %i' % (event, n_events2))
if response:
response_events = np.setdiff1d(np.arange(n_events), self.no_response[event])
exclude = [i for i, e in enumerate(response_events) if e in self.exclude_trials]
if n_events is not None and n_events2 + len(self.no_response[event]) != n_events:
raise ValueError('%i events compared to ' % n_events +
                                 '%i responses + %i no-response trials ' % (n_events2, len(self.no_response[event])) +
'doesn\'t add up')
events[:, 2] = response_events
else:
exclude = self.exclude_trials
if n_events is not None and n_events2 != n_events:
raise ValueError('%i events from previous stimuli, ' % n_events +
'%i events from %s' % (n_events2, event))
events[:, 2] = np.arange(n_events2)
events = np.delete(events, exclude, axis=0)
epochs = Epochs(raw, events, tmin=tmin-self.tbuffer,
tmax=tmax + self.tbuffer, baseline=None, verbose=False,
detrend=detrend, preload=True)
if self.eeg:
epochs = epochs.set_eeg_reference(ref_channels='average',
projection=False)
self._save_epochs(epochs, event, keyword=keyword_out)
return n_events2
def plotEpochs(self, event, n_epochs=20, n_channels=20, scalings=None,
tmin=None, tmax=None, l_freq=None, h_freq=None,
keyword_in=None, keyword_out=None, overwrite=False):
# note: if linear trend, apply l_freq filter
keyword_out = keyword_in if keyword_out is None else keyword_out
if self._has_epochs(event, keyword=keyword_out) and not overwrite:
self._overwrite_error('Epochs', event=event, keyword=keyword_out)
epochs = self._load_epochs(event, keyword=keyword_in)
tmin, tmax = self._default_t(event, tmin, tmax)
epochs_copy = epochs.copy().crop(tmin=tmin, tmax=tmax)
if l_freq is not None or h_freq is not None:
epochs_copy = epochs_copy.filter(l_freq=l_freq, h_freq=h_freq)
epochs_copy.plot(n_epochs=n_epochs, n_channels=n_channels, block=True,
scalings=scalings)
epochs.info['bads'] = epochs_copy.info['bads']
drop_indices = []
i = 0
for e1, e2 in zip(epochs.drop_log, epochs_copy.drop_log):
if e1 == []:
if e2 != []:
drop_indices.append(i)
i += 1
epochs = epochs.drop(drop_indices)
self._save_epochs(epochs, event, keyword=keyword_out)
def plotTopo(self, event, condition=None, values=None,
epochs=None, keyword=None, ylim={'eeg':[-30, 30]},
l_freq=None, h_freq=None, tmin=None, tmax=None, detrend=1,
seed=11, downsample=True, show=True):
epochs = self._prepare_epochs(event, epochs, keyword, tmin, tmax, l_freq, h_freq)
if condition is None:
values = ['all']
value_indices = {'all':[]}
else:
values = self._default_values(condition, values=values)
value_indices = self._get_indices(epochs, condition, values)
if downsample:
np.random.seed(seed)
nTR = min([len(value_indices[value]) for value in value_indices])
fig, axs = plt.subplots((2*self.meg+self.eeg), len(values),
figsize=(5*len(values), 5*(2*self.meg+self.eeg)))
if self.eeg and not self.meg:
if not type(axs) is np.ndarray:
axs = np.array([axs])
axs = axs[np.newaxis,:]
fig.suptitle('%s %s %s' % (self.subject, event, condition))
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
for i, value in enumerate(values):
if not value in value_indices:
continue
indices = value_indices[value]
if condition is not None and downsample:
print('Subsampling %i/%i %s %s.' % (nTR, len(indices), condition,
value))
np.random.shuffle(indices)
indices = indices[:nTR]
if value == 'all':
evoked = epochs.average()
else:
evoked = epochs[indices].average()
if detrend:
evoked = evoked.detrend(order=detrend)
if self.meg:
ax = axs[0, i]
if i == 0:
ax.set_ylabel('mag')
evoked.copy().pick_types(meg='mag').plot_topo(axes=ax,
show=False, ylim=ylim)
ax2 = axs[1, i]
if i == 0:
ax2.set_ylabel('grad')
evoked.copy().pick_types(meg='grad').plot_topo(axes=ax2,
show=False, ylim=ylim)
if self.eeg:
ax = axs[2*self.meg, i]
if i == 0:
ax.set_ylabel('eeg')
evoked.copy().pick_types(eeg=True).plot_topo(axes=ax,
show=False,
ylim=ylim)
ax = axs[0, i]
ax.set_title(value)
fname = self._fname('plots', 'evoked', 'jpg', keyword, event, condition,
*value_indices.keys())
fig.savefig(fname)
self._show_fig(fig, show)
def plotTopomapBands(self, event, condition, values=None, keyword=None,
tfr_keyword=None, power_type='npl', contrast=False,
tmin=None, tmax=None, tfr=True,
bands={'theta':(4, 8), 'alpha':(8, 15),
'beta':(15, 35), 'low-gamma':(35, 80),
'high-gamma':(80, 150)},
vmin=None, vmax=None, contours=6, time_points=5, show=True):
for band in bands:
self.plotTopomap(event, condition, values=values, keyword=keyword,
contrast=contrast, tmin=tmin, tmax=tmax, tfr=True,
tfr_keyword=tfr_keyword, power_type=power_type,
band_struct=(band, bands[band][0], bands[band][1]),
vmin=vmin, vmax=vmax, contours=contours,
time_points=time_points, show=show)
def plotTopomap(self, event, condition, values=None, keyword=None,
tfr_keyword=None, power_type='npl', contrast=False,
tmin=None, tmax=None, tfr=False, band_struct=None,
vmin=None, vmax=None, contours=6, time_points=5,
show=True):
from mne.time_frequency import AverageTFR
from mne import EvokedArray
epochs = self._load_epochs(event, keyword=keyword)
values = self._default_values(condition, values, contrast)
value_indices = self._get_indices(epochs, condition, values)
tmin, tmax = self._default_t(event, tmin, tmax)
epochs = epochs.crop(tmin=tmin, tmax=tmax)
times = epochs.times
info = epochs.info
band_title = '%s ' % (band_struct[0] if band_struct is not None else '')
if tfr:
values_dict, frequencies = \
self._get_tfr_data(event, condition, values, tfr_keyword,
power_type, value_indices,
band=band_struct, mean_and_std=False,
band_mean=False)
else:
values_dict = self._get_data(epochs, values, value_indices,
mean_and_std=False)
frequencies = None
if contrast:
fig, axes = plt.subplots(1, time_points+(not tfr))
fig.suptitle(band_title + '%s %s Contrast' % (values[0], values[1]))
epochs_0 = values_dict[values[0]]
epochs_1 = values_dict[values[1]]
if tfr:
nave = min([epochs_0.shape[0], epochs_1.shape[0]])
epochs_0 = np.swapaxes(epochs_0, 2, 3)
epochs_1 = np.swapaxes(epochs_1, 2, 3)
tfr_con_data = epochs_1.mean(axis=0) - epochs_0.mean(axis=0)
evo_con = AverageTFR(info, tfr_con_data, times, frequencies, nave)
dt = (tmax-tmin)/time_points
for i, t in enumerate(np.linspace(tmin, tmax, time_points)):
evo_con.plot_topomap(colorbar=True if i == time_points-1 else False,
vmin=vmin, vmax=vmax, contours=contours, axes=axes[i],
title='time=%0.1f' % t, tmin=t-dt/2, tmax=t+dt/2, show=False)
else:
                evo_con_data = epochs_1.mean(axis=0) - epochs_0.mean(axis=0)
evo_con = EvokedArray(evo_con_data, info, tmin=tmin)
evo_con.plot_topomap(colorbar=True, vmin=vmin, vmax=vmax,
contours=contours, axes=axes, show=False)
fig.savefig(self._fname('plots', 'topo', 'jpg', event, condition,
values[0], values[1],
'' if band_struct is None else band_struct[0]))
self._show_fig(fig, show)
else:
for i, value in enumerate(values):
fig, axes = plt.subplots(1, time_points+(not tfr))
fig.suptitle(band_title + '%s %s' % (condition, value))
epochs_data = values_dict[value]
if tfr:
nave = epochs_data.shape[0]
evo_data = np.swapaxes(epochs_data, 2, 3).mean(axis=0)
evo = AverageTFR(info, evo_data, times, frequencies, nave)
dt = (tmax-tmin)/time_points
for i, t in enumerate(np.linspace(tmin, tmax, time_points)):
evo.plot_topomap(colorbar=True if i == time_points-1 else False,
vmin=vmin, vmax=vmax, contours=contours, axes=axes[i],
title='time=%0.1f' % t, tmin=t-dt/2, tmax=t+dt/2, show=False)
else:
evo = EvokedArray(epochs_data.mean(axis=0), info, tmin=tmin)
evo.plot_topomap(colorbar=True, vmin=vmin, vmax=vmax,
contours=contours, axes=axes, show=False)
fig.savefig(self._fname('plots', 'topo', 'jpg', event, condition,
value, '' if band_struct is None else band_struct[0]))
self._show_fig(fig, show)
def dropEpochsByBehaviorIndices(self, bad_indices, event, keyword_in=None,
keyword_out=None):
keyword_out = keyword_in if keyword_out is None else keyword_out
df = read_csv(self.behavior)
epochs = self._load_epochs(event, keyword=keyword_in)
good_indices = [i for i in range(len(df)) if i not in bad_indices]
epochs_indices = self._behavior_to_epochs_indices(epochs, good_indices)
self._save_epochs(epochs[epochs_indices], event, keyword=keyword_out)
def markBadChannels(self, bad_channels, event=None, keyword_in=None,
keyword_out=None):
keyword_out = keyword_in if keyword_out is None else keyword_out
if event is None:
raw = self._load_raw(keyword=keyword_in)
raw.info['bads'] += bad_channels
self._save_raw(raw, keyword=keyword_out)
else:
            if event == 'all':
                for event in self.events:
                    self.markBadChannels(bad_channels, event=event,
                                         keyword_in=keyword_in,
                                         keyword_out=keyword_out)
                return
epochs = self._load_epochs(event, keyword=keyword_in)
epochs.info['bads'] += bad_channels
self._save_epochs(epochs, event, keyword=keyword_out)
def alignBaselineEpochs(self, event, keyword=None):
epochs = self._load_epochs(event, keyword=keyword)
bl_epochs = self._load_epochs('Baseline', keyword=keyword)
exclude = [i for i in range(len(bl_epochs)) if
i not in epochs.selection]
bl_epochs.drop(exclude)
self._save_epochs(bl_epochs, 'Baseline', keyword=keyword)
def plotEvoked(self, event, condition=None, values=None,
epochs=None, keyword=None, image=True,
ylim={'eeg':[-10, 20]}, l_freq=None, h_freq=None,
tmin=None, tmax=None, detrend=1, seed=11, downsample=True,
picks=None, show=True):
from mne import pick_types
epochs = self._prepare_epochs(event, epochs, keyword, tmin, tmax,
l_freq, h_freq)
if condition is None:
values = ['all']
value_indices = {'all':[]}
nTR = len(epochs)
else:
values = self._default_values(condition, values=values)
value_indices = self._get_indices(epochs, condition, values)
if downsample:
np.random.seed(seed)
nTR = min([len(value_indices[value]) for value in value_indices])
if picks is not None:
picks = pick_types(epochs.info, meg=self.meg, eeg=self.eeg,
eog=False, include=picks)
x_dim = (1 + image) * (2 * self.meg + self.eeg)
y_dim = len(values)
fig, axs = plt.subplots(x_dim, y_dim, figsize=(5 * y_dim, 5 * x_dim))
fig.subplots_adjust(hspace=0.3, wspace=0.3)
if not image and self.eeg and not self.meg:
axs = axs[np.newaxis,:]
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
for i, value in enumerate(values):
if not value in value_indices:
continue
if value == 'all':
evoked = epochs.average()
indices = range(len(epochs))
else:
indices = value_indices[value]
if condition is not None and downsample:
print('Subsampling %i/%i %s %s.' % (nTR, len(indices), condition,
value))
np.random.shuffle(indices)
indices = indices[:nTR]
evoked = epochs[indices].average()
if detrend:
evoked = evoked.detrend(order=detrend)
if y_dim > 1:
axs2 = axs[:, i]
else:
axs2 = axs
axs3 = ([axs2[0], axs2[1], axs2[2]] if self.meg and self.eeg else
[axs2[0]] if self.eeg else [axs2[0], axs2[1]])
evoked.plot(axes=axs3, show=False, ylim=ylim, picks=picks)
axs2[0].set_title('%s %s %s %s' % (self.subject, event, condition, value) +
(', %i trials used'% len(indices)))
if image:
axs3 = ([axs2[3], axs2[4], axs2[5]] if self.meg and self.eeg else
[axs2[1]] if self.eeg else [axs2[2], axs2[3]])
evoked.plot_image(axes=axs3, show=False, clim=ylim, picks=picks)
fname = self._fname('plots', 'evoked', 'jpg', keyword, event, condition,
*value_indices.keys())
fig.savefig(fname)
self._show_fig(fig, show)
def _show_fig(self, fig, show):
if show:
fig.show()
else:
plt.close(fig)
def _prepare_epochs(self, event, epochs, keyword, tmin, tmax,
l_freq, h_freq):
tmin, tmax = self._default_t(event, tmin, tmax)
if epochs is None:
epochs = self._load_epochs(event, keyword=keyword)
else:
epochs = epochs.copy()
if l_freq is not None or h_freq is not None:
epochs = epochs.filter(l_freq=l_freq, h_freq=h_freq)
epochs = epochs.crop(tmin=tmin, tmax=tmax)
return epochs
def _default_t(self, event, tmin=None, tmax=None, buffered=False):
if event in self.responses:
            _, tmin2, tmax2 = self.responses[event]
elif event in self.events:
_, tmin2, tmax2 = self.events[event]
elif event == 'Baseline':
_, tmin2, tmax2 = self.baseline
else:
raise ValueError('Event %s not recognized' % event)
if tmin is None:
tmin = tmin2
if tmax is None:
tmax = tmax2
if buffered:
tmin -= self.tbuffer
tmax += self.tbuffer
return tmin, tmax
def _default_vs(self, epochs_mean, epochs_std, vmin, vmax):
if vmin is None:
vmin = (epochs_mean-epochs_std).min()*1.1
if vmax is None:
vmax = (epochs_mean+epochs_std).max()*1.1
return vmin, vmax
def _behavior_to_epochs_indices(self, epochs, indices):
return [self._behavior_to_epochs_index(epochs, i) for i in indices if
self._behavior_to_epochs_index(epochs, i)]
def _behavior_to_epochs_index(self, epochs, ind):
if ind in epochs.events[:, 2]:
return list(epochs.events[:, 2]).index(ind)
def _get_binned_indices(self, epochs, condition, bins):
df = read_csv(self.behavior)
bin_indices = {}
h, edges = np.histogram([cd for cd in df[condition] if not
np.isnan(cd)], bins=bins)
for j in range(1, len(edges)):
indices = [i for i in range(len(df)) if
df[condition][i] >= edges[j-1] and
df[condition][i] <= edges[j]]
name = '%.2f-%.2f, count %i' % (edges[j-1], edges[j], len(indices))
bin_indices[name] = self._behavior_to_epochs_indices(epochs, indices)
return bin_indices
def _get_indices(self, epochs, condition, values):
from pandas import read_csv
df = read_csv(self.behavior)
n = len(df)
value_indices = {}
        numeric_bins = (len(values) > 4 and
                        all([isinstance(val, int) or isinstance(val, float)
                             for val in values]))
        if numeric_bins:
            binsize = float(values[1] - values[0])  # assumes evenly spaced bin centers
        for value in values:
            if numeric_bins:
indices = [i for i in range(n) if
df[condition][i] >= value - binsize/2 and
value + binsize/2 >= df[condition][i]]
else:
indices = [i for i in range(n) if df[condition][i] == value]
epochs_indices = self._behavior_to_epochs_indices(epochs, indices)
if epochs_indices:
value_indices[value] = epochs_indices
value_indices['all'] = [i for value in value_indices for i in value_indices[value]]
return value_indices
def channelPlot(self, event, condition, values=None, keyword=None,
butterfly=False, contrast=False, aux=False,
tmin=None, tmax=None, vmin=None, vmax=None, show=True):
self._plotter_main(event, condition, values, butterfly=butterfly,
contrast=contrast, aux=aux, keyword=keyword,
tmin=tmin, tmax=tmax, vmin=vmin, vmax=vmax, show=show)
def plotTFR(self, event, condition, values=None, keyword=None,
tfr_keyword=None, power_type='npl', contrast=False,
butterfly=False, aux=False, tmin=None, tmax=None,
vmin=None, vmax=None, bands={'theta': (4, 8),
'alpha': (8, 15), 'beta': (15, 35), 'low-gamma': (35, 80),
'high-gamma': (80, 150)}):
        # plots the time-frequency representation of a particular event and
        # condition (or all events and conditions) computed by makeWavelets,
        # whose defaults are 4-150 Hz and 2-75 cycles over 32 log-spaced steps
        # (a hedged usage sketch follows this method)
tfr_keyword = keyword if tfr_keyword is None else tfr_keyword
if bands:
for band in bands:
print(band + ' band')
fmin, fmax = bands[band]
band_struct = (band, fmin, fmax)
self._plotter_main(event, condition, values, contrast=contrast,
aux=aux, keyword=keyword, butterfly=butterfly,
tfr=True, band=band_struct,
tfr_keyword=tfr_keyword, power_type=power_type,
tmin=tmin, tmax=tmax, vmin=vmin, vmax=vmax)
else:
values = self._default_values(condition, values, contrast)
for value in values:
self._plotter_main(event, condition,[value], contrast=contrast,
aux=aux, keyword=keyword, butterfly=butterfly,
tfr=True, band=None, tfr_keyword=tfr_keyword,
power_type=power_type, tmin=tmin, tmax=tmax,
vmin=vmin, vmax=vmax)
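    # Hedged usage sketch for plotTFR (not part of the original class; the instance
    # name `mee`, the event 'Stimulus' and the condition 'State' are hypothetical,
    # and TFRs are assumed to have been computed already with makeWavelets):
    #     mee.plotTFR('Stimulus', 'State', power_type='npl')   # per-band traces
    #     mee.plotTFR('Stimulus', 'State', bands=None)         # full heatmaps per value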
def _setup_plot(self, ch_dict, butterfly=False, contrast=False, values=None):
if butterfly:
nplots = 1 if contrast else len(values)
fig, ax_arr = plt.subplots(1, nplots)
            if nplots == 1:
ax_arr = [ax_arr]
else:
dim1 = int(np.ceil(np.sqrt(len(ch_dict))))
dim2 = int(np.ceil(float(len(ch_dict))/dim1))
fig, ax_arr = plt.subplots(dim1, dim2, sharex=True, sharey=True)
fig.set_tight_layout(False)
fig.subplots_adjust(left=0.05, right=0.9, top=0.9, bottom=0.1,
wspace=0.01, hspace=0.2)
ax_arr = ax_arr.flatten()
for i, ax in enumerate(ax_arr):
ax.set_facecolor('white')
ax.set_frame_on(False)
if i % dim1:
ax.set_yticks([])
if i < len(ax_arr)-dim2:
ax.set_xticks([])
return fig, ax_arr
def _get_ch_dict(self, inst, aux=False):
from mne import pick_types
if aux:
chs = pick_types(inst.info, meg=False, eog=True, ecg=True)
else:
chs = pick_types(inst.info, meg=self.meg, eeg=self.eeg)
return {ch:inst.ch_names[ch] for ch in chs}
def _default_values(self, condition, values=None, contrast=False):
if values is None:
df = read_csv(self.behavior)
values = np.unique([cd for cd in df[condition] if
(type(cd) is str or type(cd) is np.string_ or
type(cd) is np.str_ or not np.isnan(cd))])
if (len(values) > 5 and
all([isinstance(val, int) or isinstance(val, float) for val in values])):
                _, edges = np.histogram(values, bins=5)
                values = (edges[:-1] + edges[1:]) / 2  # use bin centers as values
if type(contrast) is list and len(contrast) == 2:
values = contrast
elif contrast:
values = [max(values), min(values)]
return values
def _get_tfr_data(self, event, condition, values, keyword, power_type,
value_indices, band=None, mean_and_std=True,
band_mean=True, data_type=None):
values_dict = {}
frequencies_old = None
for value in values:
epochs_data, frequencies,_ = self._load_TFR(event, condition, value,
power_type,
data_type=data_type,
keyword=keyword)
epochs_data = np.swapaxes(epochs_data, 2, 3)
if frequencies_old is not None and frequencies != frequencies_old:
raise ValueError('TFRs must be compared for the same ' +
'frequencies')
if band is not None:
band_name, fmin, fmax = band
band_indices = [index for index in range(len(frequencies)) if
frequencies[index] >= fmin and
frequencies[index] <= fmax]
epochs_std = \
np.sqrt(epochs_data[:,:,:, band_indices].mean(axis=3)**2 +
epochs_data[:,:,:, band_indices].std(axis=3)**2)
epochs_data = epochs_data[:,:,:, band_indices]
if band_mean:
epochs_data = epochs_data.mean(axis=3)
if mean_and_std:
if band is None:
epochs_std = np.sqrt(epochs_data.mean(axis=0)**2+
epochs_data.std(axis=0)**2)
else:
epochs_std = np.sqrt(epochs_std.mean(axis=0)**2+
epochs_std.std(axis=0)**2)
epochs_mean = epochs_data.mean(axis=0)
values_dict[value] = (epochs_mean, epochs_std)
else:
values_dict[value] = epochs_data
if band is not None:
            frequencies = [f for f in frequencies if f >= band[1] and f <= band[2]]
return values_dict, frequencies
def _get_data(self, epochs, values, value_indices, mean_and_std=True):
epochs_data = epochs.get_data()
if mean_and_std:
epochs_std = epochs_data.std(axis=0)
epochs_mean = epochs_data.mean(axis=0)
values_dict = {'all':(epochs_mean, epochs_std)}
else:
values_dict = {'all':epochs_data}
for value in values:
indices = value_indices[value]
if mean_and_std:
epochs_std = epochs_data[indices].std(axis=0)
epochs_mean = epochs_data[indices].mean(axis=0)
values_dict[value] = (epochs_mean, epochs_std)
else:
values_dict[value] = epochs_data[indices]
return values_dict
def getEventTimes(self, event):
        '''Find event times from the raw data so that dropped epochs still
        have an assigned event time in case they ever need a designation.'''
raw = self._load_raw()
stim_ch = self._get_stim_ch(event)
events = self._find_events(raw, stim_ch)
return raw.times[events[:, 0]]
def _get_stim_ch(self, event):
if event == 'Baseline':
stim_ch,_,_ = self.baseline
elif event in self.events:
stim_ch,_,_ = self.events[event]
elif event in self.responses:
stim_ch,_,_ = self.responses[event]
else:
raise ValueError('Event %s not recognized' % event)
return stim_ch
def _add_last_square_legend(self, fig,*labels):
ax = fig.add_axes([0.92, 0.1, 0.05, 0.8])
ax.axis('off')
for label in labels:
ax.plot(0, 0, label=label)
ax.legend(loc='center')
def _exclude_unpicked_types(self, inst):
return inst.pick_types(meg=self.meg, eeg=self.eeg, eog=True,
ecg=True, emg=True, stim=True, ref_meg=True,
misc=True, resp=True, chpi=True,
exci=True, ias=True, syst=True, seeg=self.seeg,
dipole=True, gof=True, bio=True, ecog=self.ecog,
fnirs=True, exclude=[])
def _pick_types(self, inst, dt, aux=False):
return inst.copy().pick_types(meg=dt if dt in ['grad', 'mag'] else False,
eeg=True if dt == 'eeg' else False,
ecog=True if dt == 'ecog' else False,
seeg=True if dt == 'seeg' else False,
eog=aux, ecg=aux)
def _plotter_main(self, event, condition, values, keyword=None,
aux=False, butterfly=False, contrast=False,
tfr=False, band=None, tfr_keyword=None,
power_type=None, tmin=None, tmax=None,
vmin=None, vmax=None, cpt=False, cpt_p=None,
cpt_keyword=None, show=True):
heatmap = tfr and band is None
epochs = self._load_epochs(event, keyword=keyword)
values = self._default_values(condition, values, contrast)
value_indices = self._get_indices(epochs, condition, values)
tmin, tmax = self._default_t(event, tmin=tmin, tmax=tmax)
epochs = epochs.crop(tmin=tmin, tmax=tmax)
times = epochs.times
for dt in self._get_data_types():
epo = self._pick_types(epochs, dt)
ch_dict = self._get_ch_dict(epo, aux=aux)
fig, axs = self._setup_plot(ch_dict, butterfly=butterfly, values=values)
if tfr:
values_dict, frequencies = \
self._get_tfr_data(event, condition, values, tfr_keyword, power_type,
value_indices, band=band, data_type=dt)
else:
values_dict = self._get_data(epo, values, value_indices)
frequencies = None
if contrast:
epochs_mean0, epochs_std0 = values_dict[values[0]]
epochs_mean1, epochs_std1 = values_dict[values[1]]
epochs_std = np.sqrt(epochs_std0**2 + epochs_std1**2)
epochs_mean = epochs_mean1-epochs_mean0
if cpt:
clusters, cluster_p_values, times, ch_dict, frequencies, band = \
self._load_CPT(event, condition,
'%s-%s' % (values[0], values[1]),
data_type=dt, keyword=cpt_keyword)
else:
clusters, cluster_p_values = None, None
self._plot_decider(epochs_mean, epochs_std, times, axs, fig, butterfly,
contrast, values, ch_dict, tfr, band, frequencies,
vmin, vmax, clusters, cluster_p_values, cpt_p)
else:
for i, value in enumerate(values):
epochs_mean, epochs_std = values_dict[value]
if cpt:
clusters, cluster_p_values, times, ch_dict, frequencies, band = \
self._load_CPT(event, condition, value,
data_type=dt,
keyword=cpt_keyword)
else:
clusters, cluster_p_values = None, None
if butterfly:
axs[i].set_title(value)
self._plot_decider(epochs_mean, epochs_std, times, axs[i], fig,
butterfly, contrast, values, ch_dict, tfr,
band, frequencies, vmin, vmax, clusters,
cluster_p_values, cpt_p)
else:
self._plot_decider(epochs_mean, epochs_std, times, axs, fig,
butterfly, contrast, values, ch_dict, tfr,
band, frequencies, vmin, vmax, clusters,
cluster_p_values, cpt_p)
if not (heatmap or butterfly):
if contrast:
self._add_last_square_legend(fig, '%s-%s' % (values[0], values[1]))
else:
self._add_last_square_legend(fig,*values)
self._prepare_fig(fig, event, condition, values, aux=aux, butterfly=butterfly,
contrast=contrast, cpt=cpt, tfr=tfr, band=band,
power_type=power_type, keyword=keyword,
data_type=dt, show=show)
def _plot_decider(self, epochs_mean, epochs_std, times, axs, fig, butterfly,
contrast, values, ch_dict, tfr, band, frequencies, vmin, vmax,
clusters, cluster_p_values, cpt_p):
if epochs_mean.shape[-1] != times.shape[0] and clusters:
raise ValueError('Time mismatch, likely you used different ' +
'times for the cluster permutation test')
vmin, vmax = self._default_vs(epochs_mean[list(ch_dict.keys())],
epochs_std[list(ch_dict.keys())], vmin, vmax)
if tfr:
if band is not None:
self._plot_band(epochs_mean, epochs_std, times, axs, ch_dict,
butterfly, vmin, vmax, clusters,
cluster_p_values, cpt_p)
else:
self._plot_heatmap(epochs_mean, epochs_std, times, axs, fig,
butterfly, ch_dict, frequencies, vmin, vmax,
clusters, cluster_p_values, cpt_p)
else:
self._plot_voltage(epochs_mean, epochs_std, times, axs, butterfly,
ch_dict, vmin, vmax, clusters,
cluster_p_values, cpt_p)
def _plot_voltage(self, epochs_mean, epochs_std, times, axs, butterfly, ch_dict,
vmin, vmax, clusters, cluster_p_values, cpt_p):
for i, ch in enumerate(ch_dict):
if butterfly:
ax = axs
else:
ax = axs[i]
ax.set_title(ch_dict[ch], fontsize=6, pad=0)
ax.axvline(0, color='k')
ax.set_ylim(vmin, vmax)
v = epochs_mean[ch]-epochs_mean[ch].mean()
lines = ax.plot(times, v)
if not butterfly:
ax.fill_between(times, v-epochs_std[ch], v+epochs_std[ch],
color=lines[0].get_color(), alpha=0.5)
if clusters is not None and not butterfly:
for c, p in zip(clusters, cluster_p_values):
if p <= cpt_p:
for i, ch in enumerate(ch_dict):
sig_times = times[np.where(c[i])[0]]
if sig_times.size > 0:
h = axs[i].axvspan(sig_times[0],
sig_times[-1],
color='r', alpha=0.3)
def _plot_heatmap(self, epochs_mean, epochs_std, times, axs, fig, butterfly,
ch_dict, frequencies, vmin, vmax, clusters,
cluster_p_values, cpt_p):
from matplotlib.colors import SymLogNorm #, LogNorm
from matplotlib.cm import cool
if clusters is not None:
current_data = np.zeros(epochs_mean.shape)
for i,(freq_cs, freq_ps) in enumerate(zip(clusters, cluster_p_values)):
for cluster, p in zip(freq_cs, freq_ps):
if p <= cpt_p:
current_data[:,:, i] += cluster*vmax
else:
current_data[:,:, i] += cluster*vmax*0.5
norm = SymLogNorm(vmax/10, vmin=vmin, vmax=vmax)
tmin, tmax = times.min(), times.max()
fmin, fmax = frequencies.min(), frequencies.max()
extent=[tmin, tmax, fmin, fmax]
aspect=1.0/(fmax-fmin)
for i, ch in enumerate(ch_dict):
if butterfly:
ax = axs
else:
ax = axs[i]
ax.set_title(ch_dict[ch], fontsize=6, pad=0)
ax.invert_yaxis()
if clusters is None:
current_data = cool(norm(epochs_mean[ch].T))
im = ax.imshow(current_data[::-1], aspect=aspect, extent=extent,
cmap=cool, norm=norm)
else:
image = np.ones((current_data.shape[0],
current_data.shape[1], 3))*vmax
image[:,:, 0] = current_data[:,:, i].T
image[:,:, 1:2] = 0
                im = ax.imshow(image[::-1], aspect=aspect, norm=norm, extent=extent)
xbuffer = (tmax-tmin)/5
ax.set_xticks(np.round(np.linspace(tmin+xbuffer, tmax-xbuffer, 2), 2))
frequency_labels = np.round(frequencies[::10], 2)
ybuffer = (fmax-fmin)/5
ax.set_yticks(np.round(np.linspace(fmin+ybuffer, fmax-ybuffer, 3), 0))
cbar_ax = fig.add_axes([0.92, 0.1, 0.03, 0.8])
fig.colorbar(im, cax=cbar_ax)
def _plot_band(self, epochs_mean, epochs_std, times, axs, ch_dict, butterfly,
vmin, vmax, clusters, cluster_p_values, cpt_p):
for i, ch in enumerate(ch_dict):
if butterfly:
ax = axs
else:
ax = axs[i]
ax.set_title(ch_dict[ch], fontsize=6, pad=0)
lines = ax.plot(times, epochs_mean[ch])
if not butterfly:
ax.fill_between(times, epochs_mean[ch]-epochs_std[ch],
epochs_mean[ch]+epochs_std[ch],
color=lines[0].get_color(), alpha=0.5)
ax.axvline(0, color='k')
ax.set_ylim(vmin, vmax)
if clusters is not None and not butterfly:
for c, p in zip(clusters, cluster_p_values):
if p <= cpt_p:
for i, ch in enumerate(ch_dict):
sig_times = times[np.where(c[i])[0]]
if sig_times.size > 0:
h = axs[i].axvspan(sig_times[0],
sig_times[-1],
color='r', alpha=0.3)
def _prepare_fig(self, fig, event, condition, values, aux=False,
butterfly=False, contrast=False, tfr=False, band=None,
power_type=None, cpt=False, keyword=None,
data_type=None, show=True):
if tfr:
if band:
ylabel = 'Relative Abundance'
else:
ylabel = 'Frequency (Hz)'
else:
ylabel = r"$\mu$V"
fig.text(0.02, 0.5, ylabel, va='center', rotation='vertical')
fig.text(0.5, 0.02, 'Time (s)', ha='center')
fig.set_size_inches(20, 15)
title = (event + ' ' + condition + ' ' +
' '.join([str(value) for value in values]) +
' contrast'*contrast)
if tfr and band:
bandname,_,_ = band
title += (' ' + bandname + ' band')
else:
bandname = ''
if power_type:
title += (' ' + {'t':'Total', 'npl':'Non-Phase-Locked',
'pl':'Phase-Locked'}[power_type])
if data_type:
title += ' ' + data_type
fig.suptitle(title)
fig.savefig(self._fname('plots', 'channel_plot', 'jpg',
contrast*'contrast', 'tfr'*tfr, 'cpt'*cpt,
'aux'*aux, 'butterfly'*butterfly,
(bandname + '_band')*(band is not None),
power_type, keyword, data_type,
event, condition,*values))
self._show_fig(fig, show)
def makeWavelets(self, event, condition, values=None, keyword_in=None,
keyword_out=None, tmin=None, tmax=None, bl_tmin=None,
bl_tmax=None, power_type='npl',
fmin=4, fmax=150, nmin=2, nmax=75, steps=32,
compressed=False, gain_normalize=False,
baseline_subtract=False, save_baseline=True,
overwrite=False):
from mne.time_frequency import tfr_array_morlet
keyword_out = keyword_in if keyword_out is None else keyword_out
if gain_normalize and baseline_subtract:
raise ValueError('Gain normalize and baseline subtract are ' +
'both True, this does not make sense')
#note compression may not always work
values = self._default_values(condition, values)
frequencies = np.logspace(np.log10(fmin), np.log10(fmax), steps)
n_cycles = np.logspace(np.log10(nmin), np.log10(nmax), steps)
epochs = self._load_epochs(event, keyword=keyword_in)
tmin, tmax = self._default_t(event, tmin=tmin, tmax=tmax, buffered=True)
epochs = epochs.crop(tmin=tmin, tmax=tmax)
value_indices = self._get_indices(epochs, condition, values)
if gain_normalize or baseline_subtract or save_baseline:
bl_epochs = self._load_epochs('Baseline', keyword=keyword_in)
bl_tmin, bl_tmax = self._default_t('Baseline', tmin=bl_tmin,
tmax=bl_tmax, buffered=True)
bl_epochs = bl_epochs.crop(tmin=bl_tmin, tmax=bl_tmax)
bl_value_indices = self._get_indices(bl_epochs, condition, values)
for dt in self._get_data_types():
print(dt)
epo = self._pick_types(epochs, dt)
values_dict = self._get_data(epo, values, value_indices,
mean_and_std=False)
if gain_normalize or baseline_subtract or save_baseline:
bl_epo = self._pick_types(bl_epochs, dt)
bl_values_dict = self._get_data(bl_epo, values, bl_value_indices,
mean_and_std=False)
for value in values:
if (self._has_TFR(event, condition, value, power_type,
data_type=dt, keyword=keyword_out) and
not overwrite):
self._overwrite_error('TFR', power_type, keyword=keyword_out)
if gain_normalize or baseline_subtract or save_baseline:
if power_type == 't':
bl_current_data = bl_values_dict[value]
elif power_type == 'npl':
bl_current_data = (bl_values_dict[value] -
bl_values_dict[value].mean(axis=0))
elif power_type == 'pl':
bl_current_data = bl_values_dict[value].mean(axis=0)
bl_current_data = np.expand_dims(bl_current_data, axis=0)
else:
raise ValueError('Unrecognized power type %s ' % power_type +
'please use \'t\', \'npl\' or \'pl\'')
bl_tfr = tfr_array_morlet(bl_current_data,
sfreq=bl_epochs.info['sfreq'],
freqs=frequencies, n_cycles=n_cycles,
output='power')
bl_epo = bl_epo.crop(tmin=bl_tmin+self.tbuffer,
tmax=bl_tmax-self.tbuffer)
bl_tind = bl_epochs.time_as_index(bl_epo.times) #crop buffer
bl_tfr = np.take(bl_tfr, bl_tind, axis=-1)
if save_baseline:
self._save_TFR(bl_tfr, frequencies, n_cycles, 'Baseline',
condition, value, power_type,
data_type=dt, keyword=keyword_out,
compressed=compressed)
bl_power = bl_tfr.mean(axis=0).mean(axis=-1) #average over epochs, times
bl_power = bl_power[np.newaxis,:,:, np.newaxis]
if power_type == 't':
current_data = values_dict[value]
elif power_type == 'npl':
current_data = (values_dict[value] -
values_dict[value].mean(axis=0))
elif power_type == 'pl':
current_data = values_dict[value].mean(axis=0)
current_data = np.expand_dims(current_data, axis=0)
else:
raise ValueError('Unrecognized power type %s ' % power_type +
'please use \'t\', \'npl\' or \'pl\'')
tfr = tfr_array_morlet(current_data, sfreq=epochs.info['sfreq'],
freqs=frequencies, n_cycles=n_cycles,
output='power')
epo = epo.crop(tmin=tmin+self.tbuffer, tmax=tmax-self.tbuffer)
tind = epochs.time_as_index(epo.times)
tfr = np.take(tfr, tind, axis=-1)
if gain_normalize or baseline_subtract:
tile_shape = (tfr.shape[0], 1, 1, tfr.shape[-1])
bl_power = np.tile(bl_power, tile_shape)
if gain_normalize:
tfr /= bl_power #normalize by gain compared to baseline
if baseline_subtract:
tfr -= bl_power #subtract baseline power to normalize
self._save_TFR(tfr, frequencies, n_cycles, event, condition, value,
power_type, data_type=dt, keyword=keyword_out,
compressed=compressed)
del tfr
del bl_tfr
def psdMultitaper(self, keyword=None, ch='Oz', N=6, deltaN=0.25, NW=3.0,
fmin=0.5, fmax=25, BW=1.0, assign_states=True,
labels={'Wake':'red', 'Sleep':'white'}, overwrite=False,
n_jobs=10, vmin=None, vmax=None, adaptive=False,
jackknife=True, low_bias=True):
# full-night: N = 30.0 s, deltaN = 5.0 s, deltaf = 1.0 Hz, TW = 15, L = 29
# ultradian: N = 6.0 s, deltaN = 0.25 s, deltaf = 1.0 Hz, TW = 3, L = 5
# microevent: N = 2.5 s, deltaN = 0.05 s, deltaf = 4.0 Hz, TW = 5, L = 9
#ch = 'EEG072'
try:
import nitime.algorithms as tsa
except:
print('Unable to import nitime... you won\'t be able to use this feature')
from .psd_multitaper_plot_tools import ButtonClickProcessor
from joblib import Parallel, delayed
raw = self._load_raw(keyword=keyword)
ch_ind = raw.ch_names.index(ch)
raw_data = raw.get_data(picks=ch_ind).flatten()
Fs = raw.info['sfreq']
n_full_windows = int(np.floor(raw.times[-1] / N))
t_end = raw.times[int(n_full_windows * N * Fs)]
n_windows = int((n_full_windows-1) * (N / deltaN)) + 1
if self._has_PSD(keyword) and not overwrite:
image = self._load_PSD(keyword)
else:
imsize = int(Fs / 2 * N) + 1
image = np.zeros((imsize, int(n_full_windows * (N / deltaN))))
counters = np.zeros((int(n_full_windows * (N / deltaN))))
with Parallel(n_jobs=n_jobs) as parallel:
results = parallel(delayed(tsa.multi_taper_psd)(
raw_data[int(i * deltaN * Fs): int(i * deltaN * Fs)+int(N * Fs)],
Fs=Fs, NW=NW, BW=BW, adaptive=adaptive,
jackknife=jackknife, low_bias=low_bias)
for i in tqdm(range(n_windows)))
fs, psd_mts, nus = zip(*results)
for i in range(n_windows):
for j in range(i, i+int(N / deltaN)):
image[:, j] += np.log10(psd_mts[i])
counters[j] += 1
for k in range(imsize):
image[k] /= counters
f = np.linspace(0, Fs / 2, imsize)
f_inds = [i for i, freq in enumerate(f) if
(freq >= fmin and freq <= fmax)]
image = image[f_inds]
self._save_PSD(image, keyword)
fig2, ax2 = plt.subplots()
fig2.set_size_inches(12, 8)
fig2.subplots_adjust(right=0.8)
im2 = ax2.imshow(image, aspect='auto', cmap='jet', vmin=vmin, vmax=vmax)
cax2 = fig2.add_axes([0.82, 0.1, 0.05, 0.8])
fig2.colorbar(im2, cax=cax2)
fig2.suptitle('Multitaper Spectrogram')
if assign_states:
drs = {label:[] for label in labels}
fig, ax1 = plt.subplots()
fig.suptitle('Multitaper Spectrogram')
fig.set_size_inches(12, 8)
fig.subplots_adjust(right=0.7)
buttons = []
button_height = 0.8 / (len(labels) + 1)
y0 = 0.1 + button_height/(len(labels) + 1)
for label in labels:
label_ax = fig.add_axes([0.85, y0, 0.1, button_height])
y0 += button_height + button_height / (len(labels) + 1)
buttons.append(ButtonClickProcessor(label_ax, label, labels[label],
ax1, drs, image))
im = ax1.imshow(image, aspect='auto', cmap='jet', vmin=vmin, vmax=vmax)
cax = fig.add_axes([0.72, 0.1, 0.05, 0.8])
fig.colorbar(im, cax=cax)
axs = [ax1, ax2]
else:
axs = [ax2]
for ax in axs:
ax.invert_yaxis()
ax.set_yticks(np.linspace(0, image.shape[0], 10))
ax.set_yticklabels(np.round(np.linspace(fmin, fmax, 10), 1))
ax.set_ylabel('Frequency (Hz)')
ax.set_xticks(np.linspace(0, image.shape[1], 10))
ax.set_xticklabels(np.round(np.linspace(0, t_end, 10)))
ax.set_xlabel('Time (s)')
fig2.savefig(self._fname('plots', 'psd_multitaper', 'jpg',
'N_%i_dN_%.2f' % (N, deltaN),
'fmin_%.2f_fmax_%.2f_NW_%i' % (fmin, fmax, NW)))
plt.close(fig2)
if assign_states:
plt.show(fig)
state_times = {label: [] for label in labels}
for label in drs:
for dr in drs[label]:
rect = dr.rect
start = rect.get_x() * deltaN
duration = rect.get_width() * deltaN
state_times[label].append((start, (start + duration)))
return state_times
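    # Hedged usage sketch applying the parameter presets noted at the top of
    # psdMultitaper (the instance name `mee` and the event name 'Stimulus' are
    # hypothetical, not part of the original code):
    #     # ultradian preset: N = 6.0 s, deltaN = 0.25 s, TW = 3
    #     state_times = mee.psdMultitaper(ch='Oz', N=6, deltaN=0.25, NW=3.0)
    #     mee.assignConditionFromStateTimes('Stimulus', state_times, condition='State')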
def assignConditionFromStateTimes(self, event, state_times,
condition='State', no_state='Neither'):
event_times = self.getEventTimes(event)
states = np.tile(no_state, len(event_times))
for i, t in enumerate(event_times):
for state in state_times:
if any([t <= tmax and t >= tmin for
tmin, tmax in state_times[state]]):
states[i] = state
try:
df = read_csv(self.behavior)
df[condition] = states
except Exception as e:
df = DataFrame({condition:states})
self._add_meta_data({'Behavior':self.behavior}, overwrite=True)
if not op.isdir(op.dirname(self.behavior)):
os.makedirs(op.dirname(self.behavior))
df.to_csv(self.behavior)
def CPTByBand(self, event, condition, values=None, keyword_in=None,
keyword_out=None, tfr_keyword=None, power_type='npl',
tmin=None, tmax=None, threshold=6.0, aux=False,
bands={'theta':(4, 8), 'alpha':(8, 15),
'beta':(15, 35), 'low-gamma':(35, 80),
'high-gamma':(80, 150)},
contrast=False):
for band in bands:
fmin, fmax = bands[band]
band_struct = (band, fmin, fmax)
self.CPT(event, condition, values, keyword_in=keyword_in,
keyword_out=keyword_out, tfr_keyword=tfr_keyword,
power_type=power_type, tmin=tmin, tmax=tmax,
tfr=True, threshold=threshold, band=band_struct,
contrast=contrast, aux=aux)
def CPT(self, event, condition, values=None, keyword_in=None,
keyword_out=None, tmin=None, tmax=None, bl_tmin=None, bl_tmax=None,
aux=False, alpha=0.3, threshold=6.0, tfr=False, band=None,
tfr_keyword=None, power_type='npl', contrast=False,
n_permutations=2000, n_jobs=10, overwrite=False):
keyword_out = keyword_in if keyword_out is None else keyword_out
tfr_keyword = keyword_in if tfr_keyword is None else tfr_keyword
if power_type == 'pl':
raise ValueError('No epochs to permute for phase-locked power')
values = self._default_values(condition, values, contrast)
if all([self._has_CPT(event, condition, value=value, data_type=dt,
keyword=keyword_out) for dt in self._get_data_types()
for value in values]) and not overwrite:
self._overwrite_error('CPT', event=event, condition=condition,
keyword=keyword_out)
tmin, tmax = self._default_t(event, tmin, tmax)
epochs = self._load_epochs(event, keyword=keyword_in)
epochs = epochs.crop(tmin=tmin, tmax=tmax)
value_indices = self._get_indices(epochs, condition, values)
if not contrast:
bl_epochs = self._load_epochs('Baseline', keyword=keyword_in)
bl_tmin, bl_tmax = self._default_t('Baseline', tmin=bl_tmin,
tmax=bl_tmax)
bl_epochs = bl_epochs.crop(tmin=bl_tmin, tmax=bl_tmax)
bl_value_indices = self._get_indices(bl_epochs, condition, values)
for dt in self._get_data_types():
epo = self._pick_types(epochs, dt)
ch_dict = self._get_ch_dict(epo, aux=aux)
if not contrast:
bl_epo = self._pick_types(bl_epochs, dt)
if tfr:
values_dict, frequencies = \
self._get_tfr_data(event, condition, values,
tfr_keyword, power_type, value_indices,
band=band, mean_and_std=False,
data_type=dt)
if not contrast:
bl_values_dict, bl_frequencies = \
self._get_tfr_data('Baseline', condition, values,
tfr_keyword, power_type,
bl_value_indices,
band=band, mean_and_std=False,
data_type=dt)
else:
values_dict = self._get_data(epo, values,
value_indices, mean_and_std=False)
if not contrast:
bl_values_dict = self._get_data(bl_epo, values,
bl_value_indices,
mean_and_std=False)
frequencies = None
if contrast:
epochs_data0 = values_dict[values[0]]
epochs_data1 = values_dict[values[1]]
clusters, cluster_p_values = \
self._CPT(epochs_data0, epochs_data1, threshold,
n_permutations=n_permutations, n_jobs=n_jobs)
self._save_CPT(clusters, cluster_p_values, epochs.times, ch_dict,
frequencies, band, event, condition,
'%s-%s' % (values[0], values[1]),
data_type=dt, keyword=keyword_out)
else:
for value in values:
evo_data = values_dict[value]
bl_evo_data = bl_values_dict[value]
bl_evo_data = self._equalize_baseline_length(evo_data, bl_evo_data)
clusters, cluster_p_values = \
self._CPT(evo_data, bl_evo_data, threshold,
n_permutations=n_permutations, n_jobs=n_jobs)
self._save_CPT(clusters, cluster_p_values, epochs.times, ch_dict,
frequencies, band, event, condition,
value, data_type=dt, keyword=keyword_out)
def _equalize_baseline_length(self, value_data, bl_data):
bl_len = bl_data.shape[2]
val_len = value_data.shape[2]
if bl_len < val_len:
n_reps = int(val_len / bl_len)
remainder = val_len % bl_len
print('Using %.2f ' % (n_reps + float(remainder)/bl_len) +
                  'repetitions of the baseline period for permutation')
baseline = np.tile(bl_data, n_reps)
remainder_baseline = np.take(bl_data,
range(bl_len-remainder, bl_len),
axis=2)
#take from the end of the baseline period
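            # Worked example of the arithmetic above (illustrative numbers, not
            # from the data): with bl_len = 300 and val_len = 700, n_reps = 2 and
            # remainder = 100, so two tiled copies of the baseline plus its last
            # 100 samples give the 700 samples needed for the permutation.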
bl_data =
|
np.concatenate((baseline, remainder_baseline), axis=2)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta, date, time
import numpy as np
import pandas as pd
import pandas.lib as lib
import pandas.util.testing as tm
from pandas import Index
from pandas.compat import long, u, PY2
class TestInference(tm.TestCase):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
self.assertEqual(pd.lib.infer_dtype(arr), compare)
# object array of bytes
arr = arr.astype(object)
self.assertEqual(pd.lib.infer_dtype(arr), compare)
def test_isinf_scalar(self):
# GH 11352
self.assertTrue(lib.isposinf_scalar(float('inf')))
self.assertTrue(lib.isposinf_scalar(np.inf))
self.assertFalse(lib.isposinf_scalar(-np.inf))
self.assertFalse(lib.isposinf_scalar(1))
self.assertFalse(lib.isposinf_scalar('a'))
self.assertTrue(lib.isneginf_scalar(float('-inf')))
self.assertTrue(lib.isneginf_scalar(-np.inf))
self.assertFalse(lib.isneginf_scalar(np.inf))
self.assertFalse(lib.isneginf_scalar(1))
self.assertFalse(lib.isneginf_scalar('a'))
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = set(['', 'NULL', 'nan'])
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with tm.assertRaisesRegexp(ValueError, msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = set([-999, -999.0])
for coerce_type in (True, False):
out = lib.maybe_convert_numeric(data, nan_values, coerce_type)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
self.assertTrue(result.dtype == np.float64)
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
self.assertTrue(np.all(np.isnan(result)))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
class TestTypeInference(tm.TestCase):
_multiprocess_can_split_ = True
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
self.assertEqual(result, 'integer')
result = lib.infer_dtype([])
self.assertEqual(result, 'empty')
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'integer')
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'boolean')
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed-integer')
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'floating')
def test_string(self):
pass
def test_unicode(self):
pass
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'datetime64')
def test_date(self):
dates = [date(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
self.assertEqual(index.inferred_type, 'date')
def test_to_object_array_tuples(self):
r = (5, 6)
values = [r]
result = lib.to_object_array_tuples(values)
try:
# make sure record array works
from collections import namedtuple
record = namedtuple('record', 'x y')
r = record(5, 6)
values = [r]
result = lib.to_object_array_tuples(values) # noqa
except ImportError:
pass
def test_to_object_array_width(self):
# see gh-13320
rows = [[1, 2, 3], [4, 5, 6]]
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows)
tm.assert_numpy_array_equal(out, expected)
expected = np.array(rows, dtype=object)
out = lib.to_object_array(rows, min_width=1)
tm.assert_numpy_array_equal(out, expected)
expected = np.array([[1, 2, 3, None, None],
[4, 5, 6, None, None]], dtype=object)
out = lib.to_object_array(rows, min_width=5)
tm.assert_numpy_array_equal(out, expected)
def test_object(self):
# GH 7431
# cannot infer more than this as only a single element
arr = np.array([None], dtype='O')
result = lib.infer_dtype(arr)
self.assertEqual(result, 'mixed')
def test_categorical(self):
# GH 8974
from pandas import Categorical, Series
arr = Categorical(list('abc'))
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
arr = Categorical(list('abc'), categories=['cegfab'], ordered=True)
result = lib.infer_dtype(arr)
self.assertEqual(result, 'categorical')
result = lib.infer_dtype(Series(arr))
self.assertEqual(result, 'categorical')
class TestConvert(tm.TestCase):
def test_convert_objects(self):
arr = np.array(['a', 'b', np.nan, np.nan, 'd', 'e', 'f'], dtype='O')
result = lib.maybe_convert_objects(arr)
self.assertTrue(result.dtype == np.object_)
def test_convert_objects_ints(self):
# test that we can detect many kinds of integers
dtypes = ['i1', 'i2', 'i4', 'i8', 'u1', 'u2', 'u4', 'u8']
for dtype_str in dtypes:
arr = np.array(list(np.arange(20, dtype=dtype_str)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype_str))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.integer))
def test_convert_objects_complex_number(self):
for dtype in np.sctypes['complex']:
arr = np.array(list(1j * np.arange(20, dtype=dtype)), dtype='O')
self.assertTrue(arr[0].dtype == np.dtype(dtype))
result = lib.maybe_convert_objects(arr)
self.assertTrue(issubclass(result.dtype.type, np.complexfloating))
class Testisscalar(tm.TestCase):
def test_isscalar_builtin_scalars(self):
self.assertTrue(lib.isscalar(None))
self.assertTrue(lib.isscalar(True))
self.assertTrue(lib.isscalar(False))
self.assertTrue(lib.isscalar(0.))
self.assertTrue(lib.isscalar(np.nan))
self.assertTrue(lib.isscalar('foobar'))
self.assertTrue(lib.isscalar(b'foobar'))
self.assertTrue(lib.isscalar(u('efoobar')))
self.assertTrue(lib.isscalar(datetime(2014, 1, 1)))
self.assertTrue(lib.isscalar(date(2014, 1, 1)))
self.assertTrue(lib.isscalar(time(12, 0)))
self.assertTrue(lib.isscalar(timedelta(hours=1)))
self.assertTrue(lib.isscalar(pd.NaT))
def test_isscalar_builtin_nonscalars(self):
self.assertFalse(lib.isscalar({}))
self.assertFalse(lib.isscalar([]))
self.assertFalse(lib.isscalar([1]))
self.assertFalse(lib.isscalar(()))
self.assertFalse(lib.isscalar((1, )))
self.assertFalse(lib.isscalar(slice(None)))
self.assertFalse(lib.isscalar(Ellipsis))
def test_isscalar_numpy_array_scalars(self):
self.assertTrue(lib.isscalar(np.int64(1)))
self.assertTrue(lib.isscalar(np.float64(1.)))
self.assertTrue(lib.isscalar(np.int32(1)))
self.assertTrue(lib.isscalar(np.object_('foobar')))
self.assertTrue(lib.isscalar(np.str_('foobar')))
self.assertTrue(lib.isscalar(np.unicode_(u('foobar'))))
self.assertTrue(lib.isscalar(np.bytes_(b'foobar')))
self.assertTrue(lib.isscalar(np.datetime64('2014-01-01')))
self.assertTrue(lib.isscalar(np.timedelta64(1, 'h')))
def test_isscalar_numpy_zerodim_arrays(self):
for zerodim in [np.array(1), np.array('foobar'),
np.array(np.datetime64('2014-01-01')),
np.array(
|
np.timedelta64(1, 'h')
|
numpy.timedelta64
|
"""
Tests for LAMMPS IO
"""
import eex
import numpy as np
import os
import pytest
import pandas as pd
from . import eex_find_files
@pytest.fixture(scope="module", params=["HDF5", "Memory"])
def spce_dl(request):
fname = eex_find_files.get_example_filename("lammps", "SPCE", "data.spce")
dl = eex.datalayer.DataLayer(
"test_lammps_read", )
sim_data = {'units': 'real', 'bond_style': 'harmonic', 'angle_style': 'harmonic', 'dihedral_style': 'opls',
'atom_style': 'full'}
eex.translators.lammps.read_lammps_data_file(dl, fname, sim_data, blocksize=55)
yield dl
dl.close()
def test_lammps_read_data(spce_dl):
dl = spce_dl
# Check on the data dictionary
utype = {"epsilon": "kcal / mol", "sigma": "angstrom"}
nb_param_atom1 = dl.get_nb_parameter(atom_type=1, nb_model='epsilon/sigma', utype=utype)
nb_param_atom2 = dl.get_nb_parameter(atom_type=2, nb_model='epsilon/sigma', utype=utype)
assert np.allclose([nb_param_atom1['epsilon'], nb_param_atom1['sigma']], [0.15524976551, 3.166])
assert np.allclose([nb_param_atom2['epsilon'], nb_param_atom2['sigma']], [0.0, 0.0])
assert len(dl.get_unique_atom_types()) == 2
assert len(dl.list_term_uids()[2]) == 1
assert len(dl.list_term_uids()[3]) == 1
assert len(dl.list_term_uids()[4]) == 0
assert dl.get_atom_count() == 600
assert dl.get_bond_count() == 400
assert dl.get_angle_count() == 200
assert dl.get_dihedral_count() == 0
#box_size = dl.get_box_size()
#assert box_size["x"] == pytest.approx(-12.362, 1.e-6)
def test_lammps_read_atoms(spce_dl):
dl = spce_dl
# Check Atoms
atoms = dl.get_atoms(["atom_type", "charge", "mass"])
assert atoms.shape[0] == 600
assert np.allclose(np.unique(atoms["atom_type"]), [1, 2])
assert np.allclose(
|
np.unique(atoms["charge"])
|
numpy.unique
|
'''
These tests were inspired by and use code from the tests made by
cs540-testers-SP21 for the Spring 2021 semester.
Their version (1.0) can be found here:
https://github.com/cs540-testers-SP21/hw3-tester/
Subsequently, their version was also inspired by and use code from the tests
made by cs540-testers for the Fall 2020 semester.
Their version (original) can be found here:
https://github.com/cs540-testers/hw5-tester/
'''
__maintainer__ = ['CS540-testers-SP22']
__authors__ = ['<NAME>']
__version__ = '2.0 - Production'
import unittest
import numpy as np
from hw3 import load_and_center_dataset, get_covariance, get_eig, \
get_eig_prop, project_image, display_image
data_path = 'YaleB_32x32.npy'
class TestLoadAndCenterDataset(unittest.TestCase):
def test1_test_load(self):
x = load_and_center_dataset(data_path)
# The dataset needs to have the correct shape
self.assertEqual(np.shape(x), (2414, 1024))
# The dataset should not be constant-valued
self.assertNotAlmostEqual(np.max(x) - np.min(x), 0)
def test2_test_center(self):
x = load_and_center_dataset(data_path)
# Each coordinate of our dataset should average to 0
for i in range(np.shape(x)[1]):
self.assertAlmostEqual(np.sum(x[:, i]), 0)
class TestGetCovariance(unittest.TestCase):
def test3_test_shape(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
# S should be square and have side length d
self.assertEqual(np.shape(S), (1024, 1024))
def test4_test_values(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
# S should be symmetric
self.assertTrue(np.all(np.isclose(S, S.T)))
# S should have non-negative values on the diagonal
self.assertTrue(np.min(np.diagonal(S)) >= 0)
class TestGetEig(unittest.TestCase):
def test5_test_small(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
Lambda, U = get_eig(S, 2)
self.assertEqual(np.shape(Lambda), (2, 2))
self.assertTrue(np.all(np.isclose(
Lambda, [[1369142.41612494, 0], [0, 1341168.50476773]])))
# The eigenvectors should be the columns
self.assertEqual(np.shape(U), (1024, 2))
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
def test6_test_large(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
Lambda, U = get_eig(S, 1024)
self.assertEqual(np.shape(Lambda), (1024, 1024))
# Check that Lambda is diagonal
self.assertEqual(np.count_nonzero(
Lambda - np.diag(np.diagonal(Lambda))), 0)
# Check that Lambda is sorted in decreasing order
self.assertTrue(np.all(np.equal(np.diagonal(Lambda),
sorted(np.diagonal(Lambda), reverse=True))))
# The eigenvectors should be the columns
self.assertEqual(np.shape(U), (1024, 1024))
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
class TestGetEigProp(unittest.TestCase):
def test7_test_small(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
Lambda, U = get_eig_prop(S,0.07)
self.assertEqual(np.shape(Lambda), (2, 2))
self.assertTrue(np.all(np.isclose(
Lambda, [[1369142.41612494, 0], [0, 1341168.50476773]])))
# The eigenvectors should be the columns
self.assertEqual(np.shape(U), (1024, 2))
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
def test8_test_large(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
# This will select all eigenvalues/eigenvectors
Lambda, U = get_eig_prop(S, -1)
self.assertEqual(np.shape(Lambda), (1024, 1024))
# Check that Lambda is diagonal
self.assertEqual(np.count_nonzero(
Lambda - np.diag(np.diagonal(Lambda))), 0)
# Check that Lambda is sorted in decreasing order
self.assertTrue(np.all(np.equal(np.diagonal(Lambda),
sorted(np.diagonal(Lambda), reverse=True))))
# The eigenvectors should be the columns
self.assertEqual(np.shape(U), (1024, 1024))
self.assertTrue(np.all(np.isclose(S @ U, U @ Lambda)))
class Test5ProjectImage(unittest.TestCase):
def test9_test_shape_example(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
_, U = get_eig(S, 2)
# This is the image of the "9" in the spec
projected = project_image(x[0], U)
# Projected needs to have shape (1024, )
self.assertEqual(np.shape(projected), (1024,))
# Values from implemenation(Should be correct)
self.assertAlmostEqual(np.min(projected), 0.27875793275517147)
self.assertAlmostEqual(np.max(projected), 93.22417310945808)
def test10_test_shape_two_eig_values(self):
x = load_and_center_dataset(data_path)
S = get_covariance(x)
_, U = get_eig(S, 2)
# This is the image of the "9" in the spec
projected = project_image(x[3], U)
# Projected needs to have shape (1024, )
self.assertEqual(np.shape(projected), (1024,))
# Values from implemenation(Should be correct)
self.assertAlmostEqual(np.min(projected), -102.98135151709695)
self.assertAlmostEqual(
|
np.max(projected)
|
numpy.max
|
from numpy import min, sum, mean, std, var, insert, array, multiply, where, zeros, append, isnan, nan_to_num, nansum, nanvar, nanmean, unique, ix_, nonzero, nan
from numpy.linalg import norm
from scipy.stats import f, chi2
from pandas import DataFrame
from sklearn.model_selection import KFold
from trendfitter.auxiliary.tf_aux import scores_with_missing_values
class PCA:
"""
A sklearn-like class for the NIPALs algorithm for PCA together with a toolset for
investigation.
Parameters
----------
tol : float, Optional
Value used to decide if model has converged.
loop_limit : int, Optional
Maximum number of loops before forced stop. Resets every new component.
missing_values_method : str, Optional
Defines which method will be used to evaluate missing values in future transformations.
keep_scores : boolean, Optional
Decision to save scores extracted during model fitting. If not given, assumed False.
Attributes
----------
principal_components : int, optional
Number of principal components extracted or to extract. If not given, a cross-validation
internal routine will decide this value.
cv_splits_number : int, optional
Number of splits used for cross-validation. If not given, it will be 7.
loadings : array_like
Loading parameters that define the PCA model.
q2 : [float]
Average score on the test sets during the cross-validation procedure
feature_importances_ : array_like
An array that describes the importance of each feature used to build the model
using the VIP value of each.
training_scores : array_like
Scores extracted during training of the model.
omega : array_like
If missing_values_method requires a scores covariance matrix ('TSR', 'CMR', 'PMP'),
it will be stored here.
Methods
-------
fit(X, principal_components = None, cv_splits_number = 7, int_call = False)
Runs the NIPALS algorithm to extract the principal components from X.
predict(X, principal_components = None)
Uses the model to reconstruct X using the principal components.
transform(X, principal_components = None)
Transforms the X from its original space to the latent variable space.
score(X, principal_components = None)
Returns a r² representing how much variability from X is captured in the model.
Hotellings_T2(X, principal_components = None)
Returns an array with the Hotelling's T² calculated for each row in X.
T2_limit(alpha)
Returns the Hotelling's T² limit estimated with alpha confidence level
contributions_scores_ind(X, principal_components = None)
Returns an array with the contributions to the scores of each X row.
contributions_spe(X, principal_components = None)
Returns an array with the contributions to the SPE of each X row.
SPEs(X, principal_components = None)
Returns the squared prediction errors of each row's X reconstruction.
SPE_limit(alpha)
Returns the squared prediction error limit with alpha confidence level
"""
def __init__(self, tol = 1e-12, loop_limit = 100, missing_values_method = 'TSM'):
self.principal_components = None # number of principal components to be extracted
self.cv_splits_number = None # number of splits for latent variable cross-validation
self.tol = tol # criteria for convergence
self.loop_limit = loop_limit # maximum number of loops before convergence is decided to be not attainable
self.missing_values_method = missing_values_method
self.loadings = None #loadings
self.q2 = [] # list of cross validation scores
self.feature_importances_ = None #for scikit learn use with feature selection methods
self.omega = None # scores covariance matrix for missing values score estimation
self.training_scores = None
self._training_scores_stds = None
self._chi2_params = []
def fit(self, X, principal_components = None, cv_splits_number = 7, int_call = False):
"""
Extracts the model parameters using the NIPALs algorithm [1].
Parameters
----------
X : array_like
Data used to extract the parameters and fit the model
principal_components : array_like, Optional
Number of desired principal components to be extracted
cv_splits_number : int, Optional
Number of desired splits to be used during the cross-validation routine
int_call : Boolean, optional
Flag to determine if the method should calculate certain values. If not specified,
it is assumed False.
References
----------
[1] <NAME>, <NAME>, and <NAME>, “Principal component analysis,”
Chemometrics and Intelligent Laboratory Systems, vol. 2, no. 1–3,
pp. 37–52, Aug. 1987, doi: 10.1016/0169-7439(87)80084-9.
"""
self.principal_components = principal_components # number of principal components to be extracted
self.cv_splits_number = cv_splits_number # number of splits for latent variable cross-validation
if isinstance(X, DataFrame): X = X.to_numpy() #ensuring data in the numpy format
dataset_incomplete = False
if isnan(sum(X)): dataset_incomplete = True #checking if there is missing data on the input X
#CV = False
if not(self.principal_components is None) :
numberOfLVs = self.principal_components
#CV = True
else :
numberOfLVs = X.shape[1] #maximum amount of extractable latent variables
kf = KFold(n_splits = self.cv_splits_number, shuffle = True, random_state = 1)
MatrixXModel = array(zeros(X.shape), ndmin = 2) #initializing matrix space for manipulation
#----------------------------------------------------------------------------------
#------------------------------NIPALS implementation-------------------------------
#----------------------------------------------------------------------------------
q2_final = []
for latent_variable in range(1, numberOfLVs + 1) :
scores_vec = nan_to_num(array(X[ :, 1], ndmin = 2).T) #initializing the guess by using a specific vector
MatrixX2 = X - MatrixXModel #deflation of the X matrix
counter = 0
conv = 1
while conv > self.tol and counter < self.loop_limit:
counter += 1
if dataset_incomplete:
loadings_vec = array(nansum(MatrixX2 * scores_vec, axis = 0) / nansum(((~isnan(X) * scores_vec) ** 2).T, axis = 1), ndmin = 2) # equivalent to loadings = scores'*data/scores'*scores
loadings_vec = loadings_vec / norm(loadings_vec) #normalization of loading vector
new_scores_vec = array(nansum(MatrixX2 * loadings_vec, axis = 1) / nansum(((~isnan(X) * loadings_vec) ** 2), axis = 1), ndmin = 2).T #scores calculation w/ missing data
else:
loadings_vec = array(scores_vec.T @ MatrixX2 / (scores_vec.T @ scores_vec), ndmin = 2) #loadings = scores'*data/scores'*scores
loadings_vec = loadings_vec / norm(loadings_vec) #normalization of loading vector
new_scores_vec = MatrixX2 @ loadings_vec.T #scores calculation
conv = sum((scores_vec - new_scores_vec) ** 2) #scores comparison between loops to assess convergence
scores_vec = new_scores_vec # old t becomes new t
#After convergence, if the desired number of principal components is undefined,
#then we check whether the component is in fact significant and keep going until
#all significant components have been extracted
if self.principal_components is None :
testq2 = []
for train_index, test_index in kf.split(X):
q2_model = PCA(missing_values_method = self.missing_values_method)
q2_model.fit(X[train_index], principal_components = latent_variable, int_call = True)
testq2.append(q2_model.score(X[test_index]))
q2_final.append(mean(testq2))
if latent_variable > 1:
if (q2_final[-1] < q2_final[-2] or \
q2_final[-1] - q2_final[-2] < 0.01 or \
latent_variable > min(X.shape) / 2):
self.q2 = q2_final[:-1]
self.principal_components = latent_variable - 1
if self.missing_values_method == 'TSM': break
#if significant, we add it to the loadings and scores matrices that will be returned as the method's result
#prediction of this model
if latent_variable < 2:
self.loadings = loadings_vec
self.training_scores = scores_vec
else:
self.loadings = insert(self.loadings, self.loadings.shape[0], loadings_vec, axis = 0)
self.training_scores = insert(self.training_scores, self.training_scores.shape[1], scores_vec.T, axis = 1)
if self.principal_components is not None and latent_variable > 2 * self.principal_components: break # extract at most double the significant components; the extras are used for missing-value score estimation
MatrixXModel = self.training_scores @ self.loadings
error = nan_to_num(X) - MatrixXModel
SPE = sum(error ** 2 , axis = 1)
self._chi2_params.append([mean(SPE), var(SPE)]) #for future SPE analysis
if not int_call:
self.feature_importances_ = self._VIPs_calc(X, principal_components = self.principal_components)
self.omega = self.training_scores.T @ self.training_scores # calculation of the covariance matrix
self._training_scores_stds = std(self.training_scores, axis = 0)
pass
def predict(self, X, principal_components = None):
"""
Transforms the X sample to the principal component space and back to evaluate what is
the model "prediction" of the original sample values.
Parameters
----------
X : array_like
Data used to extract the parameters and fit the model
principal_components : array_like, Optional
Number of desired principal components to be used
Returns
-------
preds : array_like
returns "predicted" X values.
"""
if isinstance(X, DataFrame): X = X.to_numpy()
if principal_components == None : principal_components = self.principal_components
preds = (self.transform(X, principal_components = principal_components)) @ self.loadings[:principal_components, :]
return preds
def transform(self, X, principal_components = None):
"""
Transforms the X sample to the principal component space.
Parameters
----------
X : array_like
Data used to extract the parameters and fit the model
principal_components : array_like, Optional
Number of desired principal components to be used
Returns
-------
result : array_like
returns X samples' scores.
"""
if isinstance(X, DataFrame) : X = X.to_numpy()
if principal_components == None : principal_components = self.principal_components
if isnan(sum(X)) :
result =
|
zeros((X.shape[0], principal_components))
|
numpy.zeros
|
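# Illustrative sketch, not from the original project: a minimal end-to-end use of
# the NIPALS PCA class defined above. It assumes the full class (including its
# helper methods) is available in scope as `PCA`; the data and the choice of two
# components are made up purely for demonstration.
def _pca_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)                     # 100 samples, 5 features
    model = PCA()                             # assumes the class defined above
    model.fit(X, principal_components=2)      # skip the internal cross-validation
    scores = model.transform(X)               # latent scores (expected shape (100, 2))
    X_hat = model.predict(X)                  # reconstruction from the 2 components
    return scores.shape, X_hat.shape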
""" Codebook based quantization.
This module implements quantization that is based around a codebook instead of linearly
spread across a range of values. This is more expensive to train, but can permit
a even lower amount of bits.
"""
import tensorflow as tf
import numpy as np
from functools import partial
def _evaluate_fn(fn_or_value, variable):
if callable(fn_or_value):
return fn_or_value(variable)
else:
return fn_or_value
def make_codebook_config(variable, num_bits, zero_passthrough):
num_bits = _evaluate_fn(num_bits, variable)
zero_passthrough = _evaluate_fn(zero_passthrough, variable)
num_clusters = 2 ** num_bits - int(zero_passthrough)
return {
'variable': variable,
'num_bits': num_bits,
'zero_passthrough': zero_passthrough,
'shape': variable.shape,
'num_clusters': num_clusters
}
def make_codebook_data(config, data_collections):
variable = config['variable']
shape = config['shape']
num_bits = config['num_bits']
zero_passthrough = config['zero_passthrough']
num_clusters = 2 ** num_bits
if zero_passthrough:
num_clusters = num_clusters - 1
labels = tf.get_variable('cluster_labels', shape=shape, dtype=tf.int32,
initializer=tf.zeros_initializer(),
collections=data_collections,
trainable=False)
codebook = tf.get_variable('cluster_values', shape=[num_clusters],
dtype=variable.dtype,
initializer=tf.zeros_initializer(),
collections=data_collections,
trainable=True)
if zero_passthrough:
mask = tf.get_variable('mask', shape=shape, dtype=variable.dtype,
trainable=False,
collections=data_collections,
initializer=tf.zeros_initializer())
else:
mask = None
return {
'labels': labels,
'codebook': codebook,
'mask': mask,
}
def make_codebook_quantization(config, data):
labels = data['labels']
codebook = data['codebook']
mask = data['mask']
quantized_variable = tf.gather(codebook, labels)
if mask is not None:
quantized_variable = tf.multiply(quantized_variable, mask)
return quantized_variable
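# Illustrative sketch, not from the original module: a codebook plus integer
# labels reconstructs a dense tensor via tf.gather, which is exactly what
# make_codebook_quantization() above does. The constants are made up and
# TF1-style graph mode is assumed, as in the surrounding code.
def _codebook_quantization_sketch():
    codebook = tf.constant([0.0, 0.5, 1.0, 1.5])              # 2-bit codebook (4 entries)
    labels = tf.constant([[0, 3], [2, 1]], dtype=tf.int32)    # per-weight codebook indices
    data = {'labels': labels, 'codebook': codebook, 'mask': None}
    return make_codebook_quantization(config={}, data=data)   # -> [[0.0, 1.5], [1.0, 0.5]]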
def make_codebook_init(config, data):
from sklearn.cluster import KMeans
labels = data['labels']
cluster_centers = data['codebook']
num_clusters = config['num_clusters']
def fn(nonzero_values):
if
|
np.size(nonzero_values)
|
numpy.size
|
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.metrics import f1_score,accuracy_score
import math
split_sequences=True
word2idx = {}
tag2idx = {}
pos2idx = {}
word_idx = 0
tag_idx = 0
pos_idx = 0
Xtrain = []
Ytrain = []
Ptrain=[]
currentX = []
currentY = []
currentP=[]
for line in open('train1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word, tag, pos = r
if word not in word2idx:
word2idx[word] = word_idx
word_idx += 1
currentX.append(word2idx[word])
if tag not in tag2idx:
tag2idx[tag] = tag_idx
tag_idx += 1
currentY.append(tag2idx[tag])
if pos not in pos2idx:
pos2idx[pos] = pos_idx
pos_idx += 1
currentP.append(pos2idx[pos])
elif split_sequences:
Xtrain.append(currentX)
Ytrain.append(currentY)
Ptrain.append(currentP)
currentX = []
currentY = []
currentP=[]
if not split_sequences:
Xtrain = currentX
Ytrain = currentY
Ptrain=currentP
V = len(word2idx) + 1
M = max(max(p) for p in Ptrain) + 1
A = np.ones((M, M))
pi = np.ones(M)
for p in Ptrain:
pi[p[0]] += 1
for i in range(len(p)-1):
A[p[i], p[i+1]] += 1
A /= A.sum(axis=1, keepdims=True)
pi /= pi.sum()
# find the observation matrix
B = np.ones((M, V)) # add-one smoothing
for x, p in zip(Xtrain, Ptrain):
for xi, pii in zip(x, p):
B[pii, xi] += 1
B /= B.sum(axis=1, keepdims=True)
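# Illustrative sketch, not part of the original script: the add-one smoothing and
# row-normalisation pattern used above for pi, A and B, shown on a tiny made-up
# count matrix.
def _row_normalise_sketch():
    counts = np.ones((2, 3))                         # starting from ones = add-one smoothing
    counts[0, 1] += 4                                # pretend symbol 1 was seen 4 times in state 0
    probs = counts / counts.sum(axis=1, keepdims=True)
    return probs                                     # each row now sums to 1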
class HMM:
def __init__(self, M,A,B,C,C1,pi,SUFF,SUFF1,word2idx):
self.M = M # number of hidden states
self.A=A
self.B=B
self.C=C
self.C1=C1
self.pi=pi
self.SUFF=SUFF
self.SUFF1=SUFF1
self.word2idx=word2idx
def get_state_sequence(self, x):
# returns the most likely state sequence given observed sequence x
# using the Viterbi algorithm
T = len(x)
delta = np.zeros((T, self.M))
psi = np.zeros((T, self.M))
try:
delta[0] = np.log(self.pi) + np.log(self.B[:,x[0]])
except IndexError:
try:
delta[0] = np.log(self.pi) + np.log(self.C[:,SUFF.index([*word2idx][x[0]][:2])])
except IndexError:
delta[0] = np.log(self.pi)
except ValueError:
try:
delta[0] = np.log(self.pi) + np.log(self.C1[:,SUFF1.index([*word2idx][x[0]][:1])])
except ValueError:
delta[0] = np.log(self.pi)
for t in range(1, T):
for j in range(self.M):
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.B[j, x[t]])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C[j, SUFF.index([*word2idx][x[t]][:2])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except ValueError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
try:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j])) + np.log(self.C1[j, SUFF1.index([*word2idx][x[t]][:1])])
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
except IndexError:
delta[t,j] = np.max(delta[t-1] + np.log(self.A[:,j]))
psi[t,j] = np.argmax(delta[t-1] + np.log(self.A[:,j]))
# backtrack
states = np.zeros(T, dtype=np.int32)
states[T-1] = np.argmax(delta[T-1])
for t in range(T-2, -1, -1):
states[t] = psi[t+1, states[t+1]]
return states
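# Illustrative sketch, not part of the original script: the same log-space Viterbi
# recursion used by HMM.get_state_sequence above, on a tiny 2-state HMM with
# made-up parameters and no missing-word fallbacks.
def _viterbi_sketch():
    A_toy = np.array([[0.7, 0.3], [0.4, 0.6]])   # transition probabilities
    B_toy = np.array([[0.9, 0.1], [0.2, 0.8]])   # emission probabilities
    pi_toy = np.array([0.6, 0.4])                # initial state distribution
    x = [0, 1, 1]                                # observed symbol indices
    T, M_toy = len(x), 2
    delta = np.zeros((T, M_toy))
    psi = np.zeros((T, M_toy), dtype=np.int32)
    delta[0] = np.log(pi_toy) + np.log(B_toy[:, x[0]])
    for t in range(1, T):
        for j in range(M_toy):
            scores = delta[t - 1] + np.log(A_toy[:, j])
            psi[t, j] = np.argmax(scores)
            delta[t, j] = np.max(scores) + np.log(B_toy[j, x[t]])
    states = np.zeros(T, dtype=np.int32)
    states[T - 1] = np.argmax(delta[T - 1])
    for t in range(T - 2, -1, -1):
        states[t] = psi[t + 1, states[t + 1]]
    return states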
SUFF=[]
SUFF1=[]
for w in [*word2idx]:
SUFF.append(w[:2])
SUFF1.append(w[:1])
suff_pos = defaultdict(list)
suff_pos1 = defaultdict(list)
idx=0
for suf in SUFF:
suff_pos[suf].append(idx)
idx+=1
idx=0
for suf in SUFF1:
suff_pos1[suf].append(idx)
idx+=1
C=np.ones((M,V))
C1=np.ones((M,V))
for l in suff_pos.values():
C[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
for l in suff_pos1.values():
C1[:,l]=B[:,l].sum(axis=1, keepdims=True)/len(l)
word_idx = len(word2idx)
w_known=len(word2idx)
word2idx_test={}
Xtest = []
currentX = []
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
word = r[0]
if word not in word2idx:
word2idx_test[word] = word_idx
word2idx[word]= word_idx
word_idx += 1
else:
word2idx_test[word]=word2idx[word]
currentX.append(word2idx_test[word])
elif split_sequences:
Xtest.append(currentX)
currentX = []
hmm = HMM(M,A,B,C,C1,pi,SUFF,SUFF1,word2idx)
P1test = []
for x in Xtest:
p = hmm.get_state_sequence(x)
P1test.append(p)
Ptest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[2]
list1.append(pos2idx[tag])
elif split_sequences:
Ptest.append(list1)
list1 = []
Ytest=[]
list1=[]
for line in open('test1_all.txt',encoding='utf-8'):
line = line.rstrip()
if line:
r = line.split()
tag = r[1]
list1.append(tag2idx[tag])
elif split_sequences:
Ytest.append(list1)
list1 = []
def accuracy(T, Y):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y in zip(T, Y):
n_correct += np.sum(t == y)
n_total += len(y)
return float(n_correct) / n_total
def accuracy_unknown(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi>w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def accuracy_known(T, Y,X):
# inputs are lists of lists
n_correct = 0
n_total = 0
for t, y,x in zip(T, Y,X):
for ti,yi,xi in zip (t,y,x):
if xi<=w_known :
n_correct += (ti == yi)
n_total += 1
return float(n_correct) / n_total
def total_f1_score(T, Y):
# inputs are lists of lists
T = np.concatenate(T)
Y = np.concatenate(Y)
return f1_score(T, Y, average=None).mean()
print("test accuracy:", accuracy(P1test, Ptest))
accuracy=accuracy(P1test, Ptest)
print("test f1:", total_f1_score(P1test, Ptest))
f1=total_f1_score(P1test, Ptest)
print("test accuracy for unknown words:",accuracy_unknown(P1test, Ptest,Xtest))
unknown_ac=accuracy_unknown(Ptest, P1test,Xtest)
print("test accuracy for known words:",accuracy_known(P1test, Ptest,Xtest))
known_ac=accuracy_known(Ptest, P1test,Xtest)
Y = np.concatenate(Ytest)
P = np.concatenate(Ptest)
Z = np.concatenate(P1test)
X= np.concatenate(Xtest)
print("accuracy score for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]]))
a11= accuracy_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]]))
a12= accuracy_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]]))
a13=accuracy_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]]))
a14=accuracy_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[4]+" :",accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]]))
a15=accuracy_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]]))
a16=accuracy_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]]))
a17=accuracy_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]]))
a18=accuracy_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]]))
a19= accuracy_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]]))
a110= accuracy_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]]))
a111= accuracy_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]]))
a112= accuracy_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]]))
a113= accuracy_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]])
print("accuracy score for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]]))
a114= accuracy_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]])
print("test f1 for tag "+list(tag2idx.keys())[0]+" :", f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean())
a21= f1_score(Z[np.where(Y==0)[0]], P[np.where(Y==0)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[1]+" :", f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean())
a22= f1_score(Z[np.where(Y==1)[0]], P[np.where(Y==1)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[2]+" :", f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean())
a23=f1_score(Z[np.where(Y==2)[0]], P[np.where(Y==2)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[3]+" :", f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean())
a24=f1_score(Z[np.where(Y==3)[0]], P[np.where(Y==3)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[4]+" :", f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean())
a25=f1_score(Z[np.where(Y==4)[0]], P[np.where(Y==4)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[5]+" :", f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean())
a26=f1_score(Z[np.where(Y==5)[0]], P[np.where(Y==5)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[6]+" :", f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean())
a27=f1_score(Z[np.where(Y==6)[0]], P[np.where(Y==6)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[7]+" :", f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean())
a28=f1_score(Z[np.where(Y==7)[0]], P[np.where(Y==7)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[8]+" :", f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean())
a29= f1_score(Z[np.where(Y==8)[0]], P[np.where(Y==8)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[9]+" :", f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean())
a210= f1_score(Z[np.where(Y==9)[0]], P[np.where(Y==9)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[10]+" :", f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean())
a211= f1_score(Z[np.where(Y==10)[0]], P[np.where(Y==10)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[11]+" :", f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean())
a212= f1_score(Z[np.where(Y==11)[0]], P[np.where(Y==11)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[12]+" :", f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean())
a213= f1_score(Z[np.where(Y==12)[0]], P[np.where(Y==12)[0]], average=None).mean()
print("test f1 for tag "+list(tag2idx.keys())[13]+" :", f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean())
a214= f1_score(Z[np.where(Y==13)[0]], P[np.where(Y==13)[0]], average=None).mean()
print("accuracy for unknown words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]]))
a31= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0])))
a41= len(set(np.where(X[np.where(Y==0)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]]))
a32= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0])))
a42= len(set(np.where(X[np.where(Y==1)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]]))
a33= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0])))
a43= len(set(np.where(X[np.where(Y==2)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]]))
a34= accuracy_score(Z[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]],P[np.where(Y==3)[0][X[np.where(Y==3)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[3]+" :",len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0])))
a44= len(set(np.where(X[np.where(Y==3)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[4]+" :", accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]]))
a35= accuracy_score(Z[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]],P[np.where(Y==4)[0][X[np.where(Y==4)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[4]+" :",len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0])))
a45= len(set(np.where(X[np.where(Y==4)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[5]+" :", accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]]))
a36= accuracy_score(Z[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]],P[np.where(Y==5)[0][X[np.where(Y==5)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[5]+" :",len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0])))
a46= len(set(np.where(X[np.where(Y==5)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[6]+" :", accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]]))
a37= accuracy_score(Z[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]],P[np.where(Y==6)[0][X[np.where(Y==6)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[6]+" :",len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0])))
a47= len(set(np.where(X[np.where(Y==6)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[7]+" :", accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]]))
a38= accuracy_score(Z[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]],P[np.where(Y==7)[0][X[np.where(Y==7)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[7]+" :",len(set(np.where(X[np.where(Y==7)[0]]>608)[0])))
a48= len(set(np.where(X[np.where(Y==7)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[8]+" :", accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]]))
a39= accuracy_score(Z[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]],P[np.where(Y==8)[0][X[np.where(Y==8)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[8]+" :",len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0])))
a49= len(set(np.where(X[np.where(Y==8)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[9]+" :", accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]]))
a310= accuracy_score(Z[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]],P[np.where(Y==9)[0][X[np.where(Y==9)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[9]+" :",len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0])))
a410= len(set(np.where(X[np.where(Y==9)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[10]+" :", accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]]))
a311=accuracy_score(Z[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]],P[np.where(Y==10)[0][X[np.where(Y==10)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[10]+" :",len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0])))
a411= len(set(np.where(X[np.where(Y==10)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[11]+" :", accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]]))
a312= accuracy_score(Z[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]],P[np.where(Y==11)[0][X[np.where(Y==11)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[11]+" :",len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0])))
a412= len(set(np.where(X[np.where(Y==11)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[12]+" :", accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]]))
a313= accuracy_score(Z[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]],P[np.where(Y==12)[0][X[np.where(Y==12)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[12]+" :",len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0])))
a413= len(set(np.where(X[np.where(Y==12)[0]]>w_known)[0]))
print("accuracy for unknown words for tag "+list(tag2idx.keys())[13]+" :", accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]]))
a314= accuracy_score(Z[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]],P[np.where(Y==13)[0][X[np.where(Y==13)[0]]>w_known]])
print("number of unknown words for tag "+list(tag2idx.keys())[13]+" :",len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0])))
a414= len(set(np.where(X[np.where(Y==13)[0]]>w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[0]+" :", accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]]))
a51= accuracy_score(Z[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]],P[np.where(Y==0)[0][X[np.where(Y==0)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[0]+" :",len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0])))
a61= len(set(np.where(X[np.where(Y==0)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[1]+" :", accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]]))
a52= accuracy_score(Z[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]],P[np.where(Y==1)[0][X[np.where(Y==1)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[1]+" :",len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0])))
a62= len(set(np.where(X[np.where(Y==1)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[2]+" :", accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]]))
a53= accuracy_score(Z[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]],P[np.where(Y==2)[0][X[np.where(Y==2)[0]]<=w_known]])
print("number of known words for tag "+list(tag2idx.keys())[2]+" :",len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0])))
a63= len(set(np.where(X[np.where(Y==2)[0]]<=w_known)[0]))
print("accuracy for known words for tag "+list(tag2idx.keys())[3]+" :", accuracy_score(Z[
|
np.where(Y==3)
|
numpy.where
|
import numpy as np
from scipy.optimize import curve_fit
#%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
import matplotlib
import pandas as pd
from pandas import read_csv
from pathlib import Path
import datetime
def curve_model(t, a, k):
# exp curve
return a *
|
np.exp(k * t)
|
numpy.exp
|
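# Illustrative sketch, not from the source snippet above: a self-contained example
# of fitting the same exponential form with scipy.optimize.curve_fit on synthetic,
# noise-free data; all numbers are made up.
def _exp_fit_sketch():
    import numpy as np
    from scipy.optimize import curve_fit
    t = np.linspace(0.0, 10.0, 50)
    y = 2.0 * np.exp(0.3 * t)                                  # ground truth: a = 2.0, k = 0.3
    popt, _ = curve_fit(lambda t, a, k: a * np.exp(k * t), t, y, p0=(1.0, 0.1))
    return popt                                                # should recover roughly (2.0, 0.3)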
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Utilities to manipulate images."""
import nibabel as nb
import numpy as np
from gzip import GzipFile
def rotation2canonical(img):
"""Calculate the rotation w.r.t. cardinal axes of input image."""
img = nb.as_closest_canonical(img)
newaff = np.diag(img.header.get_zooms()[:3])
r = newaff @ np.linalg.pinv(img.affine[:3, :3])
if np.allclose(r,
|
np.eye(3)
|
numpy.eye
|
import numpy as np
import math
def clear_sky_reset2(zenith_angle: np.ndarray, Eext: np.ndarray, pressure: np.ndarray, water_vapour: np.ndarray,
ozone: np.ndarray, nitrogen_dioxide: np.ndarray, AOD550: np.ndarray, Angstrom_exponent: np.ndarray,
surface_albedo: np.ndarray):
'''Every variable needs to be an np.ndarray; np.matrix will cause a fatal error.'''
Angstrom_exponent[Angstrom_exponent > 2.5] = 2.5
Angstrom_exponent[Angstrom_exponent < 0] = 0
pressure[pressure > 1100] = 1100
pressure[pressure < 300] = 300
water_vapour[water_vapour > 10] = 10
water_vapour[water_vapour < 0] = 0
ozone[ozone > 0.6] = 0.6
ozone[ozone < 0] = 0
nitrogen_dioxide[nitrogen_dioxide > 0.03] = 0.03
nitrogen_dioxide[nitrogen_dioxide < 0] = 0
surface_albedo[surface_albedo > 1] = 1
surface_albedo[surface_albedo < 0] = 0
# air mass for aerosols extinction
complex_temp = np.array(zenith_angle * 180. / np.pi, dtype=complex)
ama = np.abs(np.power(np.cos(zenith_angle) + 0.16851 * np.power(complex_temp, 0.18198) / np.power(
95.318 - complex_temp, 1.9542), -1))
# air mass for water vapor absorption
amw = np.abs(np.power(np.cos(zenith_angle) + 0.10648 * np.power(complex_temp, 0.11423) / np.power(
93.781 - complex_temp, 1.9203), -1))
# air mass for nitrogen dioxide absorption
# amn = np.abs(np.power(np.cos(zenith_angle) + 1.1212 * np.power(zenith_angle * 180. / np.pi, 1.6132) / np.power(
# 3.2629 - zenith_angle * 180. / np.pi, 1.9203), -1))
# air mass for ozone absorption
amo = np.abs(np.power(np.cos(zenith_angle) + 1.0651 * np.power(complex_temp, 0.6379) / np.power(
101.8 - complex_temp, 2.2694), -1))
# air mass for Rayleigh scattering and uniformly mixed gases absorption
amR = np.abs(np.power(np.cos(zenith_angle) + 0.48353 * np.power(complex_temp, 0.095846) / np.power(
96.741 - complex_temp, 1.754), -1))
amRe = np.abs((pressure / 1013.25) * np.power(
np.cos(zenith_angle) + 0.48353 * (np.power(complex_temp, 0.095846)) / np.power(
96.741 - complex_temp, 1.754), -1))
# Angstrom turbidity
ang_beta = AOD550 / np.power(0.55, -1 * Angstrom_exponent)
ang_beta[ang_beta > 1.1] = 1.1
ang_beta[ang_beta < 0] = 0
'''Band 1'''
# transmittance for Rayleigh scattering
TR1 = (1 + 1.8169 * amRe - 0.033454 * np.power(amRe, 2)) / (1 + 2.063 * amRe + 0.31978 * np.power(amRe, 2))
# transmittance for uniformly mixed gases absorption
Tg1 = (1 + 0.95885 * amRe + 0.012871 * np.power(amRe, 2)) / (1 + 0.96321 * amRe + 0.015455 * np.power(amRe, 2))
# transmittance for Ozone absorption
uo = ozone
f1 = uo * (10.979 - 8.5421 * uo) / (1 + 2.0115 * uo + 40.189 * np.power(uo, 2))
f2 = uo * (-0.027589 - 0.005138 * uo) / (1 - 2.4857 * uo + 13.942 * np.power(uo, 2))
f3 = uo * (10.995 - 5.5001 * uo) / (1 + 1.6784 * uo + 42.406 * np.power(uo, 2))
To1 = (1 + f1 * amo + f2 * np.power(amo, 2)) / (1 + f3 * amo)
# transmittance for Nitrogen dioxide absorption
un = nitrogen_dioxide
g1 = (0.17499 + 41.654 * un - 2146.4 * np.power(un, 2)) / (1 + 22295. * np.power(un, 2))
g2 = un * (-1.2134 + 59.324 * un) / (1 + 8847.8 *
|
np.power(un, 2)
|
numpy.power
|
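# Illustrative sketch, not from the source snippet above: a sanity check of the
# aerosol air-mass expression used in the function, showing that at zenith
# (zenith_angle = 0) the relative air mass is approximately 1.
def _air_mass_sketch():
    import numpy as np
    z = np.array([0.0])                                   # zenith angle in radians
    zd = np.array(z * 180. / np.pi, dtype=complex)        # degrees, complex to mirror the code above
    ama = np.abs(np.power(np.cos(z) + 0.16851 * np.power(zd, 0.18198) /
                          np.power(95.318 - zd, 1.9542), -1))
    return ama                                            # approximately [1.0]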
"""
This code is used to fit KRR. Note that KRR for large
datasets can require a lot of memory. For FMNIST models
we used 64GB and for synthetic datasets (with n = 10^5),
we used 128GB of RAM. This code is written for Python 2.7.13.
Inputs:
experiment_name: The name of the experiment (used for record keeping).
param_1: overloaded argument:
When kernel name = polynomial: Power of the polynomial kernel
When dataset == SYNTH: the number of observations
otherwise, the number of layers for ntk kernel
p2_ind: The constant term in the polynomial kernel.
kernel_name: The name of the kernel: ntk, gp (ReLU RF), poly (polynomial).
job_id: Job id (used for record keeping).
dataset: The name of the dataset:
FMNIST (high-frequency noise)/ NFMNIST (low-frequency noise)
CIFAR10 (low-frequency noise)/ CIFAR2 (high-frequency noise)
SYNTH (synthetic data).
noise_index: The index of the noise level. An integer typically ranging from zero (no noise) to 14.
"""
from __future__ import print_function
import cPickle as pickle
import math
import numpy as np
import os
import sys
import scipy.linalg as scl
import scipy.sparse as ss
import time
from preprocess import prep_data
experiment_name = sys.argv[1]
param_1 = np.int(sys.argv[2])
p2_ind = np.int(sys.argv[3])
kernel_name = sys.argv[4]
job_id = np.int(sys.argv[5])
dataset = sys.argv[6]
noise_index = np.int(sys.argv[7])
# Read user provided directories:
user_dirs = {}
with open("./directories.txt") as f:
for line in f:
(key, val) = line.split()
user_dirs[key] = val
# The hyper-parameters used for each dataset:
# expand: whether to one-hot-encode the labels
# mean: the mean of the labels to be removed before fitting
# reg_list: the grid of l_2 regularization intensities
# p2_grid: the grid of values used for the constant term in polynomial kernels
if dataset == 'CIFAR10':
expand = True
mean = 0.1
reg_list = 10 ** np.linspace(-6, 1, num=20)
p2_grid = 2 ** np.linspace(-3, 3, num=10)
elif dataset == 'SYNTH':
expand = False
mean = 0.0
reg_list = 10 ** np.linspace(0, 6, num=10)
p2_grid = 2 ** np.linspace(-3, 3, num=10)
elif dataset == 'CIFAR2':
expand = False
mean = 0.5
reg_list = 10 ** np.linspace(-2, 4, num=20)
# Added for RF
reg_list = np.concatenate([reg_list, 10 ** np.linspace(4.2, 10, num=20)])
p2_grid = 2 ** np.linspace(-3, 3, num=10)
elif dataset in ['FMNIST', 'NFMNIST']:
expand = True
mean = 0.1
# Changed for the new kernels
if kernel_name == 'ntk' and param_1 == 3:
reg_list = 10 ** np.linspace(-4, 3, num=20)
print('Regularization Param Chosen for Three Layer NTK')
else:
# Changed Base Case
reg_list = 10 ** np.linspace(-1, 5, num=20)
p2_grid = 2 ** np.linspace(-3, 3, num=10)
else:
print('Dataset not recognized')
expand = False
mean = 0.0
reg_list = 10 ** np.linspace(0, 6, num=20)
p2_grid = 2 ** np.linspace(-3, 3, num=10)
param_2 = p2_grid[p2_ind]
# Directory used for saving the KRR results
directory = user_dirs['rkhs_dir'] + '%s_%s_%s_%d_%d_%d_%d'%(experiment_name, dataset, kernel_name, param_1, p2_ind, noise_index, job_id)
if not os.path.exists(directory):
os.makedirs(directory)
fileName = directory + "/" + 'log_file.txt'
_file = open(fileName, 'w', buffering=1)
print('Arguments:', file=_file)
print('The noise index is %d'%(noise_index), file=_file)
print('The kernel hyper_param is %d, %d'%(param_1, p2_ind), file=_file)
print('Kernel type is: %s'%(kernel_name), file=_file)
print('Numpy version %s'%(np.version.version), file=_file)
print('Scipy version %s'%(scl.__version__), file=_file)
print('=========', file=_file)
def NTK2(X, Z):
"""This function computes NTK kernel for two-layer ReLU neural networks via
an analytic formula.
Input:
X: d times n_1 matrix, where d is the feature dimension and n_i are # obs.
Z: d times n_2 matrix, where d is the feature dimension and n_i are # obs.
output:
C: The kernel matrix of size n_1 times n_2.
"""
pi = math.pi
assert X.shape[0] == Z.shape[0]
# X is sized d \times n
nx = np.linalg.norm(X, axis=0, keepdims=True)
nx = nx.T
nz = np.linalg.norm(Z, axis=0, keepdims=True)
C = np.dot(X.T, Z) #n_1 * n_2
C = np.multiply(C, (nx ** -1))
C = np.multiply(C, (nz ** -1))
# Fixing numerical mistakes
C = np.minimum(C, 1.0)
C = np.maximum(C, -1.0)
C = np.multiply(1.0 - np.arccos(C) / pi, C) + np.sqrt(1 - np.power(C, 2)) / (2 * pi)
C = np.multiply(nx, np.multiply(C, nz))
return C
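# Illustrative sketch, not part of the original script: calling NTK2 on tiny random
# matrices laid out as d x n, as its docstring describes. The sizes below are made
# up purely for demonstration.
def _ntk2_usage_sketch():
    rng = np.random.RandomState(0)
    X_toy = rng.randn(8, 3)        # d = 8 features, n_1 = 3 observations
    Z_toy = rng.randn(8, 4)        # d = 8 features, n_2 = 4 observations
    K = NTK2(X_toy, Z_toy)         # kernel matrix of shape (3, 4)
    return K.shape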
def RFK2(X, Z):
"""This function computes RF kernel for two-layer ReLU neural networks via
an analytic formula.
Input:
X: d times n_1 matrix, where d is the feature dimension and n_i are # obs.
Z: d times n_2 matrix, where d is the feature dimension and n_i are # obs.
output:
C: The kernel matrix of size n_1 times n_2.
"""
pi = math.pi
assert X.shape[0] == Z.shape[0]
# X is sized d \times n
nx = np.linalg.norm(X, axis=0, keepdims=True)
nx = nx.T
nz = np.linalg.norm(Z, axis=0, keepdims=True)
C = np.dot(X.T, Z) #n_1 * n_2
C = np.multiply(C, (nx ** -1))
C = np.multiply(C, (nz ** -1))
# Fixing numerical mistakes
C =
|
np.minimum(C, 1.0)
|
numpy.minimum
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.special import j1
from astropy import units as un
from astropy import constants as const
from scipy.interpolate import interp1d
from tqdm import tqdm
from hera_cal.redcal import get_pos_reds
from multiprocessing import Pool
from .beams import beam_gaussian
from .sky import point_source_foregrounds
c = const.c # speed of light in meters per second
"""
Notes for later implementation:
- Make the simulator be one class. Make it simulate based on what it's given
"""
class DiffuseSim:
def __init__(self, antpos, beam, sky, theta, phi, nu):
""" """
self.antpos = antpos
self.beam = beam
self.sky = sky
self.theta = theta
self.phi = phi
self.nu = nu
self.uv = {}
for i, vi in self.antpos.items():
for j, vj in self.antpos.items():
if i != j and self.uv.get((j, i)) is None:
self.uv[(i, j)] = np.abs(vi - vj)
self.delays = {k: self.tau(v, theta, phi) for k, v in self.uv.items()}
def simulate(self):
""" """
pass
def tau(self, b, theta, phi):
"""
Healpix map of delays
b : np.ndarray, (3,)
baseline vector
theta : np.
"""
bx, by, bz = b * un.m
l, m, n = (
np.cos(theta) * np.sin(phi),
np.cos(theta) * np.cos(phi),
np.sin(theta),
)
return (bx * l + by * m + bz * n) / const.c
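# Illustrative sketch, not from the original module: evaluates the geometric delay
# tau = (b . s_hat) / c for one made-up baseline and pointing direction, mirroring
# the arithmetic inside the tau() methods of the classes here.
def _tau_sketch():
    b = np.array([14.0, 0.0, 0.0]) * un.m                 # a 14 m east-west baseline
    theta, phi = np.deg2rad(30.0), np.deg2rad(45.0)       # arbitrary pointing angles
    l = np.cos(theta) * np.sin(phi)
    m = np.cos(theta) * np.cos(phi)
    n = np.sin(theta)
    return (b[0] * l + b[1] * m + b[2] * n) / const.c     # an astropy Quantity in time units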
class RadioSim:
"""
Class for simulating radio visibilities
"""
def __init__(self, antpos, beam, sky, theta, phi, nu):
""" """
self.antpos = antpos
self.beam = beam
self.sky = sky
self.theta = theta
self.phi = phi
self.nu = nu
self.uv = {}
for i, vi in self.antpos.items():
for j, vj in self.antpos.items():
if i != j and self.uv.get((j, i)) is None:
self.uv[(i, j)] = np.abs(vi - vj)
self.delays = {k: self.tau(v, theta, phi) for k, v in self.uv.items()}
def tau(self, b, theta, phi):
"""
Solves for the delay
b : np.ndarray, (3,)
baseline vector
theta : np.
"""
bx, by, bz = b * un.m
l, m, n = (
np.cos(theta) *
|
np.sin(phi)
|
numpy.sin
|
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 15:22:04 2020
@author: zxx
"""
import random
import time
import copy
#from six.moves import xrange
import numpy as np
from numpy.random import RandomState
from dataset import load_movielens_ratings
from dataset import build_user_item_matrix
from ALS_optimize import ALS
from ALS_optimize_origin import ALS_origin
from evaluation import predict
from evaluation import RMSE
from compute_grad import compute_grad_SGLD
def random_mal_ratings(mal_user,n_item,mal_item,seed = None):
# random generator malicious users data
assert mal_item < n_item
mal_ratings = []
for u in range(mal_user):
mal_user_idx = u
mal_item_idx = random.sample(range(n_item), mal_item)
for i in range(mal_item):
mal_movie_idx = mal_item_idx[i]
# RandomState(seed).rand()/0.2  # no-op leftover expression
mal_rating = int(5 * RandomState(seed).rand()) + 1
mal_ratings.append([mal_user_idx, mal_movie_idx, mal_rating])
return np.array(mal_ratings)
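# Illustrative sketch, not part of the original script: the shape and content of
# what random_mal_ratings() returns for a made-up problem size.
def _mal_ratings_sketch():
    fake = random_mal_ratings(mal_user=2, n_item=10, mal_item=3, seed=0)
    # one row per (malicious user, filler item): [user index, item index, rating in 1..5]
    assert fake.shape == (2 * 3, 3)
    return fake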
# function for converting the data format (rating matrix to rating triplets)
def arraytorating(malarray, mal_user, n_item):
malrating = []
for u in range(mal_user):
for i in range(n_item):
if malarray[u,i] != 0 :
malrating.append([u, i, malarray[u,i]])
return np.array(malrating)
############################################################################################################
#train origin model
def optimize_model_origin(converge, n_user, n_item, n_feature, train, mean_rating_, lamda_u, lamda_v, user_features_origin_, item_features_origin_):
print("Start training model without data poisoning attacks!")
last_rmse = None
n_iters = 100
for iteration in range(n_iters):
t1 = time.time()
user_features_origin_, item_features_origin_ = ALS_origin(n_user, n_item, n_feature, train, mean_rating_, lamda_u, lamda_v, user_features_origin_, item_features_origin_)
train_preds = predict(train.take([0, 1], axis=1), user_features_origin_, item_features_origin_)
train_rmse = RMSE(train_preds, train.take(2, axis=1) - 3)
t2 = time.time()
print("The %d th iteration \t time: %ds \t RMSE: %f " % (iteration + 1, t2 - t1, train_rmse))
# stop when converge
if last_rmse and abs(train_rmse - last_rmse) < converge:
break
else:
last_rmse = train_rmse
return last_rmse
#train added attack data model
def optimize_model(converge, n_user, n_item, n_feature, mal_user, train, mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, lamda_v, \
user_features_, mal_user_features_, item_features_):
print("Start training model with data poisoning attacks!")
last_rmse = None
n_iters = 100
for iteration in range(n_iters):
t1 = time.time()
user_features_, mal_user_features_, item_features_ = ALS(n_user, n_item, n_feature, mal_user, train, \
mean_rating_, mal_mean_rating_, mal_ratings, lamda_u, lamda_v, \
user_features_, mal_user_features_, item_features_)
train_preds = predict(train.take([0, 1], axis=1), user_features_, item_features_)
train_rmse = RMSE(train_preds, train.take(2, axis=1) - 3)
t2 = time.time()
print("The %d th iteration \t time: %ds \t RMSE: %f " % (iteration + 1, t2 - t1, train_rmse))
# stop when converge
if last_rmse and abs(train_rmse - last_rmse) < converge:
break
else:
last_rmse = train_rmse
return last_rmse
def main_SGLD(data_size,attack_size = 0.05,fill_size = 0.05,target_item = 22):
'''
parameters:
lamda_u: the regularization parameter of user
lamda_v: the regularization parameter of item
alpha: the proportion of malicious users
mal_item: the items of malicious users rating
n_iter: number of iteration
converge: the least RMSE between two iterations
train_pct: the proportion of train dataset
'''
lamda_u = 5e-2
lamda_v = 5e-2
# alpha = 0.01
# n_iters = 100
n_feature = 64
converge = 1e-5
# mal_item = 84
# target_item = 22
if data_size == '100K':
ratings_file = 'ratings_ml.csv'
ratings = load_movielens_ratings(ratings_file)
if data_size == '1M':
ratings = np.load('ratings_1m.npy')
# assert that the maximum rating is 5 and the minimum rating is 1
max_rating = max(ratings[:, 2])
min_rating = min(ratings[:, 2])
assert max_rating == 5
assert min_rating == 1
train = ratings
n_user = max(train[:, 0]) + 1
n_item = max(train[:, 1]) + 1
mal_user = int(attack_size * n_user)
# mal_user = 47
mal_item = int(fill_size * n_item)
# add malicious users data
mal_ratings = random_mal_ratings(mal_user,n_item,mal_item)
#initialize the matrix U U~ and V
seed = None
user_features_ = 0.1 * RandomState(seed).rand(n_user, n_feature)
mal_user_features_ = 0.1 * RandomState(seed).rand(mal_user, n_feature)
item_features_ = 0.1 * RandomState(seed).rand(n_item, n_feature)
mean_rating_ = np.mean(train.take(2, axis=1))
mal_mean_rating_ = np.mean(mal_ratings.take(2, axis=1))
user_features_origin_ = 0.1 * RandomState(seed).rand(n_user, n_feature)
item_features_origin_ = 0.1 * RandomState(seed).rand(n_item, n_feature)
#using the algorithm of SGLD to optimize the utility function
'''
s_iters: number of iteration in SGLD
s_t: step size
Lamda: the contraint of vector
'''
print('*'*40)
#m_iters = 10
s_iters = 10
s_t = 0.2 * np.ones([s_iters])
# last_rmse = optimize_model_origin()
last_rmse = optimize_model_origin(converge, n_user, n_item, n_feature, train, mean_rating_, \
lamda_u, lamda_v, user_features_origin_, item_features_origin_)
print(last_rmse)
#n_user = max(train[:, 0]) + 1
#n_item = max(train[:, 1]) + 1
train_matrix = np.zeros((n_user, n_item))
for i in range(train.shape[0]):
train_matrix[train[i,0], train[i,1]] = train[i,2]
item_mean = np.zeros([n_item,])
item_variance = np.zeros([n_item,])
for j in range(n_item):
# j_num = sum(1 * (train_matrix[:,j] != 0))
# j_sum = sum(train_matrix[:,j])
# item_mean[j] = j_sum/j_num
# item_variance[j] = sum(pow(train_matrix[:,j][train_matrix[:,j] != 0] - item_mean[j] , 2))
#
j_num = n_user
j_sum = sum(train_matrix[:,j])
item_mean[j] = j_sum/j_num
item_variance[j] = sum(pow(train_matrix[:,j] - item_mean[j] , 2))/j_num
#mal_data = build_user_item_matrix(mal_user,n_item,mal_ratings).toarray()
mal_data = np.zeros([mal_user,n_item])
#count = np.zeros([mal_user,n_item])
for u in range(mal_user):
# print(u)
item_index = np.random.randint(1,n_item,mal_item)
for j in range(mal_item):
randomdata = np.random.normal(loc=item_mean[item_index[j]], scale=np.sqrt(item_variance[item_index[j]]))
randomdata = randomdata - item_mean[item_index[j]] + 3
if randomdata > 0:
mal_data[u,item_index[j]] = round(randomdata)
else:
mal_data[u,item_index[j]] = 0
if mal_data[u,item_index[j]] > 5:
mal_data[u,item_index[j]] = 5
# count[u,item_index[j]] = 1
#sum(sum((mal_data !=0 ) * 1))
#sum(sum(count))
mal_ratings = arraytorating(mal_data, mal_user, n_item).astype(np.int32)
mal_mean_rating_ = np.mean(mal_ratings.take(2, axis=1))
beta = 0.6
sate =
|
np.zeros([mal_user,n_item])
|
numpy.zeros
|
import json
import time
import numpy as np
from scipy import ndimage
from scipy.stats import mode
from src.utils import matrix2answer
def find_grid(image, frame=False, possible_colors=None):
"""Looks for the grid in image and returns color and size"""
grid_color = -1
size = [1, 1]
if possible_colors is None:
possible_colors = list(range(10))
for color in possible_colors:
for i in range(size[0] + 1, image.shape[0] // 2 + 1):
if (image.shape[0] + 1) % i == 0:
step = (image.shape[0] + 1) // i
if (image[(step - 1) :: step] == color).all():
size[0] = i
grid_color = color
for i in range(size[1] + 1, image.shape[1] // 2 + 1):
if (image.shape[1] + 1) % i == 0:
step = (image.shape[1] + 1) // i
if (image[:, (step - 1) :: step] == color).all():
size[1] = i
grid_color = color
if grid_color == -1 and not frame:
color_candidate = image[0, 0]
if (
(image[0] == color_candidate).all()
and (image[-1] == color_candidate).all()
and (image[:, -1] == color_candidate).all()
and (image[:, 0] == color_candidate).all()
):
grid_color, size, _ = find_grid(
image[1 : image.shape[0] - 1, 1 : image.shape[1] - 1], frame=True, possible_colors=[color_candidate]
)
return grid_color, size, frame
else:
return grid_color, size, frame
return grid_color, size, frame
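# Illustrative sketch, not from the original module: a toy 5x5 image whose middle
# row and column form a grid of color 5, so find_grid should report grid color 5
# and a 2x2 arrangement of cells.
def _find_grid_sketch():
    toy = np.zeros((5, 5), dtype=np.uint8)
    toy[2, :] = 5
    toy[:, 2] = 5
    color, size, frame = find_grid(toy)
    return color, size, frame        # expected: (5, [2, 2], False)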
def find_color_boundaries(array, color):
"""Looks for the boundaries of any color and returns them"""
if (array == color).any() == False:
return None
ind_0 = np.arange(array.shape[0])
ind_1 = np.arange(array.shape[1])
temp_0 = ind_0[(array == color).max(axis=1)] # axis 0
min_0, max_0 = temp_0.min(), temp_0.max()
temp_1 = ind_1[(array == color).max(axis=0)] # axis 1
min_1, max_1 = temp_1.min(), temp_1.max()
return min_0, max_0, min_1, max_1
def get_color_max(image, color):
"""Returns the part of the image inside the color boundaries"""
boundaries = find_color_boundaries(image, color)
if boundaries:
return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])
else:
return 1, None
def get_pixel(image, i, j):
"""Returns the pixel by coordinates"""
if i >= image.shape[0] or j >= image.shape[1]:
return 1, None
return 0, image[i : i + 1, j : j + 1]
def get_pixel_fixed(image, i):
return 0, np.array([[i]])
def get_grid(image, grid_size, cell, frame=False):
""" returns the particular cell form the image with grid"""
if frame:
return get_grid(image[1 : image.shape[0] - 1, 1 : image.shape[1] - 1], grid_size, cell, frame=False)
if cell[0] >= grid_size[0] or cell[1] >= grid_size[1]:
return 1, None
steps = ((image.shape[0] + 1) // grid_size[0], (image.shape[1] + 1) // grid_size[1])
block = image[steps[0] * cell[0] : steps[0] * (cell[0] + 1) - 1, steps[1] * cell[1] : steps[1] * (cell[1] + 1) - 1]
return 0, block
def get_half(image, side):
""" returns the half of the image"""
if side not in ["l", "r", "t", "b", "long1", "long2"]:
return 1, None
if side == "l":
return 0, image[:, : (image.shape[1]) // 2]
elif side == "r":
return 0, image[:, -((image.shape[1]) // 2) :]
elif side == "b":
return 0, image[-((image.shape[0]) // 2) :, :]
elif side == "t":
return 0, image[: (image.shape[0]) // 2, :]
elif side == "long1":
if image.shape[0] >= image.shape[1]:
return get_half(image, "t")
else:
return get_half(image, "l")
elif side == "long2":
if image.shape[0] >= image.shape[1]:
return get_half(image, "b")
else:
return get_half(image, "r")
def get_corner(image, side):
""" returns the half of the image"""
if side not in ["tl", "tr", "bl", "br"]:
return 1, None
size = (image.shape[0]) // 2, (image.shape[1]) // 2
if side == "tl":
return 0, image[size[0] :, -size[1] :]
if side == "tr":
return 0, image[size[0] :, : size[1]]
if side == "bl":
return 0, image[: -size[0], : size[1]]
if side == "br":
return 0, image[: -size[0], -size[1] :]
def get_k_part(image, num, k):
if image.shape[0] > image.shape[1]:
max_axis = 0
max_shape = image.shape[0]
else:
max_axis = 1
max_shape = image.shape[1]
if max_shape % num != 0:
return 1, None
size = max_shape // num
if max_axis == 0:
return 0, image[k * size : (k + 1) * size]
else:
return 0, image[:, k * size : (k + 1) * size]
def get_rotation(image, k):
return 0, np.rot90(image, k)
def get_transpose(image):
return 0, np.transpose(image)
def get_roll(image, shift, axis):
return 0, np.roll(image, shift=shift, axis=axis)
def get_cut_edge(image, l, r, t, b):
"""deletes pixels from some sided of an image"""
return 0, image[t : image.shape[0] - b, l : image.shape[1] - r]
def get_resize(image, scale):
""" resizes image according to scale"""
if isinstance(scale, int):
if image.shape[0] % scale != 0 or image.shape[1] % scale != 0:
return 1, None
if image.shape[0] < scale or image.shape[1] < scale:
return 2, None
arrays = []
size = image.shape[0] // scale, image.shape[1] // scale
for i in range(scale):
for j in range(scale):
arrays.append(image[i::scale, j::scale])
result = mode(np.stack(arrays), axis=0).mode[0]
else:
size = int(image.shape[0] / scale), int(image.shape[1] / scale)
result = []
for i in range(size[0]):
result.append([])
for j in range(size[1]):
result[-1].append(image[int(i * scale), int(j * scale)])
result = np.uint8(result)
return 0, result
def get_resize_to(image, size_x, size_y):
""" resizes image according to scale"""
scale_x = image.shape[0] // size_x
scale_y = image.shape[1] // size_y
if scale_x == 0 or scale_y == 0:
return 3, None
if image.shape[0] % scale_x != 0 or image.shape[1] % scale_y != 0:
return 1, None
if image.shape[0] < scale_x or image.shape[1] < scale_y:
return 2, None
arrays = []
for i in range(scale_x):
for j in range(scale_y):
arrays.append(image[i::scale_x, j::scale_y])
result = mode(np.stack(arrays), axis=0).mode[0]
if result.max() > 10:
print(1)
return 0, result
def get_reflect(image, side):
""" returns images generated by reflections of the input"""
if side not in ["r", "l", "t", "b", "rt", "rb", "lt", "lb"]:
return 1, None
try:
if side == "r":
result = np.zeros((image.shape[0], image.shape[1] * 2 - 1))
result[:, : image.shape[1]] = image
result[:, -image.shape[1] :] = image[:, ::-1]
elif side == "l":
result = np.zeros((image.shape[0], image.shape[1] * 2 - 1))
result[:, : image.shape[1]] = image[:, ::-1]
result[:, -image.shape[1] :] = image
elif side == "b":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1]))
result[: image.shape[0], :] = image
result[-image.shape[0] :, :] = image[::-1]
elif side == "t":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1]))
result[: image.shape[0], :] = image[::-1]
result[-image.shape[0] :, :] = image
elif side == "rb":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))
result[: image.shape[0], : image.shape[1]] = image
result[: image.shape[0], -image.shape[1] :] = image[:, ::-1]
result[-image.shape[0] :, : image.shape[1]] = image[::-1, :]
result[-image.shape[0] :, -image.shape[1] :] = image[::-1, ::-1]
elif side == "rt":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))
result[: image.shape[0], : image.shape[1]] = image[::-1, :]
result[: image.shape[0], -image.shape[1] :] = image[::-1, ::-1]
result[-image.shape[0] :, : image.shape[1]] = image
result[-image.shape[0] :, -image.shape[1] :] = image[:, ::-1]
elif side == "lt":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))
result[: image.shape[0], : image.shape[1]] = image[::-1, ::-1]
result[: image.shape[0], -image.shape[1] :] = image[::-1, :]
result[-image.shape[0] :, : image.shape[1]] = image[:, ::-1]
result[-image.shape[0] :, -image.shape[1] :] = image
elif side == "lb":
result = np.zeros((image.shape[0] * 2 - 1, image.shape[1] * 2 - 1))
result[: image.shape[0], : image.shape[1]] = image[:, ::-1]
result[: image.shape[0], -image.shape[1] :] = image
result[-image.shape[0] :, : image.shape[1]] = image[::-1, ::-1]
result[-image.shape[0] :, -image.shape[1] :] = image[::-1, :]
except:
return 2, None
return 0, result
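# Illustrative sketch, not from the original module: reflecting a 2x2 block to the
# right shares the middle column, giving a 2x3 result.
def _get_reflect_sketch():
    toy = np.array([[1, 2],
                    [3, 4]], dtype=np.uint8)
    err, mirrored = get_reflect(toy, "r")
    return err, mirrored             # expected: err == 0, mirrored == [[1, 2, 1], [3, 4, 3]]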
def get_color_swap(image, color_1, color_2):
"""swapping two colors"""
if not (image == color_1).any() and not (image == color_2).any():
return 1, None
result = image.copy()
result[image == color_1] = color_2
result[image == color_2] = color_1
return 0, result
def get_cut(image, x1, y1, x2, y2):
if x1 >= x2 or y1 >= y2:
return 1, None
else:
return 0, image[x1:x2, y1:y2]
def get_min_block(image, full=True):
if full:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
sizes = [(masks == i).sum() for i in range(1, n_masks + 1)]
if n_masks == 0:
return 2, None
min_n = np.argmin(sizes) + 1
boundaries = find_color_boundaries(masks, min_n)
if boundaries:
return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])
else:
return 1, None
def get_min_block_mask(image, full=True):
if full:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
sizes = [(masks == i).sum() for i in range(1, n_masks + 1)]
if n_masks == 0:
return 2, None
min_n = np.argmin(sizes) + 1
return 0, masks == min_n
def get_max_block_mask(image, full=True):
if full:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
sizes = [(masks == i).sum() for i in range(1, n_masks + 1)]
if n_masks == 0:
return 2, None
max_n = np.argmax(sizes) + 1
return 0, masks == max_n
def get_max_block(image, full=True):
if full:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
sizes = [(masks == i).sum() for i in range(1, n_masks + 1)]
if n_masks == 0:
return 2, None
max_n = np.argmax(sizes) + 1
boundaries = find_color_boundaries(masks, max_n)
if boundaries:
return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])
else:
return 1, None
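# Illustrative sketch, not from the original module: with one isolated pixel and
# one larger rectangle, get_max_block should return the bounding box of the larger
# connected component.
def _get_max_block_sketch():
    toy = np.zeros((4, 4), dtype=np.uint8)
    toy[0, 0] = 7                    # a single isolated pixel
    toy[2:4, 1:4] = 3                # a 2x3 block of color 3
    err, block = get_max_block(toy)
    return err, block                # expected: err == 0, block == 2x3 array of 3s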
def get_block_with_side_colors(image, block_type="min", structure=0):
if structure == 0:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
if n_masks == 0:
return 2, None
unique_nums = []
for i in range(1, n_masks + 1):
unique = np.unique(image[masks == i])
unique_nums.append(len(unique))
if block_type == "min":
n = np.argmin(unique_nums) + 1
else:
n = np.argmax(unique_nums) + 1
boundaries = find_color_boundaries(masks, n)
if boundaries:
return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])
else:
return 1, None
def get_block_with_side_colors_count(image, block_type="min", structure=0):
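    """returns the crop of the connected block whose rarest-color pixel count is smallest (min) or largest (max)"""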
if structure == 0:
structure = [[1, 1, 1], [1, 1, 1], [1, 1, 1]]
else:
structure = [[0, 1, 0], [1, 1, 1], [0, 1, 0]]
masks, n_masks = ndimage.label(image, structure=structure)
if n_masks == 0:
return 2, None
unique_nums = []
for i in range(1, n_masks + 1):
unique, counts = np.unique(image[masks == i], return_counts=True)
unique_nums.append(min(counts))
if block_type == "min":
n = np.argmin(unique_nums) + 1
else:
n = np.argmax(unique_nums) + 1
boundaries = find_color_boundaries(masks, n)
if boundaries:
return (0, image[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1])
else:
return 1, None
def get_color(color_dict, colors):
""" retrive the absolute number corresponding a color set by color_dict"""
for i, color in enumerate(colors):
for data in color:
equal = True
for k, v in data.items():
if k not in color_dict or v != color_dict[k]:
equal = False
break
if equal:
return i
return -1
def get_mask_from_block(image, color):
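    """returns the boolean mask image == color; status 1 if the color is absent"""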
if color in np.unique(image, return_counts=False):
return 0, image == color
else:
return 1, None
def get_background(image, color):
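    """returns a constant image of the given color with the same shape as the input"""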
return 0, np.uint8(np.ones_like(image) * color)
def get_mask_from_max_color_coverage(image, color):
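    """returns a boolean mask covering the bounding rectangle of all pixels of the given color"""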
if color in np.unique(image, return_counts=False):
boundaries = find_color_boundaries(image, color)
result = (image.copy() * 0).astype(bool)
result[boundaries[0] : boundaries[1] + 1, boundaries[2] : boundaries[3] + 1] = True
        return 0, result
else:
return 1, None
def add_unique_colors(image, result, colors=None):
"""adds information about colors unique for some parts of the image"""
if colors is None:
colors = np.unique(image)
unique_side = [False for i in range(10)]
unique_corner = [False for i in range(10)]
half_size = (((image.shape[0] + 1) // 2), ((image.shape[1] + 1) // 2))
for (image_part, side, unique_list) in [
(image[: half_size[0]], "bottom", unique_side),
(image[-half_size[0] :], "top", unique_side),
(image[:, : half_size[1]], "right", unique_side),
(image[:, -half_size[1] :], "left", unique_side),
(image[-half_size[0] :, -half_size[1] :], "tl", unique_corner),
(image[-half_size[0] :, : half_size[1]], "tr", unique_corner),
(image[: half_size[0], : half_size[1]], "br", unique_corner),
(image[: half_size[0], -half_size[1] :], "left", unique_corner),
]:
unique = np.uint8(np.unique(image_part))
if len(unique) == len(colors) - 1:
color = [x for x in colors if x not in unique][0]
unique_list[color] = True
result["colors"][color].append({"type": "unique", "side": side})
for i in range(10):
if unique_corner[i]:
result["colors"][i].append({"type": "unique", "side": "corner"})
if unique_side[i]:
result["colors"][i].append({"type": "unique", "side": "side"})
if unique_side[i] or unique_corner[i]:
result["colors"][i].append({"type": "unique", "side": "any"})
return
def add_center_color(image, result, colors=None):
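    """adds the most frequent color of the central part of the image to the color scheme"""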
i = image.shape[0] // 4
j = image.shape[1] // 4
center = image[i : image.shape[0] - i, j : image.shape[1] - j]
values, counts = np.unique(center, return_counts=True)
if len(counts) > 0:
ind = np.argmax(counts)
color = values[ind]
result["colors"][color].append({"type": "center"})
def get_color_scheme(image, target_image=None, params=None):
"""processes original image and returns dict color scheme"""
result = {
"grid_color": -1,
"colors": [[], [], [], [], [], [], [], [], [], []],
"colors_sorted": [],
"grid_size": [1, 1],
}
if params is None:
params = ["coverage", "unique", "corners", "top", "grid"]
# preparing colors info
unique, counts = np.unique(image, return_counts=True)
colors = [unique[i] for i in np.argsort(counts)]
result["colors_sorted"] = colors
result["colors_num"] = len(colors)
for color in range(10):
# use abs color value - same for any image
result["colors"][color].append({"type": "abs", "k": color})
if len(colors) == 2 and 0 in colors:
result["colors"][[x for x in colors if x != 0][0]].append({"type": "non_zero"})
if "coverage" in params:
for k, color in enumerate(colors):
# use k-th colour (sorted by presence on image)
result["colors"][color].append({"type": "min", "k": k})
# use k-th colour (sorted by presence on image)
result["colors"][color].append({"type": "max", "k": len(colors) - k - 1})
if "unique" in params:
add_unique_colors(image, result, colors=None)
add_center_color(image, result)
if "corners" in params:
# colors in the corners of images
result["colors"][image[0, 0]].append({"type": "corner", "side": "tl"})
result["colors"][image[0, -1]].append({"type": "corner", "side": "tr"})
result["colors"][image[-1, 0]].append({"type": "corner", "side": "bl"})
result["colors"][image[-1, -1]].append({"type": "corner", "side": "br"})
if "top" in params:
        # colors that lie on top of others and form a full vertical or horizontal line
for k in range(10):
mask = image == k
is_on_top0 = mask.min(axis=0).any()
is_on_top1 = mask.min(axis=1).any()
if is_on_top0:
result["colors"][k].append({"type": "on_top", "side": "0"})
if is_on_top1:
result["colors"][k].append({"type": "on_top", "side": "1"})
if is_on_top1 or is_on_top0:
result["colors"][k].append({"type": "on_top", "side": "any"})
if "grid" in params:
grid_color, grid_size, frame = find_grid(image)
if grid_color >= 0:
result["grid_color"] = grid_color
result["grid_size"] = grid_size
result["grid_frame"] = frame
result["colors"][grid_color].append({"type": "grid"})
return result
def add_block(target_dict, image, params_list):
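    """stores the array in the cache under its content hash and maps each params description to that hash"""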
array_hash = hash(matrix2answer(image))
if array_hash not in target_dict["arrays"]:
target_dict["arrays"][array_hash] = {"array": image, "params": []}
for params in params_list:
params_hash = get_dict_hash(params)
target_dict["arrays"][array_hash]["params"].append(params)
target_dict["params"][params_hash] = array_hash
def get_original(image):
return 0, image
def get_inversed_colors(image):
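    """swaps the two colors of a two-color image; status 1 if the image does not have exactly two colors"""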
unique = np.unique(image)
if len(unique) != 2:
return 1, None
result = image.copy()
result[image == unique[0]] = unique[1]
result[image == unique[1]] = unique[0]
return 0, result
def generate_blocks(image, result, max_time=600, max_blocks=200000, max_masks=200000, target_image=None, params=None):
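    """generates candidate blocks (crops, transforms, recolorings) of the image into result["blocks"], within the time and size budgets"""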
all_params = [
"initial",
"background",
"min_max_blocks",
"block_with_side_colors",
"max_area_covered",
"grid_cells",
"halves",
"corners",
"rotate",
"transpose",
"cut_edges",
"resize",
"reflect",
"cut_parts",
"swap_colors",
"k_part",
]
if not params:
params = all_params
start_time = time.time()
result["blocks"] = {"arrays": {}, "params": {}}
if "initial" in params:
# starting with the original image
add_block(result["blocks"], image, [[{"type": "original"}]])
# inverse colors
status, block = get_inversed_colors(image)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "inversed_colors"}]])
# adding min and max blocks
if (
("min_max_blocks" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
# print("min_max_blocks")
for full in [True, False]:
status, block = get_max_block(image, full)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "max_block", "full": full}]])
if (
("block_with_side_colors" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
# print("min_max_blocks")
for block_type in ["min", "max"]:
for structure in [0, 1]:
status, block = get_block_with_side_colors(image, block_type, structure)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(
result["blocks"],
block,
[[{"type": "block_with_side_colors", "block_type": block_type, "structure": structure}]],
)
for block_type in ["min", "max"]:
for structure in [0, 1]:
status, block = get_block_with_side_colors_count(image, block_type, structure)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(
result["blocks"],
block,
[[{"type": "block_with_side_colors_count", "block_type": block_type, "structure": structure}]],
)
# print(sum([len(x['params']) for x in result['blocks']['arrays'].values()]))
# adding background
if ("background" in params) and (time.time() - start_time < max_time):
# print("background")
for color in range(10):
status, block = get_background(image, color)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = []
for color_dict in result["colors"][color].copy():
params_list.append([{"type": "background", "color": color_dict}])
add_block(result["blocks"], block, params_list)
# adding the max area covered by each color
if ("max_area_covered" in params) and (time.time() - start_time < max_time):
# print("max_area_covered")
for color in result["colors_sorted"]:
status, block = get_color_max(image, color)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = []
for color_dict in result["colors"][color].copy():
params_list.append([{"type": "color_max", "color": color_dict}])
add_block(result["blocks"], block, params_list)
# adding grid cells
if (
("grid_cells" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
if result["grid_color"] > 0:
for i in range(result["grid_size"][0]):
for j in range(result["grid_size"][1]):
status, block = get_grid(image, result["grid_size"], (i, j), frame=result["grid_frame"])
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(
result["blocks"],
block,
[
[
{
"type": "grid",
"grid_size": result["grid_size"],
"cell": [i, j],
"frame": result["grid_frame"],
}
]
],
)
# adding halves of the images
if ("halves" in params) and (time.time() - start_time < max_time) and (len(result["blocks"]["arrays"]) < max_blocks):
for side in ["l", "r", "t", "b", "long1", "long2"]:
status, block = get_half(image, side=side)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "half", "side": side}]])
# extracting pixels from image
if ("pixels" in params) and (time.time() - start_time < max_time) and (len(result["blocks"]["arrays"]) < max_blocks):
for i in range(image.shape[0]):
for j in range(image.shape[1]):
status, block = get_pixel(image, i=i, j=j)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "pixel", "i": i, "j": j}]])
# extracting pixels from image
if (
("pixel_fixed" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
for i in range(10):
status, block = get_pixel_fixed(image, i=i)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "pixel_fixed", "i": i}]])
# adding halves of the images
if ("k_part" in params) and (time.time() - start_time < max_time) and (len(result["blocks"]["arrays"]) < max_blocks):
for num in [3, 4]:
for k in range(num):
status, block = get_k_part(image, num=num, k=k)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "k_part", "num": num, "k": k}]])
# adding corners of the images
if (
("corners" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
for side in ["tl", "tr", "bl", "br"]:
status, block = get_corner(image, side=side)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
add_block(result["blocks"], block, [[{"type": "corner", "side": side}]])
main_blocks_num = len(result["blocks"])
# rotate all blocks
if ("rotate" in params) and (time.time() - start_time < max_time) and (len(result["blocks"]["arrays"]) < max_blocks):
current_blocks = result["blocks"]["arrays"].copy()
for k in range(1, 4):
for key, data in current_blocks.items():
status, block = get_rotation(data["array"], k=k)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [i + [{"type": "rotation", "k": k}] for i in data["params"]]
add_block(result["blocks"], block, params_list)
# transpose all blocks
if (
("transpose" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
current_blocks = result["blocks"]["arrays"].copy()
for key, data in current_blocks.items():
status, block = get_transpose(data["array"])
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [i + [{"type": "transpose"}] for i in data["params"]]
add_block(result["blocks"], block, params_list)
# cut edges for all blocks
if (
("cut_edges" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
current_blocks = result["blocks"]["arrays"].copy()
for l, r, t, b in [
(1, 1, 1, 1),
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 1, 0, 0),
(1, 0, 0, 1),
(0, 0, 1, 1),
(0, 1, 1, 0),
]:
if time.time() - start_time < max_time:
for key, data in current_blocks.items():
status, block = get_cut_edge(data["array"], l=l, r=r, t=t, b=b)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [
i + [{"type": "cut_edge", "l": l, "r": r, "t": t, "b": b}] for i in data["params"]
]
add_block(result["blocks"], block, params_list)
# resize all blocks
if ("resize" in params) and (time.time() - start_time < max_time) and (len(result["blocks"]["arrays"]) < max_blocks):
current_blocks = result["blocks"]["arrays"].copy()
for scale in [2, 3, 1 / 2, 1 / 3]:
for key, data in current_blocks.items():
status, block = get_resize(data["array"], scale)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [i + [{"type": "resize", "scale": scale}] for i in data["params"]]
add_block(result["blocks"], block, params_list)
for size_x, size_y in [(2, 2), (3, 3)]:
for key, data in current_blocks.items():
status, block = get_resize_to(data["array"], size_x, size_y)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [
i + [{"type": "resize_to", "size_x": size_x, "size_y": size_y}] for i in data["params"]
]
add_block(result["blocks"], block, params_list)
# reflect all blocks
if (
("reflect" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
current_blocks = result["blocks"]["arrays"].copy()
for side in ["r", "l", "t", "b", "rt", "rb", "lt", "lb"]:
if time.time() - start_time < max_time:
for key, data in current_blocks.items():
status, block = get_reflect(data["array"], side)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
params_list = [i + [{"type": "reflect", "side": side}] for i in data["params"]]
add_block(result["blocks"], block, params_list)
# cut some parts of images
if (
("cut_parts" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
max_x = image.shape[0]
max_y = image.shape[1]
min_block_size = 2
for x1 in range(0, max_x - min_block_size):
if time.time() - start_time < max_time:
if max_x - x1 <= min_block_size:
continue
for x2 in range(x1 + min_block_size, max_x):
for y1 in range(0, max_y - min_block_size):
if max_y - y1 <= min_block_size:
continue
for y2 in range(y1 + min_block_size, max_y):
status, block = get_cut(image, x1, y1, x2, y2)
if status == 0:
add_block(
result["blocks"], block, [[{"type": "cut", "x1": x1, "x2": x2, "y1": y1, "y2": y2}]]
)
list_param_list = []
list_blocks = []
# swap some colors
if (
("swap_colors" in params)
and (time.time() - start_time < max_time)
and (len(result["blocks"]["arrays"]) < max_blocks)
):
current_blocks = result["blocks"]["arrays"].copy()
for color_1 in range(9):
if time.time() - start_time < max_time:
for color_2 in range(color_1 + 1, 10):
for key, data in current_blocks.items():
status, block = get_color_swap(data["array"], color_1, color_2)
if status == 0 and block.shape[0] > 0 and block.shape[1] > 0:
for color_dict_1 in result["colors"][color_1].copy():
for color_dict_2 in result["colors"][color_2].copy():
list_param_list.append(
[
j
+ [{"type": "color_swap", "color_1": color_dict_1, "color_2": color_dict_2}]
for j in data["params"]
]
)
list_blocks.append(block)
for block, params_list in zip(list_blocks, list_param_list):
add_block(result["blocks"], block, params_list)
if time.time() - start_time > max_time:
print("Time is over")
if len(result["blocks"]["arrays"]) >= max_blocks:
print("Max number of blocks exceeded")
return result
def generate_masks(image, result, max_time=600, max_blocks=200000, max_masks=200000, target_image=None, params=None):
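    """generates boolean masks (per-color, logical combinations, coverage, min/max blocks) into result["masks"]"""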
start_time = time.time()
all_params = ["initial_masks", "additional_masks", "coverage_masks", "min_max_masks"]
if not params:
params = all_params
result["masks"] = {"arrays": {}, "params": {}}
# making one mask for each generated block
current_blocks = result["blocks"]["arrays"].copy()
if ("initial_masks" in params) and (time.time() - start_time < max_time * 2):
for key, data in current_blocks.items():
for color in result["colors_sorted"]:
status, mask = get_mask_from_block(data["array"], color)
if status == 0 and mask.shape[0] > 0 and mask.shape[1] > 0:
params_list = [
{"operation": "none", "params": {"block": i, "color": color_dict}}
for i in data["params"]
for color_dict in result["colors"][color]
]
add_block(result["masks"], mask, params_list)
initial_masks = result["masks"]["arrays"].copy()
if ("initial_masks" in params) and (time.time() - start_time < max_time * 2):
for key, mask in initial_masks.items():
add_block(
result["masks"],
np.logical_not(mask["array"]),
[{"operation": "not", "params": param["params"]} for param in mask["params"]],
)
initial_masks = result["masks"]["arrays"].copy()
masks_to_add = []
processed = []
if ("additional_masks" in params) and (time.time() - start_time < max_time * 2):
for key1, mask1 in initial_masks.items():
processed.append(key1)
if time.time() - start_time < max_time * 2 and (
target_image is None
or (target_image.shape == mask1["array"].shape)
or (target_image.shape == mask1["array"].T.shape)
):
for key2, mask2 in initial_masks.items():
if key2 in processed:
continue
if (mask1["array"].shape[0] == mask2["array"].shape[0]) and (
mask1["array"].shape[1] == mask2["array"].shape[1]
):
params_list_and = []
params_list_or = []
params_list_xor = []
for param1 in mask1["params"]:
for param2 in mask2["params"]:
params_list_and.append(
{"operation": "and", "params": {"mask1": param1, "mask2": param2}}
)
params_list_or.append({"operation": "or", "params": {"mask1": param1, "mask2": param2}})
params_list_xor.append(
{"operation": "xor", "params": {"mask1": param1, "mask2": param2}}
)
masks_to_add.append(
(result["masks"], np.logical_and(mask1["array"], mask2["array"]), params_list_and)
)
masks_to_add.append(
(result["masks"], np.logical_or(mask1["array"], mask2["array"]), params_list_or)
)
masks_to_add.append(
(result["masks"], np.logical_xor(mask1["array"], mask2["array"]), params_list_xor)
)
for path, array, params_list in masks_to_add:
add_block(path, array, params_list)
# coverage_masks
if ("coverage_masks" in params) and (time.time() - start_time < max_time * 2):
for color in result["colors_sorted"][1:]:
status, mask = get_mask_from_max_color_coverage(image, color)
if status == 0 and mask.shape[0] > 0 and mask.shape[1] > 0:
params_list = [
{"operation": "coverage", "params": {"color": color_dict}}
for color_dict in result["colors"][color].copy()
]
add_block(result["masks"], mask, params_list)
# coverage_masks
if ("min_max_masks" in params) and (time.time() - start_time < max_time * 2):
status, mask = get_min_block_mask(image)
if status == 0 and mask.shape[0] > 0 and mask.shape[1] > 0:
params_list = [{"operation": "min_block"}]
add_block(result["masks"], mask, params_list)
status, mask = get_max_block_mask(image)
if status == 0 and mask.shape[0] > 0 and mask.shape[1] > 0:
params_list = [{"operation": "max_block"}]
add_block(result["masks"], mask, params_list)
if time.time() - start_time > max_time:
print("Time is over")
if len(result["blocks"]["arrays"]) >= max_masks:
print("Max number of masks exceeded")
return result
def process_image(
image, max_time=600, max_blocks=200000, max_masks=200000, target_image=None, params=None, color_params=None
):
"""processes the original image and returns dict with structured image blocks"""
result = get_color_scheme(image, target_image=target_image, params=color_params)
    result = generate_blocks(image, result, max_time, max_blocks, max_masks, target_image, params)
    result = generate_masks(image, result, max_time, max_blocks, max_masks, target_image, params)
return result
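# a minimal usage sketch (assuming `image` is a 2-D numpy array of color indices 0-9):
#     scheme = process_image(image, max_time=60)
#     blocks = scheme["blocks"]["arrays"]  # candidate crops/transforms keyed by content hash
#     masks = scheme["masks"]["arrays"]    # candidate boolean masks keyed by content hash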
def get_mask_from_block_params(image, params, block_cache=None, mask_cache=None, color_scheme=None):
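    """rebuilds a mask on the image from a serialized mask description, resolving nested block/color references recursively and caching the result"""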
if mask_cache is None:
mask_cache = {{"arrays": {}, "params": {}}}
dict_hash = get_dict_hash(params)
    if dict_hash in mask_cache["params"]:
mask = mask_cache["arrays"][mask_cache["params"][dict_hash]]["array"]
if len(mask) == 0:
return 1, None
else:
return 0, mask
if params["operation"] == "none":
status, block = get_predict(image, params["params"]["block"], block_cache, color_scheme)
if status != 0:
add_block(mask_cache, np.array([[]]), [params])
return 1, None
if not color_scheme:
color_scheme = get_color_scheme(image)
color_num = get_color(params["params"]["color"], color_scheme["colors"])
if color_num < 0:
add_block(mask_cache, np.array([[]]), [params])
return 2, None
status, mask = get_mask_from_block(block, color_num)
if status != 0:
add_block(mask_cache, np.array([[]]), [params])
return 6, None
add_block(mask_cache, mask, [params])
return 0, mask
elif params["operation"] == "not":
new_params = params.copy()
new_params["operation"] = "none"
status, mask = get_mask_from_block_params(
image, new_params, block_cache=block_cache, color_scheme=color_scheme, mask_cache=mask_cache
)
if status != 0:
add_block(mask_cache, np.array([[]]), [params])
return 3, None
mask = np.logical_not(mask)
add_block(mask_cache, mask, [params])
return 0, mask
elif params["operation"] in ["and", "or", "xor"]:
new_params = params["params"]["mask1"]
status, mask1 = get_mask_from_block_params(
image, new_params, block_cache=block_cache, color_scheme=color_scheme, mask_cache=mask_cache
)
if status != 0:
add_block(mask_cache, np.array([[]]), [params])
return 4, None
new_params = params["params"]["mask2"]
status, mask2 = get_mask_from_block_params(
image, new_params, block_cache=block_cache, color_scheme=color_scheme, mask_cache=mask_cache
)
if status != 0:
add_block(mask_cache,
|
np.array([[]])
|
numpy.array
|
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier, LogisticRegression
from analyzer import MessageAnalyzer
import numpy as np
import nltk
import string
class TextClassifier:
def __init__(self, training_data, targets, target_indices):
        self.training_data = [message.translate(str.maketrans('', '', string.punctuation)) for message in training_data]
self.targets = targets
self.target_indices = target_indices
self.text_clf = None
self.analyzer = MessageAnalyzer(self.training_data)
freqs = self.analyzer.word_frequencies()
self.most_common_words = freqs.most_common(100)
#print(self.most_common_words)
# bag of words model where every word is a feature name w/ value of True
@staticmethod
def word_features(words):
return dict([(word, True) for word in words])
def find_features(self, line):
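        """maps each of the 100 most common corpus words to whether it occurs in the line"""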
features = {}
for word_feature, frequency in self.most_common_words:
features[word_feature] = (word_feature in line.lower())
return features
def create_features(self):
feature_sets = [(self.find_features(words), self.targets[self.target_indices[i]]) for i, words in enumerate(self.training_data)]
# print('Feature sets:')
# print(feature_sets)
return feature_sets
def train_nltk(self):
features = self.create_features()
self.text_clf = nltk.NaiveBayesClassifier.train(features)
def test_nltk(self, test_data):
        test_features = [(self.find_features(words), self.targets[self.target_indices[i]]) for i, words in enumerate(test_data)]
print("Classifier accuracy percent:", (nltk.classify.accuracy(self.text_clf, test_features)) * 100)
self.text_clf.show_most_informative_features(15)
def train(self, classifier_type='svm'):
if classifier_type == 'svm':
text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()),
('clf-svm',
SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42))])
elif classifier_type == 'logistic':
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', LogisticRegression())])
else: # default: naive bayes
text_clf = Pipeline([('vect', CountVectorizer(stop_words='english')),
('tfidf', TfidfTransformer()),
('clf', MultinomialNB())])
self.text_clf = text_clf.fit(self.training_data, self.target_indices)
def predict(self, predict_data, correct_values=None):
predicted = self.text_clf.predict(predict_data)
if correct_values:
performance =
|
np.mean(predicted == correct_values)
|
numpy.mean
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# rppy - a geophysical library for Python
# Copyright (c) 2014, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rppy
import numpy as np
# Test reflectivity.py
def test_shuey():
err = 0.005
Vp1 = 3000
Vp2 = 4000
Vs1 = 1500
Vs2 = 2000
p1 = 2000
p2 = 2200
theta1 = np.array([32])
exp = 0.151
Rpp = rppy.reflectivity.shuey(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_aki_richards():
err = 0.05
Vp1 = 3000
Vp2 = 4000
Vs1 = 1500
Vs2 = 2000
p1 = 2000
p2 = 2200
theta1 = np.array([32])
exp = 0.15351
Rpp = rppy.reflectivity.aki_richards(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_bortfeld():
err = 0.01
Vp1 = 3000.
Vp2 = 4000.
Vs1 = 1500.
Vs2 = 2000.
p1 = 2000.
p2 = 2200.
theta1 = np.array([32])
exp = 0.15469135
Rpp = rppy.reflectivity.bortfeld(Vp1, Vs1, p1,
Vp2, Vs2, p2,
theta1)
assert np.abs(Rpp - exp)/exp < err
def test_snell():
err = 0.01
vp1 = 2500
vs1 = 1725
vp2 = 3800
vs2 = 1900
theta1 = np.array([30])
theta2E = 49.46
thetas1E = 20.18
thetas2E = 22.33
theta2, thetas1, thetas2, p = rppy.reflectivity.snell(vp1, vp2,
vs1, vs2,
np.radians(theta1))
assert np.abs(np.rad2deg(theta2) - theta2E) < err
assert np.abs(np.rad2deg(thetas1) - thetas1E) < err
assert np.abs(np.rad2deg(thetas2) - thetas2E) < err
def test_thomsen():
err = 0.05
C = np.zeros(shape=(6, 6))
C[0][0] = 87.26e9
C[1][1] = 87.26e9
C[2][2] = 105.8e9
C[3][3] = 57.15e9
C[4][4] = 57.15e9
C[5][5] = 40.35e9
C[0][2] = 11.95e9
C[0][1] = 6.57e9
C[0][3] = -17.18e9
p = 2646.6
eexp = -0.08762
yexp = -0.14698
dexp = -0.031453
vp, vs, e1, d1, y1, e2, d2, y2, d3 = rppy.reflectivity.thomsen(C, p)
assert np.abs(e1 - eexp)/eexp < err
assert np.abs(y1 - yexp)/yexp < err
assert np.abs(d1 - dexp)/dexp < err
def test_Cij():
err = 0.05
C = np.zeros(shape=(6, 6))
C[0][0] = 87.26e9
C[1][1] = 87.26e9
C[2][2] = 105.8e9
C[3][3] = 57.15e9
C[4][4] = 57.15e9
C[5][5] = 40.35e9
C[0][2] = 11.95e9
C[0][1] = 6.57e9
C[0][3] = -17.18e9
p = 2646.6
vp, vs, e1, d1, y1, e2, d2, y2, d3 = rppy.reflectivity.thomsen(C, p)
C2 = rppy.reflectivity.Cij(vp, vs, p, e1, d1, y1, e2, d2, y2, d3)
assert np.abs(C[0][0] - C2[0][0])/C2[0][0] < err
assert np.abs(C[2][2] - C2[2][2])/C2[2][2] < err
assert np.abs(C[5][5] - C2[5][5])/C2[5][5] < err
assert np.abs(C[0][2] - C2[0][2])/C2[0][2] < err
#def test_daley_hron_vti_against_crewes():
# err = 0.05
# vp1 = 3000
# vs1 = 1500
# p1 = 2000
# e1 = 0.0
# d1 = 0.0
#
# vp2 = 4000
# vs2 = 2000
# p2 = 2200
# e2 = 0.1
# d2 = 0.1
#
# theta = np.array([0.94406, 4.8601, 9.2657, 13.916, 18.811, 23.462, 27.622,
# 32.028, 33.986, 35.699, 37.657, 38.392, 39.860, 40.105,
# 40.594, 41.084, 41.818, 42.063])
# exp = np.array([0.18993, 0.19006, 0.18778, 0.18550, 0.18567, 0.18340,
# 0.19330, 0.21540, 0.23742, 0.26187, 0.29852, 0.33514,
# 0.37909, 0.41324, 0.45229, 0.48889, 0.53282, 0.57917])
# Rpp = rppy.reflectivity.daley_hron_vti(vp1, vs1, p1, vp2, vs2, p2, theta)
# for ind, val in enumerate(Rpp):
# assert np.abs(val - exp[ind])/exp[ind] < err
#def test_elastic_impedance():
# assert 0 == 1
#def test_extended_elastic_impedance():
# assert 0 == 1
def test_aki_richards_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
vp2 = 4000
vs2 = 2000
p2 = 2200
theta = np.array([1.2069, 3.6638, 6.2500, 8.3190, 10.259, 12.586, 14.526,
16.336, 18.147, 19.828, 22.543, 25.259, 27.198, 30.690,
33.922, 35.603, 37.414, 38.836, 40.388, 41.552, 42.716,
44.655, 45.302, 45.819, 46.595, 46.983, 47.371])
exp = np.array([0.19370, 0.19119, 0.18866, 0.18618, 0.18370, 0.17887,
0.17639, 0.17160, 0.16914, 0.16435, 0.15949, 0.15463,
0.15216, 0.15188, 0.15396, 0.16081, 0.16997, 0.18149,
0.19764, 0.21616, 0.23700, 0.29499, 0.32284,
0.35536, 0.40181, 0.43667, 0.47617])
Rpp = rppy.reflectivity.aki_richards(vp1, vs1, p1, vp2, vs2, p2, theta)
for ind, val in enumerate(Rpp):
assert np.abs(val - exp[ind])/exp[ind] < err
def test_bortfeld_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
vp2 = 4000
vs2 = 2000
p2 = 2200
theta = np.array([1.0526, 4.0789, 6.9737, 9.3421, 11.053, 12.632, 14.079,
15.921, 18.684, 21.184, 24.079, 26.842, 31.842, 34.211,
35.789, 37.237, 38.947, 40.263, 41.974, 43.026, 43.816,
44.737, 45.395, 45.921, 46.447, 46.842, 47.237, 47.500,
47.763, 47.895, 48.026, 48.158])
exp = np.array([0.19291, 0.19291, 0.19054, 0.18581, 0.18345, 0.18108,
0.17635, 0.17399, 0.16926, 0.16453, 0.15980, 0.15507,
0.15507, 0.15980, 0.16689, 0.17635, 0.19054, 0.20709,
0.23547, 0.26385, 0.28750, 0.32534, 0.36081, 0.39865,
0.44358, 0.49088, 0.53818, 0.58311, 0.64932, 0.68716,
0.73919, 0.80304])
Rpp = rppy.reflectivity.bortfeld(vp1, vs1, p1, vp2, vs2, p2, theta)
for ind, val in enumerate(Rpp):
assert np.abs(val - exp[ind])/exp[ind] < err
def test_zoeppritz_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
vp2 = 4000
vs2 = 2000
p2 = 2200
theta = np.array([1.4865, 5.4054, 9.0541, 12.027, 15.270, 18.514, 22.568,
25.405, 31.351, 34.054, 35.946, 37.973, 39.730, 41.216,
42.432, 43.514, 44.595, 45.541, 46.081, 46.622, 47.027,
47.568, 47.838, 48.108])
exp = np.array([0.18854, 0.18822, 0.18549, 0.18039, 0.17770, 0.17257,
0.16738, 0.16472, 0.16667, 0.16888, 0.17602, 0.18801,
0.20488, 0.22664, 0.24841, 0.27749, 0.31386, 0.35268,
0.39638, 0.43280, 0.47408, 0.53480, 0.58582, 0.64900])
Rpp = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, theta)
for ind, val in enumerate(Rpp):
assert np.abs(val - exp[ind])/exp[ind] < err
def test_smith_gidlow_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
vp2 = 4000
vs2 = 2000
p2 = 2200
theta = np.array([0.68966, 2.8879, 4.5690, 7.0259, 9.2241, 11.034, 13.233,
15.560, 17.888, 20.216, 22.155, 24.483, 26.810, 29.784,
32.112, 34.440, 36.250, 37.931, 39.741, 40.776, 42.069,
43.233, 44.267, 45.043,45.690, 46.336, 46.853, 47.371])
exp = np.array([0.17979, 0.17962, 0.17484, 0.17465, 0.17215, 0.16969,
0.16719, 0.16236, 0.15753, 0.15270, 0.15022, 0.14539,
0.14288, 0.14265, 0.14247, 0.14694, 0.15378, 0.16760,
0.18142, 0.19761, 0.21844, 0.24394, 0.27641, 0.30659,
0.34142, 0.38556, 0.42970, 0.48082])
Rpp = rppy.reflectivity.smith_gidlow(vp1, vs1, p1, vp2, vs2, p2, theta)
for ind, val in enumerate(Rpp):
assert np.abs(val - exp[ind])/exp[ind] < err
def test_ruger_vti_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
e1 = 0
d1 = 0
vp2 = 4000
vs2 = 2000
p2 = 2200
e2 = 0.1
d2 = 0.1
theta = np.array([1.8819, 4.8339, 8.3395, 11.107, 13.690, 16.642, 20.332,
22.546, 26.052, 31.771, 33.985, 38.229, 41.181, 43.579,
45.424, 47.454, 49.114, 51.144, 52.804, 54.096, 55.387,
56.679, 57.786, 58.524, 59.446, 60.185, 61.107, 61.845,
62.583, 63.137, 63.875, 64.428, 65.166, 65.535])
exp = np.array([0.19286, 0.19048, 0.19048, 0.18571, 0.18571, 0.18095,
0.18095, 0.17619, 0.17619, 0.17381, 0.17619, 0.18333,
0.19286, 0.20952, 0.22381, 0.24048, 0.26190, 0.29048,
0.31905, 0.34524, 0.37619, 0.40714, 0.44048, 0.46905,
0.50238, 0.53333, 0.57143, 0.60714, 0.65000, 0.68810,
0.72857, 0.76905, 0.81429, 0.86190])
Rpp = rppy.reflectivity.ruger_vti(vp1, vs1, p1, e1, d1,
vp2, vs2, p2, e2, d2, theta)
for ind, val in enumerate(Rpp):
assert np.abs(val - exp[ind])/exp[ind] < err
def test_ruger_hti_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
e1 = 0
d1 = 0
y1 = 0
vp2 = 4000
vs2 = 2000
p2 = 2200
e2 = 0.1
d2 = 0.1
y2 = 0.3
theta = 30
phi = np.array([1.2500, 4.9342, 8.6184, 11.842, 15.526, 19.211, 22.664,
25.888, 28.421, 30.724, 34.638, 38.092, 41.546, 45.461,
49.375, 53.289, 56.974, 60.888, 65.493, 69.408, 73.783,
79.079, 84.375, 89.211])
exp = np.array([0.19816, 0.19816, 0.19678, 0.19539, 0.19263, 0.19056,
0.18711, 0.18365, 0.18020, 0.17813, 0.17329, 0.16845,
0.16431, 0.15878, 0.15326, 0.14842, 0.14359, 0.13875,
0.13391, 0.12977, 0.12632, 0.12286, 0.12079, 0.12010])
for ind, phiv in enumerate(phi):
Rpp = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
theta, phiv)
assert np.abs(Rpp - exp[ind])/exp[ind] < err
def test_vavrycuk_hti_against_rokdoc():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
e1 = 0
d1 = 0
y1 = 0
vp2 = 4000
vs2 = 2000
p2 = 2200
e2 = 0.1
d2 = 0.1
y2 = 0.1
phi = 45
theta = np.array([1.1990, 4.5726, 7.8040, 10.309, 13.254, 15.908,
20.926, 23.583, 26.092, 28.450, 31.099, 33.891, 36.673,
38.569, 40.752, 42.055, 43.354, 44.504, 45.793, 46.793,
48.211, 49.054, 49.754, 50.740, 51.296, 51.991, 52.542,
53.086, 53.638])
exp = np.array([0.18952, 0.18858, 0.18654, 0.18301, 0.17914, 0.17490,
0.16566, 0.16068, 0.15606, 0.15253, 0.14937,
0.14732, 0.14781, 0.15008, 0.15382, 0.15788, 0.16304,
0.16855, 0.17626, 0.18285, 0.19493, 0.20407, 0.21248,
0.22272, 0.23039, 0.23989, 0.24901, 0.25960, 0.26836])
for ind, thetav in enumerate(theta):
Rpp = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
phi, thetav)
assert np.abs(Rpp - exp[ind])/exp[ind] < err
def test_ruger_HTI_ursenbach_values():
err = 0.05
vp1 = 2260
vs1 = 1428
p1 = 2600
e1 = 0
d1 = 0
y1 = 0
vp2 = 2370
vs2 = 1360
p2 = 2700
e2 = 0.05
d2 = 0.02
y2 = 0.1
theta = np.array([40, 40, 40, 40, 1, 1, 0.8], dtype=float)
phi = np.array([90, 60, 30, 0.00000001, 60, 30, 30], dtype=float)
exp = np.array([0.065, 0.072, 0.087, 0.096, 0.066, 0.066, 0.066], dtype=float)
for ind, phiv in enumerate(phi):
Rpp = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
vp2, vs2, p2, e2, d2, y2,
theta[ind], phi[ind])
assert np.abs(Rpp - exp[ind])/exp[ind] < err
def test_exact_orth_against_crewes():
err = 0.05
vp1 = 3000
vs1 = 1500
p1 = 2000
e1 = 0
d1 = 0
y1 = 0
chi1 = 0
C1 = rppy.reflectivity.Cij(vp1, vs1, p1, 0, 0, 0, e1, d1, y1, 0)
vp2 = 4000
vs2 = 2000
p2 = 2200
e2 = 0.1
d2 = 0.1
y2 = 0.3
chi2 = 0
C2 = rppy.reflectivity.Cij(vp2, vs2, p2, 0, 0, 0, e2, d2, y2, 0)
theta = np.array([30])
phi = np.array([2.55814, 8.83721, 14.65116, 19.30233, 23.48837, 26.97674,
30.69767, 33.72093, 37.44186, 41.16279, 45.58140, 49.76744,
53.95349, 59.76744, 64.41860, 70.93023, 77.20930, 83.72093,
87.67442])
exp = np.array([0.20977, 0.20851, 0.20600, 0.20349, 0.20056, 0.19763,
0.19428, 0.19093, 0.18716, 0.18298, 0.17795, 0.17335,
0.16916, 0.16288, 0.15828, 0.15367, 0.14949, 0.14740,
0.14656])
for ind, phiv in enumerate(phi):
Rpp = rppy.reflectivity.exact_ortho(C1, p1, C2, p2, chi1, chi2, phiv, theta)
assert np.abs(Rpp - exp[ind])/exp[ind] < err
# Test media.py
#def test_han_eberhart_phillips():
# assert 0 == 1
def test_kuster_toksoz():
err = 0.005
Km = 37
um = 44
Ki = 0
ui = 0
xi = 0.01
# Test spherical pores
si = 'sphere'
Kkt_exp = 36.4
ukt_exp = 43.088
em = rppy.media.kuster_toksoz(Km, um, Ki, ui, xi, si)
assert np.abs(Kkt_exp - em['K'])/Kkt_exp < err
assert np.abs(ukt_exp - em['u'])/ukt_exp < err
# Test needle pores
si = 'needle'
Kkt_exp = 36.324
ukt_exp = 42.894
em = rppy.media.kuster_toksoz(Km, um, Ki, ui, xi, si)
assert np.abs(Kkt_exp - em['K'])/Kkt_exp < err
assert np.abs(ukt_exp - em['u'])/ukt_exp < err
# Test penny pores
si = 'penny'
alpha = 0.01
Kkt_exp = 21.612
ukt_exp = 29.323
em = rppy.media.kuster_toksoz(Km, um, Ki, ui, xi, si, alpha=alpha)
print(em['K'])
print(em['u'])
assert np.abs(Kkt_exp - em['K'])/Kkt_exp < err
assert np.abs(ukt_exp - em['u'])/ukt_exp < err
def test_hashin_shtrikman():
err = 0.005
K = np.array([36, 75, 2.2])
u = np.array([45., 31., 0.])
f = np.array([0.584, 0.146, 0.270])
Kue = 26.9
Kle = 7.10
uue = 24.6
ule = 0
Ku, Kl, uu, ul = rppy.media.hashin_shtrikman(K, u, f)
assert np.abs(Ku - Kue)/Kue < err
assert np.abs(Kl - Kle)/Kue < err
assert np.abs(uu - uue)/Kue < err
assert np.abs(ul - ule)/Kue < err
#def test_voight_reuss_hill():
# assert 0 == 1
# ========================================
# Test fluids.py
#def test_ciz_shapiro():
# assert 0 == 1
def test_gassmann():
err = 0.005
Kfin = 0
K0 = 36
Kin = 12
phi = 0.2
# Saturate with gas
Kfout = 0.133
exp = 12.29
Kgas = rppy.fluid.gassmann(K0, Kin, Kfin, Kfout, phi)
assert np.abs(Kgas - exp)/exp < err
# Saturate with brine
Kfout = 3.013
exp = 17.6
Kbr = rppy.fluid.gassmann(K0, Kin, Kfin, Kfout, phi)
assert np.abs(Kbr - exp)/exp < err
def test_batzle_wang_brine():
err = 0.005
# Test low-pressure, low-temperature brine properties
T = 25
P = 5
S = 30000
expected_rho = 1.0186679
expected_Vp = 1535.572
fluid = rppy.fluid.batzle_wang(P, T, 'brine', S=S)
assert np.abs(fluid['rho'] - expected_rho)/expected_rho < err
assert np.abs(fluid['Vp'] - expected_Vp)/expected_Vp < err
def test_batzle_wang_oil():
err = 0.005
# Test low-pressure, low-temperature oil properties
T = 25
P = 5
G = 0.6
api = 21
Rg = 7
expected_rho = 0.9211315
expected_Vp = 1469.1498
fluid = rppy.fluid.batzle_wang(P, T, 'oil', G=G, api=api, Rg=Rg)
assert np.abs(fluid['rho'] - expected_rho)/expected_rho < err
assert
|
np.abs(fluid['Vp'] - expected_Vp)
|
numpy.abs
|
"""
This module contains the loss classes.
Specific losses are used for regression, binary classification or multiclass
classification.
"""
# Author: <NAME>
from abc import ABC, abstractmethod
import numpy as np
from scipy.special import expit
try: # logsumexp was moved from misc to special in 0.19
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp
from .common import Y_DTYPE
from .common import G_H_DTYPE
from ._loss import _update_gradients_least_squares
from ._loss import _update_gradients_least_absolute_deviation
from ._loss import _update_gradients_hessians_binary_crossentropy
from ._loss import _update_gradients_hessians_categorical_crossentropy
class BaseLoss(ABC):
"""Base class for a loss."""
# This variable indicates whether the loss requires the leaves values to
# be updated once the tree has been trained. The trees are trained to
# predict a Newton-Raphson step (see grower._finalize_leaf()). But for
# some losses (e.g. least absolute deviation) we need to adjust the tree
# values to account for the "line search" of the gradient descent
# procedure. See the original paper Greedy Function Approximation: A
# Gradient Boosting Machine by Friedman
# (https://statweb.stanford.edu/~jhf/ftp/trebst.pdf) for the theory.
need_update_leaves_values = False
def init_gradients_and_hessians(self, n_samples, prediction_dim):
"""Return initial gradients and hessians.
Unless hessians are constant, arrays are initialized with undefined
values.
Parameters
----------
n_samples : int
The number of samples passed to `fit()`.
prediction_dim : int
The dimension of a raw prediction, i.e. the number of trees
built at each iteration. Equals 1 for regression and binary
classification, or K where K is the number of classes for
multiclass classification.
Returns
-------
gradients : ndarray, shape (prediction_dim, n_samples)
The initial gradients. The array is not initialized.
hessians : ndarray, shape (prediction_dim, n_samples)
            If hessians are constant (e.g. for `LeastSquares` loss), the
array is initialized to ``1``. Otherwise, the array is allocated
without being initialized.
"""
shape = (prediction_dim, n_samples)
gradients = np.empty(shape=shape, dtype=G_H_DTYPE)
if self.hessians_are_constant:
# If the hessians are constant, we consider they are equal to 1.
# - This is correct for the half LS loss
# - For LAD loss, hessians are actually 0, but they are always
# ignored anyway.
hessians = np.ones(shape=(1, 1), dtype=G_H_DTYPE)
else:
hessians = np.empty(shape=shape, dtype=G_H_DTYPE)
return gradients, hessians
@abstractmethod
def get_baseline_prediction(self, y_train, prediction_dim):
"""Return initial predictions (before the first iteration).
Parameters
----------
y_train : ndarray, shape (n_samples,)
The target training values.
prediction_dim : int
The dimension of one prediction: 1 for binary classification and
regression, n_classes for multiclass classification.
Returns
-------
baseline_prediction : float or ndarray, shape (1, prediction_dim)
The baseline prediction.
"""
@abstractmethod
def update_gradients_and_hessians(self, gradients, hessians, y_true,
raw_predictions):
"""Update gradients and hessians arrays, inplace.
The gradients (resp. hessians) are the first (resp. second) order
derivatives of the loss for each sample with respect to the
predictions of model, evaluated at iteration ``i - 1``.
Parameters
----------
gradients : ndarray, shape (prediction_dim, n_samples)
The gradients (treated as OUT array).
hessians : ndarray, shape (prediction_dim, n_samples) or \
(1,)
The hessians (treated as OUT array).
y_true : ndarray, shape (n_samples,)
The true target values or each training sample.
raw_predictions : ndarray, shape (prediction_dim, n_samples)
The raw_predictions (i.e. values from the trees) of the tree
ensemble at iteration ``i - 1``.
"""
class LeastSquares(BaseLoss):
"""Least squares loss, for regression.
For a given sample x_i, least squares loss is defined as::
loss(x_i) = 0.5 * (y_true_i - raw_pred_i)**2
    This actually computes the half least squares loss to simplify
the computation of the gradients and get a unit hessian (and be consistent
with what is done in LightGBM).
"""
hessians_are_constant = True
def __call__(self, y_true, raw_predictions, average=True):
# shape (1, n_samples) --> (n_samples,). reshape(-1) is more likely to
# return a view.
raw_predictions = raw_predictions.reshape(-1)
loss = 0.5 * np.power(y_true - raw_predictions, 2)
return loss.mean() if average else loss
def get_baseline_prediction(self, y_train, prediction_dim):
return
|
np.mean(y_train)
|
numpy.mean
|
r"""
Authors: <NAME>, <NAME>
Tools for normal form games.
Definitions and Basic Concepts
------------------------------
An :math:`N`-player *normal form game* :math:`g = (I, (A_i)_{i \in I},
(u_i)_{i \in I})` consists of
- the set of *players* :math:`I = \{0, \ldots, N-1\}`,
- the set of *actions* :math:`A_i = \{0, \ldots, n_i-1\}` for each
player :math:`i \in I`, and
- the *payoff function* :math:`u_i \colon A_i \times A_{i+1} \times
\cdots \times A_{i+N-1} \to \mathbb{R}` for each player :math:`i \in
I`,
where :math:`i+j` is understood modulo :math:`N`. Note that we adopt the
convention that the 0-th argument of the payoff function :math:`u_i` is
player :math:`i`'s own action and the :math:`j`-th argument is player
(:math:`i+j`)'s action (modulo :math:`N`). A mixed action for player
:math:`i` is a probability distribution on :math:`A_i` (while an element
of :math:`A_i` is referred to as a pure action). A pure action
:math:`a_i \in A_i` is identified with the mixed action that assigns
probability one to :math:`a_i`. Denote the set of mixed actions of
player :math:`i` by :math:`X_i`. We also denote :math:`A_{-i} = A_{i+1}
\times \cdots \times A_{i+N-1}` and :math:`X_{-i} = X_{i+1} \times
\cdots \times X_{i+N-1}`.
The (pure-action) *best response correspondence* :math:`b_i \colon
X_{-i} \to A_i` for each player :math:`i` is defined by
.. math::
b_i(x_{-i}) = \{a_i \in A_i \mid
u_i(a_i, x_{-i}) \geq u_i(a_i', x_{-i})
\ \forall\,a_i' \in A_i\},
where :math:`u_i(a_i, x_{-i}) = \sum_{a_{-i} \in A_{-i}} u_i(a_i,
a_{-i}) \prod_{j=1}^{N-1} x_{i+j}(a_j)` is the expected payoff to action
:math:`a_i` against mixed actions :math:`x_{-i}`. A profile of mixed
actions :math:`x^* \in X_0 \times \cdots \times X_{N-1}` is a *Nash
equilibrium* if for all :math:`i \in I` and :math:`a_i \in A_i`,
.. math::
x_i^*(a_i) > 0 \Rightarrow a_i \in b_i(x_{-i}^*),
or equivalently, :math:`x_i^* \cdot v_i(x_{-i}^*) \geq x_i \cdot
v_i(x_{-i}^*)` for all :math:`x_i \in X_i`, where :math:`v_i(x_{-i})` is
the vector of player :math:`i`'s payoffs when the opponent players play
mixed actions :math:`x_{-i}`.
Creating a NormalFormGame
-------------------------
There are three ways to construct a `NormalFormGame` instance.
The first is to pass an array of payoffs for all the players:
>>> matching_pennies_bimatrix = [[(1, -1), (-1, 1)], [(-1, 1), (1, -1)]]
>>> g = NormalFormGame(matching_pennies_bimatrix)
>>> print(g.players[0])
Player in a 2-player normal form game with payoff array:
[[ 1, -1],
[-1, 1]]
>>> print(g.players[1])
Player in a 2-player normal form game with payoff array:
[[-1, 1],
[ 1, -1]]
If a square matrix (2-dimensional array) is given, then it is considered
to be a symmetric two-player game:
>>> coordination_game_matrix = [[4, 0], [3, 2]]
>>> g = NormalFormGame(coordination_game_matrix)
>>> print(g)
2-player NormalFormGame with payoff profile array:
[[[4, 4], [0, 3]],
[[3, 0], [2, 2]]]
The second is to specify the sizes of the action sets of the players,
which gives a `NormalFormGame` instance filled with payoff zeros, and
then set the payoff values to each entry:
>>> g = NormalFormGame((2, 2))
>>> print(g)
2-player NormalFormGame with payoff profile array:
[[[ 0., 0.], [ 0., 0.]],
[[ 0., 0.], [ 0., 0.]]]
>>> g[0, 0] = 1, 1
>>> g[0, 1] = -2, 3
>>> g[1, 0] = 3, -2
>>> print(g)
2-player NormalFormGame with payoff profile array:
[[[ 1., 1.], [-2., 3.]],
[[ 3., -2.], [ 0., 0.]]]
The third is to pass an array of `Player` instances, as explained in the
next section.
Creating a Player
-----------------
A `Player` instance is created by passing a payoff array:
>>> player0 = Player([[3, 1], [0, 2]])
>>> player0.payoff_array
array([[3, 1],
[0, 2]])
Passing an array of `Player` instances is the third way to create a
`NormalFormGame` instance.
>>> player1 = Player([[2, 0], [1, 3]])
>>> player1.payoff_array
array([[2, 0],
[1, 3]])
>>> g = NormalFormGame((player0, player1))
>>> print(g)
2-player NormalFormGame with payoff profile array:
[[[3, 2], [1, 1]],
[[0, 0], [2, 3]]]
Beware that in `payoff_array[h, k]`, `h` refers to the player's own
action, while `k` refers to the opponent player's action.
"""
import re
import numbers
import numpy as np
from numba import jit
from ..util import check_random_state
class Player(object):
"""
Class representing a player in an N-player normal form game.
Parameters
----------
payoff_array : array_like(float)
Array representing the player's payoff function, where
`payoff_array[a_0, a_1, ..., a_{N-1}]` is the payoff to the
player when the player plays action `a_0` while his N-1
opponents play actions `a_1`, ..., `a_{N-1}`, respectively.
Attributes
----------
payoff_array : ndarray(float, ndim=N)
See Parameters.
num_actions : scalar(int)
The number of actions available to the player.
num_opponents : scalar(int)
The number of opponent players.
dtype : dtype
Data type of the elements of `payoff_array`.
tol : scalar(float), default=1e-8
Default tolerance value used in determining best responses.
"""
def __init__(self, payoff_array):
self.payoff_array = np.asarray(payoff_array, order='C')
if self.payoff_array.ndim == 0:
raise ValueError('payoff_array must be an array_like')
self.num_opponents = self.payoff_array.ndim - 1
self.num_actions = self.payoff_array.shape[0]
self.dtype = self.payoff_array.dtype
self.tol = 1e-8
def __repr__(self):
# From numpy.matrix.__repr__
# Print also dtype, except for int64, float64
s = repr(self.payoff_array).replace('array', 'Player')
l = s.splitlines()
for i in range(1, len(l)):
if l[i]:
l[i] = ' ' + l[i]
return '\n'.join(l)
def __str__(self):
N = self.num_opponents + 1
s = 'Player in a {N}-player normal form game'.format(N=N)
s += ' with payoff array:\n'
s += np.array2string(self.payoff_array, separator=', ')
return s
def payoff_vector(self, opponents_actions):
"""
Return an array of payoff values, one for each own action, given
a profile of the opponents' actions.
Parameters
----------
opponents_actions : see `best_response`.
Returns
-------
payoff_vector : ndarray(float, ndim=1)
An array representing the player's payoff vector given the
profile of the opponents' actions.
"""
def reduce_last_player(payoff_array, action):
"""
Given `payoff_array` with ndim=M, return the payoff array
with ndim=M-1 fixing the last player's action to be `action`.
"""
if isinstance(action, numbers.Integral): # pure action
return payoff_array.take(action, axis=-1)
else: # mixed action
return payoff_array.dot(action)
if self.num_opponents == 1:
payoff_vector = \
reduce_last_player(self.payoff_array, opponents_actions)
elif self.num_opponents >= 2:
payoff_vector = self.payoff_array
for i in reversed(range(self.num_opponents)):
payoff_vector = \
reduce_last_player(payoff_vector, opponents_actions[i])
else: # Trivial case with self.num_opponents == 0
payoff_vector = self.payoff_array
return payoff_vector
def is_best_response(self, own_action, opponents_actions, tol=None):
"""
Return True if `own_action` is a best response to
`opponents_actions`.
Parameters
----------
own_action : scalar(int) or array_like(float, ndim=1)
An integer representing a pure action, or an array of floats
representing a mixed action.
opponents_actions : see `best_response`
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
Returns
-------
bool
True if `own_action` is a best response to
`opponents_actions`; False otherwise.
"""
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
payoff_max = payoff_vector.max()
if isinstance(own_action, numbers.Integral):
return payoff_vector[own_action] >= payoff_max - tol
else:
return np.dot(own_action, payoff_vector) >= payoff_max - tol
def best_response(self, opponents_actions, tie_breaking='smallest',
payoff_perturbation=None, tol=None, random_state=None):
"""
Return the best response action(s) to `opponents_actions`.
Parameters
----------
opponents_actions : scalar(int) or array_like
A profile of N-1 opponents' actions, represented by either
scalar(int), array_like(float), array_like(int), or
array_like(array_like(float)). If N=2, then it must be a
scalar of integer (in which case it is treated as the
opponent's pure action) or a 1-dimensional array of floats
(in which case it is treated as the opponent's mixed
action). If N>2, then it must be an array of N-1 objects,
where each object must be an integer (pure action) or an
array of floats (mixed action).
tie_breaking : str, optional(default='smallest')
str in {'smallest', 'random', False}. Control how, or
whether, to break a tie (see Returns for details).
payoff_perturbation : array_like(float), optional(default=None)
Array of length equal to the number of actions of the player
containing the values ("noises") to be added to the payoffs
in determining the best response.
tol : scalar(float), optional(default=None)
Tolerance level used in determining best responses. If None,
default to the value of the `tol` attribute.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used. Relevant only when tie_breaking='random'.
Returns
-------
scalar(int) or ndarray(int, ndim=1)
If tie_breaking=False, returns an array containing all the
best response pure actions. If tie_breaking='smallest',
returns the best response action with the smallest index; if
tie_breaking='random', returns an action randomly chosen
from the best response actions.
"""
if tol is None:
tol = self.tol
payoff_vector = self.payoff_vector(opponents_actions)
if payoff_perturbation is not None:
try:
payoff_vector += payoff_perturbation
except TypeError: # type mismatch
payoff_vector = payoff_vector + payoff_perturbation
best_responses = \
np.where(payoff_vector >= payoff_vector.max() - tol)[0]
if tie_breaking == 'smallest':
return best_responses[0]
elif tie_breaking == 'random':
return self.random_choice(best_responses,
random_state=random_state)
elif tie_breaking is False:
return best_responses
else:
msg = "tie_breaking must be one of 'smallest', 'random', or False"
raise ValueError(msg)
def random_choice(self, actions=None, random_state=None):
"""
Return a pure action chosen randomly from `actions`.
Parameters
----------
actions : array_like(int), optional(default=None)
An array of integers representing pure actions.
random_state : int or np.random.RandomState, optional
Random seed (integer) or np.random.RandomState instance to
set the initial state of the random number generator for
reproducibility. If None, a randomly initialized RandomState
is used.
Returns
-------
scalar(int)
If `actions` is given, returns an integer representing a
pure action chosen randomly from `actions`; if not, an
action is chosen randomly from the player's all actions.
"""
random_state = check_random_state(random_state)
if actions is not None:
n = len(actions)
else:
n = self.num_actions
if n == 1:
idx = 0
else:
idx = random_state.randint(n)
if actions is not None:
return actions[idx]
else:
return idx
class NormalFormGame(object):
"""
Class representing an N-player normal form game.
Parameters
----------
data : array_like of Player, int (ndim=1), or float (ndim=2 or N+1)
Data to initialize a NormalFormGame. `data` may be an array of
Players, in which case the shapes of the Players' payoff arrays
must be consistent. If `data` is an array of N integers, then
these integers are treated as the numbers of actions of the N
players and a NormalFormGame is created consisting of payoffs
all 0 with `data[i]` actions for each player `i`. `data` may
also be an (N+1)-dimensional array representing payoff profiles.
If `data` is a square matrix (2-dimensional array), then the
game will be a symmetric two-player game where the payoff matrix
of each player is given by the input matrix.
dtype : data-type, optional(default=None)
Relevant only when `data` is an array of integers. Data type of
the players' payoff arrays. If not supplied, default to
numpy.float64.
Attributes
----------
players : tuple(Player)
Tuple of the Player instances of the game.
N : scalar(int)
The number of players.
nums_actions : tuple(int)
Tuple of the numbers of actions, one for each player.
"""
def __init__(self, data, dtype=None):
# data represents an array_like of Players
if hasattr(data, '__getitem__') and isinstance(data[0], Player):
N = len(data)
# Check that the shapes of the payoff arrays are consistent
# and the dtypes coincide
shape_0 = data[0].payoff_array.shape
dtype_0 = data[0].payoff_array.dtype
for i in range(1, N):
shape = data[i].payoff_array.shape
if not (
len(shape) == N and
shape == shape_0[i:] + shape_0[:i]
):
raise ValueError(
'shapes of payoff arrays must be consistent'
)
dtype = data[i].payoff_array.dtype
if dtype != dtype_0:
raise ValueError(
'dtypes of payoff arrays must coincide'
)
self.players = tuple(data)
self.dtype = dtype_0
# data represents action sizes or a payoff array
else:
data = np.asarray(data)
if data.ndim == 0: # data represents action size
# Trivial game consisting of one player
N = 1
self.players = (Player(
|
np.zeros(data)
|
numpy.zeros
|
import unittest
import numpy.testing as testing
import numpy as np
import healpy as hp
from numpy import random
import tempfile
import os
import shutil
import pytest
import healsparse
from healsparse import WIDE_MASK
class WideMasksTestCase(unittest.TestCase):
def test_make_wide_mask_map(self):
"""
Test making a wide mask map.
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
# Test expected errors on creating maps
# Create empty maps to test bit width
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=7)
self.assertTrue(sparse_map.is_wide_mask_map)
self.assertEqual(sparse_map.wide_mask_maxbits, 8)
self.assertEqual(sparse_map._sparse_map.shape, (4, 1))
self.assertEqual(sparse_map._sentinel, 0)
# Set bits and retrieve them
pixel = np.arange(4000, 20000)
sparse_map.set_bits_pix(pixel, [4])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [6]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4, 6]), True)
pospix = hp.ang2pix(nside_map, ra, dec, lonlat=True, nest=True)
inds = np.searchsorted(pixel, pospix)
b, = np.where((inds > 0) & (inds < pixel.size))
comp_arr = np.zeros(pospix.size, dtype=np.bool_)
comp_arr[b] = True
testing.assert_array_equal(sparse_map.check_bits_pos(ra, dec, [4], lonlat=True), comp_arr)
sparse_map.clear_bits_pix(pixel, [4])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4]), False)
sparse_map.set_bits_pix(pixel, [4, 6])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [6]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [7]), False)
# This just makes sure that the size is correct
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=8)
self.assertEqual(sparse_map.wide_mask_maxbits, 8)
# And now a double-wide to test
# Note that 9 will create a 16 bit mask
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=9)
self.assertEqual(sparse_map.wide_mask_maxbits, 16)
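        # (Judging from the assertions in this test: maxbits is rounded up to a whole
        # number of 8-bit fields, and each field is stored as one extra column of the
        # sparse map, so wide_mask_maxbits=9 yields 16 bits across 2 columns.)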
sparse_map.set_bits_pix(pixel, [12])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [12]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [15]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [4, 12]), True)
sparse_map.set_bits_pix(pixel, [2, 3, 5, 15])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [2]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [3]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [15]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [14]), False)
# Clear a bit in the lower field, ensure upper field is untouched.
sparse_map.clear_bits_pix(pixel, [5])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [5]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [15]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [14]), False)
# Clear multiple bits in the lower field, ensure upper field is untouched.
sparse_map.clear_bits_pix(pixel, [2, 3])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [2]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [3]), False)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [15]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [14]), False)
# This makes sure the inferred size is correct
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=128)
self.assertEqual(sparse_map.wide_mask_maxbits, 128)
# And do a triple-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=20)
self.assertEqual(sparse_map.wide_mask_maxbits, 24)
sparse_map.set_bits_pix(pixel, [5, 10, 20])
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [10]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [20]), True)
testing.assert_array_equal(sparse_map.check_bits_pix(pixel, [21]), False)
def test_wide_mask_map_fits_io(self):
"""
        Test FITS I/O with wide mask maps.
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Test with single-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=8)
pixel = np.arange(4000, 20000)
sparse_map.set_bits_pix(pixel, [5])
fname = os.path.join(self.test_dir, 'healsparse_map.hs')
sparse_map.write(fname, clobber=True)
sparse_map_in = healsparse.HealSparseMap.read(fname)
self.assertTrue(sparse_map_in.is_wide_mask_map)
self.assertEqual(sparse_map_in.wide_mask_maxbits, 8)
self.assertEqual(sparse_map_in._sparse_map.shape[1], 1)
self.assertEqual(sparse_map_in._wide_mask_width, 1)
self.assertEqual(sparse_map_in._sentinel, 0)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [7]), False)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5, 7]), True)
pospix = hp.ang2pix(nside_map, ra, dec, lonlat=True, nest=True)
inds = np.searchsorted(pixel, pospix)
b, = np.where((inds > 0) & (inds < pixel.size))
comp_arr = np.zeros(pospix.size, dtype=np.bool_)
comp_arr[b] = True
testing.assert_array_equal(sparse_map_in.check_bits_pos(ra, dec, [5], lonlat=True), comp_arr)
# And read a partial map
sparse_map_in_partial = healsparse.HealSparseMap.read(fname, pixels=[1000, 1002])
self.assertTrue(sparse_map_in_partial.is_wide_mask_map)
self.assertEqual(sparse_map_in_partial.wide_mask_maxbits, 8)
self.assertEqual(sparse_map_in_partial._sparse_map.shape[1], 1)
self.assertEqual(sparse_map_in_partial._wide_mask_width, 1)
self.assertEqual(sparse_map_in_partial._sentinel, 0)
cov_pixels = sparse_map._cov_map.cov_pixels(pixel)
pixel_sub = pixel[(cov_pixels == 1000) | (cov_pixels == 1002)]
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [7]), False)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5, 7]), True)
# Test with double-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=16)
pixel = np.arange(4000, 20000)
sparse_map.set_bits_pix(pixel, [5, 10])
fname = os.path.join(self.test_dir, 'healsparse_map.hs')
sparse_map.write(fname, clobber=True)
sparse_map_in = healsparse.HealSparseMap.read(fname)
self.assertTrue(sparse_map_in.is_wide_mask_map)
self.assertEqual(sparse_map_in.wide_mask_maxbits, 16)
self.assertEqual(sparse_map_in._sparse_map.shape[1], 2)
self.assertEqual(sparse_map_in._wide_mask_width, 2)
self.assertEqual(sparse_map_in._sentinel, 0)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [10]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [4]), False)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [12]), False)
# And read a partial double-wide map
sparse_map_in_partial = healsparse.HealSparseMap.read(fname, pixels=[1000, 1002])
self.assertTrue(sparse_map_in_partial.is_wide_mask_map)
self.assertEqual(sparse_map_in_partial.wide_mask_maxbits, 16)
self.assertEqual(sparse_map_in_partial._sparse_map.shape[1], 2)
self.assertEqual(sparse_map_in_partial._wide_mask_width, 2)
self.assertEqual(sparse_map_in_partial._sentinel, 0)
cov_pixels = sparse_map._cov_map.cov_pixels(pixel)
pixel_sub = pixel[(cov_pixels == 1000) | (cov_pixels == 1002)]
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [10]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [4]), False)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [12]), False)
@pytest.mark.skipif(not healsparse.parquet_shim.use_pyarrow, reason='Requires pyarrow')
def test_wide_mask_map_parquet_io(self):
"""
        Test Parquet I/O with wide mask maps.
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
n_rand = 1000
ra = np.random.random(n_rand) * 360.0
dec = np.random.random(n_rand) * 180.0 - 90.0
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Test with single-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=8)
pixel = np.arange(4000, 20000)
sparse_map.set_bits_pix(pixel, [5])
fname = os.path.join(self.test_dir, 'healsparse_map.hsparquet')
sparse_map.write(fname, format='parquet')
sparse_map_in = healsparse.HealSparseMap.read(fname)
self.assertTrue(sparse_map_in.is_wide_mask_map)
self.assertEqual(sparse_map_in.wide_mask_maxbits, 8)
self.assertEqual(sparse_map_in._sparse_map.shape[1], 1)
self.assertEqual(sparse_map_in._wide_mask_width, 1)
self.assertEqual(sparse_map_in._sentinel, 0)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [7]), False)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5, 7]), True)
pospix = hp.ang2pix(nside_map, ra, dec, lonlat=True, nest=True)
inds = np.searchsorted(pixel, pospix)
b, = np.where((inds > 0) & (inds < pixel.size))
comp_arr = np.zeros(pospix.size, dtype=np.bool_)
comp_arr[b] = True
testing.assert_array_equal(sparse_map_in.check_bits_pos(ra, dec, [5], lonlat=True), comp_arr)
# And read a partial map
sparse_map_in_partial = healsparse.HealSparseMap.read(fname, pixels=[1000, 1002])
self.assertTrue(sparse_map_in_partial.is_wide_mask_map)
self.assertEqual(sparse_map_in_partial.wide_mask_maxbits, 8)
self.assertEqual(sparse_map_in_partial._sparse_map.shape[1], 1)
self.assertEqual(sparse_map_in_partial._wide_mask_width, 1)
self.assertEqual(sparse_map_in_partial._sentinel, 0)
cov_pixels = sparse_map._cov_map.cov_pixels(pixel)
pixel_sub = pixel[(cov_pixels == 1000) | (cov_pixels == 1002)]
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [7]), False)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5, 7]), True)
# Test with double-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=16)
pixel = np.arange(4000, 20000)
sparse_map.set_bits_pix(pixel, [5, 10])
fname = os.path.join(self.test_dir, 'healsparse_map2.hsparquet')
sparse_map.write(fname, format='parquet')
sparse_map_in = healsparse.HealSparseMap.read(fname)
self.assertTrue(sparse_map_in.is_wide_mask_map)
self.assertEqual(sparse_map_in.wide_mask_maxbits, 16)
self.assertEqual(sparse_map_in._sparse_map.shape[1], 2)
self.assertEqual(sparse_map_in._wide_mask_width, 2)
self.assertEqual(sparse_map_in._sentinel, 0)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [5]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [10]), True)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [4]), False)
testing.assert_array_equal(sparse_map_in.check_bits_pix(pixel, [12]), False)
# And read a partial double-wide map
sparse_map_in_partial = healsparse.HealSparseMap.read(fname, pixels=[1000, 1002])
self.assertTrue(sparse_map_in_partial.is_wide_mask_map)
self.assertEqual(sparse_map_in_partial.wide_mask_maxbits, 16)
self.assertEqual(sparse_map_in_partial._sparse_map.shape[1], 2)
self.assertEqual(sparse_map_in_partial._wide_mask_width, 2)
self.assertEqual(sparse_map_in_partial._sentinel, 0)
cov_pixels = sparse_map._cov_map.cov_pixels(pixel)
pixel_sub = pixel[(cov_pixels == 1000) | (cov_pixels == 1002)]
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [5]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [10]), True)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [4]), False)
testing.assert_array_equal(sparse_map_in_partial.check_bits_pix(pixel_sub, [12]), False)
def test_wide_mask_map_fits_io_compression(self):
"""
        Test wide mask FITS I/O with and without compression.
"""
nside_coverage = 32
nside_map = 4096
self.test_dir = tempfile.mkdtemp(dir='./', prefix='TestHealSparse-')
# Test with double-wide
sparse_map = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map,
WIDE_MASK, wide_mask_maxbits=16)
sparse_map.set_bits_pix(np.arange(20000, 50000), [5])
sparse_map.set_bits_pix(np.arange(120000, 150000), [5, 10])
fname_comp = os.path.join(self.test_dir, 'test_mask_map_compressed.hs')
sparse_map.write(fname_comp, clobber=True, nocompress=False)
fname_nocomp = os.path.join(self.test_dir, 'test_mask_map_notcompressed.hs')
sparse_map.write(fname_nocomp, clobber=True, nocompress=True)
self.assertGreater(os.path.getsize(fname_nocomp), os.path.getsize(fname_comp))
sparse_map_in_comp = healsparse.HealSparseMap.read(fname_comp)
sparse_map_in_nocomp = healsparse.HealSparseMap.read(fname_nocomp)
testing.assert_array_equal(sparse_map.valid_pixels,
sparse_map_in_nocomp.valid_pixels)
testing.assert_array_equal(sparse_map[sparse_map.valid_pixels],
sparse_map_in_nocomp[sparse_map.valid_pixels])
testing.assert_array_equal(sparse_map[0: 10],
sparse_map_in_nocomp[0: 10])
testing.assert_array_equal(sparse_map.valid_pixels,
sparse_map_in_comp.valid_pixels)
testing.assert_array_equal(sparse_map[sparse_map.valid_pixels],
sparse_map_in_comp[sparse_map.valid_pixels])
testing.assert_array_equal(sparse_map[0: 10],
sparse_map_in_comp[0: 10])
def test_wide_mask_or(self):
"""
        Test wide mask ORing.
"""
random.seed(seed=12345)
nside_coverage = 32
nside_map = 64
sparse_map1 = healsparse.HealSparseMap.make_empty(nside_coverage, nside_map, WIDE_MASK,
wide_mask_maxbits=100)
pixel1 = np.arange(4000, 20000)
pixel1 =
|
np.delete(pixel1, 15000)
|
numpy.delete
|
"""
<NAME>, <NAME>, <NAME>: Kernels on Graphs as Proximity Measures
https://hal.inria.fr/hal-01647915/document
"""
import json
import logging
import os
import unittest
from abc import ABC
from os.path import join as pj
import networkx as nx
import networkx.readwrite.json_graph as jg
import numpy as np
from joblib import Parallel, delayed
from tqdm import tqdm
from pygkernels import util
from pygkernels.cluster import SpectralClustering_rubanov
from pygkernels.data import Samples
from pygkernels.measure import scaler, logKatz_H, logComm_H, logHeat_H, logFor_H, logPPR_H, logModifPPR_H, logNHeat_H, \
logHeatPR_H
from pygkernels.score import FC
from tests.article_comparison._kernel_rubanov import Katz_R, Estrada_R, Heat_R, RegularizedLaplacian_R, logPPR_R, \
logModifPPR_R, logHeatPR_R
from tests.article_comparison._rubanov_sbm_model import RubanovStochasticBlockModel
class TestNewMeasuresEqualuty(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
util.configure_logging()
self.graph = Samples.diploma_matrix
def test_katz(self):
walk = logKatz_H(self.graph)
katz = Katz_R(self.graph)
for param in scaler.Rho(self.graph).scale_list(np.linspace(0.1, 0.9, 50)):
self.assertTrue(np.allclose(walk.get_K(param).ravel(), katz.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_estrada(self):
comm = logComm_H(self.graph)
estrada = Estrada_R(self.graph)
for param in scaler.Fraction().scale_list(np.linspace(0.1, 0.7, 50)):
self.assertTrue(np.allclose(comm.get_K(param).ravel(), estrada.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_heat(self):
heat = logHeat_H(self.graph)
heat_rubanov = Heat_R(self.graph)
for param in scaler.Fraction().scale_list(np.linspace(0.1, 0.7, 50)):
self.assertTrue(np.allclose(heat.get_K(param).ravel(), heat_rubanov.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_regularized_laplacian(self):
forest = logFor_H(self.graph)
reg_laplacian = RegularizedLaplacian_R(self.graph)
for param in scaler.Fraction().scale_list(np.linspace(0.1, 0.9, 50)):
self.assertTrue(np.allclose(forest.get_K(param).ravel(), reg_laplacian.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_logPPR(self):
logppr = logPPR_H(self.graph)
ppr_rubanov = logPPR_R(self.graph)
for param in scaler.Linear().scale_list(np.linspace(0.0, 1.0, 50)[1:-1]):
self.assertTrue(np.allclose(logppr.get_K(param).ravel(), ppr_rubanov.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_logModifPPR(self):
logppr = logModifPPR_H(self.graph)
ppr_rubanov = logModifPPR_R(self.graph)
for param in scaler.Linear().scale_list(np.linspace(0.0, 0.9, 50)[1:-1]):
self.assertTrue(np.allclose(logppr.get_K(param).ravel(), ppr_rubanov.get_K(param).ravel(), atol=0.0001),
f'error in param={param:0.3f}')
def test_logHeatPPR(self):
logppr = logHeatPR_H(self.graph)
ppr_rubanov = logHeatPR_R(self.graph)
for param in scaler.Fraction().scale_list(
|
np.linspace(0.0, 0.7, 50)
|
numpy.linspace
|
#!/usr/bin/env python
# coding: utf-8
# # Temporal-Difference Methods
#
# In this notebook, you will write your own implementations of many Temporal-Difference (TD) methods.
#
# While we have provided some starter code, you are welcome to erase these hints and write your code from scratch.
#
# ---
#
# ### Part 0: Explore CliffWalkingEnv
#
# We begin by importing the necessary packages.
# In[1]:
import sys
import gym
import numpy as np
import random
import math
from collections import defaultdict, deque
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import check_test
from plot_utils import plot_values
# Use the code cell below to create an instance of the [CliffWalking](https://github.com/openai/gym/blob/master/gym/envs/toy_text/cliffwalking.py) environment.
# In[2]:
env = gym.make('CliffWalking-v0')
# The agent moves through a $4\times 12$ gridworld, with states numbered as follows:
# ```
# [[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
# [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
# [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35],
# [36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47]]
# ```
# At the start of any episode, state `36` is the initial state. State `47` is the only terminal state, and the cliff corresponds to states `37` through `46`.
#
# The agent has 4 potential actions:
# ```
# UP = 0
# RIGHT = 1
# DOWN = 2
# LEFT = 3
# ```
#
# Thus, $\mathcal{S}^+=\{0, 1, \ldots, 47\}$, and $\mathcal{A} =\{0, 1, 2, 3\}$. Verify this by running the code cell below.
# In[3]:
print(env.action_space)
print(env.observation_space)
# In this mini-project, we will build towards finding the optimal policy for the CliffWalking environment. The optimal state-value function is visualized below. Please take the time now to make sure that you understand _why_ this is the optimal state-value function.
#
# _**Note**: You can safely ignore the values of the cliff "states" as these are not true states from which the agent can make decisions. For the cliff "states", the state-value function is not well-defined._
# In[4]:
# define the optimal state-value function
V_opt = np.zeros((4,12))
V_opt[0][0:13] = -np.arange(3, 15)[::-1]
V_opt[1][0:13] = -np.arange(3, 15)[::-1] + 1
V_opt[2][0:13] = -np.arange(3, 15)[::-1] + 2
V_opt[3][0] = -13
plot_values(V_opt)
# ### Part 1: TD Control: Sarsa
#
# In this section, you will write your own implementation of the Sarsa control algorithm.
#
# Your algorithm has four arguments:
# - `env`: This is an instance of an OpenAI Gym environment.
# - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
# - `alpha`: This is the step-size parameter for the update step.
# - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
#
# The algorithm returns as output:
# - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
#
# Please complete the function in the code cell below.
#
# (_Feel free to define additional functions to help you to organize your code._)
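# For reference, the update implemented by `update_Q_sarsa` below is the standard Sarsa rule:
#
# $$Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big(R_{t+1} + \gamma Q(S_{t+1}, A_{t+1}) - Q(S_t, A_t)\big)$$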
# In[5]:
def update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state=None, next_action=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
# get value of state, action pair at next time step
Qsa_next = Q[next_state][next_action] if next_state is not None else 0
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
def epsilon_greedy(Q, state, nA, eps):
"""Selects epsilon-greedy action for supplied state.
Params
======
Q (dictionary): action-value function
state (int): current state
        nA (int): number of actions in the environment
eps (float): epsilon
"""
    if random.random() > eps: # select greedy action with probability 1 - epsilon
return np.argmax(Q[state])
else: # otherwise, select an action randomly
return random.choice(np.arange(env.action_space.n))
# In[6]:
def sarsa(env, num_episodes, alpha, gamma=1.0, plot_every=100):
nA = env.action_space.n # number of actions
Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
while True:
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
if not done:
next_action = epsilon_greedy(Q, next_state, nA, eps) # epsilon-greedy action
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, state, action, reward, next_state, next_action)
state = next_state # S <- S'
action = next_action # A <- A'
if done:
Q[state][action] = update_Q_sarsa(alpha, gamma, Q, state, action, reward)
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
# Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
#
# If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
# In[7]:
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsa = sarsa(env, 5000, .01)
# print the estimated optimal policy
policy_sarsa = np.array([np.argmax(Q_sarsa[key]) if key in Q_sarsa else -1 for key in np.arange(48)]).reshape(4,12)
check_test.run_check('td_control_check', policy_sarsa)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsa)
# plot the estimated optimal state-value function
V_sarsa = ([np.max(Q_sarsa[key]) if key in Q_sarsa else 0 for key in np.arange(48)])
plot_values(V_sarsa)
# ### Part 2: TD Control: Q-learning
#
# In this section, you will write your own implementation of the Q-learning control algorithm.
#
# Your algorithm has four arguments:
# - `env`: This is an instance of an OpenAI Gym environment.
# - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
# - `alpha`: This is the step-size parameter for the update step.
# - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
#
# The algorithm returns as output:
# - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
#
# Please complete the function in the code cell below.
#
# (_Feel free to define additional functions to help you to organize your code._)
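# For reference, the Q-learning (Sarsamax) update implemented below replaces the on-policy bootstrap with a maximum over actions:
#
# $$Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big(R_{t+1} + \gamma \max_a Q(S_{t+1}, a) - Q(S_t, A_t)\big)$$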
# In[8]:
def update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
Qsa_next = np.max(Q[next_state]) if next_state is not None else 0 # value of next state
target = reward + (gamma * Qsa_next) # construct TD target
new_value = current + (alpha * (target - current)) # get updated value
return new_value
# In[9]:
def q_learning(env, num_episodes, alpha, gamma=1.0, plot_every=100):
"""Q-Learning - TD Control
Params
======
num_episodes (int): number of episodes to run the algorithm
alpha (float): learning rate
gamma (float): discount factor
plot_every (int): number of episodes to use when calculating average score
"""
nA = env.action_space.n # number of actions
Q = defaultdict(lambda: np.zeros(nA)) # initialize empty dictionary of arrays
# monitor performance
tmp_scores = deque(maxlen=plot_every) # deque for keeping track of scores
avg_scores = deque(maxlen=num_episodes) # average scores over every plot_every episodes
for i_episode in range(1, num_episodes+1):
# monitor progress
if i_episode % 100 == 0:
print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
sys.stdout.flush()
score = 0 # initialize score
state = env.reset() # start episode
eps = 1.0 / i_episode # set value of epsilon
while True:
action = epsilon_greedy(Q, state, nA, eps) # epsilon-greedy action selection
next_state, reward, done, info = env.step(action) # take action A, observe R, S'
score += reward # add reward to agent's score
Q[state][action] = update_Q_sarsamax(alpha, gamma, Q, state, action, reward, next_state)
state = next_state # S <- S'
if done:
tmp_scores.append(score) # append score
break
if (i_episode % plot_every == 0):
avg_scores.append(np.mean(tmp_scores))
# plot performance
plt.plot(np.linspace(0,num_episodes,len(avg_scores),endpoint=False), np.asarray(avg_scores))
plt.xlabel('Episode Number')
plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
plt.show()
# print best 100-episode performance
print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(avg_scores))
return Q
# Use the next code cell to visualize the **_estimated_** optimal policy and the corresponding state-value function.
#
# If the code cell returns **PASSED**, then you have implemented the function correctly! Feel free to change the `num_episodes` and `alpha` parameters that are supplied to the function. However, if you'd like to ensure the accuracy of the unit test, please do not change the value of `gamma` from the default.
# In[10]:
# obtain the estimated optimal policy and corresponding action-value function
Q_sarsamax = q_learning(env, 5000, .01)
# print the estimated optimal policy
policy_sarsamax = np.array([np.argmax(Q_sarsamax[key]) if key in Q_sarsamax else -1 for key in np.arange(48)]).reshape((4,12))
check_test.run_check('td_control_check', policy_sarsamax)
print("\nEstimated Optimal Policy (UP = 0, RIGHT = 1, DOWN = 2, LEFT = 3, N/A = -1):")
print(policy_sarsamax)
# plot the estimated optimal state-value function
plot_values([np.max(Q_sarsamax[key]) if key in Q_sarsamax else 0 for key in np.arange(48)])
# ### Part 3: TD Control: Expected Sarsa
#
# In this section, you will write your own implementation of the Expected Sarsa control algorithm.
#
# Your algorithm has four arguments:
# - `env`: This is an instance of an OpenAI Gym environment.
# - `num_episodes`: This is the number of episodes that are generated through agent-environment interaction.
# - `alpha`: This is the step-size parameter for the update step.
# - `gamma`: This is the discount rate. It must be a value between 0 and 1, inclusive (default value: `1`).
#
# The algorithm returns as output:
# - `Q`: This is a dictionary (of one-dimensional arrays) where `Q[s][a]` is the estimated action value corresponding to state `s` and action `a`.
#
# Please complete the function in the code cell below.
#
# (_Feel free to define additional functions to help you to organize your code._)
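# For reference, Expected Sarsa bootstraps on the expected action value under the $\epsilon$-greedy policy $\pi$:
#
# $$Q(S_t, A_t) \leftarrow Q(S_t, A_t) + \alpha \big(R_{t+1} + \gamma \sum_a \pi(a \mid S_{t+1}) Q(S_{t+1}, a) - Q(S_t, A_t)\big)$$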
# In[11]:
def update_Q_expsarsa(alpha, gamma, nA, eps, Q, state, action, reward, next_state=None):
"""Returns updated Q-value for the most recent experience."""
current = Q[state][action] # estimate in Q-table (for current state, action pair)
policy_s = np.ones(nA) * eps / nA # current policy (for next state S')
policy_s[
|
np.argmax(Q[next_state])
|
numpy.argmax
|
import sys
import os
import logging
import cv2
import numpy as np
from omegaconf import DictConfig
from tqdm import tqdm
from utils import (
get_path_list,
get_input_data_type,
load_image,
load_video,
save_image,
save_coordinate,
save_density_map,
)
# logging setting
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s %(name)-12s %(levelname)-8s %(message)s"
)
logger = logging.getLogger(__name__)
# define control key
Q_KEY = 0x71 # q key (end)
P_KEY = 0x70 # p key (pause)
D_KEY = 0x64 # d key (delete)
S_KEY = 0x73 # s key (save data and restart)
class DensityAnnotator:
"""
DensityAnnotator is a class that labels the position of an object
with a mouse and calculates a density map for image or video data.
    After annotation, it saves the raw image, the coordinates of the objects,
    and the density map for each target frame.
"""
def __init__(self, cfg: DictConfig, original_cwd: str) -> None:
"""
Initialize DensityAnnotator class by hydra config.
:param cfg: config of DensityAnnotator class
:param original_cwd: current working directory path
"""
logger.info(f"Loaded config: {cfg}")
cv2.namedWindow("click annotation points")
cv2.setMouseCallback("click annotation points", self.mouse_event)
self.sigma_pow = cfg.sigma_pow
self.mouse_event_interval = cfg.mouse_event_interval
# set frame information
self.video = None
self.frame = None
self.frame_list = []
self.width = None
self.height = None
self.features = None
self.coordinate_matrix = None
self.frame_num = 0
# set file path
self.input_file_path = None
self.input_file_path_list = get_path_list(
cfg.path.input_file_path, original_cwd
)
self.save_raw_image_dir = os.path.join(
original_cwd, cfg.path.save_raw_image_dir
)
self.save_annotated_dir = os.path.join(
original_cwd, cfg.path.save_annotated_dir
)
self.save_image_extension = cfg.path.save_image_extension
self.save_annotated_image_dir = f"{self.save_annotated_dir}/image"
self.save_annotated_coord_dir = f"{self.save_annotated_dir}/coord"
self.save_annotated_density_dir = f"{self.save_annotated_dir}/dens"
# check and create target directory
os.makedirs(self.save_raw_image_dir, exist_ok=True)
os.makedirs(self.save_annotated_image_dir, exist_ok=True)
os.makedirs(self.save_annotated_coord_dir, exist_ok=True)
os.makedirs(self.save_annotated_density_dir, exist_ok=True)
def run(self) -> None:
"""
Select the data type of the image or video from the extension
of the input data and execute the annotation.
:return: None
"""
for file_path in tqdm(self.input_file_path_list, desc="Annotation File Number"):
# initialization
self.frame_list = []
self.features = None
# load file
self.input_file_path = file_path
data_type = get_input_data_type(self.input_file_path)
logger.info(f"Annotation Data Type: {data_type}")
if data_type == "image":
self.image_annotation()
elif data_type == "video":
self.video_annotation()
else:
logger.error("Data type is invalid. Please check input file.")
sys.exit(1)
# end processing
cv2.destroyAllWindows()
def annotator_initialization(self) -> None:
"""
        Initialize the coordinate matrix that stores the clicked positions.
:return: None
"""
self.width = self.frame.shape[1]
self.height = self.frame.shape[0]
self.coordinate_matrix = np.zeros((self.width, self.height, 2), dtype="int64")
for i in range(self.width):
for j in range(self.height):
self.coordinate_matrix[i][j] = [i, j]
def image_annotation(self) -> None:
"""
A function to perform annotation on a single image.
:return: None
"""
# load input image
self.frame = load_image(self.input_file_path)
self.frame_list.append(self.frame.copy())
# frame number get from input file name
self.frame_num = os.path.splitext(os.path.basename(self.input_file_path))[0]
# initialize by frame information
self.annotator_initialization()
while True:
# display frame
cv2.imshow("click annotation points", self.frame)
# each key operation
wait_interval = self.mouse_event_interval
key = cv2.waitKey(wait_interval) & 0xFF
if key == D_KEY:
# delete the previous feature point
self.delete_point()
elif key == S_KEY:
# save current annotated data and go to next frame
self.save_annotated_data()
wait_interval = self.mouse_event_interval
break
def video_annotation(self) -> None:
"""
        A function to perform annotation on a movie.
        This function allows annotating multiple images cut out
        from the video data at any time.
:return: None
"""
# load input video data
self.video = load_video(self.input_file_path)
# load first frame and initialize by frame information
ret, self.frame = self.video.read()
self.annotator_initialization()
# read frames at regular intervals and annotate them.
wait_interval = self.mouse_event_interval
while ret:
if wait_interval != 0:
self.features = None
self.frame_num += 1
# display current frame
cv2.imshow("click annotation points", self.frame)
# load next frame and status
ret, self.frame = self.video.read()
# each key operation
key = cv2.waitKey(wait_interval) & 0xFF
if key == Q_KEY:
# finish the annotation work
break
elif key == P_KEY:
# pause current frame and start annotation
wait_interval = 0 # wait until the end of annotation
self.frame_list.append(self.frame.copy())
# save raw image
cv2.imwrite(
f"{self.save_raw_image_dir}/{self.frame_num}{self.save_image_extension}",
self.frame,
)
elif key == D_KEY:
# delete the previous feature point
self.delete_point()
elif key == S_KEY:
# save current annotated data and go to next frame
self.save_annotated_data()
wait_interval = self.mouse_event_interval
# end processing
self.video.release()
def mouse_event(self, event: int, x: int, y: int, flags: int, param: dict) -> None:
"""
        Select an annotation point with a left mouse click
:param event: the type of mouse event
:param x: x coordinate of the clicked position
:param y: y coordinate of the clicked position
:param flags: the type of button or key that was pressed during the mouse event
:param param: the value of param set in the third argument of setMouseCallback
:return: None
"""
# other than left click
if event != cv2.EVENT_LBUTTONDOWN:
return
# draw and add feature point
cv2.circle(self.frame, (x, y), 4, (0, 0, 255), -1, 8, 0)
self.add_point(x, y)
cv2.imshow("click annotation points", self.frame)
def add_point(self, x, y) -> None:
"""
        Add a new feature point to the stored list
:param x: x coordinate of the clicked position
:param y: y coordinate of the clicked position
:return: None
"""
if self.features is None:
self.features =
|
np.array([[x, y]], np.uint16)
|
numpy.array
|
import cv2
import numpy as np
def cnt_area(cnt):
area = cv2.contourArea(cnt)
return area
def my_threshold(img, thresh, min):
'''
    @param img: the image region containing the seal
    @param thresh: threshold used for binarization
    @param min: the value that pixels below the threshold are set to
    @return: the binarized image with the seal removed
'''
ret, th = cv2.threshold(img, thresh, min, cv2.THRESH_BINARY)
th1 = min - th
th1 = 255 - th1
return th1
def find_seal(image, low_range, high_range, thresh):
'''
    @param image: the input image
    @param low_range: lower bound of the color range
    @param high_range: upper bound of the color range
    @param thresh: threshold used to remove the seal
    @return: the bounding box of the seal region and that region with the seal removed
'''
hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
th = cv2.inRange(hsv_image, low_range, high_range)
index1 = th == 255
img = np.zeros(image.shape, np.uint8)
img[:, :] = (255, 255, 255)
img[index1] = image[index1] # (0,0,255)
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
kernel = np.ones((5, 5), np.uint8)
gray = cv2.dilate(~gray, kernel, iterations=2)
contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
contours.sort(key=cnt_area, reverse=True)
cnt = contours[0]
x, y, w, h = cv2.boundingRect(cnt)
seal_image = image[y:y + h, x:x + w]
b, g, r = cv2.split(seal_image)
g_b_img = my_threshold(r, thresh, 220)
r_img = my_threshold(r, thresh, 100)
seal_image[:, :, 0] = g_b_img
seal_image[:, :, 1] = g_b_img
seal_image[:, :, 2] = r_img
return [x,y,w,h], seal_image
def remove_seal(image):
'''
    @param image: the original image
    @return: the image with the seal removed
'''
# image=cv2.imread(r"file.jpg")
    # color range of the seal described above
low_range = np.array([0, 80, 150])
high_range =
|
np.array([50, 255, 205])
|
numpy.array
|
# -*- coding: utf-8 -*-
# Copyright 2016-2017 <NAME>
# see LICENSE
"""
documentation
"""
from __future__ import division, unicode_literals, print_function
import numpy as np
from functools import reduce
from molyso.generic.signal import normalize, horizontal_mean, vertical_mean, each_image_slice, \
hamming_smooth, relative_maxima
try:
from .fast_argrelextrema import relative_extrema
def relative_maxima(data, order=1):
return relative_extrema(data, order=order, cmp=1)
def relative_minima(data, order=1):
return relative_extrema(data, order=order, cmp=-1)
except ImportError:
relative_extrema = None
pass
def slicewise_product_profile(image, steps, direction='horizontal', normalize_profiles=False, shift=0.0):
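    # Splits the image into `steps` slices along the chosen direction, takes the mean
    # profile of each slice (optionally normalized, plus a constant shift), and reduces
    # the slices with an element-wise product, so features shared by all slices are
    # reinforced while slice-specific noise is suppressed.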
mean_func = horizontal_mean if direction == 'horizontal' else vertical_mean
def dummy(a):
return a
n_func = normalize if normalize_profiles else dummy
# the next call is like 90% of all time spent on box detection ;)
slices = [
n_func(mean_func(image_slice)) + shift
for n, step, image_slice in each_image_slice(image, steps, direction=direction)
]
product_profile = reduce(lambda a, b: n_func(a*b), slices)
product_profile = n_func(product_profile)
return product_profile
def find_box(image, throw=False, subsample=1, debug=False):
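    # The box is detected from the slice-wise product profile of an optionally
    # integer-subsampled copy of the image; the gradient of that profile (taken below)
    # emphasises sharp transitions such as the box edges.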
assert (type(subsample) == int)
if subsample != 1:
image = image[::subsample, ::subsample]
try:
product_profile = slicewise_product_profile(image, 20)
product_profile =
|
np.gradient(product_profile)
|
numpy.gradient
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import argparse
import numpy as np
import random
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable, grad
from tensorboardX import SummaryWriter
from QualityMetrics import Indicators
from MakeHist import MakeHist
from matplotlib import pyplot as plt
import importlib
##
## Command line parameters ##
##
parser = argparse.ArgumentParser(description='Train the selected model')
parser.add_argument('--run',type=int,help='Number of the run')
parser.add_argument('--model_ver',type=str,help='Version of the model. Must be the name of the module containing the desired model.')
parser.add_argument('--eta',type=float,default=0.12,help='Weight of the sum between the losses of the two discriminators (Only relevant for GANConv1Dv1WIa.py model).')
parser.add_argument('--alpha',type=float,default=300,help='Alpha parameter for pre-processing.')
parser.add_argument('--grad_norm_reg',type=bool,default=False,help='If gradient-norm regularization is applied.')
parser.add_argument('--gamma',type=float,default=0.01,help='Rate for gradient-norm regularization.')
parser.add_argument('--n_epochs',type=int,default=140,help='Number of epochs for training.')
parser.add_argument('--batch_size',type=int,default=20,help='Batch size.')
parser.add_argument('--lr_g',type=float,default=0.0001,help='Learning rate for the generator.')
parser.add_argument('--lr_d',type=float,default=0.00001,help='Learning rate for the discriminator.')
parser.add_argument('--n_critic',type=int,default=3,help='Number of discriminator steps per generator step.')
parser.add_argument('--n_parts',type=int,default=5,help='Split the Universe in n_parts')
opt = parser.parse_args()
model_ver = opt.model_ver
run = opt.run
# Directory for saving TensorBoard files, numpy arrays containing the results of the attacks and the weights of trained models.
saving_dir = './Logs/'+model_ver
# Instantiate TensorBoard SummaryWriter
writer = SummaryWriter(saving_dir+'/Tensorboard/exp'+str(run))
##
## Creating Random list of clients ##
##
# First 'step' elements of the list will be selected as training data
n_parts = opt.n_parts
data_dir = "./DataSets"
dir_list = os.listdir(data_dir)
random.shuffle(dir_list)
step = int(len(dir_list)/n_parts)
# Saving the training set shuffle
list_directory = saving_dir+'/npdata/dirlist'
if not os.path.exists(list_directory):
os.makedirs(list_directory)
np.save(list_directory+'/List'+str(run)+'.npy',dir_list)
# Arranging clients into subsets. The first subset will be the training set.
subset_list = []
universe = np.empty(shape=[0,336], dtype='float32')
for i in range(0,len(dir_list),step):
np_aux = np.empty(shape=[0,336], dtype='float32')
if ((len(dir_list)-i)>=step):
for j in range(step):
aux = np.load(data_dir+'/'+dir_list[i+j])
np_aux = np.append(np_aux,aux,axis=0)
universe = np.append(universe,aux,axis=0)
subset_list.append(np_aux)
# Saving alpha and maximum for transformation and inverse
# Maximum taken over the universe
alpha = opt.alpha
train_aux = np.arcsinh(universe*alpha)/alpha
save_max = np.reshape(train_aux,-1).max()
##
## Set-up ##
##
# Checking for cuda
if torch.cuda.is_available():
cuda = True
else:
cuda = False
# Loss Function
loss = nn.BCEWithLogitsLoss()  # Note that this loss function integrates the sigmoid activation for numerical stability.
# Instantiating generator and discriminator models
module_arch = importlib.__import__(model_ver)
generator = module_arch.GeneratorConv1D()
if model_ver == 'GANConv1Dv1WIa':
discriminator = module_arch.DiscriminatorSignal()
discriminator_I = module_arch.DiscriminatorIndicators()
else:
discriminator = module_arch.DiscriminatorConv1D()
if cuda:
generator.cuda()
discriminator.cuda()
loss.cuda()
if model_ver == 'GANConv1Dv1WIa':
discriminator_I.cuda()
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# Defining pre-processing transformation and inverse transformation
# Works with numpy arrays!!!
def transformation(array,alpha,save_max):
array = np.arcsinh(array*alpha)/alpha
array = (array*2.0)/save_max - 1.0
array = array[:,np.newaxis,:]
return array
# Works with pytorch tensors!!!
def inverse_trans(arrtensor,alpha,save_max):
arrtensor = (arrtensor+1.0)*save_max/2.0
return torch.sinh(arrtensor*alpha)/alpha
# Optimizer
optimizer_G = torch.optim.Adam(generator.parameters(),lr=opt.lr_g)
optimizer_D = torch.optim.Adam(discriminator.parameters(),lr=opt.lr_d)
if model_ver =='GANConv1Dv1WIa':
optimizer_D_I = torch.optim.Adam(discriminator_I.parameters(),lr=opt.lr_d)
# Loading training set
training_set = subset_list[0]
class TimeSeriesLCL(Dataset):
def __init__(self, npy_array,alpha,save_max):
self.x_train = npy_array
self.x_train = np.arcsinh(self.x_train*alpha)/alpha
self.x_train = (self.x_train*2.0)/save_max - 1.0
self.x_train = self.x_train[:,np.newaxis,:]
def __len__(self):
return self.x_train.shape[0]
def __getitem__(self, idx):
example = self.x_train[idx,]
return example
x_train = TimeSeriesLCL(training_set,alpha,save_max)
# Some parameters for training
if model_ver == 'GANConv1Dv0':
latent_space_dim = 25
else:
latent_space_dim = 42
eta = opt.eta
gamma = opt.gamma
n_epochs = opt.n_epochs
batch_size = opt.batch_size
steps_generator = opt.n_critic
steps_discriminator = 1
dataloader = DataLoader(x_train,batch_size=batch_size,shuffle=True)
generated_samples = []
real_examples = []
##
## Training ##
##
for epoch in range(n_epochs):
for i, example_batch in enumerate(dataloader):
# Ground truths for the discriminator
valid = Variable(Tensor(example_batch.shape[0], 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(example_batch.shape[0], 1).fill_(0.0), requires_grad=False)
# Configuring input
example_batch = example_batch.type(Tensor)
real_examples.append(torch.squeeze(example_batch))
# Generating samples
z = Tensor(np.random.normal(size=[example_batch.shape[0],latent_space_dim]))
generated_sample = generator(z)
generated_samples.append(torch.squeeze(generated_sample.detach()))
if model_ver =='GANConv1Dv1WIa':
# Train generator
if i%steps_generator == 0:
optimizer_G.zero_grad()
g_loss_S = loss(discriminator(generated_sample),valid)
g_loss_I = loss(discriminator_I(generated_sample),valid)
basic_g_loss = (1.0-eta)*g_loss_S + eta*g_loss_I
basic_g_loss.backward()
optimizer_G.step()
# Train Discriminator
if i%steps_discriminator == 0:
optimizer_D.zero_grad()
real_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated_sample.detach()),fake)
if opt.grad_norm_reg:
basic_d_loss = (real_loss + fake_loss)/2.0
d_grad = grad(basic_d_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
final_d_loss = basic_d_loss - gamma*dn2
else:
final_d_loss = (real_loss + fake_loss)/2.0
final_d_loss.backward()
optimizer_D.step()
optimizer_D_I.zero_grad()
real_loss_I = loss(discriminator_I(example_batch),valid)
fake_loss_I = loss(discriminator_I(generated_sample.detach()),fake)
d_loss_I = (real_loss_I + fake_loss_I)/2.0
d_loss_I.backward()
optimizer_D_I.step()
else:
# Train generator
if i%steps_generator == 0:
optimizer_G.zero_grad()
basic_g_loss = loss(discriminator(generated_sample),valid)
basic_g_loss.backward()
optimizer_G.step()
# Train Discriminator
if i%steps_discriminator == 0:
optimizer_D.zero_grad()
real_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated_sample.detach()),fake)
if opt.grad_norm_reg:
basic_d_loss = (real_loss + fake_loss)/2.0
d_grad = grad(basic_d_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
final_d_loss = basic_d_loss - gamma*dn2
else:
final_d_loss = (real_loss + fake_loss)/2.0
final_d_loss.backward()
optimizer_D.step()
print(
"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
% (epoch+1, n_epochs, i+1, len(dataloader), final_d_loss.item(), basic_g_loss.item())
)
# Saving the loss for the Generator and Discriminator
writer.add_scalar('Generator loss', basic_g_loss.item(), 1+i+(epoch*len(dataloader)))
writer.add_scalar('Discriminator loss', final_d_loss.item(),1+i+(epoch*len(dataloader)))
# Plotting artificially generated samples, empirical distributions of the indicators and saving plots to tensorboard.
if (((i+1)*batch_size) % 800) == 0:
generated_samples = torch.cat(generated_samples)
generated_samples = inverse_trans(generated_samples,alpha,save_max)
indicators_gen = Indicators(generated_samples)
indicators_gen = Tensor.cpu(indicators_gen)
indicators_gen = indicators_gen.data.numpy()
real_examples = torch.cat(real_examples)
real_examples = inverse_trans(real_examples,alpha,save_max)
indicators_real = Indicators(real_examples)
indicators_real = Tensor.cpu(indicators_real)
indicators_real = indicators_real.data.numpy()
g_sample = generated_samples[0,:]
g_sample = Tensor.cpu(g_sample)
g_sample = g_sample.data.numpy()
g_sample_fig = plt.figure(0)
plt.plot(g_sample)
plt.title('Generated Sample')
plt.ylabel('Energy (KWh)')
plt.xlabel('Time (half hour)')
writer.add_figure('Generated Sample', g_sample_fig,1+i+(epoch*len(dataloader)))
List_Hist_r, List_Hist_f, List_Hist_x, List_EMD, Avg_Ind_Index = MakeHist(indicators_real,indicators_gen)
mean_Hist = plt.figure(0)
plt.plot(List_Hist_x[0],List_Hist_r[0])
plt.plot(List_Hist_x[0],List_Hist_f[0])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the mean.')
plt.xlabel('Mean')
writer.add_scalar('EMD of the mean', List_EMD[0],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the mean', mean_Hist,1+i+(epoch*len(dataloader)))
skewness_Hist = plt.figure(1)
plt.plot(List_Hist_x[1],List_Hist_r[1])
plt.plot(List_Hist_x[1],List_Hist_f[1])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the skewness.')
plt.xlabel('Skewness')
writer.add_scalar('EMD of the skewness', List_EMD[1],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the skewness', skewness_Hist,1+i+(epoch*len(dataloader)))
CV_Hist = plt.figure(2)
plt.plot(List_Hist_x[2],List_Hist_r[2])
plt.plot(List_Hist_x[2],List_Hist_f[2])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the CV.')
plt.xlabel('Coefficient of variation')
writer.add_scalar('EMD of the CV', List_EMD[2],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the CV', CV_Hist,1+i+(epoch*len(dataloader)))
kurtosis_Hist = plt.figure(3)
plt.plot(List_Hist_x[3],List_Hist_r[3])
plt.plot(List_Hist_x[3],List_Hist_f[3])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the kurtosis.')
plt.xlabel('Kurtosis')
writer.add_scalar('EMD of the kurtosis', List_EMD[3],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the kurtosis', kurtosis_Hist,1+i+(epoch*len(dataloader)))
maxmean_Hist = plt.figure(4)
plt.plot(List_Hist_x[4],List_Hist_r[4])
plt.plot(List_Hist_x[4],List_Hist_f[4])
plt.legend(['Real','Fake'])
plt.title('Empirical distribution of the max-mean ratio.')
plt.xlabel('Max-mean ratio')
writer.add_scalar('EMD of the max-mean ratio', List_EMD[4],1+i+(epoch*len(dataloader)))
writer.add_figure('Histogram of the max-mean ratio', maxmean_Hist,1+i+(epoch*len(dataloader)))
writer.add_scalar('Average Indicator Index', Avg_Ind_Index,1+i+(epoch*len(dataloader)))
generated_samples = []
real_examples = []
# Saving the model
mod_directory = saving_dir+'/Trained'
if not os.path.exists(mod_directory):
os.makedirs(mod_directory)
torch.save(generator.state_dict(), mod_directory+'/GEN_run'+str(run)+'.pth')
torch.save(discriminator.state_dict(), mod_directory+'/DIS_run'+str(run)+'.pth')
print('Model Saved')
##
## Gradient Norm Attack ##
##
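# For every sample of every client, the discriminator loss is recomputed on that single
# sample together with a freshly generated fake sample, and the norm of its gradient
# w.r.t. the discriminator parameters is stored alongside the raw discriminator score;
# these per-sample statistics are aggregated in the Classification section below.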
batch_size = 1 # batch size for the attack
generator.eval()
discriminator.eval()
# The attack itself
norms_per_subset = []
scores_per_subset = []
for i in range(n_parts):
norm = []
scores = []
for j in range(step):
examples = np.load(data_dir+'/'+dir_list[(i*step)+j])
examples = transformation(examples,alpha,save_max)
client_norm = np.empty([0])
client_score = np.empty([0])
for k in range(0,examples.shape[0],batch_size):
# Configuring Input
example_batch = Tensor(examples[k:k+batch_size,:])
# Ground truth for the discriminator
valid = Variable(Tensor(example_batch.size(0), 1).fill_(1.0), requires_grad=False)
fake = Variable(Tensor(example_batch.size(0), 1).fill_(0.0), requires_grad=False)
# Generating fake samples
z = Tensor(np.random.normal(size=[example_batch.size(0),latent_space_dim]))
generated = generator(z)
# Taking the gradient of the discriminator
valid_loss = loss(discriminator(example_batch),valid)
fake_loss = loss(discriminator(generated.detach()),fake)
total_loss = (valid_loss + fake_loss)/2.0
discriminator.zero_grad()
# total_loss.backward(retain_graph=True)
# Saving discriminator score for sample
score = discriminator(example_batch)
score = Tensor.cpu(score)
score = score.data.numpy()
client_score = np.append(client_score, score)
# Calculating the norm
d_grad = grad(total_loss,discriminator.parameters(),create_graph=True)
dn2 = torch.sqrt(sum([grd.norm()**2 for grd in d_grad]))
dn2 = dn2.detach()
dn2 = Tensor.cpu(dn2)
dn2 = dn2.data.numpy()
client_norm = np.append(client_norm,dn2)
# Saving the norm for a client
scores.append(client_score)
norm.append(client_norm)
# Loop through clients
norms_per_subset.append(norm)
scores_per_subset.append(scores)
# Loop through subsets
norms_directory = saving_dir+'/npdata/Norms'
if not os.path.exists(norms_directory):
os.makedirs(norms_directory)
np.save(norms_directory+'/SSNorm'+str(run)+'.npy',norms_per_subset)
scores_directory = saving_dir+'/npdata/Scores'
if not os.path.exists(scores_directory):
os.makedirs(scores_directory)
np.save(scores_directory+'/SSScore'+str(run)+'.npy',scores_per_subset)
##
## Classification ##
##
# Using the Norm
mean_norms_per_client = []
mean_norms_per_subset = []
std_norms_per_client = []
std_norms_per_subset = []
# Going through norms for all samples.
# Saving per client and per subset mean and std.
for i in range(len(norms_per_subset)):
norms_per_client = norms_per_subset[i]
mean_norm_client_for_subset = []
std_norm_client_for_subset = []
all_norms_subset = np.empty([0])
for j in range(step):
client_norms = norms_per_client[j]
all_norms_subset = np.append(all_norms_subset,client_norms)
mean_client_norm = np.mean(client_norms)
std_client_norm = np.std(client_norms)
mean_norm_client_for_subset.append(mean_client_norm)
std_norm_client_for_subset.append(std_client_norm)
mean_norms_per_client.append(mean_norm_client_for_subset)
mean_norms_per_subset.append(np.mean(mean_norm_client_for_subset))
std_norms_per_client.append(std_norm_client_for_subset)
std_norms_per_subset.append(np.std(all_norms_subset))
# Classifying Subset Based
subset_ranking_mean = np.argsort(mean_norms_per_subset)
subset_ranking_std = np.argsort(std_norms_per_subset)
ranksSS_mean_directory = saving_dir+'/npdata/RankMeanPSS/'
if not os.path.exists(ranksSS_mean_directory):
os.makedirs(ranksSS_mean_directory)
np.save(ranksSS_mean_directory+'RankpSubset'+str(run)+'.npy',subset_ranking_mean)
ranksSS_std_directory = saving_dir+'/npdata/RankStdPSS/'
if not os.path.exists(ranksSS_std_directory):
os.makedirs(ranksSS_std_directory)
np.save(ranksSS_std_directory+'RankpSubset'+str(run)+'.npy',subset_ranking_std)
# Classifying Client Based
mean_arb_client_ranking = []
std_arb_client_ranking = []
for j in range(100):
rand_select_norms = []
rand_select_norms_std = []
for i in range(len(mean_norms_per_client)):
selected_client = np.random.choice(step)
norm_of_client = mean_norms_per_client[i][selected_client]
std_of_client = std_norms_per_client[i][selected_client]
rand_select_norms.append(norm_of_client)
rand_select_norms_std.append(std_of_client)
aux =
|
np.argsort(rand_select_norms)
|
numpy.argsort
|
"""prism_grid.py: resqpy PrismGrid and VerticalPrismGrid class module."""
version = '24th November 2021'
import logging
log = logging.getLogger(__name__)
import numpy as np
import resqpy.crs as rqc
import resqpy.grid as grr
import resqpy.olio.intersection as meet
import resqpy.olio.transmission as rqtr
import resqpy.olio.triangulation as tri
import resqpy.olio.uuid as bu
import resqpy.olio.vector_utilities as vec
import resqpy.property as rqp
import resqpy.surface as rqs
import resqpy.weights_and_measures as wam
from resqpy.unstructured._unstructured_grid import UnstructuredGrid
class PrismGrid(UnstructuredGrid):
"""Class for unstructured grids where every cell is a triangular prism.
note:
prism cells are not constrained to have a fixed cross-section, though in practice they often will
"""
def __init__(self,
parent_model,
uuid = None,
find_properties = True,
cache_geometry = False,
title = None,
originator = None,
extra_metadata = {}):
"""Creates a new resqpy PrismGrid object (RESQML UnstructuredGrid with cell shape trisngular prism)
arguments:
parent_model (model.Model object): the model which this grid is part of
uuid (uuid.UUID, optional): if present, the new grid object is populated from the RESQML object
find_properties (boolean, default True): if True and uuid is present, a
grid property collection is instantiated as an attribute, holding properties for which
this grid is the supporting representation
cache_geometry (boolean, default False): if True and uuid is present, all the geometry arrays
are loaded into attributes of the new grid object
title (str, optional): citation title for new grid; ignored if uuid is present
originator (str, optional): name of person creating the grid; defaults to login id;
ignored if uuid is present
extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid;
ignored if uuid is present
returns:
a newly created PrismGrid object
"""
super().__init__(parent_model = parent_model,
uuid = uuid,
find_properties = find_properties,
geometry_required = True,
cache_geometry = cache_geometry,
cell_shape = 'prism',
title = title,
originator = originator,
extra_metadata = extra_metadata)
if self.root is not None:
assert grr.grid_flavour(self.root) in ['PrismGrid', 'VerticalPrismGrid']
self.check_prism()
self.grid_representation = 'PrismGrid' #: flavour of grid; not much used
def check_prism(self):
"""Checks that each cell has 5 faces and each face has 3 or 4 nodes.
note:
currently only performs a cursory check, without checking nodes are shared or that there are exactly two
triangular faces without shared nodes
"""
assert self.cell_shape == 'prism'
self.cache_all_geometry_arrays()
assert self.faces_per_cell_cl is not None and self.nodes_per_face_cl is not None
assert self.faces_per_cell_cl[0] == 5 and np.all(self.faces_per_cell_cl[1:] - self.faces_per_cell_cl[:-1] == 5)
nodes_per_face_count = np.empty(self.face_count)
nodes_per_face_count[0] = self.nodes_per_face_cl[0]
nodes_per_face_count[1:] = self.nodes_per_face_cl[1:] - self.nodes_per_face_cl[:-1]
assert np.all(np.logical_or(nodes_per_face_count == 3, nodes_per_face_count == 4))
# todo: add prism specific methods for centre_point(), volume()
class VerticalPrismGrid(PrismGrid):
"""Class for unstructured grids where every cell is a vertical triangular prism.
notes:
        vertical prism cells are constrained to have a fixed triangular horizontal cross-section, though top and base
triangular faces need not be horizontal; edges not involved in the triangular faces must be vertical;
this is not a native RESQML sub-class but is a resqpy concoction to allow optimisation of some methods;
face ordering within a cell is also constrained to be top, base, then the three vertical planar quadrilateral
faces; node ordering within triangular faces is constrained to ensure correspondence of nodes in triangles
within a column
"""
def __init__(self,
parent_model,
uuid = None,
find_properties = True,
cache_geometry = False,
title = None,
originator = None,
extra_metadata = {}):
"""Creates a new resqpy VerticalPrismGrid object.
arguments:
parent_model (model.Model object): the model which this grid is part of
uuid (uuid.UUID, optional): if present, the new grid object is populated from the RESQML object
find_properties (boolean, default True): if True and uuid is present, a
grid property collection is instantiated as an attribute, holding properties for which
this grid is the supporting representation
cache_geometry (boolean, default False): if True and uuid is present, all the geometry arrays
are loaded into attributes of the new grid object
title (str, optional): citation title for new grid; ignored if uuid is present
originator (str, optional): name of person creating the grid; defaults to login id;
ignored if uuid is present
extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid;
ignored if uuid is present
returns:
a newly created VerticalPrismGrid object
"""
self.nk = None #: number of layers when constructed as a layered grid
super().__init__(parent_model = parent_model,
uuid = uuid,
find_properties = find_properties,
cache_geometry = cache_geometry,
title = title,
originator = originator,
extra_metadata = extra_metadata)
if self.root is not None:
assert grr.grid_flavour(self.root) in ['VerticalPrismGrid', 'PrismGrid']
self.check_prism()
if 'layer count' in self.extra_metadata:
self.nk = int(self.extra_metadata['layer count'])
self.grid_representation = 'VerticalPrismGrid' #: flavour of grid; not much used
@classmethod
def from_surfaces(cls,
parent_model,
surfaces,
column_points = None,
column_triangles = None,
title = None,
originator = None,
extra_metadata = {},
set_handedness = False):
"""Create a layered vertical prism grid from an ordered list of untorn surfaces.
arguments:
parent_model (model.Model object): the model which this grid is part of
surfaces (list of surface.Surface): list of two or more untorn surfaces ordered from
shallowest to deepest; see notes
column_points (2D numpy float array, optional): if present, the xy points to use for
the grid's triangulation; see notes
column_triangles (numpy int array of shape (M, 3), optional): if present, indices into the
first dimension of column_points giving the xy triangulation to use for the grid; see notes
title (str, optional): citation title for the new grid
originator (str, optional): name of person creating the grid; defaults to login id
extra_metadata (dict, optional): dictionary of extra metadata items to add to the grid
returns:
a newly created VerticalPrismGrid object
notes:
this method will not work for torn (faulted) surfaces, nor for surfaces with recumbent folds;
        the surfaces may not cross each other, i.e. the depth ordering must be consistent over the area;
the triangular pattern of the columns (in the xy plane) can be specified with the column_points
and column_triangles arguments;
if those arguments are None, the first, shallowest, surface is used as a master and determines
the triangular pattern of the columns;
where a gravity vector from a node above does not intersect a surface, the point is inherited
as a copy of the node above and will be NaNs if no surface above has an intersection;
the Surface class has methods for creating a Surface from a PointSet or a Mesh (RESQML
Grid2dRepresentation), or for a horizontal plane;
this class is represented in RESQML as an UnstructuredGridRepresentation – when a resqpy
class is written for ColumnLayerGridRepresentation, a method will be added to that class to
convert from a resqpy VerticalPrismGrid
"""
def find_pair(a, pair):
# for sorted array a of shape (N, 2) returns index in first axis of a pair
def frp(a, pair, b, c):
m = b + ((c - b) // 2)
assert m < len(a), 'pair not found in sorted array'
if np.all(a[m] == pair):
return m
assert c > b, 'pair not found in sorted array'
if a[m, 0] < pair[0]:
return frp(a, pair, m + 1, c)
elif a[m, 0] > pair[0]:
return frp(a, pair, b, m)
elif a[m, 1] < pair[1]:
return frp(a, pair, m + 1, c)
else:
return frp(a, pair, b, m)
return frp(a, pair, 0, len(a))
assert (column_points is None) == (column_triangles is None)
assert len(surfaces) > 1
for s in surfaces:
assert isinstance(s, rqs.Surface)
vpg = cls(parent_model, title = title, originator = originator, extra_metadata = extra_metadata)
assert vpg is not None
top = surfaces[0]
# set and check consistency of crs
vpg.crs_uuid = top.crs_uuid
for s in surfaces[1:]:
if not bu.matching_uuids(vpg.crs_uuid, s.crs_uuid):
# check for equivalence
assert rqc.Crs(parent_model,
uuid = vpg.crs_uuid) == rqc.Crs(parent_model,
uuid = s.crs_uuid), 'mismatching surface crs'
# fetch the data for the top surface, to be used as the master for the triangular pattern
if column_triangles is None:
top_triangles, top_points = top.triangles_and_points()
column_edges = top.distinct_edges() # ordered pairs of node indices
else:
top_triangles = column_triangles
if column_points.shape[1] == 3:
top_points = column_points
else:
top_points = np.zeros((len(column_points), 3))
top_points[:, :column_points.shape[1]] = column_points
column_surf = rqs.Surface(parent_model, crs_uuid = vpg.crs_uuid)
column_surf.set_from_triangles_and_points(column_triangles, column_points)
column_edges = column_surf.distinct_edges()
assert top_triangles.ndim == 2 and top_triangles.shape[1] == 3
assert top_points.ndim == 2 and top_points.shape[1] in [2, 3]
assert len(top_triangles) > 0
p_count = len(top_points)
bad_points = np.zeros(p_count, dtype = bool)
# setup size of arrays for the vertical prism grid
column_count = top_triangles.shape[0]
surface_count = len(surfaces)
layer_count = surface_count - 1
column_edge_count = len(column_edges)
vpg.cell_count = column_count * layer_count
vpg.node_count = p_count * surface_count
vpg.face_count = column_count * surface_count + column_edge_count * layer_count
vpg.nk = layer_count
if vpg.extra_metadata is None:
vpg.extra_metadata = {}
vpg.extra_metadata['layer count'] = vpg.nk
# setup points with copies of points for top surface, z values to be updated later
points = np.zeros((surface_count, p_count, 3))
points[:, :, :] = top_points
# arrange faces with all triangles first, followed by the vertical quadrilaterals
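        # note (editorial): the *_cl arrays built below are cumulative-length (running total) lists, following
        # the RESQML / resqpy convention for variable-length connectivity data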
vpg.nodes_per_face_cl = np.zeros(vpg.face_count, dtype = int)
vpg.nodes_per_face_cl[:column_count * surface_count] = \
np.arange(3, 3 * column_count * surface_count + 1, 3, dtype = int)
quad_start = vpg.nodes_per_face_cl[column_count * surface_count - 1] + 4
vpg.nodes_per_face_cl[column_count * surface_count:] = \
np.arange(quad_start, quad_start + 4 * column_edge_count * layer_count, 4)
assert vpg.nodes_per_face_cl[-1] == 3 * column_count * surface_count + 4 * column_edge_count * layer_count
# populate nodes per face for triangular faces
vpg.nodes_per_face = np.zeros(vpg.nodes_per_face_cl[-1], dtype = int)
for surface_index in range(surface_count):
vpg.nodes_per_face[surface_index * 3 * column_count : (surface_index + 1) * 3 * column_count] = \
top_triangles.flatten() + surface_index * p_count
# populate nodes per face for quadrilateral faces
quad_nodes = np.empty((layer_count, column_edge_count, 2, 2), dtype = int)
for layer in range(layer_count):
quad_nodes[layer, :, 0, :] = column_edges + layer * p_count
# reverse order of base pairs to maintain cyclic ordering of nodes per face
quad_nodes[layer, :, 1, 0] = column_edges[:, 1] + (layer + 1) * p_count
quad_nodes[layer, :, 1, 1] = column_edges[:, 0] + (layer + 1) * p_count
vpg.nodes_per_face[3 * surface_count * column_count:] = quad_nodes.flatten()
assert vpg.nodes_per_face[-1] > 0
# set up faces per cell
vpg.faces_per_cell = np.zeros(5 * vpg.cell_count, dtype = int)
vpg.faces_per_cell_cl = np.arange(5, 5 * vpg.cell_count + 1, 5, dtype = int)
assert len(vpg.faces_per_cell_cl) == vpg.cell_count
# set cell top triangle indices
for layer in range(layer_count):
# top triangular faces of cells
vpg.faces_per_cell[5 * layer * column_count : (layer + 1) * 5 * column_count : 5] = \
layer * column_count +
|
np.arange(column_count)
|
numpy.arange
|
import numpy as np
import pytest
from syft import FloatTensor
import syft.controller
# ------ Test settings ------
decimal_accuracy = 4 # tests will verify abs(desired-actual) < 1.5 * 10**(-decimal)
verbosity = False
#
# FloatTensor tests
#
def test_float_abs():
data = np.array([-1., -2., 3., 4., 5., -6.])
expected = np.array([1., 2., 3., 4., 5., 6.])
a = FloatTensor(data)
b = a.abs()
np.testing.assert_almost_equal(b.to_numpy(), expected,
decimal=decimal_accuracy, verbose=verbosity)
# a doesn't change
np.testing.assert_almost_equal(a.to_numpy(), data,
decimal=decimal_accuracy, verbose=verbosity)
def test_float_abs_():
data = np.array([-1., -2., 3., 4., 5., -6.])
expected = np.array([1., 2., 3., 4., 5., 6.])
a = FloatTensor(data)
a.abs_()
# a does change when inlined
np.testing.assert_almost_equal(a.to_numpy(), expected,
decimal=decimal_accuracy, verbose=verbosity)
def test_float_acos():
data =
|
np.array([-0.6366, 0.2718, 0.4469, 1.3122])
|
numpy.array
|
"""
utils.py
Contains some useful functions for creating models
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import os
import pickle
import random
import tensorflow as tf
import tensorflow.contrib.slim as slim
import threading
import concurrent.futures
import init_paths
import data.load_ops as load_ops
from data.load_ops import create_input_placeholders_and_ops, get_filepaths_list
import general_utils
# from lib.savers.aws_saver import AwsSaver
AwsSaver = tf.train.Saver
import optimizers.train_steps as train_steps
import models.architectures as architectures
def get_available_devices():
from tensorflow.python.client import device_lib
gpu_options = tf.GPUOptions(allow_growth=True)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
return device_lib.list_local_devices()
def get_max_steps( num_samples_epoch, cfg , is_training=True):
if cfg['num_epochs']:
max_steps = num_samples_epoch * cfg['num_epochs'] // cfg['batch_size']
else:
max_steps = None
if not is_training:
max_steps = num_samples_epoch // cfg['batch_size']
print( 'number of steps per epoch:',
num_samples_epoch // cfg['batch_size'] )
print( 'max steps:', max_steps )
return max_steps
def load_config( cfg_dir, nopause=False ):
'''
Raises:
        ImportError if 'config.py' doesn't exist in cfg_dir
'''
if not os.path.isfile( os.path.join( cfg_dir, 'config.py' ) ):
raise ImportError( 'config.py not found in {0}'.format( cfg_dir ) )
import sys
sys.path.insert( 0, cfg_dir )
from config import get_cfg
cfg = get_cfg( nopause )
# cleanup
try:
del sys.modules[ 'config' ]
except:
pass
sys.path.remove(cfg_dir)
return cfg
def print_start_info( cfg, max_steps, is_training=False ):
model_type = 'training' if is_training else 'testing'
print("--------------- begin {0} ---------------".format( model_type ))
print('number of epochs', cfg['num_epochs'])
print('batch size', cfg['batch_size'])
print('total number of training steps:', max_steps)
##################
# Model building
##################
def create_init_fn( cfg, model ):
# restore model
if cfg['model_path'] is not None:
print('******* USING SAVED MODEL *******')
checkpoint_path = cfg['model_path']
model['model'].decoder
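        # NOTE (editorial): the construction of `init_assign_op` / `init_feed_dict` appears to have been elided
        # here; in slim-based code they would typically be produced by something like
        # slim.assign_from_checkpoint(checkpoint_path, variables_to_restore) before InitAssignFn uses them.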
# Create an initial assignment function.
def InitAssignFn(sess):
print('restoring model...')
sess.run(init_assign_op, init_feed_dict)
print('model restored')
init_fn = InitAssignFn
else:
print('******* TRAINING FROM SCRATCH *******')
init_fn = None
return init_fn
def setup_and_restore_model( sess, inputs, cfg, is_training=False ):
model = setup_model( inputs, cfg, is_training=False )
model[ 'saver_op' ].restore( sess, cfg[ 'model_path' ] )
return model
def setup_input_3m( cfg, is_training=False, use_filename_queue=False ):
'''
Builds input tensors from the config.
'''
inputs = {}
if is_training:
filepaths_list = get_filepaths_list( cfg[ 'train_filenames' ] )
else:
filepaths_list = get_filepaths_list( cfg[ 'val_filenames' ] )
# Generate placeholder input tensors
num_samples_epoch = filepaths_list['total_size']
inputs[ 'unit_size' ] = filepaths_list['unit_size']
filepaths_list = [ os.path.join( '/home/ubuntu/s3/meta', i) for i in filepaths_list['filename_list']]
if use_filename_queue:
filename_queue_dict = load_ops.create_filename_enqueue_op( cfg )
inputs[ 'next_idx_op' ] = filename_queue_dict[ 'dequeue_op' ]
inputs[ 'filename_enqueue_op' ] = filename_queue_dict[ 'enqueue_op' ]
inputs[ 'next_idx_placeholder'] = filename_queue_dict[ 'data_idx_placeholder' ]
inputs[ 'fname_q' ] = filename_queue_dict
placeholders, batches, load_and_enqueue, enqueue_op = create_input_placeholders_and_ops( cfg )
input_batches = list( batches ) # [ inputs, targets, mask, data_idx ]
max_steps = get_max_steps( num_samples_epoch, cfg , is_training=is_training)
inputs[ 'enqueue_op' ] = enqueue_op
inputs[ 'filepaths_list' ] = filepaths_list
inputs[ 'load_and_enqueue' ] = load_and_enqueue
inputs[ 'max_steps' ] = max_steps
inputs[ 'num_samples_epoch' ] = num_samples_epoch
inputs[ 'input_batches' ] = input_batches
inputs[ 'input_batch' ] = input_batches[0]
inputs[ 'target_batch' ] = input_batches[1]
inputs[ 'mask_batch' ] = input_batches[2]
inputs[ 'data_idxs' ] = input_batches[3]
inputs[ 'placeholders' ] = placeholders
inputs[ 'input_placeholder' ] = placeholders[0]
inputs[ 'target_placeholder' ] = placeholders[1]
inputs[ 'mask_placeholder' ] = placeholders[2]
inputs[ 'data_idx_placeholder' ] = placeholders[3]
return inputs
def setup_input( cfg, is_training=False, use_filename_queue=False ):
'''
Builds input tensors from the config.
'''
inputs = {}
if is_training:
filepaths_list = get_filepaths_list( cfg[ 'train_filenames' ] )
else:
filepaths_list = get_filepaths_list( cfg[ 'val_filenames' ] )
# Generate placeholder input tensors
num_samples_epoch = filepaths_list['total_size']
inputs[ 'unit_size' ] = filepaths_list['unit_size']
filepaths_list = [ os.path.join(cfg['root_dir'], cfg['meta_file_dir'], i) for i in filepaths_list['filename_list']]
if use_filename_queue:
filename_queue_dict = load_ops.create_filename_enqueue_op( cfg )
inputs[ 'next_idx_op' ] = filename_queue_dict[ 'dequeue_op' ]
inputs[ 'filename_enqueue_op' ] = filename_queue_dict[ 'enqueue_op' ]
inputs[ 'next_idx_placeholder'] = filename_queue_dict[ 'data_idx_placeholder' ]
inputs[ 'fname_q' ] = filename_queue_dict
placeholders, batches, load_and_enqueue, enqueue_op = create_input_placeholders_and_ops( cfg )
input_batches = list( batches ) # [ inputs, targets, mask, data_idx ]
max_steps = get_max_steps( num_samples_epoch, cfg , is_training=is_training)
inputs[ 'enqueue_op' ] = enqueue_op
inputs[ 'filepaths_list' ] = filepaths_list
inputs[ 'load_and_enqueue' ] = load_and_enqueue
inputs[ 'max_steps' ] = max_steps
inputs[ 'num_samples_epoch' ] = num_samples_epoch
inputs[ 'input_batches' ] = input_batches
inputs[ 'input_batch' ] = input_batches[0]
inputs[ 'target_batch' ] = input_batches[1]
inputs[ 'mask_batch' ] = input_batches[2]
inputs[ 'data_idxs' ] = input_batches[3]
inputs[ 'placeholders' ] = placeholders
inputs[ 'input_placeholder' ] = placeholders[0]
inputs[ 'target_placeholder' ] = placeholders[1]
inputs[ 'mask_placeholder' ] = placeholders[2]
inputs[ 'data_idx_placeholder' ] = placeholders[3]
return inputs
def setup_input_transfer(cfg, is_training=False, use_filename_queue=False):
'''
Builds input tensors from the config.
'''
inputs = {}
if is_training:
filepaths_list = get_filepaths_list( cfg[ 'train_filenames' ] )
fileinfos_list_path = cfg[ 'train_list_of_fileinfos' ]
representation_file = cfg['train_representations_file']
else:
filepaths_list = get_filepaths_list( cfg[ 'val_filenames' ] )
fileinfos_list_path = cfg[ 'val_list_of_fileinfos' ]
representation_file = cfg['val_representations_file']
if 'multiple_input_tasks' in cfg:
num_samples_epoch = filepaths_list['total_size']
else:
with open(representation_file, 'rb') as f:
representations_list = pickle.load(f)
data_used = len(np.load(fileinfos_list_path))
num_samples_epoch = min([data_used, filepaths_list['total_size'], len(representations_list['file_indexes'] ) ])
if 'data_used' in cfg:
num_samples_epoch = min(num_samples_epoch, cfg['data_used'])
inputs[ 'unit_size' ] = filepaths_list['unit_size']
# Generate placeholder input tensors
filepaths_list = [ os.path.join(cfg['root_dir'], cfg['meta_file_dir'], i) for i in filepaths_list['filename_list']]
if use_filename_queue:
filename_queue_dict = load_ops.create_filename_enqueue_op( cfg )
inputs[ 'next_idx_op' ] = filename_queue_dict[ 'dequeue_op' ]
inputs[ 'filename_enqueue_op' ] = filename_queue_dict[ 'enqueue_op' ]
inputs[ 'next_idx_placeholder'] = filename_queue_dict[ 'data_idx_placeholder' ]
inputs[ 'fname_q' ] = filename_queue_dict
placeholders, batches, load_and_enqueue, enqueue_op = create_input_placeholders_and_ops( cfg )
input_batches = list( batches ) # [ inputs, targets, mask, data_idx ]
max_steps = get_max_steps( num_samples_epoch, cfg , is_training=is_training)
inputs[ 'enqueue_op' ] = enqueue_op
inputs[ 'filepaths_list' ] = filepaths_list
inputs[ 'list_of_fileinfos' ] = fileinfos_list_path
inputs[ 'representations_file' ] = representation_file
inputs[ 'load_and_enqueue' ] = load_and_enqueue
inputs[ 'max_steps' ] = max_steps
inputs[ 'num_samples_epoch' ] = num_samples_epoch
inputs[ 'input_batches' ] = input_batches
inputs[ 'input_batch' ] = input_batches[0]
inputs[ 'representation_batch' ] = input_batches[1]
inputs[ 'target_batch' ] = input_batches[2]
inputs[ 'mask_batch' ] = input_batches[3]
inputs[ 'data_idxs' ] = input_batches[4]
inputs[ 'placeholders' ] = placeholders
inputs[ 'input_placeholder' ] = placeholders[0]
inputs[ 'representation_placeholder' ] = placeholders[1]
inputs[ 'target_placeholder' ] = placeholders[2]
inputs[ 'mask_placeholder' ] = placeholders[3]
inputs[ 'data_idx_placeholder' ] = placeholders[4]
return inputs
def setup_input_transfer_imagenet(cfg, is_training=False, use_filename_queue=False):
'''
Builds input tensors from the config.
'''
inputs = {}
if is_training:
filepaths_list = get_filepaths_list( cfg[ 'train_filenames' ] )
fileinfos_list_path = cfg[ 'train_list_of_fileinfos' ]
representation_file = cfg['train_representations_file']
else:
filepaths_list = get_filepaths_list( cfg[ 'val_filenames' ] )
fileinfos_list_path = cfg[ 'val_list_of_fileinfos' ]
representation_file = cfg['val_representations_file']
num_samples_epoch = min(filepaths_list['total_size'], cfg['data_used'])
inputs[ 'unit_size' ] = filepaths_list['unit_size']
# Generate placeholder input tensors
filepaths_list = [ os.path.join(cfg['root_dir'], cfg['meta_file_dir'], i) for i in filepaths_list['filename_list']]
if use_filename_queue:
filename_queue_dict = load_ops.create_filename_enqueue_op( cfg )
inputs[ 'next_idx_op' ] = filename_queue_dict[ 'dequeue_op' ]
inputs[ 'filename_enqueue_op' ] = filename_queue_dict[ 'enqueue_op' ]
inputs[ 'next_idx_placeholder'] = filename_queue_dict[ 'data_idx_placeholder' ]
inputs[ 'fname_q' ] = filename_queue_dict
placeholders, batches, load_and_enqueue, enqueue_op = create_input_placeholders_and_ops( cfg )
input_batches = list( batches ) # [ inputs, targets, mask, data_idx ]
max_steps = get_max_steps( num_samples_epoch, cfg , is_training=is_training)
inputs[ 'enqueue_op' ] = enqueue_op
inputs[ 'filepaths_list' ] = filepaths_list
inputs[ 'list_of_fileinfos' ] = fileinfos_list_path
inputs[ 'representations_file' ] = representation_file
inputs[ 'load_and_enqueue' ] = load_and_enqueue
inputs[ 'max_steps' ] = max_steps
inputs[ 'num_samples_epoch' ] = num_samples_epoch
inputs[ 'input_batches' ] = input_batches
inputs[ 'input_batch' ] = input_batches[0]
inputs[ 'representation_batch' ] = input_batches[1]
inputs[ 'target_batch' ] = input_batches[2]
inputs[ 'mask_batch' ] = input_batches[3]
inputs[ 'data_idxs' ] = input_batches[4]
inputs[ 'placeholders' ] = placeholders
inputs[ 'input_placeholder' ] = placeholders[0]
inputs[ 'representation_placeholder' ] = placeholders[1]
inputs[ 'target_placeholder' ] = placeholders[2]
inputs[ 'mask_placeholder' ] = placeholders[3]
inputs[ 'data_idx_placeholder' ] = placeholders[4]
return inputs
def setup_model( inputs, cfg, is_training=False ):
'''
Sets up the `model` dict, and instantiates a model in 'model',
and then calls model['model'].build
Args:
inputs: A dict, the result of setup_inputs
cfg: A dict from config.py
is_training: Bool, used for batch norm and the like
Returns:
model: A dict with 'model': cfg['model_type']( cfg ), and other
useful attributes like 'global_step'
'''
validate_model( inputs, cfg )
model = {}
model[ 'global_step' ] = slim.get_or_create_global_step()
model[ 'input_batch' ] = tf.identity( inputs[ 'input_batch' ] )
if 'representation_batch' in inputs:
model[ 'representation_batch' ] = tf.identity( inputs[ 'representation_batch' ] )
model[ 'target_batch' ] = tf.identity( inputs[ 'target_batch' ] )
model[ 'mask_batch' ] = tf.identity( inputs[ 'mask_batch' ] )
model[ 'data_idxs' ] = tf.identity( inputs[ 'data_idxs' ] )
# instantiate the model
if cfg[ 'model_type' ] == 'empty':
return model
else:
model[ 'model' ] = cfg[ 'model_type' ]( global_step=model[ 'global_step' ], cfg=cfg )
# build the model
if 'representation_batch' in inputs:
input_imgs = (inputs[ 'input_batch' ], inputs[ 'representation_batch' ])
else:
input_imgs = inputs[ 'input_batch' ]
model[ 'model' ].build_model(
input_imgs=input_imgs,
targets=inputs[ 'target_batch' ],
masks=inputs[ 'mask_batch' ],
is_training=is_training )
if is_training:
model[ 'model' ].build_train_op( global_step=model[ 'global_step' ] )
model[ 'train_op' ] = model[ 'model' ].train_op
model[ 'train_step_fn' ] = model[ 'model' ].get_train_step_fn()
model[ 'train_step_kwargs' ] = train_steps.get_default_train_step_kwargs(
global_step=model[ 'global_step' ],
max_steps=inputs[ 'max_steps' ],
log_every_n_steps=10 )
#model[ 'init_op' ] = model[ 'model' ].init_op
if hasattr( model['model'], 'init_fn' ):
model[ 'init_fn' ] = model['model'].init_fn
else:
model[ 'init_fn' ] = None
max_to_keep = cfg['num_epochs'] * 2
if 'max_ckpts_to_keep' in cfg:
max_to_keep = cfg['max_ckpts_to_keep']
model[ 'saver_op' ] = AwsSaver(max_to_keep=max_to_keep)
return model
def setup_model_chained_transfer( inputs, cfg, is_training=False ):
'''
Sets up the `model` dict, and instantiates a model in 'model',
and then calls model['model'].build
Args:
inputs: A dict, the result of setup_inputs
cfg: A dict from config.py
is_training: Bool, used for batch norm and the like
Returns:
model: A dict with 'model': cfg['model_type']( cfg ), and other
useful attributes like 'global_step'
'''
validate_model( inputs, cfg )
model = {}
model[ 'global_step' ] = 0
model[ 'input_batch' ] = tf.identity( inputs[ 'input_batch' ] )
if 'representation_batch' in inputs:
model[ 'representation_batch' ] = tf.identity( inputs[ 'representation_batch' ] )
model[ 'target_batch' ] = [tf.identity( op ) for op in inputs[ 'target_batch' ]]
model[ 'mask_batch' ] = [tf.identity( op ) for op in inputs[ 'mask_batch' ]]
model[ 'data_idxs' ] = tf.identity( inputs[ 'data_idxs' ] )
# instantiate the model
if cfg[ 'model_type' ] == 'empty':
return model
else:
model[ 'model' ] = cfg[ 'model_type' ]( global_step=model[ 'global_step' ], cfg=cfg )
# build the model
if 'representation_batch' in inputs:
input_imgs = (inputs[ 'input_batch' ], inputs[ 'representation_batch' ])
else:
input_imgs = inputs[ 'input_batch' ]
model[ 'model' ].build_model(
input_imgs=input_imgs,
targets=inputs[ 'target_batch' ],
masks=inputs[ 'mask_batch' ],
is_training=is_training )
model[ 'model' ].build_train_op( global_step=model[ 'global_step' ] )
model[ 'train_op' ] = model[ 'model' ].train_op
model[ 'train_step_fn' ] = model[ 'model' ].get_train_step_fn()
model[ 'train_step_kwargs' ] = train_steps.get_default_train_step_kwargs(
global_step=model[ 'global_step' ],
max_steps=inputs[ 'max_steps' ],
log_every_n_steps=10 )
#model[ 'init_op' ] = model[ 'model' ].init_op
if hasattr( model['model'], 'init_fn' ):
model[ 'init_fn' ] = model['model'].init_fn
else:
model[ 'init_fn' ] = None
model[ 'saver_op' ] = AwsSaver(max_to_keep=cfg['num_epochs'])
return model
def validate_model( inputs, cfg ):
general_utils.validate_config( cfg )
################
# Data loading
################
# def get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=False ):
# '''
# Builds a function which, when called with (sess, supervisor), will
# spin up a bunch of threads (exact # specified in cfg) that preload data.
# That function returns all the threading.Thread's in a list.
# '''
# if use_filename_queue:
# def data_prefetch_threads_init_fn( sess, supervisor ):
# if 'num_input' in cfg and cfg['num_input'] and 'single_filename_to_multiple' not in cfg:
# filename_load_function = load_ops.load_from_filename_queue_multiple
# queue_filename = load_ops.enqueue_filenames_multiple
# else:
# filename_load_function = load_ops.load_from_filename_queue
# queue_filename = load_ops.enqueue_filenames
# print(filename_load_function)
# print(queue_filename)
# threads = [
# threading.Thread( # add the data enqueueing threads
# target=filename_load_function,
# args=(sess, supervisor),
# kwargs={ 'input_filepaths': inputs[ 'filepaths_list' ],
# 'input_placeholder': inputs[ 'input_placeholder' ],
# 'target_placeholder': inputs[ 'target_placeholder' ],
# 'mask_placeholder': inputs[ 'mask_placeholder' ],
# 'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
# 'rs_dim': cfg['input_dim'],
# 'enqueue_op': inputs[ 'enqueue_op' ],
# 'data_idx_dequeue_op': inputs[ 'next_idx_op' ],
# 'is_training': is_training,
# 'cfg': cfg } )
# for i in xrange( cfg['num_read_threads'] ) ]
# threads.append(
# threading.Thread( # the master thread to enqueue filenames
# target=queue_filename,
# args=(sess, supervisor),
# kwargs={
# 'input_filepaths': inputs[ 'filepaths_list' ],
# 'data_idx_placeholder': inputs[ 'next_idx_placeholder' ],
# 'enqueue_op': inputs[ 'filename_enqueue_op' ],
# 'is_training': is_training,
# 'cfg': cfg } ) )
# for t in threads: t.start()
# return threads
# else:
# def data_prefetch_threads_init_fn( sess, supervisor ):
# threads = [
# threading.Thread(
# target=inputs[ 'load_and_enqueue' ],
# args=(sess, supervisor),
# kwargs={ 'input_filepaths': inputs[ 'filepaths_list' ],
# 'input_placeholder': inputs[ 'input_placeholder' ],
# 'target_placeholder': inputs[ 'target_placeholder' ],
# 'mask_placeholder': inputs[ 'mask_placeholder' ],
# 'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
# 'rs_dim': cfg['input_dim'],
# 'enqueue_op': inputs[ 'enqueue_op' ],
# 'is_training': is_training,
# 'cfg': cfg } )
# for i in xrange( cfg['num_read_threads'] ) ]
# for t in threads: t.start()
# return threads
# return data_prefetch_threads_init_fn
################
def get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=False ):
'''
Builds a function which, when called with (sess, supervisor), will
spin up a bunch of threads (exact # specified in cfg) that preload data.
That function returns all the threading.Thread's in a list.
'''
if use_filename_queue:
def data_prefetch_threads_init_fn( sess, supervisor ):
if 'num_input' in cfg and cfg['num_input'] and 'single_filename_to_multiple' not in cfg:
filename_load_function = load_ops.load_from_filename_queue_multiple
queue_filename = load_ops.enqueue_filenames_multiple
else:
filename_load_function = load_ops.load_from_filename_queue
queue_filename = load_ops.enqueue_filenames
print(filename_load_function)
print(queue_filename)
threads = [
threading.Thread( # add the data enqueueing threads
target=filename_load_function,
args=(sess, supervisor),
kwargs={ 'input_filepaths': inputs[ 'filepaths_list' ],
'input_placeholder': inputs[ 'input_placeholder' ],
'target_placeholder': inputs[ 'target_placeholder' ],
'mask_placeholder': inputs[ 'mask_placeholder' ],
'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
'rs_dim': cfg['input_dim'],
'enqueue_op': inputs[ 'enqueue_op' ],
'data_idx_dequeue_op': inputs[ 'next_idx_op' ],
'is_training': is_training,
'cfg': cfg } )
for i in range( cfg['num_read_threads'] ) ]
threads.append(
threading.Thread( # the master thread to enqueue filenames
target=queue_filename,
args=(sess, supervisor),
kwargs={
'input_filepaths': inputs[ 'filepaths_list' ],
'data_idx_placeholder': inputs[ 'next_idx_placeholder' ],
'enqueue_op': inputs[ 'filename_enqueue_op' ],
'is_training': is_training,
'cfg': cfg } ) )
for t in threads: t.start()
return threads
else:
def data_prefetch_threads_init_fn( sess, supervisor ):
from functools import partial
kwargs={ 'sess': sess,
'supervisor': supervisor,
'input_filepaths': inputs[ 'filepaths_list' ],
'step': cfg['num_read_threads'],
'unit_size' : inputs['unit_size'],
'num_samples_epoch': inputs[ 'num_samples_epoch' ],
'input_placeholder': inputs[ 'input_placeholder' ],
'target_placeholder': inputs[ 'target_placeholder' ],
'mask_placeholder': inputs[ 'mask_placeholder' ],
'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
'rs_dim': cfg['input_dim'],
'enqueue_op': inputs[ 'enqueue_op' ],
'is_training': is_training,
'cfg': cfg }
mapfunc = partial(inputs['load_and_enqueue'], **kwargs )
with concurrent.futures.ThreadPoolExecutor(max_workers=cfg['num_read_threads'] + 2) as executor:
result = executor.map(mapfunc, range(cfg['num_read_threads']))
# pool = GreenPool(size=cfg['num_read_threads'])
# threads = []
# for i in range( cfg['num_read_threads'] ):
# kwargs={ 'input_filepaths': inputs[ 'filepaths_list' ],
# 'seed': i,
# 'step': cfg['num_read_threads'],
# 'unit_size' : inputs['unit_size'],
# 'num_samples_epoch': inputs[ 'num_samples_epoch' ],
# 'input_placeholder': inputs[ 'input_placeholder' ],
# 'target_placeholder': inputs[ 'target_placeholder' ],
# 'mask_placeholder': inputs[ 'mask_placeholder' ],
# 'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
# 'rs_dim': cfg['input_dim'],
# 'enqueue_op': inputs[ 'enqueue_op' ],
# 'is_training': is_training,
# 'cfg': cfg }
# args = (sess, supervisor)
# threads.append(pool.spawn(inputs[ 'load_and_enqueue' ], *args, **kwargs))
# pool.waitall()
# threads = [
# threading.Thread(
# target=inputs[ 'load_and_enqueue' ],
# args=(sess, supervisor),
# kwargs={ 'input_filepaths': inputs[ 'filepaths_list' ],
# 'seed': i,
# 'step': cfg['num_read_threads'],
# 'unit_size' : inputs['unit_size'],
# 'num_samples_epoch': inputs[ 'num_samples_epoch' ],
# 'input_placeholder': inputs[ 'input_placeholder' ],
# 'target_placeholder': inputs[ 'target_placeholder' ],
# 'mask_placeholder': inputs[ 'mask_placeholder' ],
# 'data_idx_placeholder': inputs[ 'data_idx_placeholder' ],
# 'rs_dim': cfg['input_dim'],
# 'enqueue_op': inputs[ 'enqueue_op' ],
# 'is_training': is_training,
# 'cfg': cfg } )
# for i in range( cfg['num_read_threads'] ) ]
# for t in threads: t.start()
return result
return data_prefetch_threads_init_fn
def end_data_loading_and_sess( training_runners ):
""" Run after request_training_end """
#training_runners[ 'coord' ].join( training_runners[ 'threads' ] )
print('joined threads and done training! :)')
training_runners[ 'sess' ].close()
def request_data_loading_end( training_runners ):
""" Run after start_data_prefetch_threads """
print('Requesting coordinator to stop.')
training_runners[ 'coord' ].request_stop()
##################################
# Transferring Input workers #
def load_filepaths_list( filenames_filepath ):
"""
Reads in the list of filepaths from the given fname
Args:
fname: A path to a file containing a list of filepaths.
May be pickled or json.
Returns:
A List of filenames
"""
ext = os.path.splitext( filenames_filepath )[1]
if ext == '.json':
with open( filenames_filepath, 'r' ) as fp:
data_sources = json.load( fp )
elif ext == '.npy':
with open( filenames_filepath, 'rb' ) as fp:
data_sources = np.load( fp )
else:
with open( filenames_filepath, 'rb' ) as fp:
data_sources = pickle.load( fp )
return data_sources
def get_data_prefetch_threads_init_fn_transfer( inputs, cfg, is_training=False, use_filename_queue=False ):
'''
Builds a function which, when called with (sess, supervisor), will
spin up a bunch of threads (exact # specified in cfg) that preload data.
That function returns all the threading.Thread's in a list.
'''
##############################################################
# For kwargs, add representation file as additional input #
def data_prefetch_threads_init_fn( sess, supervisor ):
from functools import partial
import pickle
if 'multiple_input_tasks' in cfg:
with open(inputs['list_of_fileinfos'], 'rb') as f:
print(inputs['list_of_fileinfos'])
fileinfos = np.load(inputs['list_of_fileinfos'])
fileinfo_to_fileinfo_idx = {str(fi.decode('utf-8')): i for i, fi in enumerate(fileinfos)}
fileinfo_to_task_to_representation = {}
for rep_file_path in inputs['representations_file']:
# cfg['multiple_input_tasks']:
print(rep_file_path)
# with open(inputs['representations_file'].format(task=task), 'rb') as f:
with open(rep_file_path, 'rb') as f:
representations = pickle.load(f)
representation_idx_to_fileinfo_idx = representations['file_indexes']
representations = representations['representations']
fileinfo_to_task_to_representation[rep_file_path] = {}
for representation_idx, fileinfo_idx in enumerate(representation_idx_to_fileinfo_idx):
if fileinfo_idx < len(fileinfos):
fileinfo_to_task_to_representation[rep_file_path][fileinfos[fileinfo_idx].decode('utf-8')] = representations[representation_idx]
print("Prepared mapping from fileinfo to representation.")
# Define mapping fn
print("----------------------------------")
print('filenames len:', len(fileinfos))
print("----------------------------------")
def fileinfo_to_representation_fn(fileinfo):
assert type(fileinfo) is str
#if task == 'RANDOM':
# task = random.choice(inputs['representations_file'])
# assert fileinfo in fileinfo_to_task_to_representation[task]
list_of_rep = []
for t in inputs['representations_file']:
rep = fileinfo_to_task_to_representation[t][fileinfo]
if 16 not in rep.shape:
rep = np.reshape(rep, (16,16,-1))
list_of_rep.append(rep)
return
|
np.concatenate(list_of_rep, axis=-1)
|
numpy.concatenate
|
import numpy as np
from scipy.stats import chi2, multivariate_normal, norm
from scipy.special import erfinv
import pdb, traceback, sys
import time
class GlobalModelStruct:
def __init__(self, d, lambda_, sigma):
self.XTX = np.zeros([d, d])
self.Xy = np.zeros(d)
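        # Gaussian posterior over the shared parameter vector under a N(0, lambda_^{-1} I) prior with noise sigma:
        #   Covariance = (lambda_ * I + X^T X / sigma^2)^{-1},   Mean = Covariance @ (X^T y / sigma^2)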
self.Covariance = np.linalg.inv(lambda_ * np.identity(n=d) + self.XTX / sigma**2)
self.Mean = np.dot(self.Covariance, self.Xy / sigma**2)
self.numDataPoint = 0
# self.v = sigma * np.sqrt(9*d*np.log(self.numDataPoint / delta))
self.d = d
self.lambda_ = lambda_
self.sigma = sigma
def removeUserSS(self, ADelta, bDelta, numDataPointDelta):
self.XTX -= ADelta
self.Xy -= bDelta
self.Covariance = np.linalg.inv(self.lambda_ * np.identity(n=self.d) + self.XTX / self.sigma**2)
self.Mean = np.dot(self.Covariance, self.Xy / self.sigma**2)
self.numDataPoint -= numDataPointDelta
# self.v = self.sigma * np.sqrt(9 * self.d * np.log(self.numDataPoint / self.delta))
def addUserSS(self, ADelta, bDelta, numDataPointDelta):
self.XTX += ADelta
self.Xy += bDelta
self.Covariance = np.linalg.inv(self.lambda_ * np.identity(n=self.d) + self.XTX / self.sigma**2)
self.Mean = np.dot(self.Covariance, self.Xy / self.sigma**2)
self.numDataPoint += numDataPointDelta
# self.v = self.sigma * np.sqrt(9 * self.d * np.log(self.numDataPoint / self.delta))
def log_predictive_datapoint(self, x, y): # compute the log prob density of data point x
size = len(x)
assert size == self.Mean.shape[0]
assert (size, size) == self.Covariance.shape
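        # Posterior predictive for Bayesian linear regression: y | x ~ N(x^T Mean, sigma^2 + x^T Covariance x);
        # the value returned below is the log density of that Gaussian evaluated at y.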
var = self.sigma**2 + np.matmul(np.matmul(x.transpose(), self.Covariance), x)
mean = x.transpose().dot(self.Mean)
norm_const = -0.5 * np.log((2 * np.pi * var))
res = -(float(y) - float(mean)) ** 2 / (2 * float(var))
return norm_const + res
def log_predictive(self, data_x, data_y):
num_data = len(data_x)
assert len(data_y) == num_data
assert data_x.shape == (num_data, self.d)
assert data_y.shape == (num_data,)
sum_logpdf = 0
for data_i in range(num_data):
sum_logpdf += self.log_predictive_datapoint(data_x[data_i], data_y[data_i])
return sum_logpdf
def sampleTheta(self, v=None):
# if v is None:
# v = self.v
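        # Thompson-sampling style draw from the (scaled) posterior: theta ~ N(Mean, v^2 * Covariance)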
theta = np.random.multivariate_normal(mean=self.Mean, cov=v**2 *self.Covariance)
return theta
class UserModelStruct:
def __init__(self, userID, dimension, alpha, lambda_, NoiseScale, delta_1, delta_2, createTime, eta, change_detection_alpha=0.01, memory_size=50):
self.userID = userID
self.d = dimension
self.alpha = alpha # use constant alpha, instead of the one defined in LinUCB
self.lambda_ = lambda_
self.delta_1 = delta_1
self.change_detection_alpha = change_detection_alpha
# LinUCB statistics
self.A = np.zeros([self.d, self.d])
self.b = np.zeros(self.d)
self.AInv = np.linalg.inv(self.A+lambda_ * np.identity(n=self.d))
self.NoiseScale = NoiseScale
self.update_num = 0 # number of times this user has been updated
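        # OFUL / LinUCB style confidence radius: sigma * sqrt(d*log(1 + n/(d*lambda)) + 2*log(1/delta_1)) + sqrt(lambda)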
self.alpha_t = self.NoiseScale * np.sqrt(
self.d * np.log(1 + self.update_num / (self.d * self.lambda_)) + 2 * np.log(1 / self.delta_1)) + np.sqrt(
self.lambda_)
self.memory_size = memory_size
# history data
self.X = np.zeros((0, self.d))
self.y = np.zeros((0,))
self.UserTheta = np.zeros(self.d)
self.UserThetaNoReg = np.zeros(self.d)
self.rank = 0
self.createTime = createTime # global time when model is created
self.time = 0 # number of times this user has been served
# for dLinUCB's change detector
self.delta_2 = delta_2
self.eta = eta # upper bound of gaussian noise
self.detectedChangePoints = [0]
self.failList = []
def resetLocalUserModel(self, createTime):
self.outDated = False
self.A = np.zeros([self.d, self.d])
self.b = np.zeros(self.d)
self.AInv = np.linalg.inv(self.A+self.lambda_ * np.identity(n=self.d))
self.UserTheta = np.zeros(self.d)
self.UserThetaNoReg = np.zeros(self.d)
self.X = np.zeros((0, self.d))
self.y = np.zeros((0,))
self.rank = 0
self.update_num = 0 # number of times this user has been updated
self.alpha_t = self.NoiseScale * np.sqrt(
self.d * np.log(1 + self.update_num / (self.d * self.lambda_)) + 2 * np.log(1 / self.delta_1)) + np.sqrt(
self.lambda_)
self.createTime = createTime # global time when model is created
self.failList = []
def updateLocalUserModel(self, articlePicked_FeatureVector, click):
# update LinUCB statistics
self.A += np.outer(articlePicked_FeatureVector, articlePicked_FeatureVector)
self.b += articlePicked_FeatureVector * click
self.AInv = np.linalg.inv(self.A + self.lambda_ * np.identity(n=self.d))
self.UserTheta = np.dot(self.AInv, self.b)
self.UserThetaNoReg = np.dot(np.linalg.pinv(self.A), self.b)
assert self.d == articlePicked_FeatureVector.shape[0]
self.update_num += 1.0
# update observation history
self.X = np.concatenate((self.X, articlePicked_FeatureVector.reshape(1, self.d)), axis=0)
self.y = np.concatenate((self.y, np.array([click])),axis=0)
if self.memory_size is not None:
if len(self.X) > self.memory_size:
self.X = self.X[-self.memory_size:]
self.y = self.y[-self.memory_size:]
self.rank = np.linalg.matrix_rank(self.X)
self.alpha_t = self.NoiseScale * np.sqrt(
self.d * np.log(1 + self.update_num / (self.d * self.lambda_)) + 2 * np.log(1 / self.delta_1)) + np.sqrt(
self.lambda_)
def getCB(self, x):
var = np.sqrt(np.dot(np.dot(x, self.AInv), x))
if self.alpha != -1:
return self.alpha * var
else:
return self.alpha_t * var
def getInstantaneousBadness(self, articlePicked, click):
# compute badness on (articlePicked, click)
mean = np.dot(self.UserTheta, articlePicked.contextFeatureVector[:self.d])
rewardEstimationError = np.abs(mean - click)
if rewardEstimationError <= self.getCB(articlePicked.contextFeatureVector[:self.d]) + self.eta:
e = 0
else:
e = 1
# Update failList
self.failList.append(e)
return e
# # compute badness on (articlePicked, click)
# if method == "ConfidenceBound": # This is the test statistic used in dLinUCB
# mean = np.dot(self.UserTheta, articlePicked.contextFeatureVector[:self.d])
# rewardEstimationError = np.abs(mean - click)
# if rewardEstimationError <= self.getCB(articlePicked.contextFeatureVector[:self.d]) + self.eta:
# e = 0
# else:
# e = 1
# elif method == "ChiSquare":
# if self.rank < self.d:
# e = 0
# else:
# x = articlePicked.contextFeatureVector[:self.d]
# if self.rank < self.d:
# e = 0
# else:
# mean = np.dot(self.UserThetaNoReg, x)
# rewardEstimationError = (mean - click)**2
# rewardEstimationErrorSTD = self.NoiseScale**2 * (1 + np.dot(np.dot(x, np.linalg.pinv(self.A-self.lambda_ * np.identity(n=self.d))), x))
# df1 = 1
#
# chiSquareStatistic = rewardEstimationError / rewardEstimationErrorSTD
# p_value = chi2.sf(x=chiSquareStatistic, df=df1)
# if p_value <= self.change_detection_alpha: # upper bound probability of false alarm
# e = 1
# else:
# e = 0
# # Update failList
# self.failList.append(e)
# return e
def detectChangeBasedOnBadness(self, ObservationInterval):
if len(self.failList) < ObservationInterval:
ObservationNum = float(len(self.failList))
badness = sum(self.failList) / ObservationNum
else:
ObservationNum = float(ObservationInterval)
badness = sum(self.failList[-ObservationInterval:]) / ObservationNum
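        # badness_CB below is a Hoeffding-style deviation bound on the empirical badness over ObservationNum samples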
badness_CB = np.sqrt(np.log(1.0 / self.delta_2) / (2.0 * ObservationNum))
# test badness against threshold
if badness > self.delta_1 + badness_CB:
changeFlag = 1
else:
changeFlag = 0
return changeFlag
class CoDBand:
def __init__(self, v, d, lambda_, NoiseScale, alpha_prior, tau_cd, memory_size, alpha=-1, delta_1 = 1e-2 , delta_2 = 1e-1, eta=0.3, disable_change_detector=False, true_alpha_0 = None):
self.globalModels = []
self.globalModelPeriodCounter = [] # num of stationary periods associated with each global model
self.totalPeriodCounter = 0 # total num of stationary periods
self.userID2globalModelIndex = {}
self.userModels = {} # userID : UserModelStruct
self.dimension = d
self.lambda_ = lambda_
self.NoiseScale = NoiseScale
self.v = v
## params for DP mixture
self.alpha_prior = alpha_prior
self.true_alpha_0 = true_alpha_0
if self.true_alpha_0 is not None:
self.alpha_0 = self.true_alpha_0
else:
self.alpha_0 = 1
# np.random.gamma(self.alpha_prior['a'],self.alpha_prior['b'])
self.memory_size = memory_size
## Params for change detector
self.disable_change_detector = disable_change_detector # disable change detector
self.alpha = alpha # used in CB
self.tau_cd = tau_cd
self.delta_1 = delta_1
self.delta_2 = delta_2
self.eta = eta # only used if we detect change using dLinUCB's test statistic
self.global_time = 0
self.CanEstimateUserPreference = True
self.CanEstimateUserCluster = True
def decide(self, pool_articles, userID):
# print("==== arm selection ====")
if userID not in self.userModels:
# initialize user model struct for the new user
self.userModels[userID] = UserModelStruct(userID=userID, dimension=self.dimension, alpha=self.alpha, lambda_=self.lambda_, NoiseScale=self.NoiseScale,
delta_1=self.delta_1, delta_2=self.delta_2, createTime=self.global_time, memory_size=self.memory_size,
eta=self.eta)
if self.userModels[userID].update_num == 0:
# sample a model index for the new user or the old user that has been detected to have changed
# either way they represent the beginning of a new stationary period
self.sample_z(userID)
self.cluster = []
for k, v in self.userID2globalModelIndex.items():
if v == self.userID2globalModelIndex[userID]:
self.cluster.append(self.userModels[k])
maxPTA = float('-inf')
articlePicked = None
thetaTilde = self.globalModels[self.userID2globalModelIndex[userID]].sampleTheta(self.v)
for x in pool_articles:
x_pta = np.dot(thetaTilde, x.contextFeatureVector)
if maxPTA < x_pta:
articlePicked = x
maxPTA = x_pta
return articlePicked
def updateParameters(self, articlePicked, click, userID):
self.global_time += 1
self.userModels[userID].time += 1
# print("==== update param ====")
self.userModels[userID].getInstantaneousBadness(articlePicked, click) # "ChiSquare" or "ConfidenceBound"
self.userModels[userID].updateLocalUserModel(articlePicked.contextFeatureVector, click)
self.globalModels[self.userID2globalModelIndex[userID]].addUserSS(
np.outer(articlePicked.contextFeatureVector, articlePicked.contextFeatureVector),
articlePicked.contextFeatureVector * click, 1)
# Collapsed Gibbs Sampler for global model index z and alpha_0
self.sample_z(userID) # two options: 1) only sample z for current user; 2) sample z for all users
# print("number of global models {}".format(len(self.globalModels)))
if self.true_alpha_0 is None:
self.sample_alpha_0()
if self.disable_change_detector:
changeFlag = False
else:
changeFlag = self.userModels[userID].detectChangeBasedOnBadness(self.tau_cd)
if changeFlag:
# reset user model
print("change detected!")
self.userModels[userID].resetLocalUserModel(self.global_time)
self.userModels[userID].detectedChangePoints.append(self.userModels[userID].time)
def sample_z(self, userID):
if self.userModels[userID].update_num == 0:
# if new user model
# sample model index according to the popularity/frequency of global models
# this is for new user or user that has just changed
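            # Chinese-restaurant-process style prior: existing global models are weighted by their period counts,
            # while a brand-new model is weighted by the concentration parameter alpha_0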
pp = np.log(np.append(self.globalModelPeriodCounter, self.alpha_0))
pp = np.exp(pp - np.max(pp))
pp = pp / np.sum(pp)
z = np.random.choice(len(self.globalModels)+1, size=None, replace=True, p=pp)
if z == len(self.globalModels):
# new model is sampled
new_global_model = GlobalModelStruct(d=self.dimension, lambda_=self.lambda_, sigma=self.NoiseScale)
self.globalModels.append(new_global_model)
self.globalModelPeriodCounter.append(0)
self.userID2globalModelIndex[userID] = z
self.globalModelPeriodCounter[z] += 1
self.totalPeriodCounter += 1
else:
# if not new user model, collapsed Gibbs sampler for model index
temp_z = self.userID2globalModelIndex[userID]
# remove counter and sufficient statistics (ss) of userID's current period
self.globalModels[temp_z].removeUserSS(self.userModels[userID].A, self.userModels[userID].b, self.userModels[userID].update_num)
self.globalModelPeriodCounter[temp_z] -= 1
self.totalPeriodCounter -= 1
if self.globalModelPeriodCounter[temp_z] == 0:
# remove this global model
del self.globalModels[temp_z]
del self.globalModelPeriodCounter[temp_z]
# update the model index in userID2globalModelIndex (decrement by 1)
for k, v in self.userID2globalModelIndex.items():
if v > temp_z:
self.userID2globalModelIndex[k] = v-1
elif v == temp_z:
# assert: no other user has temp_z as its model index
assert k == userID
pp = np.log(np.append(self.globalModelPeriodCounter, self.alpha_0))
for k in range(0, len(self.globalModels)):
pp[k] = pp[k] + self.globalModels[k].log_predictive(self.userModels[userID].X, self.userModels[userID].y)
pp[len(self.globalModels)] += self.log_predictive_by_prior(self.userModels[userID].X, self.userModels[userID].y)
pp = np.exp(pp - np.max(pp))
pp = pp / np.sum(pp)
z = np.random.choice(len(self.globalModels) + 1, size=None, replace=True, p=pp)
if z == len(self.globalModels):
# new model is sampled
new_global_model = GlobalModelStruct(d=self.dimension, lambda_=self.lambda_, sigma=self.NoiseScale)
self.globalModels.append(new_global_model)
self.globalModelPeriodCounter.append(0)
self.userID2globalModelIndex[userID] = z
self.globalModelPeriodCounter[z] += 1
self.totalPeriodCounter += 1
self.globalModels[z].addUserSS(self.userModels[userID].A, self.userModels[userID].b, self.userModels[userID].update_num)
def sample_alpha_0(self, num_iters=20):
# Escobar and West 1995
eta = np.random.beta(self.alpha_0 + 1, self.totalPeriodCounter, 1)
pi_eta = 1 / (1 + (self.totalPeriodCounter*(self.alpha_prior['b']-np.log(eta))) / (self.alpha_prior['a']+len(self.globalModels)-1) )
for iter in range(num_iters):
if
|
np.random.random_sample()
|
numpy.random.random_sample
|
# -*- coding: utf-8 -*-
r"""
This example is a complete study of a crystal analyzer. The generator
``plot_generator()`` is rather complex and therefore needs some explanation.
The main loop changes the source type. After a flat-energy source has been
ray-traced (after ``yield``), the widths of *z* and energy distributions are
saved. Then a single line source is ray-traced and provides the width of *z*
distribution. From these 3 numbers we calculate energy resolution and, as a
check, ray-trace a third source with 7 energy lines with a spacing equal to the
previously calculated energy resolution. The source sizes, axis limits, number
of iterations etc. were determined experimentally and are given by lists in the
upper part of the script. The outputs are the plots and a text file with the
resulting energy resolutions."""
__author__ = "<NAME>"
__date__ = "08 Mar 2016"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
#import xrt.backends.raycing.apertures as ra
import xrt.backends.raycing.oes as roe
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.materials as rm
import xrt.backends.raycing.screens as rsc
import xrt.plotter as xrtp
import xrt.runner as xrtr
showIn3D = False
crystalMaterial = 'Si'
if crystalMaterial == 'Si':
d111 = 3.1354161
elif crystalMaterial == 'Ge':
d111 = 3.2662725
else:
raise
crystal = rm.CrystalDiamond((4, 4, 4), d111/4, elements=crystalMaterial)
#numiter = 16000
numiter = 60
Rm = 1e9 # meridional radius, mm
#Rs = 1000 # tmp sagittal radius, mm
Rs = 250 # tmp sagittal radius, mm
dphi = 0
beamV = 0.1/2.35 # vertical beam size
beamH = 0.1/2.35 # horizontal beam size
yAxesLim = 20
dxCrystal = 100.
dyCrystal = 100.
#dyCrystal = 50.
#dxCrystal = 300.
#dyCrystal = 70.
yAxisLim = 32 # Mythen length = 64 mm
yAxis1Line = -1.0, 0.2
yAxis7Lines = -1.0, 1.0
isDiced = True
isElliptical = True
elongation = 1.5
thetaDegree = 60
if thetaDegree == 40:
if Rs > 800:
eAxisFlat = 7.5e-3 # @ 40 deg, R=1000
else:
eAxisFlat = 3e-2 # @ 40 deg
elif thetaDegree == 60:
if Rs > 800:
eAxisFlat = 6.8e-3 # @ 60 deg, R=1000
else:
eAxisFlat = 2.0e-2 # @ 60 deg
elif thetaDegree == 80:
if Rs > 800:
eAxisFlat = 2.6e-3 # @ 80 deg, R=1000
else:
eAxisFlat = 9.0e-3 # @ 80 deg
else:
raise
class EllipticalSagittalCylinderParam(roe.OE):
def __init__(self, *args, **kwargs):
kwargs = self.pop_kwargs(**kwargs)
roe.OE.__init__(self, *args, **kwargs)
self.isParametric = True
self.reset_pqroll(self.p, self.q, self.roll)
def reset_pqroll(self, p=None, q=None, roll=None):
"""This method allows re-assignment of *p*, *q* and *roll* from
outside of the constructor.
"""
if p is not None:
self.p = p
if q is not None:
self.q = q
if roll is not None:
self.roll = roll
gamma = np.arctan2((self.p + self.q)*np.sin(self.roll),
(self.q - self.p)*np.cos(self.roll))
self.cosGamma = np.cos(gamma)
self.sinGamma = np.sin(gamma)
self.x0 = (self.q - self.p)/2. * np.sin(self.roll)
self.z0 = (self.q + self.p)/2. * np.cos(self.roll)
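        # semi-axes of the ellipse that has the source and the focus at its two foci: a = (p + q) / 2, with b
        # derived from p, q and the roll angle (editorial note, inferred from the expressions below)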
self.ellipseA = (self.q + self.p)/2.
self.ellipseB = np.sqrt(self.q * self.p) * np.cos(self.roll)
def pop_kwargs(self, **kwargs):
self.p = kwargs.pop('p') # source-to-mirror
self.q = kwargs.pop('q') # mirror-to-focus
return kwargs
def xyz_to_param(self, x, y, z):
xNew, zNew = raycing.rotate_y(x - self.x0, z - self.z0, self.cosGamma,
self.sinGamma)
return xNew, np.arctan2(y, -zNew),
|
np.sqrt(y**2 + zNew**2)
|
numpy.sqrt
|
import numpy as np
import copy, re, sys
from itertools import combinations
from fractions import Fraction as Q
import importlib
spam_spec = importlib.util.find_spec("solver")
found_module = spam_spec is not None
if found_module:
from solver.modules.gauss_matrix_solving import gauss_solve
else:
from gauss_matrix_solving import gauss_solve
def prvect(v):
"""Виводить вектор у звичайному вигляді, без технічних символів та слів."""
print("( ", end="")
for i in v:
print(i, end=" ")
print(")")
def prmatr(m):
"""Виводить матрицю у звичайному вигляді, без технічних символів та слів."""
print("[")
for i in m:
prvect(i)
print("]")
def prself(s):
for i in vars(s).items():
print(i)
class InputParser:
"""Клас для оброблення вхідної інформації з файлу або об'єкту.
Повертає оброблену інформацію через метод get_data."""
op_list = ["<=", ">=", "<", ">", "=", "arbitrary"]
def __init__(self, data_type, data, mute):
inner_text = ""
if data_type == "file":
with open(data) as f:
inner_text = f.read()
elif data_type == "string":
inner_text = data
elif data_type == "object":
cont = data
self.first_line_vect = list(map(Q, cont["obj_func"]))
self.task_type = cont["task_type"]
self.last_conditions = cont["last_cond"]
for i in range(len(self.last_conditions)):
self.last_conditions[i][1] = Q(self.last_conditions[i][1])
for i in range(len(cont["matrix"])):
cont["matrix"][i] = list(map(Q, cont["matrix"][i]))
matr_len = 0
for i in cont["matrix"]:
if len(i) > matr_len:
matr_len = len(i)
for i in range(len(cont["matrix"])):
if len(cont["matrix"][i]) < matr_len:
cont["matrix"][i] = cont["matrix"][i] + ([Q(0)] * (matr_len - len(cont["matrix"][i])))
if len(self.first_line_vect) < matr_len:
self.first_line_vect = self.first_line_vect + [Q(0)] * (matr_len - len(self.first_line_vect))
self.first_line_vect = np.array(self.first_line_vect)
self.main_matrix = np.array(cont["matrix"])
self.inequalities = cont["ineq"]
self.constants_vector = list(map(Q, cont["constants"]))
self.expected_error = ""
self.result = ""
self.result_list = ""
return
else:
print("Unknown format of input data")
inner_text = inner_text.replace('\t', '').replace(' ', '').split("\n")
        # Process the first line, which contains the objective function
counter = 0
first_line = inner_text[counter]
while(first_line == '' or first_line[0] == '#'):
counter += 1
first_line = inner_text[counter]
first_line = InputParser._format_to_math_form(first_line)
self.task_type, self.first_line_vect = self._parse_first_line(first_line)
last_cond = ''
raw_matrix = []
raw_constants = []
self.inequalities = []
for line in inner_text[counter + 1:]:
if line == '' or line[0] == "#":
continue
elif line[:3] == ">>>":
last_cond = ""
break
elif line[0] != '|':
last_cond = line
break
            # Process the constraints and fill the corresponding matrix
line = InputParser._format_to_math_form(line[1:])
for i in InputParser.op_list:
if i in line:
self.inequalities.append(i)
break
curr_sym = self.inequalities[len(self.inequalities)-1]
line = line[0] + line[1:line.find(curr_sym)].replace("-", "+-") + line[line.find(curr_sym):]
parts_arr, constant = line[:line.find(curr_sym)].split("+"), line[line.find(curr_sym)+len(curr_sym):]
raw_constants.append(Q(constant))
raw_dict = {}
for i in parts_arr:
num, ind = i[:-1].split("x[")
raw_dict[int(ind)] = Q(num)
raw_list = [0] * max(raw_dict, key=int)
for k, v in raw_dict.items():
raw_list[k - 1] = v
raw_matrix.append(raw_list)
self.var_quantity = 0
for row in raw_matrix:
if len(row) > self.var_quantity:
self.var_quantity = len(row)
for k, row in enumerate(raw_matrix):
if len(row) < self.var_quantity:
for i in range(len(row), self.var_quantity):
raw_matrix[k].append(Q(0, 1))
self.main_matrix = np.array(raw_matrix)
self.constants_vector = np.array(raw_constants)
        # Process the line with the variable bounds
self.last_conditions = self._parse_last_cond(last_cond)
        # Process the line with the expected solution (used only in testing)
self.result_list = []
self.result = ""
self.expected_error = ""
counter = inner_text.index(last_cond) + 1
last_line = ""
if counter < len(inner_text):
last_line = inner_text[counter]
while(counter < len(inner_text) - 1 and last_line[:3] != '>>>'):
counter += 1
last_line = inner_text[counter]
if counter >= len(inner_text) - 1 and last_line[:3] != '>>>':
return
raw_list, result, expected_error = self._parse_results(last_line)
if raw_list != "":
for i in raw_list.split(','):
self.result_list.append(Q(i))
self.result = result
self.expected_error = expected_error
@staticmethod
def _format_to_math_form(line):
"""Видаляє з рядка всі пробіли та додає одиничні множники де потрібно."""
if line[0] == "x":
line = "1" + line
return line.replace(' ', '').replace('-x', '-1x').replace('+x', '+1x')
def _parse_first_line(self, line):
"""Отримує строку та обробляє її текст як інформацію про цільову функцію.
Форма виводу: |numpy array of Qs| [ { factor's fraction }, ... ].
Індекс кожного Q відповідає декрементованому індексу відповідної змінної.
Не підтримує некоректну вхідну інформацію та константи в цільовій функції."""
raw_array = {} # Результуючий масив, але невпорядкований
# Split the string on "+" and store the objective-function model in first_line_vect
# The task_type variable holds the string "max" or "min", depending on the input
line, task_type = line[:line.find("=>")], line[line.find("=>")+2:]
line = line[0] + line[1:].replace('-', '+-')
op_arr = line.split('+')
for i in op_arr:
num, index = i[:-1].split("x[")
raw_array[int(index)] = Q(num)
first_line_vect = [Q(0,1)] * max(raw_array, key=int)
for k, v in raw_array.items():
first_line_vect[k - 1] = v
return task_type, np.array(first_line_vect)
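# Illustrative example (added; assumes Q is fractions.Fraction, as elsewhere in the module):
#   >>> parser._parse_first_line("2x[1]-3x[2]=>min")
#   ('min', array([Fraction(2, 1), Fraction(-3, 1)], dtype=object))
# The coefficient of x[i] lands at index i-1 and the text after "=>" becomes the task type.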
def _parse_last_cond(self, line):
"""Отримує строку та обробляє її як таку, що містить інформацію про загальні умови.
Форма виводу: |list of tuples| [ ( { index of inequality sign }, { condition's fraction } ), ... ].
Індекс кожної пари відповідає декрементованому індексу відповідної змінної.
Змінні не мають бути написані зі знаком "-"."""
if line == "":
return [["arbitrary", Q(0)]] * self.var_quantity
cond_list = line.split(",")
raw_dict = {}
for expr in cond_list:
op_index = 0
for i in InputParser.op_list:
if i in expr:
op_sym = i
break
f_pair = [op_sym, Q(expr[expr.find(op_sym)+len(op_sym):])]
raw_dict[int(expr[2:expr.find(op_sym)-1])] = f_pair
last_conditions = [[InputParser.op_list[5], Q(0)]] * max(raw_dict, key=int)
for k, v in raw_dict.items():
last_conditions[k - 1] = v
complete_list = [["arbitrary", Q(0)]] * self.var_quantity
complete_list[:len(last_conditions)] = last_conditions
return complete_list
def _parse_results(self, line):
"""Отримує строку так обробляє її як таку, що містить інформацію про бажаний результат.
Інформація, отримана з цього методу використовується у тестуванні.
Форма виводу: |tuple| ( { масив значень відповідних змінних }, { значення цільової функції } )."""
if not "(" in line:
return "", "", line[3:]
return line[line.find("(") + 1:line.find(")")], line[line.find("|") + 1:], ""
def get_data(self):
"""Повертає об'єкт з усією обробленою інформацією, що була отримана."""
return {
"objective_function": self.first_line_vect,
"task_type": self.task_type,
"last_conditions": self.last_conditions,
"matrix": self.main_matrix,
"inequalities": self.inequalities,
"constants": self.constants_vector,
"expected_vect": self.result_list,
"expected_result": self.result,
"error": self.expected_error
}
def print_first_line(self):
"""Виводить вектор цільової функції."""
print("First line: {}\n".format(self.first_line_vect))
def print_task_type(self):
"""Виводить тип задачі."""
print("Task type: {}\n".format(self.task_type))
def print_last_cond(self):
"""Виводить вектор обмежень змінних."""
print("Last line: {}\n".format(self.last_conditions))
def print_main_matrix(self):
"""Виводить основну матрицю."""
print("Matrix: {}\n".format(self.main_matrix))
def print_constants(self):
"""Виводить вектор вільних змінних."""
print("Constants' vector: {}\n".format(self.constants_vector))
def print_inequalities(self):
"""Виводить вектор знаків рівності або нерівності з системи початкових умов."""
print("Inequalities' vector: {}\n".format(self.inequalities))
# ------ Solver class section ------
class Solver:
"""Основний клас, що містить спільні для всіх способів розв'язання методи.
Є базовим для класів, які відповідають різним способам розв'язання."""
def __init__(self, data_type, data, mute):
reader_data = ""
reader_data = InputParser(data_type, data, mute).get_data()
self.objective_function = reader_data["objective_function"]
self.task_type = reader_data["task_type"]
self.last_conditions = reader_data["last_conditions"]
self.matrix = reader_data["matrix"]
self.inequalities = reader_data["inequalities"]
self.constants = reader_data["constants"]
if data_type != "object":
self.expected_vect = np.array(reader_data["expected_vect"])
self.expected_result = Q(reader_data["expected_result"]) if reader_data["expected_result"] != "" else ""
self.expected_error = reader_data["error"]
self.result_error = ""
self.mute = mute
self.col_num = 0
self.row_num = 0
self.basis = []
self.basis_koef = np.array([])
self.obj_shift = Q(0)
self.artificial_variables = []
self.writer = Logger(self.mute)
self.writer.initiate("initial_info")
was_max = False
if self.task_type == "max":
was_max = True
self.writer.log(info=reader_data, is_max=was_max)
if was_max:
self.objective_function *= Q(-1)
def _check_if_unitary(self, vect):
"""Перевіряє чи є вектор унітарним (всі координати нульові, окрім однієї)."""
found_elem = False
for i in vect:
if i != 0:
if not found_elem:
found_elem = True
else:
return False
return found_elem
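# Worked example (added for illustration): the check succeeds only when exactly one coordinate is non-zero:
#   >>> self._check_if_unitary([0, 0, 3, 0])  # True
#   >>> self._check_if_unitary([1, 2, 0])     # False (two non-zero elements)
#   >>> self._check_if_unitary([0, 0, 0])     # False (no non-zero element)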
def _make_basis_column(self):
"""Зводить задану в атрибутах колонку до одиничного вектора з одиницею на місці обраного в атрибутах рядка."""
self.writer.initiate("basis_col")
if self.writer.task_type == "simple":
self.thetas = ["-"] * len(self.matrix)
else:
self.thetas = ["-"] * len(self.objective_function)
prev_table = copy.deepcopy(self._get_all_table_data())
operations_list = [1] * len(self.matrix)
if self.matrix[self.row_num][self.col_num] == 0:
raise SolvingError("В якості ведучого елемента вибрано нуль, подальші розрахунки неможливі")
elif self.matrix[self.row_num][self.col_num] != 1:
operations_list[self.row_num] = self.matrix[self.row_num][self.col_num]
self.constants[self.row_num] /= self.matrix[self.row_num][self.col_num]
self.matrix[self.row_num] /= self.matrix[self.row_num][self.col_num]
chosen_row = self.matrix[self.row_num]
for i in [x for x in range(len(self.matrix)) if x != self.row_num]:
operations_list[i] = self.matrix[i][self.col_num]
self.constants[i] -= self.constants[self.row_num] * self.matrix[i][self.col_num]
self.matrix[i] -= chosen_row * self.matrix[i][self.col_num]
was_changed = True
self._set_basis_koef()
self.writer.log(
p_table=prev_table,
table=self._get_all_table_data(),
op=operations_list,
row=self.row_num,
col=self.col_num
)
def _make_constants_positive(self):
"""Робить вільні члени невід'ємними.
Не підтримуються строгі нерівності."""
for i in range(len(self.matrix)):
if self.inequalities[i] == "<" or self.inequalities[i] == ">":
raise SolvingError("Строгі нерівності не підтримуються")
if self.constants[i] < 0:
self.constants[i] *= Q(-1)
self.matrix[i] *= Q(-1)
if self.inequalities[i] != "=":
self.inequalities[i] = "<=" if self.inequalities[i] == ">=" else ">="
def _make_conditions_equalities(self, canonical=False):
"""Зводить всі нерівності умов до рівностей.
По замовучванню зводить систему до псевдоканонічної форми."""
was_changed = False
for i in range(len(self.inequalities)):
sign = 1
if self.inequalities[i] == ">=":
if not canonical:
self.matrix[i] *= Q(-1)
self.constants[i] *= Q(-1)
else:
sign *= -1
self.inequalities[i] = "<="
if self.inequalities[i] == "<=":
temp_matrix = []
for j in range(len(self.matrix)):
temp_matrix.append([Q(0)] * (len(self.matrix[0]) + 1))
temp_matrix[i][-1] = sign * Q(1)
temp_matrix = np.array(temp_matrix)
temp_matrix[:,:-1] = self.matrix
self.matrix = temp_matrix
self.inequalities[i] = "="
self.objective_function = np.append(self.objective_function, Q(0))
self.last_conditions.append([">=", Q(0)])
was_changed = True
if was_changed:
self.writer.initiate("inequalities")
self.writer.log(
objective_function = self.objective_function,
constant = self.obj_shift,
matrix = self.matrix,
constants = self.constants,
inequalities = self.inequalities,
task_type = "min",
last_cond = self.last_conditions
)
def _get_basis_vectors_nums(self):
"""Повертає список змінних, чиї вектори входять до одиничної підматриці матриці."""
self.writer.initiate("show_basis")
temp_matrix = self.matrix.T
result = [-1] * len(temp_matrix[0])
for i in range(len(temp_matrix)):
num = -1
for j in range(len(temp_matrix[i])):
if temp_matrix[i][j] != 0 and temp_matrix[i][j] != 1:
num = -1
break
if temp_matrix[i][j] == 1:
if num == -1:
num = j
else:
num = -1
break
if num > -1:
result[num] = i
if -1 in result:
self.writer.log(basis=None)
else:
self.writer.log(basis=result)
return result
def _set_basis_koef(self):
"""Оновлює порядкові номери та коефіцієнти базисних змінних в цільовій функції при переході до нового базису."""
self.basis[self.row_num] = self.col_num
if self.writer.task_type == "simple":
self.basis_koef[self.row_num] = self.objective_function[self.col_num]
def _expand_objective_function_if_needed(self):
"""Додає в цільову функцію штучні змінні з нульовим коефіцієнтом."""
diff = len(self.matrix[0]) - len(self.objective_function)
if diff > 0:
num = len(self.objective_function)
temp_array = [Q(0)] * (num + diff)
temp_array[:num] = self.objective_function
self.objective_function = np.array(temp_array)
def get_result(self):
"""Повертає результат обчислень"""
errors = ""
try:
self.solve()
except SolvingError as err:
errors = str(err).replace("\n", "<br>")
if errors == "":
return self.writer.get_logs()
return "{}<div>{}</div>".format(self.writer.get_logs(), errors)
def _normalize_conditions(self):
"""Зводить задачу до аналогічної, у якій всі змінні невід'ємні."""
self.writer.initiate("normalizing")
self.substitution_queue = []
self.arbitrary_pairs = []
for i in range(len(self.last_conditions)):
if len(self.last_conditions[i][0]) == 1:
return False
elif self.last_conditions[i][0] == "<=":
for j in range(len(self.matrix)):
self.matrix[j][i] *= -1
self.substitution_queue.insert(0, (i, "*=-1"))
self.objective_function[i] *= -1
self.last_conditions[i] = [">=", self.last_conditions[i][1] * -1]
self.writer.log(index=i, op="a")
if self.last_conditions[i][0] == ">=":
if self.last_conditions[i][1] != 0:
for j in range(len(self.matrix)):
self.constants[j] -= self.matrix[j][i] * self.last_conditions[i][1]
self.obj_shift += self.objective_function[i] * self.last_conditions[i][1]
self.substitution_queue.insert(0, (i, "+={}".format(self.last_conditions[i][1])))
self.writer.log(index=i, op="b", substitution=self.last_conditions[i][1])
self.last_conditions[i][1] = Q(0)
if self.last_conditions[i][0] == "arbitrary":
new_pair = i, len(self.matrix[0])
self.writer.log(index=i, op="c")
new_matrix = []
for j in range(len(self.matrix)):
new_matrix.append([Q(0)] * (len(self.matrix[0]) + 1))
for j in range(len(self.matrix)):
new_matrix[j][-1] = -self.matrix[j][i]
new_matrix = np.array(new_matrix)
new_matrix[:,:-1] = self.matrix
self.matrix = new_matrix
self.objective_function = np.append(self.objective_function, -self.objective_function[i])
self.last_conditions[i] = [">=", Q(0)]
self.last_conditions.append([">=", Q(0)])
self.arbitrary_pairs.append(new_pair)
if len(self.arbitrary_pairs) > 0 or len(self.substitution_queue) > 0:
self.writer.log(
matrix = self.matrix,
inequalities = self.inequalities,
constants = self.constants,
last_conditions = self.last_conditions,
objective_function = self.objective_function,
constant = self.obj_shift,
task_type = "min"
)
return True
def _get_all_table_data(self):
"""Повертає всю необхідну для виведення симплекс таблиці інформацію."""
return {
"matrix": self.matrix,
"objective_function": self.objective_function,
"basis": self.basis,
"basis_koef": self.basis_koef,
"constants": self.constants,
"deltas": self.deltas,
"thetas": self.thetas
}
def _cancel_subtitution(self):
"""Повертає початкові значення змінним, якщо відбулася заміна."""
self.writer.initiate("substitution")
self.final_result = [Q(0)] * len(self.matrix[0])
for i in range(len(self.basis)):
self.writer.log(ind=self.basis[i], val=self.constants[i])
self.final_result[self.basis[i]] = self.constants[i]
if self.task_type == "max":
self.writer.log(max=True)
self.objective_function *= -1
self.writer.log(sub_queue=self.substitution_queue)
for i in self.substitution_queue:
exec("self.final_result[i[0]]" + i[1])
if "*" in i[1]:
self.objective_function[i[0]] *= Q(i[1][2:])
for i in self.arbitrary_pairs:
self.writer.log(arb1=i[0], arb2=i[1])
self.final_result[i[0]] -= self.final_result[i[1]]
def _add_artificial_basis(self):
"""Створює одиничну підматрицю за допомогою штучних змінних М-методом."""
self.writer.initiate("artificial_basis")
M = np.amax(np.array(np.append(np.append(self.matrix, self.constants), self.objective_function))) + 1
for i in range(len(self.basis)):
if self.basis[i] == -1:
temp_matrix = []
for j in range(len(self.matrix)):
temp_matrix.append([Q(0)] * (len(self.matrix[0]) + 1))
temp_matrix[i][-1] = Q(1)
temp_matrix = np.array(temp_matrix)
temp_matrix[:,:-1] = self.matrix
self.matrix = temp_matrix
self.objective_function = np.append(self.objective_function, M)
self.artificial_variables.append(len(self.objective_function) - 1)
self.last_conditions.append([">=", Q(0)])
self.basis[i] = len(self.objective_function) - 1
self.writer.log(
m = M,
matrix = self.matrix,
objective_function = self.objective_function,
constant = self.obj_shift,
constants = self.constants,
last_cond = self.last_conditions,
task_type = "min",
inequalities = self.inequalities
)
# ------ Simplex method section ------
class SimplexSolver(Solver):
"""Виконує розв'язання задачі лінійного програмування симплекс методом."""
def __init__(self, data_type, data, mute=False):
super(SimplexSolver, self).__init__(data_type, data, mute)
self.deltas = np.array([])
self.thetas = np.array([])
def print_all(self):
"""Виводить в консоль всю доступну на даний момент інформацію про розвиток розв'язку задачі."""
print(">------------------------------------------------------------<")
print("Objective func: {}".format(self.objective_function))
print("Basis constants: {}".format(self.basis_koef))
print("Basis variables: {}".format(self.basis))
print("Main matrix:\n-------------------------------")
prmatr(self.matrix)
print("-------------------------------\nConstants: {}".format(self.constants))
print("Thetas: {}".format(self.thetas))
print("Deltas: {}".format(self.deltas))
print(">------------------------------------------------------------<\n")
def _calculate_deltas(self):
"""Розраховує вектор з дельтами."""
self.writer.initiate("deltas")
temp_matrix = self.matrix.T
temp_array = []
for i in range(len(temp_matrix)):
temp_array.append(self.objective_function[i] - temp_matrix[i].dot(self.basis_koef))
self.writer.log(index=i, const=self.objective_function[i], mult1=temp_matrix[i], mult2=self.basis_koef, res=temp_array[-1])
self.deltas = np.array(temp_array)
self.writer.log(table=self._get_all_table_data())
def _calculate_thetas(self):
"""Розраховує вектор-стовпчик з відношеннями "тета"."""
self.thetas = [Q(0)] * len(self.constants)
self.writer.initiate("thetas")
for i in range(len(self.matrix)):
if self.matrix[i][self.col_num] == 0:
self.thetas[i] = -1
self.writer.log(div1=self.constants[i], div2=self.matrix[i][self.col_num], error="zerodiv", ind=self.basis[i])
elif self.matrix[i][self.col_num] < 0:
self.thetas[i] = -1
self.writer.log(div1=self.constants[i], div2=self.matrix[i][self.col_num], error="negative", ind=self.basis[i])
else:
self.thetas[i] = self.constants[i] / self.matrix[i][self.col_num]
self.writer.log(div1=self.constants[i], div2=self.matrix[i][self.col_num], res=self.thetas[i], ind=self.basis[i])
self.writer.log(table=self._get_all_table_data())
def _find_ind_of_min_theta(self):
"""Знаходить індекс ведучого рядка.
Повертає -1 якщо такого немає."""
self.writer.initiate("min_theta")
temp_min = 0
min_set = False
found_ind = -1
for i in range(len(self.thetas)):
if self.thetas[i] >= 0:
temp_min = self.thetas[i]
found_ind = i
min_set = True
break
if min_set:
for i in range(len(self.thetas)):
if self.thetas[i] < 0:
continue
if self.thetas[i] < temp_min:
temp_min = self.thetas[i]
found_ind = i
self.writer.log(ind=self.basis[found_ind] if found_ind != -1 else -1)
return found_ind
def _reset_deltas_n_thetas(self):
"""Скидає значення векторів "тета" та "дельта"."""
self.deltas = ["-"] * len(self.matrix[0])
self.thetas = ["-"] * len(self.matrix)
def _make_constants_positive_if_needed(self):
"""Якщо всі вільні члени від'ємні, то переходить до іншого базису."""
self._reset_deltas_n_thetas()
self.writer.initiate("initial_table")
self.writer.log(table=self._get_all_table_data())
for i in self.constants:
if i >= 0:
return
unset = True
for i in range(len(self.constants)):
for j in range(len(self.matrix[i])):
if self.matrix[i][j] < 0:
self.col_num = j
self.row_num = i
unset = False
break
if not unset:
break
if not unset:
self._make_basis_column()
self.basis = self._get_basis_vectors_nums()
for i in range(len(self.basis)):
self.basis_koef[i] = self.objective_function[self.basis[i]]
def _get_col_num(self, indices_list):
"""Повертає індекс ведучого стовпчика, засновуючись на векторі з дельтами."""
self.writer.initiate("get_col")
if len(indices_list) == 1:
self.writer.log(num=indices_list[0])
return indices_list[0]
for i in range(len(indices_list)):
temp_thetas = []
for j in range(len(self.matrix)):
if self.matrix[j][indices_list[i]] == 0 or (self.constants[j] == 0 and self.matrix[j][indices_list[i]] < 0):
temp_thetas.append(-1)
else:
temp_thetas.append(self.constants[j] / self.matrix[j][indices_list[i]])
for j in temp_thetas:
if j >= 0:
break
else:
indices_list[i] = -1
for i in indices_list:
if i >= 0:
self.writer.log(num=i)
return i
self.writer.log(no_col=True)
return -1
def _check_for_ambiguous_result(self):
"""Перевіряє чи відповідає небазисній змінній нульова дельта.
Якщо штучна змінна базисна, її пара теж вважається базисною."""
basis = set(self.basis)
for i in self.arbitrary_pairs:
if i[0] in basis:
basis.add(i[1])
elif i[1] in basis:
basis.add(i[0])
non_basis_set = set(range(len(self.objective_function))) - basis
for i in non_basis_set:
if self.deltas[i] == 0:
self.result_error = "infinite|{}".format(self.result)
raise SolvingError("Базисній змінній відповідає нульова дельта:\nІснує нескінченна кількість розв'язків\nОптимальне значення цільової функції: {}".format(self.result))
def _check_for_empty_allowable_area(self):
"""Перевіряє чи є у кінцевому векторі з множниками змінних штучна змінна з відмнінним від нуля множником."""
for i in self.artificial_variables:
if self.final_result[i] != 0:
self.result_error = "empty"
raise SolvingError("В оптимальному розв'язку присутня штучна змінна:\nДопустима область порожня")
def _check_if_result_is_empty(self):
"""Перевіряє чи є допустима область пустою.
Якщо область пуста, утворюється відповідний виняток."""
for i in range(len(self.constants)):
if self.basis[i] in self.artificial_variables and self.constants[i] != 0:
self.result_error = "empty"
raise SolvingError("Допустима область пуста, в оптимальному розв'язку штучній змінній відповідає значення, відмінне від нуля.")
def _get_min_delta(self):
"""Знаходить мінімальну оцінку дельта."""
self.writer.initiate("min_delta")
result = min(self.deltas)
self.writer.log(
min_delta = result
)
return result
def _final_preparations(self):
"""Записує результат у відповідні атрибути."""
self.writer.initiate("final")
self.result_vect = self.final_result[:self.initial_variables_quantity]
obj_func_val = self.objective_function[:self.initial_variables_quantity].dot(np.array(self.result_vect))
self.result = obj_func_val
self._check_for_ambiguous_result()
self._check_for_empty_allowable_area()
self.writer.log(
big_vect = self.final_result,
vect = self.result_vect,
obj_val = self.result
)
def solve(self):
"""Розв'язує задачу симплекс методом."""
self.initial_variables_quantity = len(self.matrix[0])
if not self._normalize_conditions():
raise SolvingError("В заданих умовах обмеження змінних містять строгі знаки нерівностей або знак рівності - дані вхідні дані некоректні для симплекс методу")
self._make_constants_positive()
self._make_conditions_equalities(True)
self.basis = self._get_basis_vectors_nums()
for i in self.basis:
if i == -1:
self._add_artificial_basis()
break
self.basis_koef = np.array([0] * len(self.basis))
for i in range(len(self.basis)):
self.basis_koef[i] = self.objective_function[self.basis[i]]
self._make_constants_positive_if_needed()
safety_counter = 0
while True:
safety_counter += 1
if safety_counter > 100:
raise SolvingError("Кількість ітерацій завелика, цикл зупинено")
self._reset_deltas_n_thetas()
self._calculate_deltas()
min_delta = self._get_min_delta()
if min_delta < 0:
self.col_num = self._get_col_num(np.where(self.deltas == min_delta)[0].tolist())
if self.col_num == -1:
self.result_error = "unlimited"
raise SolvingError("Неможливо обрати ведучий стовпчик, всі стовпчики з від'ємними дельта утворюють від'ємні тета:\nЦільова функція необмежена на допустимій області")
self._calculate_thetas()
self.row_num = self._find_ind_of_min_theta()
if self.row_num == -1:
self.result_error = "unlimited"
raise SolvingError("Всі тета від'ємні:\nЦільова функція необмежена на допустимій області")
self._make_basis_column()
else:
self._check_if_result_is_empty()
break
self._cancel_subtitution()
self._final_preparations()
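# Usage sketch (added; the file name is hypothetical, the format is the one parsed by InputParser above):
#   >>> solver = SimplexSolver("file", "example_task.txt")
#   >>> html_log = solver.get_result()  # rendered log, plus an error block if solving failed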
# ------ Dual simplex method section ------
class DualSimplexSolver(Solver):
"""Виконує розв'язання задачі лінійного програмування двоїстим симплекс методом."""
def __init__(self, data_type, data, mute=False):
super().__init__(data_type, data, mute)
self.deltas = np.array([])
self.thetas = np.array([])
self.previous_basis_sets = []
self.writer.set_task_type("dual")
def _get_first_basis(self):
"""Шукає підхожий базис.
Орієнтуючись на розмірність базису, виконує перебір
всх можливих комбінацій векторів для утворення базису,
розв'язує підсистему двоїстої задачі за обраними векторами.
Якщо розв'язок задовольняє умови двоїстої задачі, то обрані
вектори обираються підхожим базисом в такому порядку, в якому
вони утворили розв'язок підсистеми.
Якщо такий розв'язок не знайдено, повертає None."""
self.writer.initiate("find_first_compatible_basis")
t_m = self.matrix.T
t_c = self.objective_function
basis_size = len(t_m[0])
possible_basis_list = list(combinations(range(len(t_m)), basis_size))
possible_basis_list.reverse()
self.writer.log(
system=t_m,
constants=t_c
)
for possible_comb in possible_basis_list:
possible_comb = np.array(possible_comb)  # completion; api: numpy.array
"""
VISUALIZATION MODULE loading Parameter Matrices
CALLED BY: <visiualize.py>
RETURN: Environment simulation (animated) & Plots
INFO: This module can load a specific file dump (cPickle) and visualize the contained matrices on an OpenAI Gym environment
"""
# Some dependencies
import numpy as np
import matplotlib.pyplot as plt
import hickle as hkl
import gym
from .lif import I_syn_calc, I_gap_calc, U_neuron_calc
from .parameters import *
from .random_search_v2 import compute as compute_v2
from .random_search_v2 import observe
from .weights_nn import compute as compute_with_weights
# Initializing OpenAI Environments------------------------------------------------------
env = gym.make('CartPole-v0')
env.reset()
env_vis = []
#---------------------------------------------------------------------------------------
# Initialization----------------------------------------------------------------------------
def vis_initialize(Default_U_leak):
for i in range(0,4):
x[i] = Default_U_leak
for i in range(0,4):
u[i] = Default_U_leak
def initialize(Default_U_leak):
# Initializing Neurons and Sensors------------------------------------------------------
for i in range(0,4):
x[i] = Default_U_leak
for i in range(0,4):
u[i] = Default_U_leak
global AVA, AVD, PVC, AVB, PVD, PLM, AVM, ALM, AVA_spike, AVD_spike, PVC_spike, AVB_spike, I_PVC, I_AVD, I_AVA, I_AVB, actions_arr, angles_arr, angle_velocity_arr, totalreward, done, info, actions
AVA = np.array([Default_U_leak])
AVD = np.array([Default_U_leak])
PVC = np.array([Default_U_leak])
AVB = np.array([Default_U_leak])
PVD = np.array([])
PLM = np.array([])
AVM = np.array([])
ALM = np.array([])
AVA_spike = np.array([])
AVD_spike = np.array([])
PVC_spike = np.array([])  # completion; api: numpy.array
# -*- coding: utf-8 -*-
'''
pathSim (c) University of Manchester 2019
pathSim is licensed under the MIT License.
To view a copy of this license, visit <http://opensource.org/licenses/MIT/>.
@author: <NAME>
@description: Basic pathway simulation
'''
import tellurium as te
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf
from statsmodels.tools.eval_measures import iqr, rmse
from itertools import product
import re, os, time, csv, argparse
import matplotlib.pyplot as plt
#from sampleCompression import evaldes
from doebase.OptDes import evaldes
from viscad.viscad import createnewCad, makePDF
def modelHeader():
antinom = """
// Created by libAntimony v2.9.4
function Constant_flux__irreversible(v)
v;
end
function Henri_Michaelis_Menten__irreversible(substrate, enzyme, Km, kcat)
kcat*enzyme*substrate/(Km + substrate);
end
function Hill_Cooperativity(substrate, Shalve, V, h)
V*(substrate/Shalve)^h/(1 + (substrate/Shalve)^h);
end
function Hill_Coop2(inducer,promoter,n,kf1,kr1)
kf1*inducer^n - kr1*promoter;
end
"""
return antinom
def modelTemplate(promoter, decay=False):
""" Nsteps basic linear pathway defined using tellurium """
antinom = ''
if promoter is not None:
if decay:
antinom += """
model Prom_Upstream_Model()
"""
else:
antinom += """
model Prom_Model()
"""
else:
antinom += """
model Noprom_Model()
"""
antinom += """
// Compartments and Species:
compartment Cell;
species Substrate in Cell, Product in Cell, Enzyme in Cell;
"""
if promoter is not None:
antinom += """
species Inducer in Cell;
"""
antinom += """
species Activated_promoter in Cell;
// species Growth in Cell;
// Biomass: Growth -> Substrate; Cell*Kgf*Growth - Cell*Kgr*Substrate
// Decay: Growth -> ; Cell*Kd*Growth
"""
if decay:
antinom += """
Substrate -> ; Cell*Kd*Substrate;
"""
antinom += """
// Reactions:
//Induc: => Inducer; Cell*Constant_flux__irreversible(1);
// See doi: https://doi.org/10.1101/360040 for modeling the induction using the Hill function
"""
if promoter is not None:
antinom += """
// Induction: Inducer => Activated_promoter; Cell*Hill_Cooperativity(Inducer, Induction_Shalve, Induction_Vi, Induction_h);
Induction: Inducer => Activated_promoter; Cell*Hill_Coop2(Inducer, Activated_promoter, Induction_n, Induction_kf1, Induction_kr1);
"""
antinom += """
Expression: Activated_promoter => Enzyme; Copy_number*Cell*Expression_k1*Activated_promoter;
Leakage: => Enzyme; Cell*Constant_flux__irreversible(Leakage_vl);
Degradation: Enzyme => ; Cell*Degradation_k2*Enzyme;
Catalysis: Substrate => Product; Cell*Henri_Michaelis_Menten__irreversible(Substrate, Enzyme, Catalysis_Km, Catalysis_kcat);
// Species initializations:
Substrate = 0.5*1e-9;
Product = 0;
Enzyme = 0;
"""
if promoter is not None:
antinom += """
Inducer = 1e-2;
"""
if decay:
antinom += """
Kd = 1e-4;
"""
antinom += """
Activated_promoter = 0;
Copy_number = 1;
// Compartment initializations:
Cell = 1;
// Growth = 1;pathSim.PlotResponse()
// Variable initializations:
// Induction_Shalve = 1e-1;
// Induction_Vi = 1e7;
// Induction_h = 1.85;
Induction_n = 1.85;
Induction_kf1 = 1e3;
Induction_kr1 = 1e-1;
Expression_k1 = 1e6;
Leakage_vl = 0;
Degradation_k2 = 1e-6;
Catalysis_Km = 0.1;
Catalysis_kcat = 0.1;
Kgf = 5;
Kgr = 1;
// Other declarations:
const Cell;
end
"""
return antinom
def pathway(promoters, decay=True):
antinom = modelHeader()
if decay:
antinom += modelTemplate(1, decay)
antinom += modelTemplate(1)
antinom += modelTemplate(None)
antinom += "model *Big_Model()"+"\n"
for i in np.arange(len(promoters)):
p = promoters[i]
if p is not None:
if i == 0 and decay:
antinom += "\t"+"m%d: Prom_Upstream_Model();" % (i+1,)
else:
antinom += "\t"+"m%d: Prom_Model();" % (i+1,)
else:
antinom += "\t"+"m%d: Noprom_Model();" % (i+1,)
antinom += "\n"
for i in np.arange(len(promoters)-1):
antinom += "\t"+"m%d.Product is m%d.Substrate;" % (i+1, i+2)
antinom += "\n"
for i in np.arange(1,len(promoters)):
p = promoters[i]
if p is None:
antinom += "\t"+"m%d.Activated_promoter is m%d.Activated_promoter" %(i+1,i)
antinom += "\n"
antinom += "end\n"
return te.loada(antinom)
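# Usage sketch (added; the rate constants are hypothetical): a three-step pathway in which the
# second step has no promoter of its own and therefore reuses the activated promoter of step 1:
#   >>> pw = pathway([1e-6, None, 1e-6], decay=True)
#   >>> s = pw.simulate(0, 3600, 1000)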
class Model():
def __init__(self, nsteps, promoters):
self.nsteps = nsteps
self.promoters = promoters
self.model = pathway(promoters)
self.kinetics = None
self.copy_number = None
self.leakage = None
self.degradation = None
self.SetPromoters()
def SetKinetics(self,kinetics):
self.kinetics = kinetics
for i in np.arange(len(kinetics)):
self.model['m'+str(i+1)+'_Catalysis_kcat'] = kinetics[i][0]
self.model['m'+str(i+1)+'_Catalysis_Km'] = kinetics[i][1]
def SetCopyNumber(self,cn):
self.copy_number = cn
for i in np.arange(self.nsteps):
self.model['m'+str(i+1)+'_Copy_number'] = cn
def SetPromoters(self):
for i in np.arange(self.nsteps):
if self.promoters[i] is not None:
self.model['m'+str(i+1)+'_Expression_k1'] = self.promoters[i]
else:
self.model['m'+str(i+1)+'_Expression_k1'] = self.model['m'+str(i)+'_Expression_k1']
def SetLeakage(self,leaks):
for i in np.arange(self.nsteps):
self.model['m'+str(i+1)+'_Leakage_vl'] = leaks[i]
def SetDegradation(self,deg):
for i in np.arange(self.nsteps):
self.model['m'+str(i+1)+'_Degradation_k2'] = deg[i]
def ranges():
""" Define global ranges for random parameters """
param = {
'Catalysis': {
'Km': [1e-4, 1e-2], # Center=1 mM #[1e2, 1e3],
'kcat': [1, 1e3], # Center= 100 s^-1 #1, 1]
},
'Degradation': {
'k2': [1e-6,1e-6]#[1e-3, 1e-3]
},
# 'Induction': {
# 'Shalve': [0.1, 0.1],
# 'Vi': [1e6, 1e7],
# 'h': [2, 4]
# },
'Induction': {
'n': [2, 2],
'kf1': [1e1, 1e1],
'kr1': [1e-2,1e-2]
},
'Leakage': {
'vl': [1e-12,1e-12] #[1e-10,1e-10]#[1e-9, 1e-9]
},
}
return param
def libraries(nprom, nori):
""" Define library values for:
- Origin of replication
- Promoters
"""
param = {
'Expression': 1e-8*np.power( 10, np.random.random(nprom) ),
'Copy_number': np.power( 10, 2*np.random.random(nori) )
}
for y in param:
param[y].sort()
return param
def Parameters(nori,nprom,nsteps,nvariants):
""" Define de parameters and ranges """
par = {}
plib = libraries(nprom,nori)
par['Copy_number'] = plib['Copy_number']
par['Expression'] = plib['Expression']
par['Step'] = []
for i in np.arange(nsteps):
vals = []
for j in np.arange(nvariants):
vals.append( instance() )
par['Step'].append( vals )
return par
def Construct(par,design,noise=False):
promoters = []
for x in np.arange(1,len(design),2):
# Backbone promoter
if x == 1:
promoters.append( par['Expression'][design[x]-1] )
# For the rest of promoters, we assume half of them empty
else:
if design[x] > len(par['Expression']):
promoters.append( None )
else:
promoters.append( par['Expression'][design[x]-1] )
# Use the information about promoters to create the pathway
pw = pathway(promoters)
initModel( pw, nsteps=len(par['Step']), substrate=1.0*1e-3 )
# Init model??
# Set up the copy number
for i in np.arange(len(par['Step'])):
pw['m'+str(i+1)+'_Copy_number'] = float(par['Copy_number'][design[0]])
for i in np.arange(len(par['Step'])):
if promoters[i] is not None:
pw['m'+str(i+1)+'_Expression_k1'] = promoters[i]
else:
j = i-1
while promoters[j] is None and j > 0:
j -= 1
pw['m'+str(i+1)+'_Expression_k1'] = promoters[j]
# Set up the gene
for i in np.arange(len(par['Step'])):
enzyme = par['Step'][i][design[2+i*2]]
for val in enzyme:
(mean, std) = enzyme[val]
if noise:
p = np.random.normal( mean, std )
else:
p = mean
param = 'm{}_{}'.format( i+1, val )
pw[ param ] = p
return pw
def instance():
""" Generate an instance mean, std """
par = ranges()
vals = {}
for group in par:
for x in par[group]:
xmax = par[group][x][1]
xmin = par[group][x][0]
if xmax == xmin:
mean = xmin
else:
logmean = np.random.uniform( np.log(xmin), np.log(xmax) )
mean = np.exp( logmean )
std = mean/100.0 + np.random.rand()*(xmax-xmin)/100.0
vals['_'.join([group,x])] = ( mean,std )
return vals
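# Shape of the returned dictionary (added for illustration; the numbers are random draws):
#   {'Catalysis_Km': (2.3e-3, 4.1e-5), 'Catalysis_kcat': (57.0, 1.2),
#    'Degradation_k2': (1e-6, ...), 'Induction_n': (2, ...), 'Leakage_vl': (1e-12, ...), ...}
# i.e. keys are "<Group>_<parameter>" strings and values are (mean, std) pairs sampled from ranges().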
def initModel(model, substrate=0.0, nsteps=5, inducer=100e-6):
""" Each step in the pathway requires the following parameter definitions:
- Induction: Shalve, Vi, h
- Expression: k1
- Degradation: k2
- Leakage: vl
- Catalysis: Km, V
- Initial substrate concentration
- Inducer concentrations
"""
# Init all species to 0
for step in np.arange(0,nsteps):
model['m'+str(step+1)+'_Substrate'] = 0
model['m'+str(step+1)+'_Enzyme'] = 0
try:
model['m'+str(step+1)+'_Inducer'] = inducer
except:
pass
try:
model['m'+str(step+1)+'_Activated_promoter'] = 0
except:
pass
model['m'+str(nsteps)+'_Product'] = 0
model['m1_Substrate'] = substrate
class metPath:
def __init__(self, steps):
""" Init model and parameters """
self.steps = steps
self.model = modelTemplate( steps )
self.vals = []
for i in np.arange( steps ):
self.vals.append( instance() )
def sample(self, initSubstrate=1.0):
""" Create a sample of the model
with the given initial substrate concentration.
Start inducers.
"""
for i in np.arange( self.steps ):
v = self.vals[i]
for key, (mean, std) in v.items():
p = np.random.normal( mean, std )
param = 'm{}_{}'.format( i+1, key )
self.model[ param ] = p
initModel( self.model )
self.model[ 'm1_Substrate' ] = initSubstrate
for i in np.arange( self.steps ):
induc = 'm{}_Inducer'.format(i+1)
if induc in self.model:
self.model[ induc ] = 1.0
def SelectCurves(pw):
selections = []
target = None
for i in pw.timeCourseSelections:
if i.endswith('Inducer]') or i.endswith('promoter]') or i.endswith('Enzyme]') or i.endswith('Growth]'):
continue
selections.append(i)
if i.endswith('Product]'):
target = i
pw.timeCourseSelections = selections
pw.steadyStateSelections = selections
return target
def Assembly(design, steps=3, nplasmids=2, npromoters=2, variants=3):
""" Assembly the full pathway: provide the index at each position """
assemble = []
n = 0
if nplasmids == 1:
assemble.append( 0 )
else:
assemble.append( design[n] )
n += 1
if npromoters == 1:
assemble.append( 0 )
else:
assemble.append( design[n] )
n += 1
if variants == 1:
if npromoters > 1:
for i in np.arange(1, steps):
assemble.append(0)
p = n + i -1
assemble.append( design[p] )
assemble.append( 0 )
else:
for i in np.arange(1, steps):
assemble.append(0)
assemble.append( 0 )
assemble.append( 0 )
elif npromoters > 1:
assemble.extend( design[n:] )
else:
p = -1
for i in np.arange(1, steps):
assemble.append( design[p] )
p = n + i -1
assemble.append(0)
assemble.append( design[p+1] )
return assemble
def SimulateDesign(steps=3, nplasmids=2, npromoters=2, variants=3, libsize=32, show=False, timespan=3600, random=False):
print('Design')
steps = steps
variants = variants
npromoters = npromoters
nplasmids = nplasmids
libsize = libsize
positional = False
par = Parameters(nplasmids,npromoters,steps,variants)
diagnostics = evaldes( steps, variants, npromoters, nplasmids, libsize, positional, random=random )
M = diagnostics['M']
print('Build')
results = []
for i in np.arange(M.shape[0]):
design = Assembly( M[i,:], steps, nplasmids, npromoters, variants )
pw = Construct(par,design)
target = SelectCurves(pw)
s = pw.simulate(0,timespan,1000)
if show:
pw.plot(s, show=False ,xlabel='t [s]', ylabel="conc [M]")
ds = pd.DataFrame(s,columns=s.colnames)
results.append( s[target][-1] )
return pw, ds, M, results, par, diagnostics
# TO DO: multiple random sims per design? (but with same params)
def FitModel(M,results):
columns = ['C'+str(i) for i in np.arange(M.shape[1])]
dd = pd.DataFrame( M, columns=columns )
promLevels = []
for j in np.arange(3,dd.shape[1],2):
promLevels.append( int(len(dd.iloc[:,j].unique())/2) )
for i in np.arange(M.shape[0]):
for j in np.arange(M.shape[1]):
# Add exception for promoters
dd.iloc[i,j] = "L"+str(M[i,j])
if j>2 and ( (j+1) % 2 == 0):
plevel = promLevels[ int( (j-3)/2 ) ]
if M[i,j] > plevel-1:
dd.iloc[i,j] = "L"+str(plevel)
else:
dd.iloc[i,j] = "L"+str(M[i,j])
dd['y'] = results
formula = 'y ~ '+' + '.join(columns)
ols = smf.ols( formula=formula, data=dd)
res = ols.fit()
return res, dd
def BestCombinations(res, dd, random=1000):
levels = []
for j in np.arange(dd.shape[1]-1)  # completion; api: numpy.arange
import sys, os
import numpy as np
from scipy.linalg import null_space
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
from grasp_functions import block_diag, check_equal_matrices, get_rank
from data_types import Contact, Joint, Finger
from class_grasp import Grasp
from class_jacobian import Jacobian
from quality_metrics import force_closure
zv = np.array([0, 0, 1]).reshape(3, 1)
p = np.array([2, 10, 0])
h = np.array(["H", "H"])
c1 = np.array([6, 10, 0])
c2 = np.array([-3, 10, 0])
c3 = np.array([-7, 5, 0])
R1 = np.array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
R2 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
R3 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
q1c = np.array([9, 0, 0])
q2c = np.array([8, 6, 0])
q3c = np.array([-8, 0, 0])
q4c = np.array([-8, 3, 0])
q5c = np.array([-6, 7, 0])
def S(r):
rx = r[0]
ry = r[1]
rz = r[2]
return np.array([[0, -rz, ry], [rz, 0, -rx], [-ry, rx, 0]])
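# Note (added for clarity): S(r) is the skew-symmetric cross-product matrix, so S(r) @ v == np.cross(r, v):
#   >>> np.allclose(S(np.array([1, 2, 3])) @ np.array([4, 5, 6]), np.cross([1, 2, 3], [4, 5, 6]))
#   True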
def fHi(h):
if h == "P":
return np.array([[1, 0, 0, 0, 0, 0]])
elif h == "S":
return np.concatenate((np.identity(4), np.zeros((4, 2))), axis=1)
else:
return np.concatenate((np.identity(3), np.zeros((3, 3))), axis=1)
def fH(h):
H = fHi(h[0])
for hi in h[1:]:
Hi = fHi(hi)
Hi = np.concatenate((np.zeros((Hi.shape[0], H.shape[1])), Hi), axis=1)
H = np.concatenate((H, np.zeros((H.shape[0], 6))), axis=1)
H = np.concatenate((H, Hi), axis=0)
return H
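# Illustrative check (added): for two hard-finger contacts h = ["H", "H"], each fHi block is 3x6
# (forces only, no moments), so the assembled selection matrix is block diagonal with shape (6, 12):
#   >>> fH(np.array(["H", "H"])).shape
#   (6, 12)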
P1 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c1 - p), np.identity(3)]])
P2 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c2 - p), np.identity(3)]])
P3 = np.block([[np.identity(3), np.zeros((3, 3))], [S(c3 - p), np.identity(3)]])
pG1t = np.dot(block_diag([R1, R1]).transpose(), P1.transpose())
pG2t = np.dot(block_diag([R2, R2]).transpose(), P2.transpose())
pG3t = np.dot(block_diag([R3, R3]).transpose(), P3.transpose())
pGt = np.concatenate((pG1t, pG2t), axis=0)
H = fH(h)
# print(H.shape)
Gt = np.dot(H, pGt)
# print(Gt)
contact1 = Contact(c1, R1)
contact2 = Contact(c2, R2)
C = np.array([contact1, contact2])
grasp = Grasp(p, C)
Gtclass = grasp.Gt
if check_equal_matrices(Gt, Gtclass):
print("correct writting of Grasp Class")
else:
print("ERROR: Gt Matrices are different")
d11 = np.dot(S(c1 - q1c).transpose(), zv)
d12 = np.dot(S(c1 - q2c).transpose(), zv)
d13 = np.array([0, 0, 0]).reshape(3, 1)
d14 = np.array([0, 0, 0]).reshape(3, 1)
d15 = np.array([0, 0, 0]).reshape(3, 1)
l11 = zv
l12 = zv
l13 = np.array([0, 0, 0]).reshape(3, 1)
l14 = np.array([0, 0, 0]).reshape(3, 1)
l15 = np.array([0, 0, 0]).reshape(3, 1)
d21 = np.array([0, 0, 0]).reshape(3, 1)
d22 = np.array([0, 0, 0]).reshape(3, 1)
d23 = np.dot(S(c2 - q3c).transpose(), zv)
d24 = np.dot(S(c2 - q4c).transpose(), zv)
d25 = np.dot(S(c2 - q5c).transpose(), zv)
l21 = np.array([0, 0, 0]).reshape(3, 1)
l22 = np.array([0, 0, 0]).reshape(3, 1)
l23 = zv
l24 = zv
l25 = zv
d31 = np.array([0, 0, 0]).reshape(3, 1)
d32 = np.array([0, 0, 0]).reshape(3, 1)
d33 = np.dot(S(c3 - q3c).transpose(), zv)
d34 = np.dot(S(c3 - q4c).transpose(), zv)
d35 = np.array([0, 0, 0]).reshape(3, 1)
l31 = np.array([0, 0, 0]).reshape(3, 1)
l32 = np.array([0, 0, 0]).reshape(3, 1)
l33 = zv
l34 = zv
l35 = np.array([0, 0, 0]).reshape(3, 1)
d1 = np.concatenate((d11, d12, d13, d14, d15), axis=1)
l1 = np.concatenate((l11, l12, l13, l14, l15), axis=1)  # completion; api: numpy.concatenate
import glob
import os
import struct
import weakref
import numpy as np
import yt.utilities.fortran_utils as fpu
from yt.data_objects.index_subobjects.octree_subset import OctreeSubset
from yt.data_objects.particle_unions import ParticleUnion
from yt.data_objects.static_output import Dataset, ParticleFile
from yt.frontends.art.definitions import (
amr_header_struct,
constants,
dmparticle_header_struct,
filename_pattern,
fluid_fields,
particle_fields,
particle_header_struct,
seek_extras,
)
from yt.frontends.art.fields import ARTFieldInfo
from yt.frontends.art.io import (
_read_art_level_info,
_read_child_level,
_read_root_level,
a2b,
b2t,
)
from yt.funcs import mylog, setdefaultattr
from yt.geometry.geometry_handler import YTDataChunk
from yt.geometry.oct_container import ARTOctreeContainer
from yt.geometry.oct_geometry_handler import OctreeIndex
from yt.geometry.particle_geometry_handler import ParticleIndex
class ARTIndex(OctreeIndex):
def __init__(self, ds, dataset_type="art"):
self.fluid_field_list = fluid_fields
self.dataset_type = dataset_type
self.dataset = weakref.proxy(ds)
self.index_filename = self.dataset.parameter_filename
self.directory = os.path.dirname(self.index_filename)
self.max_level = ds.max_level
self.float_type = np.float64
super().__init__(ds, dataset_type)
def get_smallest_dx(self):
"""
Returns (in code units) the smallest cell size in the simulation.
"""
# Overloaded
ds = self.dataset
return (ds.domain_width / ds.domain_dimensions / (2 ** self.max_level)).min()
def _initialize_oct_handler(self):
"""
Just count the number of octs per domain and
allocate the requisite memory in the oct tree
"""
nv = len(self.fluid_field_list)
self.oct_handler = ARTOctreeContainer(
self.dataset.domain_dimensions / 2, # dd is # of root cells
self.dataset.domain_left_edge,
self.dataset.domain_right_edge,
1,
)
# The 1 here refers to domain_id == 1 always for ARTIO.
self.domains = [ARTDomainFile(self.dataset, nv, self.oct_handler, 1)]
self.octs_per_domain = [dom.level_count.sum() for dom in self.domains]
self.total_octs = sum(self.octs_per_domain)
mylog.debug("Allocating %s octs", self.total_octs)
self.oct_handler.allocate_domains(self.octs_per_domain)
domain = self.domains[0]
domain._read_amr_root(self.oct_handler)
domain._read_amr_level(self.oct_handler)
self.oct_handler.finalize()
def _detect_output_fields(self):
self.particle_field_list = [f for f in particle_fields]
self.field_list = [("art", f) for f in fluid_fields]
# now generate all of the possible particle fields
for ptype in self.dataset.particle_types_raw:
for pfield in self.particle_field_list:
pfn = (ptype, pfield)
self.field_list.append(pfn)
def _identify_base_chunk(self, dobj):
"""
Take the passed in data source dobj, and use its embedded selector
to calculate the domain mask, build the reduced domain
subsets and oct counts. Attach this information to dobj.
"""
if getattr(dobj, "_chunk_info", None) is None:
# Get all octs within this oct handler
domains = [dom for dom in self.domains if dom.included(dobj.selector)]
base_region = getattr(dobj, "base_region", dobj)
if len(domains) > 1:
mylog.debug("Identified %s intersecting domains", len(domains))
subsets = [
ARTDomainSubset(base_region, domain, self.dataset) for domain in domains
]
dobj._chunk_info = subsets
dobj._current_chunk = list(self._chunk_all(dobj))[0]
def _chunk_all(self, dobj):
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
# We pass the chunk both the current chunk and list of chunks,
# as well as the referring data source
yield YTDataChunk(dobj, "all", oobjs, None)
def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None):
sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for og in sobjs:
if ngz > 0:
g = og.retrieve_ghost_zones(ngz, [], smoothed=True)
else:
g = og
yield YTDataChunk(dobj, "spatial", [g], None)
def _chunk_io(self, dobj, cache=True, local_only=False):
"""
Since subsets are calculated per domain,
i.e. per file, yield each domain at a time to
organize by IO. We will eventually chunk out NMSU ART
to be level-by-level.
"""
oobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info)
for subset in oobjs:
yield YTDataChunk(dobj, "io", [subset], None, cache=cache)
class ARTDataset(Dataset):
_index_class = ARTIndex
_field_info_class = ARTFieldInfo
def __init__(
self,
filename,
dataset_type="art",
fields=None,
storage_filename=None,
skip_particles=False,
skip_stars=False,
limit_level=None,
spread_age=True,
force_max_level=None,
file_particle_header=None,
file_particle_data=None,
file_particle_stars=None,
units_override=None,
unit_system="cgs",
):
self.fluid_types += ("art",)
if fields is None:
fields = fluid_fields
filename = os.path.abspath(filename)
self._fields_in_file = fields
self._file_amr = filename
self._file_particle_header = file_particle_header
self._file_particle_data = file_particle_data
self._file_particle_stars = file_particle_stars
self._find_files(filename)
self.parameter_filename = filename
self.skip_particles = skip_particles
self.skip_stars = skip_stars
self.limit_level = limit_level
self.max_level = limit_level
self.force_max_level = force_max_level
self.spread_age = spread_age
Dataset.__init__(
self,
filename,
dataset_type,
units_override=units_override,
unit_system=unit_system,
)
self.storage_filename = storage_filename
def _find_files(self, file_amr):
"""
Given the AMR base filename, attempt to find the
particle header, star files, etc.
"""
base_prefix, base_suffix = filename_pattern["amr"]
numericstr = file_amr.rsplit("_", 1)[1].replace(base_suffix, "")
possibles = glob.glob(os.path.dirname(os.path.abspath(file_amr)) + "/*")
for filetype, (prefix, suffix) in filename_pattern.items():
# if this attribute is already set skip it
if getattr(self, "_file_" + filetype, None) is not None:
continue
match = None
for possible in possibles:
if possible.endswith(numericstr + suffix):
if os.path.basename(possible).startswith(prefix):
match = possible
if match is not None:
mylog.info("discovered %s:%s", filetype, match)
setattr(self, "_file_" + filetype, match)
else:
setattr(self, "_file_" + filetype, None)
def __str__(self):
return self._file_amr.split("/")[-1]
def _set_code_unit_attributes(self):
"""
Generates the conversion to various physical units based
on the parameters from the header
"""
# spatial units
z = self.current_redshift
h = self.hubble_constant
boxcm_cal = self.parameters["boxh"]
boxcm_uncal = boxcm_cal / h
box_proper = boxcm_uncal / (1 + z)
aexpn = self.parameters["aexpn"]
# all other units
Om0 = self.parameters["Om0"]
ng = self.parameters["ng"]
boxh = self.parameters["boxh"]
aexpn = self.parameters["aexpn"]
hubble = self.parameters["hubble"]
r0 = boxh / ng
v0 = 50.0 * r0 * np.sqrt(Om0)
rho0 = 2.776e11 * hubble ** 2.0 * Om0
aM0 = rho0 * (boxh / hubble) ** 3.0 / ng ** 3.0
velocity = v0 / aexpn * 1.0e5 # proper cm/s
mass = aM0 * 1.98892e33
self.cosmological_simulation = True
setdefaultattr(self, "mass_unit", self.quan(mass, f"g*{ng ** 3}"))
setdefaultattr(self, "length_unit", self.quan(box_proper, "Mpc"))
setdefaultattr(self, "velocity_unit", self.quan(velocity, "cm/s"))
setdefaultattr(self, "time_unit", self.length_unit / self.velocity_unit)
def _parse_parameter_file(self):
"""
Get the various simulation parameters & constants.
"""
self.domain_left_edge = np.zeros(3, dtype="float")
self.domain_right_edge = np.zeros(3, dtype="float")  # completion; api: numpy.zeros
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
File: wen2011.py
Description: Apply the algorithm on experimental radiance (with
experimentally measured emissivity). The goal is to find emissivity and
temperature of the surface of an aluminum alloy (AL5083) which is held at 600 K.
The experimental data can be found in the following reference:
<NAME> and <NAME>, "Examination of multispectral radiation thermometry
using linear and log-linear emissivity models for aluminum alloys," Heat Mass
Transfer, 47, 7, pp. 847-856, 2011.
Note: the value of "pix_slice" in algorithm/spectropyrometer_constants MUST
be adjusted. A lower value (e.g. 7) is better here because fewer wavelengths
are available than in measurements with 3,000 pixels.
'''
import numpy as np
from numpy.polynomial import Polynomial, polynomial
import matplotlib.pyplot as plt
import generate_spectrum as gs
from pixel_operations import choose_pixels, generate_combinations
from temperature_functions import optimum_temperature
from kfold import order_selection
from scipy.interpolate import splrep
# Black and gray body
bb_eps = lambda wl,T: 1.0 * np.ones(len(wl))
gr_eps = lambda wl,T: 0.1 * np.ones(len(wl))
### Generate some data
data = np.genfromtxt('data/wen-2011/AL5083-radiance.csv', delimiter=',',skip_header=1)
T = 600 # K
noisy_data = data[:,1] / (1e3 * 1e4)
wl_vec = data[:,0] * 1000 # Wavelengths are in micro-meter
pix_vec = np.linspace(0,len(wl_vec)-1,len(wl_vec))
pix_vec = np.array(pix_vec,dtype=np.int64)  # completion; api: numpy.array
import numpy as np
import cv2
# load the calibration matrix and distortion coefficients computed in camera_calibration.ipynb
mtx_dist = np.load("mtx_dist.npy")
dist_parameters = np.load("parameters_dist.npy")
mtx_perspective = np.load("mtx_perspective.npy")
mtx_inv_perspective = np.load("mtx_inv_perspective.npy")
# approximate ratio of real-world distance to pixels in the eagle-eye images
ym_per_pix = 3 / (590 - 555)
xm_per_pix = 3.7 / (1040 - 270)
def undistort_image(image):
""""
Undistort image with the read mtx and dist_parameters
"""
undst = cv2.undistort(image, mtx_dist, dist_parameters, None, mtx_dist)
return undst
def threshold(image, s_thresh=(150, 255), sx_thresh=(23, 150)):
"""
Filter the image to obtain a binary map of the lane lines
"""
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
l_channel = hls[:, :, 1]
s_channel = hls[:, :, 2]
# Apply sobel x to l chanel
sobelx = cv2.Sobel(l_channel, cv2.CV_64F, 1, 0) # Take the derivative in x
# Absolute x derivative to accentuate lines away from horizontal
abs_sobelx = np.absolute(sobelx)
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
# Threshold x gradient
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= sx_thresh[0]) &
(scaled_sobel <= sx_thresh[1])] = 1
# Threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# combining thresholds
bitmap = np.logical_or(s_binary, sxbinary).astype(np.uint8)
return bitmap
def eagle_eye(image):
"""
Project the image to the eagle eye view
Note: it is assumed that the image was previously undistorted
"""
img_size = (image.shape[1], image.shape[0])
return cv2.warpPerspective(image, mtx_perspective, img_size, flags=cv2.INTER_LINEAR)
def eagle_eye_inv(image):
"""
Project the eagle eye to the normal view
"""
img_size = (image.shape[1], image.shape[0])
return cv2.warpPerspective(image, mtx_inv_perspective, img_size, flags=cv2.INTER_LINEAR)
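# Typical pipeline sketch (added; `frame` is a hypothetical RGB frame read from the video):
#   >>> binary = threshold(undistort_image(frame))
#   >>> warped = eagle_eye(binary)
#   >>> # `warped` is then passed to find_lane_pixels_window() below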
def find_lane_pixels_window(binary_warped, nwindows=10, margin=100, minpix=40):
"""
Finds the pixels that belong to a lane using a sliding-window search. binary_warped
is a binary map of the lanes in the eagle-eye view.
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:, :], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
# Find the four below boundaries of the window ###
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Identify the nonzero pixels in x and y within the window ###
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window ###
if minpix < len(good_left_inds):
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if minpix < len(good_right_inds):
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)  # completion; api: numpy.concatenate
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Search for pulsars."""
import warnings
import os
import argparse
import copy
import numpy as np
from astropy import log
from astropy.table import Table
from astropy.logger import AstropyUserWarning
from .io import get_file_type
from stingray.pulse.search import (
epoch_folding_search,
z_n_search,
search_best_peaks,
)
from stingray.gti import time_intervals_from_gtis
from stingray.utils import assign_value_if_none
from stingray.pulse.modeling import fit_sinc, fit_gaussian
from stingray.stats import pf_upper_limit
from .io import load_events, EFPeriodogram, save_folding, HEN_FILE_EXTENSION
from .base import hen_root, show_progress, adjust_dt_for_power_of_two
from .base import deorbit_events, njit, prange, vectorize, float64
from .base import histogram2d, histogram, memmapped_arange
from .base import z2_n_detection_level, fold_detection_level
from .fold import filter_energy
from .ffa import _z_n_fast_cached, ffa_search, h_test
from .fake import scramble
try:
import matplotlib.pyplot as plt
HAS_MPL = True
except ImportError:
HAS_MPL = False
try:
import imageio
HAS_IMAGEIO = True
except ImportError:
HAS_IMAGEIO = False
D_OMEGA_FACTOR = 2 * np.sqrt(3)
TWOPI = 2 * np.pi
__all__ = [
"check_phase_error_after_casting_to_double",
"decide_binary_parameters",
"folding_orbital_search",
"fit",
"calculate_shifts",
"mod",
"shift_and_sum",
"z_n_fast",
"transient_search",
"plot_transient_search",
"search_with_qffa_step",
"search_with_qffa",
"search_with_ffa",
"folding_search",
"dyn_folding_search",
"main_efsearch",
"main_zsearch",
"z2_vs_pf",
"main_z2vspf",
"main_accelsearch",
"h_test",
]
def _save_df_to_csv(df, csv_file, reset=False):
if not os.path.exists(csv_file) or reset:
mode = "w"
header = True
else:
mode = "a"
header = False
df.to_csv(csv_file, header=header, index=False, mode=mode)
def check_phase_error_after_casting_to_double(tref, f, fdot=0):
"""Check the maximum error expected in the phase when casting to double."""
times = np.array(np.random.normal(tref, 0.1, 1000), dtype=np.longdouble)
times_dbl = times.astype(np.double)
phase = times * f + 0.5 * times ** 2 * fdot
phase_dbl = times_dbl * np.double(f) + 0.5 * times_dbl ** 2 * np.double(
fdot
)
return np.max(np.abs(phase_dbl - phase))
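# Illustrative usage (added; the reference time and frequency are arbitrary):
#   >>> max_phase_err = check_phase_error_after_casting_to_double(1e8, 700.0)
# The returned value is the worst-case phase slip (in cycles) caused by the float64 cast.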
def decide_binary_parameters(
length,
freq_range,
porb_range,
asini_range,
fdot_range=[0, 0],
NMAX=10,
csv_file="db.csv",
reset=False,
):
import pandas as pd
count = 0
omega_range = [1 / porb_range[1], 1 / porb_range[0]]
columns = [
"freq",
"fdot",
"X",
"Porb",
"done",
"max_stat",
"min_stat",
"best_T0",
]
df = 1 / length
log.info(
"Recommended frequency steps: {}".format(
int(np.diff(freq_range)[0] // df + 1)
)
)
while count < NMAX:
# In any case, only the first loop deletes the file
if count > 0:
reset = False
block_of_data = []
freq = np.random.uniform(freq_range[0], freq_range[1])
fdot = np.random.uniform(fdot_range[0], fdot_range[1])
dX = 1 / (TWOPI * freq)
nX = int(np.diff(asini_range) // dX) + 1
Xs =
|
np.random.uniform(asini_range[0], asini_range[1], nX)
|
numpy.random.uniform
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import socket
import struct
import traceback # for print_stack, for debugging purposes: traceback.print_stack()
import time
import sys
import numpy as np
import logging
#import matplotlib.pyplot as plt
class CommsError(Exception):
pass
class CommsLoggeableError(CommsError):
pass
class socket_placeholder():
def __init__(self):
pass
def sendall(*args):
print("socket_placeholder::sendall(): No active socket. Was called from {}".format(sys._getframe().f_back.f_code.co_name))
traceback.print_stack()
pass
def recv(*args):
print("socket_placeholder::recv(): No active socket")
return []
class RP_PLL_device():
MAGIC_BYTES_READ_DDR = 0xABCD1221
MAGIC_BYTES_WRITE_DDR = 0xABCD1222
MAGIC_BYTES_WRITE_FILE_DDR = 0xABCD1223
MAGIC_BYTES_WRITE_REG = 0xABCD1233
MAGIC_BYTES_READ_REG = 0xABCD1234
MAGIC_BYTES_READ_BUFFER = 0xABCD1235
MAGIC_BYTES_WRITE_FILE = 0xABCD1237
MAGIC_BYTES_SHELL_COMMAND = 0xABCD1238
MAGIC_BYTES_REBOOT_MONITOR = 0xABCD1239
FPGA_BASE_ADDR = 0x40000000 # address of the main PS <-> PL memory map (GP 0 AXI master on PS)
FPGA_BASE_ADDR_XADC = 0x80000000 # address of the XADC PS <-> PL memory map (GP 1 AXI master on PS)
MAX_SAMPLES_READ_BUFFER = 2**15 # should be equal to 2**ADDRESS_WIDTH from ram_data_logger.vhd
def __init__(self, controller=None):
self.logger = logging.getLogger(__name__)
self.logger_name = ':RP_PLL'
self.sock = socket_placeholder()
self.controller = controller
self.valid_socket = False
self.type_to_format_string = {False: '=III',
True: '=IIi'}
def socketErrorEvent(self, e):
# disconnect from socket, and start reconnection timer:
print("RP_PLL::socketErrorEvent()")
if self.controller is not None:
self.controller.socketErrorEvent(e)
else:
# we are running in stand-alone mode
self.CloseTCPConnection()
raise CommsLoggeableError(e)
def CloseTCPConnection(self):
print("RP_PLL_device::CloseTCPConnection()")
self.sock = None # socket_placeholder()
self.valid_socket = False
def OpenTCPConnection(self, HOST, PORT=5000, valid_socket_for_general_comms=True):
print("RP_PLL_device::OpenTCPConnection(): HOST = '%s', PORT = %d" % (HOST, PORT))
self.HOST = HOST
self.PORT = PORT
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) # this avoids a ~33 ms on Windows before our request packets are sent (!!)
# self.sock.setblocking(1)
self.sock.settimeout(2)
try:
self.sock.connect((self.HOST, self.PORT))
self.valid_socket = valid_socket_for_general_comms
except Exception as e:
logging.error(traceback.format_exc())
self.valid_socket = False
# from http://stupidpythonideas.blogspot.ca/2013/05/sockets-are-byte-streams-not-message.html
def recvall(self, count):
buf = b''
while count:
newbuf = self.sock.recv(count)
if not newbuf: return None
buf += newbuf
count -= len(newbuf)
return buf
# Function used to send a file write command:
def write_file_on_remote(self, strFilenameLocal, strFilenameRemote):
# open local file and load into memory:
file_data =
|
np.fromfile(strFilenameLocal, dtype=np.uint8)
|
numpy.fromfile
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Unzipping Simulation, simulate the unzipping of DNA double strands
# Copyright 2018-2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TODO: Distributed calculation over the network, see
# https://eli.thegreenplace.net/2012/01/24/distributed-computing-in-python-with-multiprocessing/
"""
# Structure of a simulation
# The unzipping construct parameters
bases = simulation['settings']['bases']
nbs = simulation['settings']['nbs']
nbp = simulation['settings']['nbp']
nbs_loop = simulation['settings']['nbs_loop']
S = simulation['settings']['S']
L_p_ssDNA = simulation['settings']['L_p_ssDNA']
z = simulation['settings']['z']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
# Other experimental parameters
radius = simulation['settings']['radius']
kappa = simulation['settings']['kappa']
c = simulation['settings']['c']
T = simulation['settings']['T']
# Parameters of the simulation
NNBP = simulation['settings']['NNBP']
x0_min = simulation['settings']['x0_min']
x0_max = simulation['settings']['x0_max']
h0 = simulation['settings']['h0']
y0 = simulation['settings']['y0']
resolution = simulation['settings']['resolution']
boltzmann_factor = simulation['settings']['boltzmann_factor']
# Variables of simulated data
XFE, XFE0 = simulation['XFE'], simulation['XFE0']
# extension, number of unzipped basepairs, force extension of the construct
X = XFE['EX_avg']
# 3D position of the stage
X0 = XFE['X0']
# average number of unzipped basepairs
NUZ0_avg = XFE['NUZ0_avg']
# most probable number of unzipped basepairs
NUZ0_max = XFE['NUZ0_max_W0']
# Average force acting on the construct
F0_avg = XFE['F0_avg']
# Most probable force acting on the construct
F0_max = XFE['F0_max_W0']
The unzipping simulation package was written with several calculation speed
improvements. An old version of the unzipping simulation was carefully tested
with different settings and algorithms, to find the best strategy of getting
the simulation done as fast as possible. The results are documented here to
give an insight into the different approaches, i.e. parallelization,
boltzmann_factor, binary search for the estimated number of unzipped basepairs,
iterative reuse of the simulated number of unzipped basepairs from the previous
stage displacement as the estimate for the subsequent calculation, and
precalculation and buffering of WLC/FJC extension/force/energy models:
# timeit results:
#x0 = 800e-9
#xfe0 = xfe0_nuz(A0, bases=bases, nbs=nbs, nbp=nbp, nbs_loop=nbs_loop,
# radius=radius, kappa=kappa,
# S=S, L_p_ssDNA=L_p_ssDNA, z=z,
# pitch=pitch, L_p_dsDNA=L_p_dsDNA,
# NNBP=NNBP, c=c, T=T,
# boltzmann_factor=1e-5)
# Time needed with current settings:
# RS: 140 s
# R0: 884 s
# RI: 180 s
# RP: 12368 s
# Calculation of E_ext_ssDNA buffer on multiple cores
# 121.9 s
# Calculation times for force extension curve with differing boltzmann_factor on mulitple cpu cores
# boltzmann_factor
# 1e-2 35 s, STD = 55.043 fN (compared to gold standard), deviation seen of up to ~ 500 fN and partly distorted force/extension curve
# 1e-3 40 s, STD = 4.525 fN (compared to gold standard), deviation seen of up to 30 fN
# 1e-4 45 s, STD = 2.519 fN (compared to gold standard), deviation seen of only up to sub fN
# 1e-5 50 s, STD = 2.513 fN (compared to gold standard)
# 1e-6 54 s, STD = 1.363 fN (compared to gold standard)
# 1e-7 57 s, STD = 64.751 aN (compared to gold standard)
# 1e-8 62 s, STD = 3.281 aN (compared to gold standard)
# 1e-9 64 s, STD = 0.170 aN (compared to gold standard)
# 0e-9 806 s, gold standard
# Calculation times for force extension curve with boltzmann_factor = 1e-5 and iterative on one cpu
# F_ssDNA buffer calculation would need roughly the same amount of time as E_ext_ssDNA calculation
# F_ssDNA would be only called once for every call of E_ext_ssDNA -> not reasonable to do buffer F_ssDNA
# nuz_est iterative
# E_ext_ssDNA buffered
# ext_dsDNA_wlc buffered
# F_ssDNA buffered
# without_b buffer_calc with_b total_one_run
# + + + - elapsed time: 138.7 s -> 34.675 s + 121.9 s -> 156.575 s 371 % -> only feasable, if calculated ~ 12 x
# + - + - elapsed time: 168.6 s -> 42.15 s 100 % -> only feasable, if 10 s per calculation important
# + + - - elapsed time: 2853.1 s -> 713.275 s + 121.9 s -> 835.175 s 1981 %
# + - - - elapsed time: 2872.2 s -> 718.05 s 1704 %
# - + + - elapsed time: 173.3 s -> 43.325 s + 121.9 s -> 165.225 s 392 % -> only feasable, if calculated ~ 12 x
# - - + - elapsed time: 215.1 s -> 53.775 s 128 % -> most feasable settings
# - + - - elapsed time: 3XXX.X s -> not measured, only estimated
# - - - - elapsed time: 3641.0 s -> 910.25 s 2160 %
"""
import cloudpickle
import hashlib
import math
import mpmath
import numpy as np
import os
import pickle
import time
import warnings
from multiprocessing import Pool
from scipy import constants
from scipy.integrate import quad
from scipy.optimize import fminbound, minimize_scalar
# from scipy.optimize import fmin_l_bfgs_b, minimize, brent
# minimize_scalar alone causes imprecise results
from scipy.optimize._minimize import _minimize_lbfgsb
from matplotlib import pyplot as plt
# import cn_plot_style as cnps
# Boltzmann constant
kB = constants.value('Boltzmann constant')
cal = constants.calorie
Na = constants.Avogadro
kcal = 1e+3*cal
# Set digits of precision
mpmath.mp.dps = 30
_ssDNA_DEFAULT = {
'S': 800e-12,
'L_p': 7.97e-10,
'z': 0.537e-9
}
_dsDNA_DEFAULT = {
'pitch': 0.338e-9,
'L_p': 50e-9
}
_E_pair = {
# Huguet et. al. 2010, table 1, 1M NaCl
# Energies from Huguet 2010 are given for 298 K
# one Purine and one Pyrimidine or
# two successive Purines/Pyrimidines with same bases
'AA': 1.23*kcal/Na,
'TT': 1.23*kcal/Na, #
'AC': 1.49*kcal/Na,
'TG': 1.49*kcal/Na, #
'AG': 1.36*kcal/Na,
'TC': 1.36*kcal/Na, #
'CA': 1.66*kcal/Na,
'GT': 1.66*kcal/Na, #
'CC': 1.93*kcal/Na,
'GG': 1.93*kcal/Na, #
'GA': 1.47*kcal/Na,
'CT': 1.47*kcal/Na, #
# two successive Purines/Pyrimidines with different bases
'AT': 1.17*kcal/Na,
'CG': 2.37*kcal/Na,
'GC': 2.36*kcal/Na,
'TA': 0.84*kcal/Na,
# TODO: include proper energy term for the first and last bp
# Ander PhD thesis 2011
# kB*T = 4.1 pN*nm -> T ~ 298 K
'A': 1.2*kB*298,
'T': 1.2*kB*298,
'G': 3.4*kB*298,
'C': 3.4*kB*298
# energies Bockelmann et. al. 1997
# for S=800pN, L_p_ssDNA=0.75nm, z=0.61nm/bp
# 'AT': 1.3*kB*298
# 'GC': 2.9*kB*298
# for S=800pN, L_p_ssDNA=1.35nm, z=0.56nm/bp
# 'AT': 1.6*kB*298
# 'GC': 3.2*kB*298
}
_M_pair = {
# Huguet et. al. 2010, table 1, NaCl concentration correction factor
# Energies from Huguet et. al. 2010 are given for 298 K
'AA': 0.145*kcal/Na,
'TT': 0.145*kcal/Na, #
'AC': 0.099*kcal/Na,
'TG': 0.099*kcal/Na, #
'AG': 0.070*kcal/Na,
'TC': 0.070*kcal/Na, #
'CA': 0.091*kcal/Na,
'GT': 0.091*kcal/Na, #
'CC': 0.063*kcal/Na,
'GG': 0.063*kcal/Na, #
'GA': 0.155*kcal/Na,
'CT': 0.155*kcal/Na, #
'AT': 0.117*kcal/Na,
'CG': 0.132*kcal/Na,
'GC': 0.079*kcal/Na,
'TA': 0.091*kcal/Na,
}
_DH_pair = {
# Huguet et. al. 2010, table 2, enthalpy (kcal/mol)
'AA': 7.28*kcal/Na,
'TT': 7.28*kcal/Na, #
'AC': 5.80*kcal/Na,
'TG': 5.80*kcal/Na, #
'AG': 5.21*kcal/Na,
'TC': 5.21*kcal/Na, #
'CA': 8.96*kcal/Na,
'GT': 8.96*kcal/Na, #
'CC': 8.57*kcal/Na,
'GG': 8.57*kcal/Na, #
'GA': 8.16*kcal/Na,
'CT': 8.16*kcal/Na, #
'AT': 4.63*kcal/Na,
'CG': 9.66*kcal/Na,
'GC': 10.10*kcal/Na,
'TA': 8.31*kcal/Na
}
_DS_pair = {
# Huguet et. al. 2010, table 2, entropy (cal/mol)
'AA': 20.28*cal/Na,
'TT': 20.28*cal/Na, #
'AC': 14.46*cal/Na,
'TG': 14.46*cal/Na, #
'AG': 12.89*cal/Na,
'TC': 12.89*cal/Na, #
'CA': 24.48*cal/Na,
'GT': 24.48*cal/Na, #
'CC': 22.30*cal/Na,
'GG': 22.30*cal/Na, #
'GA': 22.46*cal/Na,
'CT': 22.46*cal/Na, #
'AT': 11.62*cal/Na,
'CG': 24.43*cal/Na,
'GC': 25.96*cal/Na,
'TA': 25.06*cal/Na
}
def get_unzipping_simulation(simulation_settings_file, simulations_dir=None,
simulation_file=None, read=True, save=True,
**kwargs):
"""
simulation_settings_file : str
Filepath to a simulation file with the settings for the simulation.
simulations_dir : str
Directory from where to read and to where to save the simulation.
Defaults to '.'.
simulation_file : str
Name of the simulation file to load. If no name is supplied it defaults
to 'hash_key' of the settings of the `simulation_settings_file`, as
returned by the function `get_key()` + the file extension '.p'.
read : bool
Try to read a preexisting simulation_file. If the file does not exist
or `read` is False, do the simulation with the function
`simulate_unzipping()`.
save : bool
Save the simulation result, if the simulation could not be read.
**kwargs
Keyword arguments with settings overwriting the default settings of the
`simulation_settings_file`.
"""
# Get simulation settings and settings encoded as hash key
simulation = simulation_settings(simulation_settings_file, **kwargs)
hash_key = get_key(**simulation['settings'])
# Get path of simulation
simulations_dir = '.' if simulations_dir is None else simulations_dir
simulation_file = simulation_file or ''.join((hash_key, '.p'))
simulation_file = os.path.join(simulations_dir, simulation_file)
# Load or do the simulation
if read and os.path.isfile(simulation_file):
with open(simulation_file, 'rb') as f:
simulation = pickle.load(f)
return simulation
else:
# Do the simulation
simulation = simulate_unzipping(simulation)
# Save the simulation
if save:
directory = os.path.dirname(simulation_file)
os.makedirs(directory, exist_ok=True)
with open(simulation_file, 'wb') as f:
pickle.dump(simulation, f)
return simulation
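# Illustrative sketch (not part of the original module) of a typical call,
# following the docstring above; the file name 'unzipping_settings.p' and the
# directory 'simulations' are hypothetical examples.
def _example_get_unzipping_simulation():
    simulation = get_unzipping_simulation('unzipping_settings.p',
                                          simulations_dir='simulations')
    XFE = simulation['XFE']
    # stage displacement, average extension and weighted-average force
    return XFE['X0'], XFE['EX_avg'], XFE['F0_avg']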
def simulation_settings(simulation_file, **kwargs):
# Get simulation settings
with open(simulation_file, 'rb') as f:
simulation = pickle.load(f)
simulation['settings'].update(kwargs)
return {'settings': simulation['settings']}
def get_key(x0_min, x0_max, y0, h0, resolution,
bases, nbs, nbp, nbs_loop,
radius, kappa,
S, L_p_ssDNA, z,
pitch, L_p_dsDNA,
NNBP, c, e_loop, T, boltzmann_factor,
**kwargs):
hasher = hashlib.md5()
for c in [x0_min, x0_max, y0, h0, resolution,
bases.capitalize(), nbs, nbp, nbs_loop,
radius,
S, L_p_ssDNA, z,
pitch, L_p_dsDNA,
NNBP, c, e_loop, T, boltzmann_factor]:
hasher.update(bytes(str(c), 'ASCII'))
hasher.update(kappa)
key = hasher.hexdigest()
return key
def simulate_unzipping(simulation_settings, processes=8):
simulation = simulation_settings
# Set the unzipping construct parameters
bases = simulation['settings']['bases']
nbs = simulation['settings']['nbs']
nbp = simulation['settings']['nbp']
nbs_loop = simulation['settings']['nbs_loop']
S = simulation['settings']['S']
L_p_ssDNA = simulation['settings']['L_p_ssDNA']
z = simulation['settings']['z']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
# Set other experimental parameters
radius = simulation['settings']['radius']
kappa = simulation['settings']['kappa']
c = simulation['settings']['c']
e_loop = simulation['settings']['e_loop']
T = simulation['settings']['T']
# Set parameters for the simulation
NNBP = simulation['settings']['NNBP']
x0_min = simulation['settings']['x0_min']
x0_max = simulation['settings']['x0_max']
h0 = simulation['settings']['h0']
y0 = simulation['settings']['y0']
resolution = simulation['settings']['resolution']
boltzmann_factor = simulation['settings']['boltzmann_factor']
XFE, XFE0 = unzipping_force_energy(x0_min, x0_max, y0=y0, h0=h0,
resolution=resolution,
processes=processes,
bases=bases, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
boltzmann_factor=boltzmann_factor,
individual_points=True)
simulation['XFE'] = XFE
simulation['XFE0'] = XFE0
return simulation
def unzipping_force_energy(x0_min, x0_max, y0=0.0, h0=0.0, resolution=1e-9,
processes=8,
bases='', nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9,
individual_points=False, verbose=False,
F_ssDNA_mod=None, E_ext_ssDNA_mod=None,
ext_dsDNA_wlc_mod=None):
# Assign DNA model functions to the variables of the global (module) scope,
# such that `multiprocessing.Pool` will see these variables.
global F_ssDNA
global E_ext_ssDNA
global ext_dsDNA_wlc
# Set DNA model functions to the unbuffered default functions
F_ssDNA = F_ssDNA_mod or _F_ssDNA
E_ext_ssDNA = E_ext_ssDNA_mod or _E_ext_ssDNA
# Initialize the approximations of the ssDNA/dsDNA model functions with
# fixed model function parameters and substitute the original DNA model
# functions
# F_ssDNA is implicitly buffered with `ext_dsDNA_wlc`.
# Buffered `E_ext_ssDNA` does not speed up calculation.
# E_ext_ssDNA = \
# init_buf_E_ext_ssDNA(read=False, write=False, filename='E_ext_ssDNA',
# processes=processes,
# bases=bases, nbs=nbs, nbs_loop=nbs_loop,
# S=S, L_p=L_p_ssDNA, z=z, T=T)
ext_dsDNA_wlc = ext_dsDNA_wlc_mod or \
init_buf_ext_dsDNA_wlc(nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
resolution = int(np.round((x0_max - x0_min) / resolution + 1))
X0 = np.linspace(x0_min, x0_max, resolution)
# Speed up calculation with the multiprocessing package by taking the
# nuz_est from previous calculation for each subsequent calculation
xfe0_nuz = _xfe0_nuz_chained()
# Define a closure to be executed by the pool
def f(x0):
print('\rCalculating equilibrium for stage displacement x0 = {:.3e}'
'...'.format(x0), end='', flush=True)
A0 = attachment_point(x0, y0=y0, h0=h0, radius=radius)
return xfe0_nuz(A0, bases=bases, nbs=nbs, nbp=nbp, nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor)
f = unboundfunction(f)
# Process the function in a pool of `processes` parallel workers
with Pool(processes=processes) as pool:
start = time.time()
XFE0 = pool.map(f, X0)
stop = time.time()
print('\nDone, elapsed time: {:.1f} s'.format(stop - start))
# combine all individually simulated points into one array
XFE = {
'X0': np.array([xfe0['settings']['A0'][0] for xfe0 in XFE0]),
'EX_avg': np.array([xfe0['EX_avg'] for xfe0 in XFE0]),
'NUZ0_avg': np.array([xfe0['NUZ0_avg'] for xfe0 in XFE0]),
'NUZ0_max_W0': np.array([xfe0['NUZ0_max_W0'] for xfe0 in XFE0]),
'D0_avg': np.array([xfe0['D0_avg'] for xfe0 in XFE0]),
'F0_avg': np.array([xfe0['F0_avg'] for xfe0 in XFE0]),
'F0_max_W0': np.array([xfe0['F0_max_W0'] for xfe0 in XFE0]),
'settings': {
'x0_min': x0_min,
'x0_max': x0_max,
'y0': y0,
'h0': h0,
'resolution': resolution,
'bases': bases,
'nbs': nbs,
'nbp': nbp,
'nbs_loop': nbs_loop,
'radius': radius,
'kappa': kappa,
'S': S,
'L_p_ssDNA': L_p_ssDNA,
'z': z,
'pitch': pitch,
'L_p_dsDNA': L_p_dsDNA,
'NNBP': NNBP,
'c': c,
'e_loop': e_loop,
'T': T,
'spacing': spacing,
'min_stepsize': min_stepsize,
'boltzmann_factor': boltzmann_factor
}
}
if individual_points:
return XFE, XFE0
return XFE
def attachment_point(x0, y0=0.0, h0=0.0, radius=0.0):
"""
x0 : float
Position of the stage x (m) relative to the trap center.
y0 : float
Position of the stage y (m) relative to the trap center.
h0 : float
Distance (m) of the bead surface to the glass surface, if
the bead is in its resting position, i.e. no force in the
vertical direction is applied.
radius : float
Radius of the bead (m).
"""
return np.array([x0, y0, - (h0 + radius)])
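# For illustration (hypothetical values): a stage displaced 800 nm in x, with
# the bead resting 200 nm above the surface and a bead radius of 500 nm, gives
# an attachment point of [8.0e-7, 0.0, -7.0e-7] relative to the trap center:
#     attachment_point(800e-9, h0=200e-9, radius=500e-9)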
def xfe0_nuz(A0, bases='', nuz_est=-1, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9, verbose=False):
"""
Calculate the equilibrium extensions, forces and energies for a given stage
displacement `x0` for most probable numbers of unzipped basepairs and find
the number of unzipped bases, at which the unzipping fork will most likely
fluctuate. Fluctuations of the extensions of the DNA and the bead in the
trap are ignored.
To speed up the calculation, only the numbers of unzipped bases around which
the unzipping fork will fluctuate are considered: energies are calculated only
around the most likely nuz and, if no nuz_est is given, a binary search first
finds the most likely nuz.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
bases : str
Sequence of sense strand of dsDNA which is (will be) unzipped
nuz_est : int
Estimate number of unzipped basepairs. 0 <= `nuz_est` <= `nuz_max`.
If `nuz_est` < 0, the number is approximated automatically with a
binary search using the function `approx_eq_nuz`.
nbs : int
Number of extra ssDNA bases in the construct
nbp : int
Number of basepairs of dsDNA spacer
nbs_loop : int
Number of extra ssDNA bases in the hairpin
radius : float
Radius of the bead (m).
kappa : float or np.ndarray of type float
Stiffness for x, [x, z], or [x, y, z] of lever (handle) attached to DNA
in N/m.
boltzmann_factor : float
The minimum probability each number of unzipped basepairs (nuz) state
has to have relative to the most probable one to be considered in the
calculation. The smaller the boltzmann_factor, the more exact the
result is. The larger the boltzmann factor is, the faster the
calculation. The default of 1e-9 corresponds to more than 20 kT
difference (mpmath.exp(-20) > 1e-9).
"""
# Maximum number of unzippable bps
nuz_max = len(bases)
# If hairpin exists, add one possible unzipping event representative for
# opening the hairpin
if nbs_loop > 0:
nuz_max += 1
# Either select number of unzipped basepairs with low energy state only, or
# select all possible number of unzipped basepairs.
if boltzmann_factor <= 0:
# All nuz will be calculated, start in the middle
nuz_est = int(round(nuz_max / 2))
elif nuz_est < 0:
# Autodetermine the approximate nuz which will have the lowest energy
# state, i.e. will be in equilibrium between open and closed state.
nuz_est = approx_eq_nuz(A0, bases=bases, nbs=nbs, nbp=nbp,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, T=T,
spacing=spacing, min_stepsize=min_stepsize,
verbose=verbose)
else:
# Set nuz_est to valid value
nuz_est = max(0, nuz_est)
nuz_est = min(nuz_est, nuz_max)
# Find and calculate energy for the most likely nuzes, i.e. nuzes with a
# low energy state. First, define a function to calculate force, extension,
# energy, and weight for a given number of unzipped basepairs and define
# variables to be filled upon calculation.
NUZ0 = []
EX_ss = []
EX_ds = []
D0 = []
F0 = []
E0 = []
W0 = []
def eq_few0(nuz, w0_likely):
ex_ss, ex_ds, d0, f0, e0 = \
equilibrium_xfe0(A0, bases=bases, nuz=nuz, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
verbose=verbose)
NUZ0.append(nuz)
EX_ss.append(ex_ss)
EX_ds.append(ex_ds)
D0.append(d0)
F0.append(f0)
E0.append(e0)
w0 = mpmath.exp(- e0 / (kB*T))
W0.append(w0)
# Select a new most likely weight, if the energy is smaller than all
# previously calculated energies and was not calculated for the maximum
# nuz (fully opened construct). Opening the end loop can cause a huge
# drop of the energy, which would otherwise lead to too many calculated
# energies being falsely discarded by the boltzmann_factor selection.
if w0 > w0_likely and nuz < nuz_max:
w0_likely = w0
stop = False
# Stop if energy difference > - mpmath.log(boltzmann_factor).
if w0_likely != 0 and w0 / w0_likely < boltzmann_factor:
stop = True
return w0_likely, stop
# Then, Calculate energy for estimated nuz.
w0_likely, _ = eq_few0(nuz_est, 0)
# Iteratively find and calculate energy for the most likely nuzes:
# Calculate energies of neighbouring nuzes (nuz_left / nuz_right) starting
# from the estimated nuz. The most likely nuz is constantly tracked. The
# calculation is stopped, if either the weight of the nuzes do differ more
# than the weight of the most likely nuz times the boltzmann_factor or if
# there are no more nuzes left to calculate energies from.
nuz_left = nuz_est - 1
nuz_right = nuz_est + 1
stop_left = nuz_left < 0
stop_right = nuz_right > nuz_max
while not (stop_left and stop_right):
if not stop_left:
w0_likely, stop_left = eq_few0(nuz_left, w0_likely)
nuz_left -= 1
# stop, if nuz_left is negative
stop_left = stop_left or nuz_left < 0
if not stop_right:
w0_likely, stop_right = eq_few0(nuz_right, w0_likely)
nuz_right += 1
# stop, if nuz_right is larger than number of unzippable basepairs
stop_right = stop_right or nuz_right > nuz_max
# Finally, select all nuzes that are at least equally likely as the most
# likely nuz times the boltzmann_factor.
W0 = np.array(W0)
idx_vld = W0 / w0_likely >= boltzmann_factor
# Sort nuz and other values in ascending order
NUZ0 = np.array(NUZ0)[idx_vld]
idx_srt = np.argsort(NUZ0)
NUZ0 = NUZ0[idx_srt]
EX_ss = np.array(EX_ss)[idx_vld][idx_srt]
EX_ds = np.array(EX_ds)[idx_vld][idx_srt]
D0 = np.array(D0)[idx_vld][idx_srt]
F0 = np.array(F0)[idx_vld][idx_srt]
E0 = np.array(E0)[idx_vld][idx_srt]
W0 = W0[idx_vld][idx_srt]
# Calculate weighted averages of unzipped basepairs, bead displacements,
# force, and extension of the construct
W0_sum = W0.sum()
P0 = W0 / W0_sum
NUZ0_avg = (NUZ0 * W0).sum() / W0_sum
D0_avg = (D0 * W0[np.newaxis].T).sum(axis=0) / W0_sum
F0_avg = (F0 * W0).sum() / W0_sum
EX_avg = ((EX_ss + EX_ds) * W0).sum() / W0_sum
# Select values of most likely state
idx_max = W0.argmax()
NUZ0_max_W0 = NUZ0[idx_max]
F0_max_W0 = F0[idx_max]
W0_max = W0[idx_max]
r = {
'EX_avg': EX_avg,
'NUZ0': NUZ0,
'NUZ0_avg': NUZ0_avg,
'NUZ0_max_W0': NUZ0_max_W0,
'D0': D0,
'D0_avg': D0_avg,
'F0': F0,
'F0_avg': F0_avg,
'F0_max_W0': F0_max_W0,
'E0': E0,
'W0': W0,
'W0_max': W0_max,
'P0': P0,
'settings': {
'A0': A0,
'bases': bases,
'nbs': nbs,
'nbp': nbp,
'nbs_loop': nbs_loop,
'radius': radius,
'kappa': kappa,
'S': S,
'L_p_ssDNA': L_p_ssDNA,
'z': z,
'pitch': pitch,
'L_p_dsDNA': L_p_dsDNA,
'NNBP': NNBP,
'c': c,
'e_loop': e_loop,
'T': T,
'spacing': spacing,
'min_stepsize': min_stepsize,
'boltzmann_factor': boltzmann_factor
}
}
return r
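# Minimal sketch (toy numbers, not part of the simulation) of the Boltzmann
# weighting used in xfe0_nuz above: every retained state nuz enters with
# weight exp(-E0 / (kB*T)) and observables are weighted averages over states.
def _example_boltzmann_average(T=298.2):
    E0 = np.array([0.0, 1.0, 4.0]) * kB * T      # toy state energies (J)
    NUZ0 = np.array([10, 11, 12])                # toy numbers of unzipped bps
    W0 = np.exp(-E0 / (kB * T))                  # Boltzmann weights
    return (NUZ0 * W0).sum() / W0.sum()          # weighted-average nuz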
class _xfe0_nuz_chained(object):
"""Speed up calculation of xfe0_nuz by taking the nuz_est from previous
calculation for next calculation
The object of this class is a drop in replacement for the original
`xfe0_nuz` function, if using the the multiprocessing package.
Each process gets its own copy of the a _xfe0_nuz_chained object, which is
initialized with nuz_est = -1. Upon each call nuz_est is set to the
previous outcome of the calculated NUZ0_avg.
"""
def __init__(self):
self.nuz_est = -1
def __call__(self, A0, bases='', nuz_est=-1, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9, verbose=False):
if nuz_est == -1:
nuz_est = self.nuz_est
r = xfe0_nuz(A0, bases=bases, nuz_est=nuz_est, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor, verbose=verbose)
self.nuz_est = int(round(r['NUZ0_avg']))
return r
def approx_eq_nuz(A0, bases='', nbs=0, nbp=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, T=298.2,
spacing=5, min_stepsize=10, verbose=False):
"""
Find the approximate number of unzipped basepairs to which the unzipping
construct automatically adjusts itself when in equilibrium.
The search is performed as a binary search, i.e. the number of calculations
to find the number of unzipped basepairs is of class O(log(n)), where n is
the number of basepairs in the unzipping segment.
"""
# maximal number of unzipped basepairs
nuz_max = len(bases)
# verify spacing and set limits for nuz
spacing = min(spacing, nuz_max)
minimum = 0
maximum = nuz_max - spacing
# initialize step size and starting nuz
step = int(round((maximum - minimum) / 2))
nuz = int(round((maximum - minimum) / 2))
def unzip_for_eq(nuz=0):
"""
Calculate the gradient of the energy.
Return True, if the unzipping construct has to be further unzipped to
reach equilibrium. Return False, if the unzipping construct has to be
further annealed to reach equilibrium. Ignore the opening of the
end loop (nbs_loop=0, e_loop=0.0) for finding the minimum of the total
energy, to avoid falsely high numbers of unzipped basepairs due to the
energy jump upon opening of the end loop.
"""
nuzl = nuz
nuzr = nuz + spacing
_, _, _, f0l, e0l = \
equilibrium_xfe0(A0, bases=bases, nuz=nuzl, nbs=nbs, nbp=nbp,
nbs_loop=0, radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=0.0, T=T,
verbose=verbose)
_, _, _, f0r, e0r = \
equilibrium_xfe0(A0, bases=bases, nuz=nuzr, nbs=nbs, nbp=nbp,
nbs_loop=0, radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=0.0, T=T,
verbose=verbose)
return e0l > e0r
# Search for the approximate number of unzipped basepairs, to be in
# equilibrium
i = 0
while step > min_stepsize:
i += 1
if unzip_for_eq(nuz=nuz):
if verbose:
print('nuz + step -> new: {} + {}'.format(nuz, step),
end=' -> ')
nuz += step
else:
if verbose:
print('nuz - step -> new: {} - {}'.format(nuz, step),
end=' -> ')
nuz -= step
if verbose:
print(nuz)
if nuz < minimum or nuz > maximum:
# unzipping construct has to be either fully closed or fully opened
# to be in equilibrium -> stop the loop and return either 0 or
# nuz_max
step = 0
nuz = max(0, nuz)
nuz = min(nuz, nuz_max)
# half the stepsize
step = int(round(step / 2))
if verbose:
print('Number of iterations to find approximation of eq nuz: {}'
''.format(i))
return nuz
def equilibrium_xfe0(A0, bases='', nuz=0, nbs=0, nbp=0, nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, e_loop=0.0, T=298.2,
verbose=False):
"""
Calculate the equilibrium extension, force, and energy for a given stage
displacement `x0` and a fixed set of the following parameters.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
nuz : int
Number of unzipped basepairs
nbs : int
Number of extra ssDNA bases in the construct
nbp : int
Number of basepairs of dsDNA spacer
nbs_loop : int
Number of extra ssDNA bases in the hairpin
radius : float
Radius of the bead (m).
kappa : np.ndarray of type float
Stiffness for [x, y, z] of lever (handle) attached to DNA in N/m.
"""
# One unzipped basepair leads to 2 free ssDNA bases
nbs = 2*nuz + nbs
# If unzipping fork has reached the last basepair and the end loop of the
# unzipping construct should be unzipped, elongate the ssDNA by nbs_loop
# bases
if nbs_loop > 0 and nuz >= len(bases) + 1:
nbs += nbs_loop
# Calculate most probable force for
# number of unzipped bases nbs and
# number of basepairs nbp and
# stage displacement x0 = A0[0]
f0, d0 = F_0(A0, nbs=nbs, S=S, L_p_ssDNA=L_p_ssDNA, z=z, T=T,
nbp=nbp, pitch=pitch, L_p_dsDNA=L_p_dsDNA,
radius=radius, kappa=kappa,
verbose=verbose)
# Calculate most probable extension for most probable force for
# both of the two ssDNA strands and
# one dsDNA strand for
# number of unzipped base pairs nuz
ex_ss = ext_ssDNA(f0, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
ex_ds = ext_dsDNA_wlc(f0, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
e0 = E_tot(bases=bases, nuz=nuz, nbs=nbs, ex_ss=ex_ss,
nbp=nbp, ex_ds=ex_ds,
displacement=d0, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T, verbose=verbose)
if verbose:
template = "nuz: {:03d}, f0: {:.3e}, e0: {:.3e}"
print(template.format(nuz, f0, e0))
return ex_ss, ex_ds, d0, f0, e0
def F_0(A0, nbs=0, S=None, L_p_ssDNA=None, z=None, T=298.2,
nbp=0, pitch=None, L_p_dsDNA=None,
f_min=0e-12, f_max=200e-12, xtol=1e-18,
radius=0.0, kappa=None,
verbose=False):
"""
Find most probable force for a given sample displacement `A0`, a number of
separated basepairs `nuz`, and number of basepairs `nbp` of dsDNA spacers.
Extended 3D version of the equation (10) in Bockelmann 1998.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
nbs : int
Number of bases of ssDNA strand
nbp : int
Number of basepairs of dsDNA spacers
radius : float
Radius of the bead (m).
kappa : float or np.ndarray of type float
Stiffness for x, [x, z], or [x, y, z] of lever (handle) attached to DNA
in N/m.
"""
# Find the equilibrium force f at given nuz and A0
def f_lev_cost(f, return_d=False):
ex_ss = ext_ssDNA(f, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
ex_ds = ext_dsDNA_wlc(f, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA, T=T)
if A0[1] == 0.0 and len(kappa) == 2:
# Simulate in 2D only (3x as fast as 3D)
x0 = A0[0]
z0 = - A0[2] - radius
f_construct, d = F_construct_2D(x0, z0=z0,
ex_ss=ex_ss, ex_ds=ex_ds,
radius=radius, kappa=kappa)
else:
# Simulate in 3D
f_construct, d = F_construct_3D(A0, ex_ss=ex_ss, ex_ds=ex_ds, f_dna=f,
radius=radius, kappa=kappa,
verbose=verbose)
if return_d:
return d
return (f - f_construct)**2
# Find the force, which will result in input extension
f0 = fminbound(f_lev_cost,
f_min,
f_max,
xtol=xtol)
# calculate bead displacements in the trap for found force
d = f_lev_cost(f0, return_d=True)
return f0, d
def ext_ssDNA(F, nbs=0, S=None, L_p=None, z=None, T=298.2, avoid_neg_ext=True):
"""
Freely jointed chain (FJC) model, relating the total polymer length
ext_ssDNA to an applied force F.
<NAME>., <NAME>., <NAME>. (1998). "DNA strand
separation studied by single molecule force measurements". Physical Review
E, 58(2), 2386-94.
<NAME>, <NAME>, <NAME> (1996). "Overstretching
B-DNA: The Elastic Response of Individual Double-Stranded and
Single-Stranded DNA Molecules". Science Reports, 271, 795-799
Contour length of ssDNA: L_0 = nbs*z
Kuhn length b (in FJC b = 2 * persistence length), in paper: b = 1.5 nm
Parameters
----------
nbs : int
Number of bases of ssDNA
F : float
Force in N
S : float
Stretch modulus in N. Defaults to 800e-12 N.
L_p : float
Persistence length in m. Defaults to 0.797e-9 m.
z : float
Length of a single base in m. Defaults to 0.537e-9 m.
T : float
Temperature in K.
avoid_neg_ext : bool
Avoid negative extension due to rounding errors close to zero.
Returns
-------
float
Extension in m
"""
S = S or _ssDNA_DEFAULT['S']
L_p = L_p or _ssDNA_DEFAULT['L_p']
z = z or _ssDNA_DEFAULT['z']
if F == 0:
return 0
if nbs <= 0:
return 0
sign = 1
if F < 0:
F = -F
sign = -1
# Prevent float overflow in downstream calculation leading to
# too high x value. The exact value seems to depend on the system.
# On a laptop with Intel(R) Core(TM) i7-7500U CPU @ 2.70GHz a
# value of F < 1.28323360182078e-26 was sufficient. However, on
# another Intel system it was not. Therefore, to be on the safe
# side, choose a value as a break criterion of F < 1e-17 N
# (i.e. 10 aN), which is more than sufficiently small to not
# affect a precise determination of x and still should work on
# most systems.
# Alternatively, one could use mpmath.coth, but would have to live
# with a 10 fold increase in execution time.
if F < 1e-17:
return 0
b = 2*L_p
# modified FJC model incorporating Kuhn segments that can stretch
# entropic (coth term) and stretching (F/S term) contribution
x = nbs*z * (coth(F*b / (kB*T)) - kB*T / (F*b)) * (1 + F/S)
if avoid_neg_ext:
x = max(x, 0)
return sign * x
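# Worked example (illustrative, approximate numbers): with the default ssDNA
# parameters, 100 bases pulled at 10 pN extend to roughly 40 nm, i.e. about
# 75 % of the contour length nbs*z (~54 nm).
def _example_ext_ssDNA():
    return ext_ssDNA(10e-12, nbs=100)   # extension in m, roughly 4e-8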
def coth(x):
"""
Cotangens hyperbolicus
"""
typ = type(x)
if typ is mpmath.mpf or typ is mpmath.mpc:
return mpmath.coth(x)
else:
# return np.cosh(x) / np.sinh(x) # can cause overflows
return (1 + np.exp(-2*x)) / (1 - np.exp(-2*x))
def _ext_dsDNA_wlc(F, nbp=0, pitch=None, L_p=None, T=298.2, x_min=0e-12,
xtol=1e-15):
# See also default value in function in `F_dsDNA_wlc`
pitch = pitch or _dsDNA_DEFAULT['pitch']
if F == 0:
return 0
if nbp <= 0:
return 0
sign = 1
if F < 0:
F = -F
sign = -1
# WLC only valid in the interval x = (-L_0, L_0)
# - 1e-25 # corresponds to force of inf # >> 2e+9 N
x_max = nbp * pitch
# Numerical invert ext_dsDNA function
def ext_dsDNA_cost(x):
return (F_dsDNA_wlc(x, nbp=nbp, pitch=pitch, L_p=L_p, T=T) - F)**2
# Find the force, which will result in input extension
# To speed up the minimization, first find an imprecise
# answer with a quick algorithm and then make it precise
# x_fit = minimize_scalar(ext_dsDNA_cost,
# bounds=(x_min, x_max),
# options={'xtol': xtol}).x
# x_min = x_fit - 1e-10
# x_max = x_fit + 1e-10
x_fit = fminbound(ext_dsDNA_cost,
x_min,
x_max,
xtol=xtol)
return sign * x_fit
def _F_ssDNA(x, nbs=0, S=None, L_p=None, z=None, T=298.2, f_min=None,
f_max=None, xtol=None, avoid_neg_F=True):
"""
Freely jointed chain (FJC) model, relating the applied force F to a
total polymer length ext_ssDNA.
Contour length of ssDNA: L_SS = j*z
Kuhn length b (in FJC b = 2 * persistence length), in paper: b=1.5e-9
Parameters
----------
nbs : int
Number of bases of ssDNA
x : float
Extension in m
S : float
Stretch modulus in N
L_p : float
Persistence length in m
z : float
Length of a single base in m
T : float
Temperature in K
"""
f_min = f_min or 0e-12
f_max = f_max or 200e-12
xtol = xtol or 1e-18
if x == 0:
return 0
if nbs <= 0:
return 0
sign = 1
if x < 0:
x = -x
sign = -1
# Numerical invert ext_ssDNA function
def f_ssDNA_cost(f):
return (ext_ssDNA(f, nbs=nbs, S=S, L_p=L_p, z=z, T=T,
avoid_neg_ext=False) - x)**2
# Find the force, which will result in input extension
# To speed up the minimization, first find an imprecise
# answer with a quick algorithm and then make it precise
# f_fit = minimize(f_ssDNA_cost,
# x0=12e-9,
# bounds=((f_min, f_max), ),
# tol=xtol).x
# f_fit = brent(f_ssDNA_cost,
# brack=(f_min, f_max),
# tol=xtol)
f_fit = minimize_scalar(f_ssDNA_cost,
bounds=(f_min, f_max),
options={'xtol': xtol}).x
f_min = f_fit - 1e-10
f_max = f_fit + 1e-10
f_fit = fminbound(f_ssDNA_cost,
f_min,
f_max,
xtol=xtol)
if avoid_neg_F:
f_fit = max(f_fit, 0)
return sign * f_fit
def F_dsDNA_wlc(x, nbp=0, pitch=None, L_p=None, T=298.2):
"""
A worm-like chain model.
Parameters
----------
x : float
Extension (m)
pitch : float
Contour length (m). Also denoted as 'L_0'.
L_p : float
Persistence length (m)
T : float
Temperature (K)
Returns
-------
1D numpy.ndarray of type float
Force (N).
"""
pitch = pitch or _dsDNA_DEFAULT['pitch']
L_p = L_p or _dsDNA_DEFAULT['L_p']
if x == 0:
return 0
if nbp <= 0:
return 0
sign = 1
if x < 0:
x = -x
sign = -1
# Contour length
L_0 = nbp*pitch
if x >= L_0:
return float('inf') * sign
# <NAME>.; <NAME>. "Stretching DNA". Macromolecules. 1995. 28:
# 8759–8770. doi:10.1021/ma00130a008
# F = kB * T / L_p * (1 / (4 * (1 - x / L_0)**2) - 1/4 + x / L_0)
# Relative extension
x = x / L_0
# <NAME>. "Improved approximations for some polymer extension
# models". Rehol Acta. 2016. doi:10.1007/s00397-016-0977-9
F = kB * T / L_p * (1 / (4 * (1 - x)**2) - 1/4 + x - 0.8 * x**2.15)
return F * sign
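# Worked example (illustrative, approximate numbers): a 3000 bp dsDNA spacer
# (contour length ~1 um) stretched to half its contour length resists with
# only ~0.1 pN; the WLC force rises steeply only close to full extension.
def _example_F_dsDNA_wlc():
    nbp = 3000
    x = 0.5 * nbp * _dsDNA_DEFAULT['pitch']     # half the contour length
    return F_dsDNA_wlc(x, nbp=nbp)              # force in N, roughly 9e-14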
def F_construct_2D(x0, z0=0.0, ex_ss=0.0, ex_ds=0.0, radius=0.0, kappa=0.0,
xtol=1e-18):
"""
Parameters
----------
x0 : float
Total displacement (m)
z0 : float
Distance of the bead surface to the glass surface, if
the bead is in its resting position, i.e. no force in
the vertical direction is applied (m).
ex_ss : float
Extension of ssDNA (m)
ex_ds : float
Extension of dsDNA (m)
radius : float
Radius of the bead/handle (m)
kappa : float or np.ndarray of type float
Spring constant (N/m). If `kappa` is of type float, only one axis
(i.e. X or Y) is considered. If `kappa` is of type np.ndarray, the
first number is X (or Y) axis and the second number is Z.
"""
# Pythagoras:
# a is horizontal distance of attachment point to the center of the bead
# b is vertical distance of the surface to the center of the bead
# c is extension of the construct (ex_ss + ex_ds) plus the bead radius (r)
# dx is the horizontal displacement of the bead (x or y)
# dz is the vertical displacement of the bead (z)
r = radius
z0_r = z0 + r
# a = x0 - dx
# b = z0_r - dz
c = ex_ss + ex_ds + r
# a**2 + b**2 = c**2
# ->
# (x0 - dx)**2 + (z0_r - dz)**2 = c**2
# dz = z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# dx = x0 - math.sqrt(c**2 - (z0_r - dz)**2)
# construct is longer than possible stretching with dx/dz >= 0.
# -> bead will equilibrate in the middle of the trap with zero force
if c**2 >= x0**2 + (z0_r)**2:
fxz = 0.0
dxz = np.array([0.0, 0.0])
return fxz, dxz
# If z0 is 0 or the stiffness of z is 0 bead will always
# touch the surface and dx only depends on x0, ex_ss, ex_ds, and r.
if z0 == 0 or isinstance(kappa, float):
if not isinstance(kappa, float):
kappa = kappa[0]
dx = x0 - math.sqrt(c**2 - r**2)
dz = z0
dxz = np.array([dx, dz])
# force that need to be acting on the construct to
# result in a corresponding horizontal force (in x/y)
fx = dx * kappa # * (x0 - dx) / c
cos_alpha = (x0 - dx) / c
fxz = fx / cos_alpha
return fxz, dxz
# displacement dz dependent upon dx
def _dz(dx):
# print('z0 {:.1e}, c {:.1e}, x0 {:.1e}, dx {:.1e}'
# ''.format(z0, c, x0, dx))
return z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# displacement dx dependent upon dz
def _dx(dz):
# x0 8.0e-07, c 1.3e-07, z0 2.0e-07, r 0.0e+00, dz 0.0e+00
# print('x0 {:.1e}, c {:.1e}, z0 {:.1e}, r {:.1e}, dz {:.1e}'
# ''.format(x0, c, z0, r, dz))
return x0 - math.sqrt(c**2 - (z0_r - dz)**2)
# difference of the ratio of the force in x/z to the ratio of a/b
# the construct with the handle equilibrates where diff == 0
def diff_tan_fxz_ab(dx):
a = x0 - dx
b = z0_r - _dz(dx)
fx = dx * kappa[0]
fz = _dz(dx) * kappa[1]
diff = b/a - fz/fx
# diff = math.sqrt(c**2 - (x0 - dx)**2)
# / (x0 - dx)
# - (_dz(dx) * kappa[1])
# / (dx * kappa[0])
return diff**2
# if construct is shorter than z0_r, dz has to be at least the difference
dz_min = max(0, z0_r - c)
# dz can be at max as large as z0, then the bead touches the surface
dz_max = z0
# dx has to be at least x0 - c
dx_min = max(0, x0 - c, _dx(dz_max))
dx_max = max(0, _dx(dz_min))
# print('dx_min {:.1e}, dx_max {:.1e}'.format(dx_min, dx_max))
# Calculate the displacement of x (and z), where the angle between the
# force vector of the construct and the force vector of the bead
# displacement is 0° (180°)
# Unfortunately, there is no analytical solution to this ...
dx = fminbound(diff_tan_fxz_ab, dx_min, dx_max, xtol=xtol)
# the force needed to be acting on the construct to result in a
# corresponding force acting on the handle
# the resulting force is the combination of the horizontal force acting on
# the handle and the normal force of the bead touching the surface and/or
# the vertical trapping force acting on the handle
fx = dx * kappa[0]
cos_alpha = (x0 - dx) / c
# print(fx / f(dx) - cos_alpha)
fxz = fx / cos_alpha
# print(dx, dz_min, dz_max, dx_min, dx_max)
# dz = z0_r - math.sqrt(c**2 - (x0 - dx)**2)
# print('dx {:.1e}, dz {:.1e}'
# ''.format(dx, z0_r - math.sqrt(c**2 - (x0 - dx)**2)))
# a = x0 - dx
# b = z0_r - _dz(dx)
# print('x0 {:.3e}, a {:.3e}, b {:.3e}, c {:.3e}'.format(x0, a, b, c))
# #print('dx {:.3e}, dz {:.3e}, fx{:.1e}, fz {:.1e}'
# # ''.format(dx, _dz(dx), dx*kappa[0], _dz(dx)*kappa[1]))
# #print('dzmin {:.1e}, dzmax {:.1e}, dxmin {:.1e}, dxmax {:.1e}, f {:.1e}'
# # ''.format(dz_min, dz_max, dx_min, dx_max, f(dx)))
dxz = np.array([dx, _dz(dx)])
return fxz, dxz
def F_construct_3D(A0, ex_ss=0.0, ex_ds=0.0, f_dna=0.0, radius=0.0, kappa=None,
factr=1e5, gtol=1e-5, eps_angle=1e-8,
verbose=False, deep_verbose=False, print_result=False,
return_plus=False):
"""
Origin of the coordinate system is the center of the trap [0, 0, 0].
The coordinates are given for a right handed cartesian coordinate system.
Parameters
----------
A0 : np.ndarray of type float
Position (m) of the DNA attachment point on the glass surface relative
to the trap center: [x, y, z].
ex_ss : float
Extension of ssDNA (m).
ex_ds : float
Extension of dsDNA (m).
f_dna : float
Force (N) acting on the DNA construct that corresponds to the
extensions `ex_ss` and `ex_ds`.
radius : float
Radius of the bead/handle (m)
kappa : np.ndarray of type float
Stiffness for [x, z] or [x, y, z] of lever (handle) attached to DNA in
N/m.
factr : float, optional
The iteration stops when
``(f^k - f^{k+1})/max{|f^k|,|f^{k+1}|,1} <= factr * eps``,
where ``eps`` is the machine precision, which is automatically
generated by the code. Typical values for `factr` are: 1e12 for
low accuracy; 1e7 for moderate accuracy; 10.0 for extremely
high accuracy. See Notes for relationship to `ftol`, which is exposed
(instead of `factr`) by the `scipy.optimize.minimize` interface to
L-BFGS-B.
gtol : float
The iteration will stop when ``max{|proj g_i | i = 1, ..., n}
<= gtol`` where ``pg_i`` is the i-th component of the
projected gradient.
eps_angle : float
Step size used for numerical approximation of the jacobian for the
fitting of the angle of the DNA/bead construct.
Notes
-----
The option `ftol` is exposed via the `scipy.optimize.minimize` interface,
but calling `scipy.optimize.fmin_l_bfgs_b` directly exposes `factr`. The
relationship between the two is ``ftol = factr * numpy.finfo(float).eps``.
I.e., `factr` multiplies the default machine floating-point precision to
arrive at `ftol`
"""
if kappa is None:
kappa = np.array([0, 0, 0])
# Length of the DNA construct
l_dna = ex_ss + ex_ds
# Initial distance of DNA attachment point on the glass surface to the bead
# center, i.e. the length of the DNA/bead construct
l_c = l_dna + radius
# Distance of the DNA glass attachment point to the trap center
l_A0 = np.linalg.norm(A0)
# Check, if the DNA/bead construct is longer than the distance of the
# attachment point to the trap center. If the DNA/bead construct is longer
# than possible stretching with displacement ||d|| >= 0, the bead will
# equilibrate in the middle of the trap with zero force.
if l_c >= l_A0:
angles_c = np.array(cart2sph(*-A0))[1:]
success_c = True
d = np.zeros(3)
f_bead = np.zeros(3)
if verbose:
print('DNA/bead construct extension too long, assume construct '
'pointing through the trap center')
print('Return zero force.')
else:
# Fit the angles of the DNA/bead construct
angles_c, success_c \
= minimize_angle_cost(A0, l_c, radius, kappa, init_c=None,
factr=factr, gtol=gtol,
eps=eps_angle, verbose=deep_verbose,
print_result=print_result)
# Calculate optimized displacement and corresponding forces
d, f_bead = cost_angle_opp_force_bead_attachment(angles_c, l_c, A0,
kappa, cost=False)
f_mag = np.linalg.norm(f_bead)
if verbose:
print('a_c: {}°'.format(
angles_c*180/math.pi))
print('f_bead: {:.3f} pN, f_dna: {:.3f} pN'.format(
np.linalg.norm(f_bead)*1e12,
np.linalg.norm(-f_bead)*1e12))
print()
if return_plus:
f_dna = - f_bead
return f_dna, f_bead, d, angles_c
return f_mag, d
def cart2sph(x, y, z, offset_phi=0, positive_phi=False):
"""
offset_phi : float
angle in the Euclidean plane that should point in the direction of positive
x
"""
# cart2sph -- Transform Cartesian to spherical coordinates
# Spherical coordinates (r, θ, φ) as commonly used in physics (ISO
# convention): radial distance r, inclination θ (theta), and azimuth φ
# (phi).
hxy = math.hypot(x, y)
r = math.hypot(hxy, z)
theta = math.atan2(hxy, z)
phi = math.atan2(y, x) - offset_phi
if positive_phi and phi < 0:
phi += 2 * math.pi
return r, theta, phi
def sph2cart(r, theta, phi, offset_phi=0):
"""
offset_phi : float
angle in the Euclidean plane that points in the direction of positive x
"""
# sph2cart -- Transform spherical to Cartesian coordinates
# Spherical coordinates (r, θ, φ) as commonly used in physics (ISO
# convention): radial distance r, inclination θ (theta), and azimuth φ
# (phi).
phi += offset_phi
rsin_theta = r * math.sin(theta)
x = rsin_theta * math.cos(phi)
y = rsin_theta * math.sin(phi)
z = r * math.cos(theta)
return x, y, z
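# Small consistency sketch (illustrative only): sph2cart inverts cart2sph,
# so a Cartesian vector survives the round trip.
def _example_sph_cart_roundtrip():
    x, y, z = 1e-9, 2e-9, -3e-9
    r, theta, phi = cart2sph(x, y, z)
    return np.allclose(sph2cart(r, theta, phi), (x, y, z))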
def coord_sph2cart(theta, phi, v, offset_phi=0):
# v is vector with components pointing in the direction of the
# v[0] radius vector
# v[1] circle formed by changing theta (inclination)
# v[2] circle formed by changing phi (azimuth)
# returns a vector rotated according to the local orthogonal unit vectors
# of the spherical coordinate system
phi += offset_phi
sint = math.sin(theta)
cost = math.cos(theta)
sinp = math.sin(phi)
cosp = math.cos(phi)
return np.array([
[sint*cosp, cost*cosp, -sinp],
[sint*sinp, cost*sinp, cosp],
[cost, -sint, 0]
]).dot(v)
def angle(v1, v2):
# angle between two vectors
# return math.atan2(np.linalg.norm(np.cross(v1,v2)), np.dot(v1,v2))
# does not work as well for small angles, but is faster:
cos_theta = v1.dot(v2) / (np.linalg.norm(v1) * np.linalg.norm(v2))
cos_theta = max(-1, cos_theta)
cos_theta = min(1, cos_theta)
return math.acos(cos_theta)
def parallel_cmp(v1, v2):
# project v1 onto v2
# component of v1 parallel to v2
amp_v2 = np.linalg.norm(v2)
if amp_v2 == 0:
return(v2)
return (v1.dot(v2) / amp_v2**2) * v2
def orthogonal_cmp(v1, v2):
# component of v1 orthogonal to v2
return v1 - parallel_cmp(v1, v2)
def minimize_angle_cost(A0, l_c, radius, kappa,
init_c=None, copy_init=False, factr=1e5, gtol=1e-5,
eps=1e-8, verbose=False, print_result=False):
ftol = factr * np.finfo(float).eps
# Set offset_phi_c to the direction of attachment point -> trap center
_, theta_c, offset_phi_c = cart2sph(*-A0)
# Boundaries of theta and phi for the DNA attachment point -> bead center
# vector.
# The construct can point straight upwards to sidewards, where the bead
# would touch the glass surface
theta_c_min = 0
sin_theta = min(1, radius / l_c)
theta_c_max = math.pi / 2 - math.asin(sin_theta)
# The construct can point towards the hemisphere of the trap center
# (i.e. +/- 90°)
phi_c_min = - math.pi / 2
phi_c_max = math.pi / 2
bounds = ((theta_c_min, theta_c_max), (phi_c_min, phi_c_max))
if init_c is None:
# Find proper start values for theta_c and phi_c
# Geometrically assume the DNA attachment point -> bead center vector
# pointing towards the center of the trap.
init_c = np.array([theta_c, 0])
else:
# correct phi of init_c by the offset of phi
if copy_init:
init_c = init_c.copy()
init_c[1] -= offset_phi_c
init_c = normalize_phi(init_c)
if verbose:
print('## ANGLE CONSTRUCT MINIMIZATION ##')
print('bounds theta: {:.2f}° -> {:.2f}°, phi: {:.2f}° -> {:.2f}°'
''.format(theta_c_min*180/math.pi,
theta_c_max*180/math.pi,
normalize(phi_c_min + offset_phi_c)*180/math.pi,
normalize(phi_c_max + offset_phi_c)*180/math.pi))
print('offset phi: {:.2f}°'.format(offset_phi_c*180/math.pi))
# Iteratively change theta and phi of the DNA attachment point -> bead
# center vector such a way that the angle of the attachment point -> bead
# center vector c and the force vector f_bead are pointing in the exact
# opposite direction (method 'L-BFGS-B').
res = _minimize_lbfgsb(cost_angle_opp_force_bead_attachment,
x0=init_c,
bounds=bounds,
args=(l_c, A0, kappa, offset_phi_c, True, verbose),
ftol=ftol,
gtol=gtol,
eps=eps)
angles_c = res.x
success_c = res.success
# correct for the offset phi
angles_c[1] += offset_phi_c
angles_c = normalize_phi(angles_c)
init_c[1] += offset_phi_c
if verbose:
d, f_bead = cost_angle_opp_force_bead_attachment(angles_c, l_c, A0,
kappa, cost=False,
verbose=False)
print('----------')
print('## ANGLE CONSTRUCT RESULT ##')
if print_result:
print(res)
print('----------')
print('ANGLE: θ (deg) φ (deg)')
print('f_bead: {:8.3f} {:8.3f}'.format(
*np.array(cart2sph(*f_bead))[1:]*180/math.pi))
print('construct: {:8.3f} {:8.3f}'.format(*angles_c*180/math.pi))
print('-> force: {:.3f} pN'.format(np.sqrt(np.sum(f_bead**2))*1e12))
print()
return angles_c, success_c
def normalize_phi(angles):
angles[1] = normalize(angles[1])
return angles
def normalize(phi):
if phi > math.pi:
phi -= 2*math.pi
if phi <= -math.pi:
phi += 2*math.pi
return phi
def cost_angle_opp_force_bead_attachment(angles_c, l_c, A0, kappa,
offset_phi_c=0, cost=True,
verbose=False):
"""
l_c : float
length of attachment point to bead center
"""
if verbose:
print(' # CALCULATE ANGLE CONSTRUCT COST ...')
print(' theta_c: {:.6f}, delta_phi_c: {:.6f}'.format(
*angles_c*180/math.pi))
# 1. calculate attachment point -> bead center vector of the construct for
# given theta and phi
c = np.array(sph2cart(l_c, *angles_c, offset_phi=offset_phi_c))
# 2. calculate position of the center of the bead (i.e. displacement
# vector) for a given attachment point -> bead center vector c
d = A0 + c
# 3. calculate force vector of bead due to displacement
f_bead = - d * kappa
if cost:
# 4. calculate the angle between f_bead and the vector opposing the r
# vector, which is connected to the attachment point -> bead center
# vector c (i.e. the vector opposing the force vector along the bead
# center / DNA attachment point axis). If they are pointing in the same
# direction, angle_opp is 0.
angle_opp = angle(f_bead, c)
# print(angle_opp*180/math.pi)
if verbose:
print(' f_bead_theta_phi: {}°'.format(
np.array(cart2sph(*f_bead))[1:]*180/math.pi))
print(' c_theta_phi: {}°'.format(angles_c*180/math.pi))
print(' angle_opp: {:.3f}°'.format(angle_opp*180/math.pi))
if cost:
return angle_opp**2
else:
return d, f_bead
def E_pair(bases, NNBP=False, c=None, T=None):
"""
Work necessary to separate base pairs A-T and G-C of a DNA double helix.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
Parameters
----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
NNBP : bool
Nearest-neighbour base-pair determination of the base-pair energies
c : float
Concentration of monovalent cations in mol, defaults to 1 M.
T : float
T is not considered
"""
c = 1 if c is None else c
bases = bases.upper()
if NNBP:
# TODO: include proper energy term for the first and last bp
e_pair = [_E_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
m_pair = [_M_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
e_pair = np.array(e_pair)
m_pair = np.array(m_pair)
e = e_pair + m_pair * np.log(c)
else:
e = np.array([_E_pair[base] for base in bases])
return e
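# Illustrative only: for the toy sequence 'GCTA' with NNBP=True the
# nearest-neighbour pairs are 'GC', 'CT' and 'TA'; each pair energy is the
# 1 M value corrected by its salt term m_pair * log(c).
def _example_E_pair(c=0.05, T=298.2):
    e = E_pair('GCTA', NNBP=True, c=c)   # per-pair energies in J
    return e / (kB * T)                  # expressed in units of kB*T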
def _E_pair_T(bases, NNBP=False, c=None, T=298.2):
"""
Work necessary to separate base pairs A-T and G-C of a DNA double helix.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
Parameters
----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
NNBP : bool
Nearest-neighbour base-pair determination of the base-pair energies
c : float
Concentration of monovalent cations in mol
T : float
Temperature in K
"""
c = 1 if c is None else c
bases = bases.upper()
if NNBP:
dh_pair = [_DH_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
ds_pair = [_DS_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
m_pair = [_M_pair[''.join((a, b))]
for a, b
in zip(bases[:-1], bases[1:])]
dh_pair = np.array(dh_pair)
ds_pair = np.array(ds_pair)
m_pair = np.array(m_pair)
# salt dependent entropy
# only entropy depends on salt concentration
ds_pair_salt = ds_pair - m_pair/298 * np.log(c)
# temperature dependent energy
e_pair = dh_pair - T*ds_pair_salt
e = e_pair # + m_pair * np.log(c)
else:
e = np.array([_E_pair[base] for base in bases])
return e
def E_unzip_DNA(bases, nuz=0, NNBP=False, c=None, T=298.2):
"""
Work necessary to separate two single strands of DNA double helix of `nuz`
base pairs.
Includes the contributions of unpairing, unstacking, and rearrangement of
bases.
Parameters
----------
bases : str
Sequence of bases 'A', 'T', 'C', and 'G'.
nuz : int or float
Number of base(pair)s up to where the unpairing energy should be
calculated ([1,`nuz`]). If `nuz` is 1, calculate energy for first
basepair.
T : float
Temperature in K
"""
if nuz <= 0:
return 0
# if NNBP:
# TODO: include proper energy term for the first and last bp
ni = int(nuz)
nr = nuz % 1
E = np.sum(E_pair(bases[:ni], NNBP=NNBP, c=c, T=T))
E += np.sum(E_pair(bases[ni-1:ni+1], NNBP=NNBP, c=c, T=T)) * nr
return E
def _E_ext_ssDNA(x, nbs=0, S=None, L_p=None, z=None, T=298.2):
"""
Elastic energy stored in a single strand of j bases
extended by force F to length x.
Parameters
----------
nbs : int
Number of bases of ssDNA
x : float
Extension of ssDNA in m
z : float
Length of a single base in m
"""
if nbs <= 0:
return 0
if x < 0:
x = -x
# Slow variant of numerical integration
# E _fjc = quad(F_ssDNA, 0, x, (j, S, L_p, z, T))[0]
f = F_ssDNA(x, nbs=nbs, S=S, L_p=L_p, z=z, T=T)
# integral_ext_dF = ext_ssDNA_int(f, j, S=S, L_p=L_p, z=z, T=T)
# The ext_ssDNA_int seems to be not correct -> numerical integration
integral_ext_dF = quad(ext_ssDNA, 0, f, (nbs, S, L_p, z, T))[0]
E_fjc = f * x - integral_ext_dF
# There is no negative energy.
E_fjc = max(E_fjc, 0)
return E_fjc
def E_ext_dsDNA_wlc(x, nbp=0, pitch=None, L_p=None, T=298.2):
"""
Elastic energy stored in a double strand of nbp basepairs
extended by force F to length x.
Integral of the worm-like chain model [1].
[1] <NAME>.; <NAME>. "Stretching DNA". Macromolecules. 1995.
28: 8759–8770. doi:10.1021/ma00130a008
Parameters
----------
x : float
Extension (m)
L_0 : float
Contour length (m)
L_p : float
Persistence length (m)
T : float
Temperature (K)
"""
pitch = pitch or _dsDNA_DEFAULT['pitch']
L_p = L_p or _dsDNA_DEFAULT['L_p']
if nbp <= 0:
return 0
if x < 0:
x = -x
L_0 = nbp*pitch
# WLC only valid in the interval x = (-L_0, L_0)
# Higher x would lead to wrongly calculated energies.
# if x > L_0, even negative energies are possible, which
    # would lead to exceptionally high values in the partition
# function.
if x >= L_0:
return float('inf')
def integral(x):
# from wolfram alpha
# return (kB * T * (L_0**2 / (L_0 - x) + (2 * x**2) / L_0 - x)) / (4 * L_p)
# (k T (L^2/(L - x) + (2 x^2)/L - x))/(4 P)
# <NAME>. "Improved approximations for some polymer extension
# models". Rehol Acta. 2016. doi:10.1007/s00397-016-0977-9
return (kB * T * (L_0**2 / (L_0 - x) + (2 * x**2) / L_0 - 1.01587 * x *
(x/L_0)**2.15 - x)) / (4 * L_p)
# (k T (L^2/(L - x) + (2 x^2)/L - 1.01587 x^1 (x/L)^2.15 - x))/(4 P)
return integral(x) - integral(0)
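# Reference sketch (not part of the original module): the closed-form force
# whose antiderivative the `integral` closure above implements is the
# Petrosyan-corrected Marko-Siggia WLC force; shown only to make the energy
# integral transparent.
def _F_dsDNA_wlc_sketch(x, L_0, L_p, T=298.2):
    s = x / L_0
    # F(x) = kB*T/L_p * (1/(4*(1 - s)**2) - 1/4 + s - 0.8*s**2.15)
    return kB * T / L_p * (0.25 / (1.0 - s)**2 - 0.25 + s - 0.8 * s**2.15)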
def E_lev(displacement, kappa):
"""
The elastic energy of the lever/handle.
Parameters
----------
kappa : float
Stiffness of lever in N/m
displacement : float
Displacement of lever in m
"""
return 1/2 * kappa * displacement**2
def E_tot(bases='', nuz=0, nbs=0, ex_ss=0.0, nbp=0, ex_ds=0.0,
displacement=0.0, kappa=0.0,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=None, e_loop=0.0, T=298.2, verbose=False):
"""
Parameters
----------
bases : str
Sequence of sense strand of dsDNA which is (will be) unzipped
nuz : int
Number of unzipped basepairs to calculate the unzip energy.
nbs : int
Number of ssDNA bases
ex_ss : float
Extension of an ssDNA strand
nbp : int
Number of basepairs of the spacer dsDNA
ex_ds : float
Extension of the spacer dsDNA
kappa : float
Stiffness of lever (handle) attached to DNA in N/m
e_loop : float
Free energy for opening the last bp and terminal hairpin (kcal/mol).
"""
e_ext_ssDNA = E_ext_ssDNA(ex_ss, nbs=nbs, S=S, L_p=L_p_ssDNA, z=z, T=T)
e_ext_dsDNA = E_ext_dsDNA_wlc(ex_ds, nbp=nbp, pitch=pitch, L_p=L_p_dsDNA,
T=T)
e_unzip_DNA = E_unzip_DNA(bases, nuz=nuz, NNBP=NNBP, c=c, T=T)
e_lev = np.sum(E_lev(displacement, kappa))
# Include proper energy term for opening the terminal hairpin, only if all
# bps are already unzipped and hairpin is to be opened
if nuz >= len(bases) + 1:
e_loop = e_loop*kcal/Na
else:
e_loop = 0.0
e_total = e_ext_ssDNA + e_ext_dsDNA + e_unzip_DNA + e_lev + e_loop
if verbose:
print('E_ext_ssDNA: ' + str(e_ext_ssDNA/(kB*T)))
print('E_ext_dsDNA: ' + str(e_ext_dsDNA/(kB*T)))
print('E_unzip_DNA: ' + str(e_unzip_DNA/(kB*T)))
print('E_lev: ' + str(e_lev/(kB*T)))
return e_total
def plot_simulated_force_extension(simulation, x=None, y=None, yXYZ=None,
axes=None, ylim=None, theta=False):
# Get data to be plotted
sim_values = get_simulation_values(simulation, df_xyz=True)
e = sim_values['extension']
f = sim_values['force']
forceXYZ = sim_values['forceXYZ']
nuz = sim_values['nuz']
theta = theta if 'theta' in sim_values else False
if theta:
th = sim_values['theta']
if axes is None:
fig, axes = plt.subplots(2, 1)
else:
fig = axes[0].get_figure()
ax = axes[0]
ax2 = ax.twinx()
ax2._get_lines.prop_cycler = ax._get_lines.prop_cycler
# Plot simulated unzipping curve
ax.plot(e * 1e9, f * 1e12, label='Force microsphere')
# Plot measured unzipping curve
if x is not None and y is not None:
ax.plot(x * 1e9, y * 1e12)
# Plot number of simulated unzipped basepairs
ax2.plot(e * 1e9, nuz, color='cyan')
ax.grid(True)
ax.set_xlabel('(Apparent) ext of construct (nm)')
ax.set_ylabel('Force (pN)')
ax2.set_ylabel('# unzip bps')
ylim = ylim or (-1, 18)
ax.set_ylim(ylim)
# Plot individual forces
ax = axes[1]
if theta:
ax2 = plt.twinx(ax=ax)
ax2.xaxis.set_visible(False)
# Plot simulated unzipping curves
ax.plot(e * 1e9, forceXYZ * 1e12)
# Plot measured unzipping curves
if x is not None and yXYZ is not None:
ax.plot(x * 1e9, np.abs(yXYZ) * 1e12)
if theta:
        # Plot difference of angle r0 and r
ax2.plot(e * 1e9, th * 180 / math.pi, color='cyan')
ax2.set_ylabel(r'$\theta$ diff (°)')
ax.grid(True)
ax.set_xlabel('(Apparent) ext of construct (nm)')
ax.set_ylabel('Force (pN)')
ax.set_ylim(ylim)
return fig, axes
def plot_unzip_energy(x0, y0=0.0, h0=0.0, bases='', nuz_est=-1, nbs=0, nbp=0,
nbs_loop=0,
radius=0.0, kappa=None,
S=None, L_p_ssDNA=None, z=None,
pitch=None, L_p_dsDNA=None,
NNBP=False, c=0, e_loop=0.0, T=298.2,
spacing=5, min_stepsize=10,
boltzmann_factor=1e-9,
verbose=False, axes=None):
A0 = attachment_point(x0, y0=y0, h0=h0, radius=radius)
xfe0 = xfe0_nuz(A0, bases=bases, nuz_est=nuz_est, nbs=nbs, nbp=nbp,
nbs_loop=nbs_loop,
radius=radius, kappa=kappa,
S=S, L_p_ssDNA=L_p_ssDNA, z=z,
pitch=pitch, L_p_dsDNA=L_p_dsDNA,
NNBP=NNBP, c=c, e_loop=e_loop, T=T,
spacing=spacing, min_stepsize=min_stepsize,
boltzmann_factor=boltzmann_factor,
verbose=verbose)
# with cnps.cn_plot('notebook') as cnp:
if axes is None:
fig, ax = plt.subplots()
ax2 = ax.twinx()
ax2._get_lines.prop_cycler = ax._get_lines.prop_cycler
# ax2 = cnps.second_ax(link_ax=ax)
# ax2.xaxis.set_visible(False)
else:
ax, ax2 = axes
fig = ax.get_figure()
nuz = xfe0['NUZ0']
energy = xfe0['E0']
min_e = energy.min()
    # normalize energy relative to min_e in units of kT
energy -= min_e
energy /= kB*T
# displacement = xfe0['D0']
boltzmann_factor = xfe0['W0'] / np.sum(xfe0['W0'])
cumsum = np.cumsum(xfe0['W0']) / np.sum(xfe0['W0'])
# if cnp is not None:
# ax.plot(nuz, energy, c=cnp.color)
# # ax.axhline(xfe0['D0_avg'], c=cnp.color)
# ax2.plot(nuz, boltzmann_factor, c=cnp.color)
# ax2.plot(nuz, cumsum, c=cnp.color)
# else:
ax.plot(nuz, energy)
# ax.axhline(xfe0['D0_avg'])
ax2.plot(nuz, boltzmann_factor)
ax2.plot(nuz, cumsum)
ax.axvline(xfe0['NUZ0_avg'], c='magenta')
ax.set_xlabel('Number of unzipped basepairs')
ax.set_ylabel('Energy difference ($k_{B}*T$)')
ax2.set_ylabel('Boltzmann factor')
return fig, ax, ax2
def get_simulation_values(simulation, extension=True, force=True, nuz=True,
df_xyz=False, weighted_energies=False,
energy_keys=None):
"""
Get extension, force, and number of unzipped basepairs of a simulation.
    Parameters
    ----------
extension : bool
Return the extension.
force : bool
Return the force.
nuz : bool
Return the number of unzipped basepairs.
df_xyz : bool
Return the individual xyz components of displacement and force.
weighted_energies : bool
        Calculate and return weighted and averaged energies as returned by
        the function `get_weighted_energies()`.
energy_keys : list of str
Energies to be calculated. Possible options and defaulting to
[ 'e_ext_ssDNA', 'e_ext_dsDNA', 'e_unzip_DNA', 'e_lev' ].
"""
# Set variables of simulated data
XFE, XFE0 = simulation['XFE'], simulation['XFE0']
# Get extension, force, and number of unzipped basepairs ...
# Extension of the construct
try:
EX_avg = XFE['EX_avg']
except KeyError:
# Old simulation object with different key
EX_avg = XFE['X']
# Select data which was properly fitted
idx_valid = (EX_avg != 0)
return_value = {}
if extension:
return_value['extension'] = EX_avg[idx_valid]
if force:
return_value['force'] = XFE['F0_avg'][idx_valid]
if nuz:
return_value['nuz'] = XFE['NUZ0_avg'][idx_valid]
if df_xyz:
try:
D0XYZ_avg = XFE['D0_avg'][idx_valid]
except KeyError:
# Old simulation object
D0XYZ_avg = np.array([xfe0['D0_avg'] for xfe0 in XFE0])
return_value['displacementXYZ'] = D0XYZ_avg
kappa = XFE['settings']['kappa']
F0XYZ_avg = kappa * D0XYZ_avg
return_value['forceXYZ'] = F0XYZ_avg
if weighted_energies:
E0s_avg = get_weighted_energies(simulation, keys=energy_keys)
for key, E0_avg in E0s_avg.items():
return_value[key] = E0_avg[idx_valid]
return return_value
def get_weighted_energies(simulation, keys=None, processes=8):
"""
Get weighted and averaged energies from simulation
Parameters
----------
simulation : dict
The simulation to get the weighted averaged energies from.
keys : str or list of str
Energies to be calculated. Possible values are and defaulting to
[ 'e_ext_ssDNA', 'e_ext_dsDNA', 'e_unzip_DNA', 'e_lev' ].
Returns
-------
dict
Weighted and averaged energies for ssDNA and dsDNA extension,
lever/handle displacement, and basepair unzipping.
"""
    # Speed up energy calculation with multiprocessing and pre-buffering of the
# dsDNA_wlc model for all processes of multiprocessing
nbp = simulation['settings']['nbp']
pitch = simulation['settings']['pitch']
L_p_dsDNA = simulation['settings']['L_p_dsDNA']
T = simulation['settings']['T']
global ext_dsDNA_wlc
ext_dsDNA_wlc = init_buf_ext_dsDNA_wlc(nbp=nbp, pitch=pitch, L_p=L_p_dsDNA,
T=T)
# Define the keys of the energies to be calculated
keys = get_energy_keys(keys=keys)
    # Define a closure to be used with multiprocessing
def f(i):
# Get all unweighted energies for simulation point `i` and calculate
# the averaged weighted energies
XFE0 = simulation['XFE0'][i]
D0 = XFE0['D0']
F0 = XFE0['F0']
NUZ0 = XFE0['NUZ0']
W0 = XFE0['W0']
W0_sum = W0.sum()
E0s = get_energies(simulation, D0, F0, NUZ0, keys=keys)
E0s_avg = []
for E0 in E0s.values():
E0s_avg.append(np.sum(E0 * W0) / W0_sum)
return E0s_avg
f = unboundfunction(f)
# Get all averaged and weighted energies for all simulation points
with Pool(processes=processes) as pool:
E0_avg_lists = pool.map(f, range(len(simulation['XFE0'])))
# Get and combine all calculated energy points according to their key and
# finally convert lists to arrays
E0s_avg = {}
for key in keys:
E0s_avg[key] = []
for E0_avg_list in E0_avg_lists:
for key, E0_avg in zip(keys, E0_avg_list):
E0s_avg[key].append(E0_avg)
for key, E0_avg in E0s_avg.items():
E0s_avg[key] =
|
np.array(E0_avg)
|
numpy.array
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import time
import copy
from random import randint
#load MNIST data
MNIST_data = h5py.File('MNISTdata.hdf5', 'r')
x_train = np.float32(MNIST_data['x_train'][:] )
y_train = np.int32(np.array(MNIST_data['y_train'][:,0]))
x_test = np.float32( MNIST_data['x_test'][:] )
y_test = np.int32( np.array( MNIST_data['y_test'][:,0] ) )
MNIST_data.close()
#Implementation of stochastic gradient descent algorithm
#number of inputs
num_xdimx = 28
num_xdimy = 28
#dimension of filters
num_k = 3
num_k = 3
num_c = 4
#number of outputs
num_outputs = 10
#initialize the model parameters with Xavier initialization
model = {}
model['K'] = np.random.randn(num_k,num_k,num_c)
for i in range(num_c):
model['K'][:,:,i] /= np.sqrt(num_k * num_k)
model['W'] = np.random.randn(num_outputs, num_xdimy - num_k + 1, num_xdimx - num_k + 1, num_c)
for i in range(num_outputs):
model['W'][i,:,:,:] /= np.sqrt((num_xdimy - num_k + 1)*(num_xdimx - num_k + 1)*num_c)
model['b'] = np.zeros((num_outputs, 1))
def conv_single_layer(x_new, K_filter):
# Retrieve the dim
(n_x_dimy, n_x_dimx) = x_new.shape
(n_k_dimy, n_k_dimx, n_k_c) = K_filter.shape
# initialize variables
Z = np.zeros((n_x_dimy - n_k_dimy + 1, n_x_dimx - n_k_dimx + 1, n_k_c))
# use stack to save some reused memory of x_new
stack = np.zeros((n_x_dimy - n_k_dimy + 1, n_x_dimx - n_k_dimx + 1, n_k_dimy, n_k_dimx))
for h in range(n_x_dimy - n_k_dimy + 1):
for w in range(n_x_dimx - n_k_dimx + 1):
stack[h, w, :, :] = x_new[h:(h+n_k_dimy), w:(w+n_k_dimx)]
    # run the convolution for multiple channels
    # NB: use np.tensordot to compute the whole single-layer output in one call
for n_c in range(n_k_c):
Z[:, :, n_c] = np.tensordot(stack, K_filter[:, :, n_c], axes=([2, 3], [0, 1]))
H = sigmoid(Z)
return Z, H
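# Sanity-check sketch (not part of the original script): verifies that the
# stack + np.tensordot trick above reproduces a naive sliding-window
# cross-correlation, channel by channel.
def _check_conv_single_layer(x_new, K_filter):
    Z, _ = conv_single_layer(x_new, K_filter)
    (n_ky, n_kx, n_c) = K_filter.shape
    for c in range(n_c):
        for h in range(Z.shape[0]):
            for w in range(Z.shape[1]):
                ref = np.sum(x_new[h:h + n_ky, w:w + n_kx] * K_filter[:, :, c])
                assert np.isclose(Z[h, w, c], ref)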
def conv_single_layer_single_channel(x_new, K_filter):
# Retrieve the dim
(n_x_dimy, n_x_dimx) = x_new.shape
(n_k_dimy, n_k_dimx) = K_filter.shape
stack = np.zeros((n_x_dimy - n_k_dimy + 1, n_x_dimx - n_k_dimx + 1, n_k_dimy, n_k_dimx))
for h in range(n_x_dimy - n_k_dimy + 1):
for w in range(n_x_dimx - n_k_dimx + 1):
stack[h, w, :, :] = x_new[h:(h+n_k_dimy), w:(w+n_k_dimx)]
Z = np.tensordot(stack, K_filter, axes=([2, 3], [0, 1]))
return Z
def sigmoid(x, derivative=False):
sigm = 1. / (1. + np.exp(-x))
if derivative:
return sigm * (1. - sigm)
return sigm
def indicator_function(y, num_output):
vec = np.zeros((num_output, 1))
vec[y] = 1
return vec
def softmax_function(z):
z_max = np.max(z)
z_new = z - z_max
ZZ = np.exp(z_new) / np.sum(
|
np.exp(z_new)
|
numpy.exp
|
import numpy as np
import math as m
import time
RAD2DEG = 57.3
DEG2METER = 111392.84
DEG2RAD = 1/57.3
track_width = 1.2
max_curvature = 0.3
ratio = 0.35
W = np.zeros(4) # doesn't matter how you initialize it.
Q = np.zeros(4)
V_lim = 10
lat_Acc_limit = 10
lon_Acc_limit = 10
C_min = 0.1
def set_track_width(tw):
global track_width
track_width = (tw)*5
def set_normalization_weights(weights):
global W
W = weights
def set_term_weights(weights):
global Q
Q = weights
def set_V_A_lim(Vmax,lat_A_max,lon_A_max):
global V_lim
V_lim = Vmax
global lat_Acc_limit
lat_Acc_limit = lat_A_max
global lon_Acc_limit
lon_Acc_limit = lon_A_max
global C_min
C_min = lat_Acc_limit/(V_lim**2)
def distancecalcy(y1,y2,x1,x2):
delX = (x2-x1);
delY = (y2-y1);
delX *= delX;
delY *= delY;
return m.sqrt(delX + delY);
def anglecalcy(x1,x2,y1,y2):
angle = RAD2DEG*m.atan2((y2-y1),(x2-x1));
if(angle<0):
angle += 360;
return angle;
def angle_difference(x1,x2,x3,y1,y2,y3):
angle1 = anglecalcy(x1,x2,y1,y2)
angle2 = anglecalcy(x2,x3,y2,y3)
angle_diff = m.fabs(angle1-angle2)
if(angle_diff>360):
angle_diff -= 360
return angle_diff
def wrap_360(angle):
if(angle>360):
angle -= 360
if(angle<0):
angle += 360
return angle
def generate_slopes(X,Y):
circuit = False
if(distancecalcy(Y[0],Y[-1],X[0],X[-1])<1):
circuit = True
slope = np.empty_like(X)
for i in range(1,len(X)-1):
angle1 = anglecalcy( X[i-1], X[i], Y[i-1], Y[i] )
angle2 = anglecalcy( X[i], X[i+1], Y[i], Y[i+1] )
if(m.fabs(angle1 - angle2) > 180):
angle1 -= 360
slope[i] = ( angle1 + angle2 )*0.5
if(circuit):
angle1 = anglecalcy( X[-2], X[-1], Y[-2], Y[-1] )
angle2 = anglecalcy( X[0], X[1], Y[0], Y[1] )
if(m.fabs(angle1 - angle2) > 180):
angle1 -= 360
slope[0] = ( angle1 + angle2 )*0.5;
slope[-1] = slope[0]
else:
slope[0] = anglecalcy( X[0], X[1], Y[0], Y[1] );
slope[-1] = anglecalcy( X[-2], X[-1], Y[-2], Y[-1] )
return slope
def acute_angle(A,B):
a = m.fabs(A-B)
while(a>180):
a -= 180
return a
def area(x1, y1, angle1, x2, y2, angle2):
X = distancecalcy(y1,y2,x1,x2)
base = anglecalcy(x1,x2,y1,y2)
B = acute_angle(angle1,base)*DEG2RAD
C = acute_angle(angle2,base)*DEG2RAD
A = m.pi - (B+C)
return m.fabs(((m.sin(B)*m.sin(C)/m.sin(A))))*X**2
def get_Intermediate_Points(slope1, slope2, X1, X2, Y1, Y2):
global track_width
global ratio
int1 = np.zeros(2)
int2 = np.zeros(2)
d = distancecalcy(Y2,Y1,X2,X1)
ratio = 0.4 - 0.06*(d/(track_width+d))
# if(d>track_width):
# d = track_width
int1[0] = X1 + ratio*m.cos(slope1*DEG2RAD)*d
int1[1] = Y1 + ratio*m.sin(slope1*DEG2RAD)*d
int2[0] = X2 - ratio*m.cos(slope2*DEG2RAD)*d
int2[1] = Y2 - ratio*m.sin(slope2*DEG2RAD)*d
return int1,int2
def get_bezier(X1,X2,Y1,Y2,slope1,slope2):
int1,int2 = get_Intermediate_Points(slope1,slope2,X1,X2,Y1,Y2)
Px = np.array([X1,int1[0],int2[0],X2])
Py = np.array([Y1,int1[1],int2[1],Y2])
t = np.arange(0,1,0.05)
T = np.array([(1-t)**3,3*t*(1-t)**2,3*t*t*(1-t),t**3])
Bx = T[0]*Px[0] + T[1]*Px[1] + T[2]*Px[2] + T[3]*Px[3]
By = T[0]*Py[0] + T[1]*Py[1] + T[2]*Py[2] + T[3]*Py[3]
return Bx,By
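# Usage sketch (made-up waypoints, not from the original track data): one cubic
# Bezier segment from (0, 0) with heading 0 deg to (5, 5) with heading 90 deg,
# sampled at the 20 parameter values used inside get_bezier.
# Bx, By = get_bezier(X1=0.0, X2=5.0, Y1=0.0, Y2=5.0, slope1=0.0, slope2=90.0)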
def arc_length(X1,Y1,X2,Y2,X3,Y3,X4,Y4):
L1 = distancecalcy(Y1,Y2,X1,X2)
L2 = distancecalcy(Y2,Y3,X2,X3)
L3 = distancecalcy(Y3,Y4,X3,X4)
L4 = distancecalcy(Y4,Y1,X4,X1)
L = L1+L2+L3
L = 0.5*(L+L4)
return L
def get_T(X1,Y1,X2,Y2,X3,Y3,X4,Y4):
L1 = distancecalcy(Y1,Y2,X1,X2)
L2 = distancecalcy(Y2,Y3,X2,X3)
L3 = distancecalcy(Y3,Y4,X3,X4)
L4 = distancecalcy(Y4,Y1,X4,X1)
L = L1+L2+L3
L = 0.5*(L+L4)
t1 = 0.5*(L1/(L1+L2))
t2 = 1 - 0.5*(L3/(L3+L2))
return np.array([t1,t2])
def Curv(t,KX1,KX2,KX3,KY1,KY2,KY3):
delX = t*t*KX1 + t*KX2 + KX3
delY = t*t*KY1 + t*KY2 + KY3
del2X = 2*t*KX1 + KX2
del2Y = 2*t*KY1 + KY2
denominator = delX*delX + delY*delY
dummy = denominator
denominator *= denominator*denominator
denominator = np.sqrt(denominator)
del3Y = 2*KY1
del3X = 2*KX1
second_denominator = denominator*dummy
dK = ((del3Y*delX - del3X*delY)/denominator) - (3*(delX*del2Y - del2X*delY)*(delX*del2X + delY*del2Y)/second_denominator)
sub_term_1 = (delX*del2Y - del2X*delY)
sub_term_2 = 2*(delX*del2X + delY*del2Y)
third_denominator = np.fabs(second_denominator*dummy)
sub_term_3 = (del3Y*delX - del3X*delY)
sub_term_4 = 2*(del2X**2 + del2Y**2 + del3X*delX+del3Y*delY)
sub_term_5 = - del3X*del2Y + del3Y*del2X
term_1 = 3.75*(sub_term_1*(sub_term_2**2))/third_denominator
term_2 = -3*(sub_term_3*sub_term_2)/second_denominator
term_3 = -1.5*(sub_term_1*sub_term_4)/second_denominator
term_4 = sub_term_5/denominator
d2K = term_1 + term_2 + term_3 + term_4
return dK,d2K
def check_range(x,i):
if(i):
if(x>1):
return 1
if(x<0.5):
return 0.5
return x
if(x<0):
return 0
if(x>0.5):
return 0.5
return x
def C_from_K_t(t,KX1,KX2,KX3,KY1,KY2,KY3):
delX = t*t*KX1 + t*KX2 + KX3
delY = t*t*KY1 + t*KY2 + KY3
del2X = 2*t*KX1 + KX2
del2Y = 2*t*KY1 + KY2
denominator = delX*delX + delY*delY
dummy = denominator
denominator *= denominator*denominator
denominator = np.sqrt(denominator)
Curvature = ((delX*del2Y) - (delY*del2X))
Curvature /= denominator
return Curvature
def V_A_from_K_t(t,KX1,KX2,KX3,KY1,KY2,KY3):
delX = t*t*KX1 + t*KX2 + KX3
delY = t*t*KY1 + t*KY2 + KY3
del2X = 2*t*KX1 + KX2
del2Y = 2*t*KY1 + KY2
return delX,delY,del2X,del2Y
def time_from_P_K_t(t,KX1,KX2,KX3,KY1,KY2,KY3,Px,Py,C):
global V_lim,lat_Acc_limit,lon_Acc_limit,C_min
res = 0.01
var = np.arange(0,1,res)
dX,dY,d2X,d2Y = V_A_from_K_t(var,KX1,KX2,KX3,KY1,KY2,KY3)
#bezier velocity
Vb = np.sqrt(dX**2 + dY**2)
tangent_i = dX/Vb# normalize
tangent_j = dY/Vb
#bezier lat lon acceleration (body frame)
Ablon = d2X*tangent_i + d2Y*tangent_j
    Ablat = d2X*tangent_j - d2Y*tangent_i  # longitudinal and lateral acceleration
index = np.argmax(C)
t_max = np.round(t[index],2)
C_max = C[index]
if(np.fabs(C_max)<C_min):
C_max = C_min
#find min. velocity along the bez. curve
V_min = np.sqrt(lat_Acc_limit/np.fabs(C_max))
dXm,dYm,d2Xm,d2Ym = V_A_from_K_t(t_max,KX1,KX2,KX3,KY1,KY2,KY3)
#find min. bez. V,A
Vbmin = np.sqrt(dXm**2 + dYm**2)
Abmax = np.sqrt(d2Xm**2 + d2Ym**2)
#find scaling
fV = V_min/Vbmin
fA = fV*fV
#scale acc, vel, time
Alon = fA*Ablon
Alat = fA*Ablat
sec_time = var/fV # fV*(0..1)
V = fV*Vb
dist = V*res/fV # distance between points along the curve
u = int(t_max/res)
# clip the velocity and acceleration. use clipped velocity to find clipped acceleration
for i in range(3):
cap_V = np.clip(V,0,V_lim)
# print(cap_V) # you can use this to check what the velocity would look like along the curve
cap_Alon = np.clip(np.diff(cap_V/sec_time),-lon_Acc_limit,lon_Acc_limit)
# time instant for min vel:
cap_Alon[:u] *= -1
V[:-1] = V_min + np.cumsum(cap_Alon)*fV*res
V[-1] = V[-2] # because terms are lost on diffing
new_time = np.sum(dist/V)/np.sum(dist/V_min)
return new_time
def cmp(a,b):
return (a > b) ^ (a < b)
def get_Curvature(X1,Y1,X2,Y2,X3,Y3,X4,Y4,t):
Px = np.array([X1,X2,X3,X4])
Py = np.array([Y1,Y2,Y3,Y4])
KX1 = 9*X2 + 3*X4 - 3*X1 - 9*X3
KY1 = 9*Y2 + 3*Y4 - 3*Y1 - 9*Y3
KX2 = 6*X1 - 12*X2 + 6*X3
KY2 = 6*Y1 - 12*Y2 + 6*Y3
KX3 = 3*(X2 - X1)
KY3 = 3*(Y2 - Y1)
    #using Newton-Raphson method to find best estimates for curvature
for i in range(2):
count = 0
h =1
for j in range(3):
dk,d2K = Curv(t[i],KX1,KX2,KX3,KY1,KY2,KY3)
if(j>=1):
last_h = h
h = dk/d2K
if(h*last_h<0):
h = cmp(h,0)*min(m.fabs(last_h/2),m.fabs(h))
else:
h = dk/d2K
if(np.isnan(h)):
t[i] = 0.5
break
t[i] = t[i]-h
t[i] = check_range(t[i],i)
Curvature = np.zeros(4)
t = np.array([t[0],t[1],0,1])
Curvature = C_from_K_t(t,KX1,KX2,KX3,KY1,KY2,KY3)
section_time = time_from_P_K_t(t,KX1,KX2,KX3,KY1,KY2,KY3,Px,Py,Curvature)
var = np.arange(0,1,0.01)
dk,d2K = Curv(var,KX1,KX2,KX3,KY1,KY2,KY3)
return Curvature,dk,d2K,section_time
def s_k(X, Y, slope1, destX, destY, slope2):
int1,int2 = get_Intermediate_Points( slope1, slope2, X, destX, Y, destY)
t = get_T(X, Y, int1[0], int1[1], int2[0], int2[1], destX, destY)
Curvature,dk,d2K,section_time = get_Curvature(X, Y, int1[0], int1[1], int2[0], int2[1], destX, destY, t)
k_st = Curvature[2]
k_en = Curvature[3]
Curvature = np.max(
|
np.fabs(Curvature)
|
numpy.fabs
|
'''
Use TensorFlow to contract a tensor network graph; the contraction can be optimized iteratively with gradient descent.
'''
import re, os, sys, math, unittest
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow.compat.v1 as tf
import random
from random import shuffle, choice
from itertools import product
# from scipy.sparse import csr_matrix
# from scipy.sparse.csgraph import connected_components
import operator
import numpy as np
np.set_printoptions(precision=4)
# from decorator import decorator
import operator
from functools import reduce
# from scipy.io import loadmat
from tensorflow.python.framework.ops import Tensor
def prod(iterable):
return reduce(operator.mul, iterable, 1)
def letter_range(n):
for c in range(97, 97+n):
yield chr(c)
class RealTensor(object):
def __init__(self, name='no_name', from_variable=None, shape=[1, 2, 3, 4], trainable=True,
initializer=tf.random_normal_initializer(mean=0.0, stddev=1.0), identity=None):
if from_variable is not None:
self.name = name
self.identity = identity
self.shape = from_variable.get_shape().as_list()
self.tensor = tf.identity(from_variable, name=self.name)
else:
self.name = name
self.identity = identity
self.shape = shape
self.initializer = initializer
if isinstance(self.initializer, Tensor):
self.tensor = tf.get_variable(name = self.name, initializer = self.initializer, trainable=trainable)
else:
self.tensor = tf.get_variable(name = self.name, shape = self.shape,
initializer = self.initializer, trainable=trainable)
def __call__(self):
return self.tensor
class TensorNetwork(object):
def __init__(self, adj_matrix, name_list=None, initializer_list=None, trainable_list=None, scope='TensorNetwork'):
self.shape = adj_matrix.shape
assert self.shape[0] == self.shape[1], 'adj_matrix must be a square matrix.'
self.dim = self.shape[0]
self.adj_matrix = np.empty(self.shape, dtype=object)
self.scope = scope
self.output_count = 0
self.output_order = []
for i in np.diag(adj_matrix):
if i == 0:
self.output_order.append([])
else:
self.output_order.append([self.output_count])
self.output_count += 1
self.id_matrix = np.empty(self.shape, dtype=object)
tril_idx = np.tril_indices(self.dim, -1)
if np.sum(adj_matrix[tril_idx]) == 0:
adj_matrix[tril_idx] += adj_matrix[(tril_idx[1], tril_idx[0])]
# graph = np.copy(adj_matrix)
# graph[np.diag_indices(self.dim)] = 0
# graph[graph>0] = 1
# graph = csr_matrix(graph)
# n_components = connected_components(csgraph=graph, directed=False, return_labels=False)
# if not n_components == 1:
# print ('The network is seperated by {} parts.'.format(n_components))
for idx in np.ndindex(self.shape):
self.adj_matrix[idx] = [ adj_matrix[idx] ]
if self.adj_matrix[idx][0] == 0:
self.adj_matrix[idx].clear()
# if idx[0] == idx[1]:
# self.adj_matrix[idx].append(1)
if name_list is not None:
assert self.dim == len(name_list), 'Length of name_list does not match number of cores.'
self.name_list = name_list
else:
self.name_list = list(letter_range(self.dim))
if trainable_list is None:
trainable_list = [True] * self.dim
if initializer_list is None:
initializer_list = [tf.random_normal_initializer(mean=0.0, stddev=1.0)] * self.dim
with tf.variable_scope(self.scope):
self.cores = [ RealTensor(name=self.name_list[t], shape=list(filter((0).__ne__, adj_matrix[t].tolist())),
trainable=trainable_list[t], initializer=initializer_list[t]) for t in range(self.dim) ]
def __repr__(self):
return self.reduction()
    def __add__(self, TN_b):
        return self.reduction() + TN_b.reduction()
    def __sub__(self, TN_b):
        return self.reduction() - TN_b.reduction()
    def __mul__(self, TN_b):
        return self.__tf_matmul__(self.reduction(), TN_b.reduction())
def giff_cores(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
def __outter_product__(self):
tar = tf.reshape(self.cores[1](), [1, -1])
des = tf.reshape(self.cores[0](), [-1, 1])
reduced_core = tf.reshape(tf.matmul(des, tar), [-1])
self.adj_matrix[0][0] = self.adj_matrix[0][0] + self.adj_matrix[1][1]
self.output_order[1] = self.output_order[0] + self.output_order[1]
self.output_order.pop(0)
self.adj_matrix = np.delete(self.adj_matrix, 1, 0)
self.adj_matrix = np.delete(self.adj_matrix, 1, 1)
self.dim -= 1
reduced_core_name = self.name_list[1]+self.name_list[0]
self.cores.pop(1)
self.cores.pop(0)
self.cores.insert(0, RealTensor(name=reduced_core_name, from_variable=reduced_core))
self.name_list.pop(1)
self.name_list.pop(0)
self.name_list.insert(0, reduced_core_name)
def __reduce_cores__(self, target_destination):
target, destination = target_destination
for idx in np.ndindex((self.dim, self.dim)):
if len(self.adj_matrix[idx]) > 0:
self.id_matrix[idx] = self.cores[idx[1]].name[0]
else:
self.id_matrix[idx] = ''
for idx, c in enumerate(self.cores):
c.identity = reduce(operator.add, self.id_matrix[idx])
# print (self.id_matrix)
# print (self.adj_matrix)
target_shape = [ int(np.prod(i)) for i in self.adj_matrix[target].tolist() ]
destination_shape = [ int(np.prod(i)) for i in self.adj_matrix[destination].tolist() ]
# print (self.cores[target](), target_shape)
# print (self.cores[destination](), destination_shape)
tar = tf.reshape(self.cores[target](), target_shape)
des = tf.reshape(self.cores[destination](), destination_shape)
tar_trans_list, des_trans_list = list(range(self.dim)), list(range(self.dim))
tar_trans_list = tar_trans_list + [tar_trans_list.pop(destination)]
des_trans_list = [des_trans_list.pop(target)] + des_trans_list
tar, des = tf.transpose(tar, tar_trans_list), tf.transpose(des, des_trans_list)
reduced_core = self.__tf_matmul__(tar, des)
# print (reduced_core)
reduced_trans_list = list(range(self.dim*2-2))
reduce_trans_list_des = reduced_trans_list.pop(destination+self.dim-2)
reduce_trans_list_tar = reduced_trans_list.pop(target)
reduced_trans_list_len = len(reduced_trans_list)//2
reduced_trans_list = [ [ reduced_trans_list[i], reduced_trans_list[i+reduced_trans_list_len] ] for i in range(reduced_trans_list_len)]
reduced_trans_list.insert(destination-1, [reduce_trans_list_tar, reduce_trans_list_des] )
reduced_trans_list = [ k for j in reduced_trans_list for k in j]
# for i in range(self.dim-1):
# reduced_trans_list += [i, i+self.dim-1]
# print (reduced_trans_list)
reduced_core = tf.transpose(reduced_core, reduced_trans_list)
reduced_core = tf.squeeze(reduced_core)
self.adj_matrix[destination, destination] = self.adj_matrix[target, target] + self.adj_matrix[destination, destination]
self.output_order[destination] = self.output_order[target] + self.output_order[destination]
self.output_order.pop(target)
inherit = list(range(self.dim))
inherit.remove(target)
inherit.remove(destination)
# print (self.adj_matrix)
for i in inherit:
self.cores[i].tensor = tf.reshape(self.cores[i](), [ int(np.prod(z)) for z in self.adj_matrix[i].tolist() if int(np.prod(z))>1 ])
self.adj_matrix[destination][i] = self.adj_matrix[target][i] + self.adj_matrix[destination][i]
self.adj_matrix[i][destination] = self.adj_matrix[i][target] + self.adj_matrix[i][destination]
self.id_matrix[i][destination] = self.id_matrix[i][target] + self.id_matrix[i][destination]
self.id_matrix[i][target] = ''
self.adj_matrix =
|
np.delete(self.adj_matrix, target, 1)
|
numpy.delete
|
import os
import random
import numpy as np
import pandas as pd
from helper import *
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams['figure.figsize'] = [5, 5]
matplotlib.rcParams['figure.dpi'] = 200
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
from tqdm import tqdm
from data_helper import UnlabeledDataset, LabeledDataset
from helper import collate_fn, draw_box
import utils
random.seed(0)
|
np.random.seed(0)
|
numpy.random.seed
|
import collections
import functools
import theano
import theano.tensor as T
import theano.tensor.slinalg
import numpy as np
import scipy.linalg
import scipy.misc
from theano.ifelse import ifelse
class OrderedSet(collections.OrderedDict, collections.MutableSet):
# source: http://stackoverflow.com/questions/1653970/does-python-have-an-ordered-set
def update(self, *args, **kwargs):
if kwargs:
raise TypeError("update() takes no keyword arguments")
for s in args:
for e in s:
self.add(e)
def add(self, elem):
self[elem] = None
def discard(self, elem):
self.pop(elem, None)
def __le__(self, other):
return all(e in other for e in self)
def __lt__(self, other):
return self <= other and self != other
def __ge__(self, other):
return all(e in self for e in other)
def __gt__(self, other):
return self >= other and self != other
def __repr__(self):
return 'OrderedSet([%s])' % (', '.join(map(repr, self.keys())))
def __str__(self):
return '{%s}' % (', '.join(map(repr, self.keys())))
difference = property(lambda self: self.__sub__)
difference_update = property(lambda self: self.__isub__)
intersection = property(lambda self: self.__and__)
intersection_update = property(lambda self: self.__iand__)
issubset = property(lambda self: self.__le__)
issuperset = property(lambda self: self.__ge__)
symmetric_difference = property(lambda self: self.__xor__)
symmetric_difference_update = property(lambda self: self.__ixor__)
union = property(lambda self: self.__or__)
# ----------------------------------------------------------------- #
# ----------------------------------------------------------------- #
class BlockDiagOp(theano.Op):
"""Block diagonal."""
def make_node(self, *arrays):
"""create an `Apply` node."""
arrs = [theano.tensor.as_tensor_variable(ar) for ar in arrays]
atyp = arrs[0].type()
return theano.Apply(self, arrs, [atyp])
def perform(self, node, inputs_storage, output_storage):
"""perform python implementation."""
out = output_storage[0]
bdiag = scipy.linalg.block_diag(*inputs_storage)
out[0] = bdiag
class ExpmSSOp(theano.Op):
"""Matrix exponential using scaling and squaring."""
# properties attributes
__props__ = ()
# `itypes` and `otypes` attributes are
# compulsory if `make_node` method is not defined.
# They are the type of input and output respectively
# itypes = [theano.tensor.fmatrix]
# otypes = [theano.tensor.fmatrix]
# Compulsory if itypes and otypes are not defined
def make_node(self, mat):
"""create an `Apply` node."""
mat = theano.tensor.as_tensor_variable(mat)
return theano.Apply(self, [mat], [mat.type()])
def perform(self, node, inputs_storage, output_storage):
"""perform python implementation."""
mat = inputs_storage[0]
out = output_storage[0]
cond = np.linalg.norm(mat, np.inf)
powr = np.ceil(np.log2(cond)) + 1
scale = 2 ** powr
expm = scipy.linalg.expm(mat / scale)
sqrm = np.linalg.matrix_power(
expm,
int(scale))
out[0] = sqrm
def infer_shape(self, node, input_shapes):
return input_shapes
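# Spot-check sketch (not part of the original module): the scaling-and-squaring
# identity used by ExpmSSOp.perform is expm(A) == matrix_power(expm(A / 2**k), 2**k),
# which can be verified numerically, e.g.
#   A = np.random.randn(4, 4)
#   assert np.allclose(scipy.linalg.expm(A),
#                      np.linalg.matrix_power(scipy.linalg.expm(A / 8.0), 8))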
class ExpmSSEOp(theano.Op):
"""Matrix exponential using scaling and squaring the eigenvalues."""
# properties attributes
__props__ = ()
# `itypes` and `otypes` attributes are
# compulsory if `make_node` method is not defined.
# They are the type of input and output respectively
# itypes = [theano.tensor.fmatrix]
# otypes = [theano.tensor.fmatrix]
# Compulsory if itypes and otypes are not defined
def make_node(self, mat):
"""create an `Apply` node."""
mat = theano.tensor.as_tensor_variable(mat)
return theano.Apply(self, [mat], [mat.type()])
def perform(self, node, inputs_storage, output_storage):
"""perform python implementation."""
mat = inputs_storage[0]
out = output_storage[0]
cond = np.linalg.norm(mat, np.inf)
powr = np.ceil(np.log2(cond)) + 1
scale = 2 ** powr
expm = scipy.linalg.expm(mat / scale)
# squaring the eigenvalues
Ed, EV = np.linalg.eig(expm)
iEV = np.linalg.inv(EV)
Ed = np.diag(Ed ** scale)
sqrm = EV.dot(Ed).dot(iEV)
out[0] = sqrm
def infer_shape(self, node, input_shapes):
return input_shapes
# ----------------------------------------------------------------- #
# ----------------------------------------------------------------- #
def sym_shasi(name=None, value=0, **kwargs):
# int64
value = int(value)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_shasf(name=None, value=0, **kwargs):
# float32
value = np.cast[theano.config.floatX](value)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_shavf(name=None, **kwargs):
value = np.zeros((0,), dtype=theano.config.floatX)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_shamf(name=None, **kwargs):
value = np.zeros((0, 0), dtype=theano.config.floatX)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_shavi(name=None, **kwargs):
value = np.zeros((0,), dtype=int)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_shami(name=None, **kwargs):
value = np.zeros((0, 0), dtype=int)
return theano.shared(value, name, strict=False, allow_downcast=True, **kwargs)
def sym_kron(A, B):
return T.slinalg.kron(A, B)
def sym_inv(A):
return T.nlinalg.matrix_inverse(A)
def sym_dot(A, B):
return T.nlinalg.matrix_dot(A, B)
def sym_pinv(A):
return T.nlinalg.pinv(A)
def num_expm(A):
# numerical matrix exponential using Pade approximation
# with scaling and squaring
norm = np.linalg.norm(A, np.inf)
n = np.ceil(
|
np.log2(norm)
|
numpy.log2
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 5 21:32:52 2017
@author: hsd
"""
import numpy as np
from scipy import stats
import ReadData
##################################################
### tools
##################################################
def LongThresCrossing(ts, thres):
cnt = 0
pair_flag = 1
pre_loc = 0
width = []
for i in range(len(ts)-1):
if (ts[i] - thres) * (ts[i+1] - thres) < 0:
cnt += 1
if pair_flag == 1:
width.append(i-pre_loc)
pair_flag = 0
else:
pair_flag = 1
pre_loc = i
if ts[i] == thres and (ts[i-1] - thres) * (ts[i+1] - thres) < 0:
cnt += 1
if len(width) > 1:
return [cnt, np.mean(width)]
else:
return [cnt, 0.0]
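# Usage sketch (toy signal, not part of the original script): count threshold
# crossings of a sine wave and the mean width between crossing pairs.
# cnt, mean_width = LongThresCrossing(np.sin(np.linspace(0, 10 * np.pi, 1000)), 0.0)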
##################################################
### get features
##################################################
def short_basic_stat(ts):
global feature_list
Range = max(ts) - min(ts)
Var = np.var(ts)
Skew = stats.skew(ts)
Kurtosis = stats.kurtosis(ts)
Median = np.median(ts)
feature_list.extend(['ShortBasicStat_Range',
'ShortBasicStat_Var',
'ShortBasicStat_Skew',
'ShortBasicStat_Kurtosis',
'ShortBasicStat_Median'])
return [Range, Var, Skew, Kurtosis, Median]
def short_zero_crossing(ts):
global feature_list
feature_list.extend(['short_zero_crossing_cnt'])
cnt = 0
for i in range(len(ts)-1):
if ts[i] * ts[i+1] < 0:
cnt += 1
if ts[i] == 0 and ts[i-1] * ts[i+1] < 0:
cnt += 1
return [cnt]
##################################################
### get all features
##################################################
def get_short_stat_wave_feature(table, pid_list, long_pid_list):
'''
    Short-segment statistical wave features, aggregated per long record (so
    effectively long-record features).
    Based on: "Electrocardiogram Feature Extraction and Pattern Recognition
    Using a Novel Windowing Algorithm".
    The output feature table has 8000+ rows.
TODO: more on how to detect PT waves
'''
global feature_list
feature_list = []
print('extract GetShortStatWaveFeature begin')
features = []
pid_short_dic = {}
### no-preprocess, performs better
for i in range(len(pid_list)):
if pid_list[i] in pid_short_dic.keys():
pid_short_dic[pid_list[i]].append(table[i])
else:
pid_short_dic[pid_list[i]] = [table[i]]
step = 0
for pid in long_pid_list:
if pid in pid_short_dic.keys() and len(pid_short_dic[pid])-2 > 0:
### init
QRS_peak_list = []
QRS_area_list = []
PR_interval_list = []
QRS_duration_list = []
QT_interval_list = []
QT_corrected_list = []
vent_rate_list = []
RQ_amp_list = []
RS_amp_list = []
ST_amp_list = []
PQ_amp_list = []
QS_amp_list = []
RP_amp_list = []
RT_amp_list = []
ST_interval_list = []
RS_interval_list = []
T_peak_list = []
P_peak_list = []
Q_peak_list = []
R_peak_list = []
S_peak_list = []
RS_slope_list = []
ST_slope_list = []
NF_list = []
Fwidth_list = []
### select short data of one patient
sub_table = pid_short_dic[pid]
for i in range(len(sub_table)-2):
prev_ts = sub_table[i]
ts = sub_table[i+1]
### select each short data
T_start = round(0.15 * len(ts))
T_end = round(0.55 * len(ts))
P_start = round(0.65 * len(ts))
P_end = round(0.95 * len(ts))
T_wave = ts[T_start:T_end]
P_wave = ts[P_start:P_end]
T_peak = max(T_wave)
P_peak = max(P_wave)
Q_peak = min(prev_ts[-6:])
R_peak = ts[0]
S_peak = min(ts[:6])
T_loc = np.argmax(T_wave)
P_loc = np.argmax(P_wave)
Q_loc = -np.argmin(prev_ts[-6:])
R_loc = 0
S_loc = np.argmin(ts[:6])
### features, recent add (2)
QRS_peak = max(ts)
QRS_area = np.sum(np.abs(prev_ts[Q_loc: 0])) + np.sum(np.abs(ts[0: S_loc]))
### features (5)
PR_interval = P_loc - 0
QRS_duration = S_loc - Q_loc
QT_interval = T_loc - Q_loc
QT_corrected = QT_interval / len(ts)
if QRS_duration == 0:
vent_rate = 0
else:
vent_rate = 1 / QRS_duration
### number of f waves (2)
TQ_interval = ts[T_loc:Q_loc]
thres = np.mean(TQ_interval) + (T_peak - np.mean(TQ_interval))/50
NF, Fwidth = LongThresCrossing(TQ_interval, thres)
### more features (16)
RQ_amp = R_peak - Q_peak
RS_amp = R_peak - S_peak
ST_amp = T_peak - S_peak
PQ_amp = P_peak - Q_peak
QS_amp = Q_peak - S_peak
RP_amp = R_peak - P_peak
RT_amp = R_peak - T_peak
ST_interval = T_loc - S_loc
RS_interval = S_loc - R_loc
T_peak = T_peak
P_peak = P_peak
Q_peak = Q_peak
R_peak = R_peak
S_peak = S_peak
if RS_interval == 0:
RS_slope = 0
else:
RS_slope = RS_amp / RS_interval
if ST_interval == 0:
ST_slope = 0
else:
ST_slope = ST_amp / ST_interval
### add to list
QRS_peak_list.append(QRS_peak)
QRS_area_list.append(QRS_area)
PR_interval_list.append(PR_interval)
QRS_duration_list.append(QRS_duration)
QT_interval_list.append(QT_interval)
QT_corrected_list.append(QT_corrected)
vent_rate_list.append(vent_rate)
NF_list.append(NF)
Fwidth_list.append(Fwidth)
RQ_amp_list.append(RQ_amp)
RS_amp_list.append(RS_amp)
ST_amp_list.append(ST_amp)
PQ_amp_list.append(PQ_amp)
QS_amp_list.append(QS_amp)
RP_amp_list.append(RP_amp)
RT_amp_list.append(RT_amp)
ST_interval_list.append(ST_interval)
RS_interval_list.append(RS_interval)
T_peak_list.append(T_peak)
P_peak_list.append(P_peak)
Q_peak_list.append(Q_peak)
R_peak_list.append(R_peak)
S_peak_list.append(S_peak)
RS_slope_list.append(RS_slope)
ST_slope_list.append(ST_slope)
features.append([np.mean(QRS_peak_list),
np.mean(QRS_area_list),
np.mean(PR_interval_list),
np.mean(QRS_duration_list),
np.mean(QT_interval_list),
np.mean(QT_corrected_list),
np.mean(vent_rate_list),
np.mean(RQ_amp_list),
np.mean(RS_amp_list),
np.mean(ST_amp_list),
np.mean(PQ_amp_list),
np.mean(QS_amp_list),
np.mean(RP_amp_list),
np.mean(RT_amp_list),
np.mean(ST_interval_list),
np.mean(RS_interval_list),
np.mean(T_peak_list),
np.mean(P_peak_list),
np.mean(Q_peak_list),
np.mean(R_peak_list),
np.mean(S_peak_list),
np.mean(RS_slope_list),
np.mean(ST_slope_list),
np.mean(NF_list),
np.mean(Fwidth_list),
np.max(QRS_peak_list),
np.max(QRS_area_list),
np.max(PR_interval_list),
np.max(QRS_duration_list),
np.max(QT_interval_list),
np.max(QT_corrected_list),
np.max(vent_rate_list),
np.max(RQ_amp_list),
np.max(RS_amp_list),
np.max(ST_amp_list),
np.max(PQ_amp_list),
np.max(QS_amp_list),
np.max(RP_amp_list),
np.max(RT_amp_list),
np.max(ST_interval_list),
np.max(RS_interval_list),
np.max(T_peak_list),
np.max(P_peak_list),
np.max(Q_peak_list),
np.max(R_peak_list),
np.max(S_peak_list),
np.max(RS_slope_list),
np.max(ST_slope_list),
np.max(NF_list),
np.max(Fwidth_list),
np.min(QRS_peak_list),
np.min(QRS_area_list),
np.min(PR_interval_list),
np.min(QRS_duration_list),
np.min(QT_interval_list),
np.min(QT_corrected_list),
np.min(vent_rate_list),
np.min(RQ_amp_list),
np.min(RS_amp_list),
np.min(ST_amp_list),
np.min(PQ_amp_list),
np.min(QS_amp_list),
np.min(RP_amp_list),
np.min(RT_amp_list),
np.min(ST_interval_list),
np.min(RS_interval_list),
np.min(T_peak_list),
np.min(P_peak_list),
np.min(Q_peak_list),
np.min(R_peak_list),
np.min(S_peak_list),
np.min(RS_slope_list),
np.min(ST_slope_list),
np.min(NF_list),
np.min(Fwidth_list),
|
np.std(QRS_peak_list)
|
numpy.std
|
"""
Stores all of the terms used inside the VPT2 representations
"""
import itertools
import numpy as np, functools as fp, itertools as ip, time, enum
from McUtils.Numputils import SparseArray, levi_cevita3, vec_tensordot, vec_outer
from McUtils.Data import UnitsData
from McUtils.Scaffolding import Logger, NullLogger, Checkpointer, NullCheckpointer
from McUtils.Parallelizers import Parallelizer
from McUtils.Zachary import TensorDerivativeConverter, TensorExpansionTerms
from ..Molecools import Molecule, MolecularVibrations, MolecularNormalModes
from .Common import PerturbationTheoryException
__all__ = [
"ExpansionTerms",
"KineticTerms",
"PotentialTerms",
"DipoleTerms",
"CoriolisTerm",
"PotentialLikeTerm"
]
class DumbTensor:
"""
A wrapper to make tensor algebra suck less
"""
def __init__(self, tensor):
self.t = tensor
@property
def shape(self):
return self.t.shape
@staticmethod
def _dot(*t, axes=None):
"""
Flexible tensordot
"""
if len(t) == 1:
return t[0]
if any(isinstance(x, int) for x in t):
return 0
def tdot(a, b, **kw):
if hasattr(a, "tensordot"):
if 'axes' not in kw:
kw['axes'] = [-1, 0]
td = a.tensordot(b, **kw)
else:
try:
td = np.tensordot(a, b, **kw)
except ValueError:
if 'axes' not in kw:
axes = [-1, 0]
else:
axes = kw['axes']
raise ValueError("Shape-mismatch for sum: {} x {} along axes {}".format(a.shape, b.shape, axes))
return td
def td(a, b):
if isinstance(a, int) or isinstance(b[0], int):
res = 0
else:
res = tdot(a, b[0], axes=b[1])
return res
if axes is None:
axes = [1] * (len(t) - 1)
return fp.reduce(td, zip(t[1:], axes), t[0])
def dot(self, b, *args, **kwargs):
if isinstance(b, DumbTensor):
b = b.t
return type(self)(self._dot(self.t, b, *args, **kwargs))
@staticmethod
def _shift(a, *s):
if isinstance(a, int):
return a
def shift_inds(n, i, j):
if i < j:
x = list(range(i)) + list(range(i + 1, j + 1)) + [i] + list(range(j + 1, n))
else:
x = list(range(j)) + [i] + list(range(j, i)) + list(range(i + 1, n))
return x
shiftIJ = lambda a, ij: np.transpose(a, shift_inds(a.ndim, *ij))
return fp.reduce(shiftIJ, s, a)
def shift(self, *args, **kwargs):
return type(self)(self._shift(self.t, *args, **kwargs))
def transpose(self, *perm):
return type(self)(self.t.transpose(perm))
@staticmethod
def _contract_dim(R, targ_dim):
# we figure out how much we're off by
# and go from there, assuming that pairs of
# dimensions to be contracted show up at the end
for i in range(R.ndim - targ_dim):
l_pos = R.ndim - (i + 2)
gloobers = R.shape[:l_pos]
if i > 0:
r_pos = -i
groobers = R.shape[r_pos:]
else:
groobers = ()
R = R.reshape(gloobers + (-1,) + groobers)
return R
def contract_dim(self, targ_dim):
return type(self)(self._contract_dim(self.t, targ_dim))
def __add__(self, other):
if isinstance(other, DumbTensor):
other = other.t
return type(self)(self.t+other)
def __radd__(self, other):
if isinstance(other, DumbTensor):
other = other.t
return type(self)(self.t+other)
def __matmul__(self, other):
return self.dot(other)
def __getitem__(self, item):
"""
:type item: slice
"""
a = item.start
b = item.stop
return self.shift([a, b])
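# Usage sketch (illustrative only, not part of the original module): DumbTensor
# wraps arrays so chained tensordots read like matrix algebra, e.g.
#   A = DumbTensor(np.random.rand(3, 4))
#   B = DumbTensor(np.random.rand(4, 5))
#   C = A @ B   # np.tensordot over the shared axis; C.t has shape (3, 5)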
class MixedDerivativeHandlingModes(enum.Enum):
Unhandled = "unhandled"
Numerical = "numerical"
Analytical = "analytical"
Averaged = "averaged"
class JacobianKeys(enum.Enum):
CartesiansByInternals = "CartesiansByInternals"
InternalsByCartesians = "InternalsByCartesians"
InternalsByCartesianModes = "InternalsByModes"
CartesianModesByInternals = "ModesByInternals"
CartesiansByInternalModes = "CartesiansByModes"
InternalModesByCartesians = "ModesByCartesians"
CartesianModesByInternalModes = "CartesianModesByInternalModes"
InternalModesByCartesianModes = "InternalModesByCartesianModes"
InternalModesByInternals = "InternalModesByInternals"
InternalsByInternalModes = "InternalsByInternalModes"
CartesianModesByCartesians = "CartesianModesByCartesians"
CartesiansByCartesianModes = "CartesiansByCartesianModes"
class ExpansionTerms:
"""
Base class for kinetic, potential, and dipole derivative terms
"""
# backpropagate_internals = False # just a flag that can be set to use Cartesian results _but_ do it with
# # terms backpropagated from the internals
# mixed_derivative_handling_mode = "unhandled"
# undimensionalize_normal_modes = True
# numerical_jacobians = True
# eckart_embed_derivatives = True
# strip_dummy_atoms = False
# strip_embedding_coordinates = False
# so they can be tracked/propagated up more easily
__props__ = (
"logger",
"parallelizer",
"checkpointer",
"undimensionalize",
"numerical_jacobians",
"eckart_embed_derivatives",
"eckart_embed_planar_ref_tolerance",
"strip_dummies",
"strip_embedding",
"mixed_derivative_handling_mode",
"backpropagate_internals",
"direct_propagate_cartesians",
"zero_mass_term",
"internal_fd_mesh_spacing",
"internal_fd_stencil",
"cartesian_fd_mesh_spacing",
"cartesian_fd_stencil",
"cartesian_analytic_deriv_order",
"internal_by_cartesian_order",
"cartesian_by_internal_order",
"jacobian_warning_threshold",
"coordinate_transformations",
"coordinate_derivatives",
)
_cached_jacobians = {}
def __init__(self,
molecule,
modes=None,
mode_selection=None,
use_internal_modes=None,
logger=None,
parallelizer=None,
checkpointer=None,
undimensionalize=None,
numerical_jacobians=True,
eckart_embed_derivatives=True,
eckart_embed_planar_ref_tolerance=None,
strip_dummies=False,
strip_embedding=True,
mixed_derivative_handling_mode="unhandled",
backpropagate_internals=False,
direct_propagate_cartesians=False,
zero_mass_term=1e7,
internal_fd_mesh_spacing=1.0e-3,
internal_fd_stencil=None,
cartesian_fd_mesh_spacing=1.0e-2,
cartesian_fd_stencil=None,
cartesian_analytic_deriv_order=0,
internal_by_cartesian_order=3,
cartesian_by_internal_order=4,
jacobian_warning_threshold=1e4,
coordinate_transformations=None,
coordinate_derivatives=None
):
"""
:param molecule: the molecule we're doing the expansion for
:type molecule: Molecule
:param modes: normal modes in Cartesian coordinates
:type modes: MolecularVibrations
:param mode_selection: the selection of modes to use
:type mode_selection: None | Iterable[int]
:param undimensionalize: whether or not we need to do some units fuckery on the modes
:type undimensionalize: bool
"""
self._terms = None
self.molecule = molecule
self.strip_dummies = strip_dummies
self.strip_embedding = strip_embedding
self.backpropagate_internals = backpropagate_internals
self.direct_propagate_cartesians = direct_propagate_cartesians
self.zero_mass_term = zero_mass_term
self.internal_fd_mesh_spacing = internal_fd_mesh_spacing
self.internal_fd_stencil = internal_fd_stencil
self.cartesian_fd_mesh_spacing = cartesian_fd_mesh_spacing
self.cartesian_fd_stencil = cartesian_fd_stencil
self.cartesian_analytic_deriv_order = cartesian_analytic_deriv_order
self.internal_by_cartesian_order = internal_by_cartesian_order
self.cartesian_by_internal_order = cartesian_by_internal_order
self.jacobian_warning_threshold = jacobian_warning_threshold
self.internal_coordinates = molecule.internal_coordinates
self.coords = molecule.coords
self.masses = molecule._atomic_masses()# * UnitsData.convert("AtomicMassUnits", "AtomicUnitOfMass")
self.use_internal_modes = use_internal_modes
if modes is None:
modes = molecule.normal_modes.modes
self._modes = modes.basis
if undimensionalize is None:
undimensionalize = not self._check_internal_modes(clean=False)
if undimensionalize:
self.raw_modes = modes
modes = self.undimensionalize(self.masses, self._modes)
else:
self.raw_modes = None
modes = self._modes
if mode_selection is not None:
modes = modes[mode_selection]
self._modes = modes
self.mode_sel = mode_selection
self.freqs = self.modes.freqs
self._inert_frame = None
self.reembed=eckart_embed_derivatives
self.reembed_tol=eckart_embed_planar_ref_tolerance
self.all_numerical=numerical_jacobians
if logger is None:
logger = NullLogger()
self.logger = logger
if parallelizer is None:
parallelizer = Parallelizer.lookup(None)
self.parallelizer = parallelizer
if checkpointer is None:
checkpointer = NullCheckpointer()
self.checkpointer = checkpointer
if coordinate_derivatives is not None:
self._cached_jacobians[molecule] = coordinate_derivatives
if coordinate_transformations is not None:
self._cached_transforms[molecule] = coordinate_transformations
else:
try:
transf = self.checkpointer['coordinate_transforms']
except (OSError, KeyError):
pass
else:
self._cached_transforms[molecule] = {JacobianKeys(k):v for k,v in transf.items()}
        if not isinstance(mixed_derivative_handling_mode, MixedDerivativeHandlingModes):
            mixed_derivative_handling_mode = MixedDerivativeHandlingModes(mixed_derivative_handling_mode)
        self.mixed_derivative_handling_mode = mixed_derivative_handling_mode
@property
def num_atoms(self):
"""
Gets the number of atoms (excluding dummies if `strip_dummies` is `True`)
:return:
:rtype:
"""
if self.strip_dummies:
n = np.sum(self.masses > 0, dtype=int)
else:
n = len(self.masses)
return n
def _check_internal_modes(self, modes=None, clean=True):
if self.use_internal_modes is not None:
if clean and self.use_internal_modes:
self._reshape_internal_modes()
return self.use_internal_modes
if modes is None:
modes = self._modes
mat = modes.matrix
is_internal = mat.shape[0] == self.coords.shape[0] * self.coords.shape[1] - 6
self.use_internal_modes = is_internal
if clean and is_internal:
self._reshape_internal_modes()
return is_internal
def _reshape_internal_modes(self):
QR = self._modes.matrix # derivatives of Q with respect to the internals
# we need to add zeros for the orientation coordinates
if not self.strip_embedding and QR.shape[0] != 3 * self.num_atoms:
_QR = QR
QR = np.zeros((3 * self.num_atoms, _QR.shape[1]))
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
QR[good_coords, :] = _QR
self._modes.matrix = QR
RQ = self._modes.inverse # derivatives of internals with respect to Q
if not self.strip_embedding and RQ.shape[1] != 3 * self.num_atoms:
_RQ = RQ
# we need to add zeros for the orientation coordinates
RQ = np.zeros((_RQ.shape[0], 3 * self.num_atoms))
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
RQ[:, good_coords] = _RQ
self._modes.inverse = RQ
@property
def modes(self):
# if self._check_internal_modes():
# J, = self.get_cart_jacobs([1])
# return np.dot(J, self._modes)
# else:
# # cartesian modes
return self._modes
def undimensionalize(self, masses, modes):
"""
Removes units from normal modes
:param masses:
:type masses:
:param modes:
:type modes:
:return:
:rtype:
"""
L = modes.matrix.T
freqs = modes.freqs
freq_conv = np.sqrt(np.broadcast_to(freqs[:, np.newaxis], L.shape))
if self._check_internal_modes(clean=False):
conv = freq_conv
L = L * conv
Linv = modes.inverse / conv
else:
mass_conv = np.sqrt(np.broadcast_to(self._tripmass(masses)[np.newaxis, :], L.shape))
conv = freq_conv * mass_conv
L = L * conv
Linv = (L / freq_conv**2)
modes = type(modes)(self.molecule, L.T, inverse=Linv, freqs=freqs)
return modes
def _tripmass(self, masses):
if self.strip_dummies:
masses = masses[masses > 0]
else:
masses = masses.copy()
masses[masses < 0] = self.zero_mass_term
return np.broadcast_to(masses[np.newaxis, :], (3, len(masses))).T.flatten()
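    # Illustration (not part of the original module):
    # _tripmass(np.array([m1, m2])) -> array([m1, m1, m1, m2, m2, m2]),
    # i.e. each atomic mass repeated for its x, y, z Cartesian components.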
def get_terms(self, order=None):
"""
Gets the terms up to the given order
:param order:
:type order:
:return:
:rtype:
"""
raise NotImplementedError("base class")
def get_term(self, t):
"""
Provides the term at order `t`
:param t:
:type t:
:return:
:rtype:
"""
if self._terms is None or len(self._terms) < t+1:
self._terms = self.get_terms(order=t)
return self._terms[t]
@property
def terms(self):
if self._terms is None:
self._terms = self.get_terms()
return self._terms
def __getitem__(self, item):
return self.get_term(item)
@staticmethod
def _weight_derivatives(t, order = None):
if isinstance(t, int):
return t
weighted = t
if order is None:
order = len(t.shape)
if order > 1:
s = t.shape
weights = np.ones(s)
all_inds = list(range(len(s)))
for i in range(2, order + 1):
for inds in ip.combinations(all_inds, i):
# define a diagonal slice through
sel = tuple(slice(None, None, None) if a not in inds else np.arange(s[a]) for a in all_inds)
weights[sel] = 1 / np.math.factorial(i)
weighted = weighted * weights
# print(weights, weighted.array)
return weighted
def get_int_jacobs(self, jacs):
"""
Gets the specified Internal->Cartesian Jacobians
:param jacs:
:type jacs:
:return:
:rtype:
"""
intcds = self.internal_coordinates
ccoords = self.coords
carts = ccoords.system
internals = intcds.system
if self.molecule not in self._cached_jacobians:
self._cached_jacobians[self.molecule] = {
'int': [],
'cart': []
}
exist_jacs = self._cached_jacobians[self.molecule]['int']
max_jac = max(jacs)
need_jacs = [x+1 for x in range(0, max_jac) if x >= len(exist_jacs) or exist_jacs[x] is None]
if len(need_jacs) > 0:
stencil = (max(need_jacs) + 2 + (1+max(need_jacs))%2) if self.internal_fd_stencil is None else self.internal_fd_stencil
# odd behaves better
with Parallelizer.lookup(self.parallelizer) as par:
new_jacs = [
x.squeeze() if isinstance(x, np.ndarray) else x
for x in intcds.jacobian(carts, need_jacs,
# odd behaves better
mesh_spacing=self.internal_fd_mesh_spacing,
stencil=stencil,
all_numerical=self.all_numerical,
converter_options=dict(
reembed=self.reembed,
planar_ref_tolerance=self.reembed_tol,
strip_dummies=self.strip_dummies
),
parallelizer=par
)]
# np.set_printoptions
# with np.printoptions(linewidth=1e8, threshold=1e8, floatmode='fixed', precision=10):
# raise Exception(str(np.round(new_jacs[0].reshape(9, 9)[(3, 6, 7), :], 12)))
for j,v in zip(need_jacs, new_jacs):
for d in range(j-len(exist_jacs)):
exist_jacs.append(None)
exist_jacs[j-1] = v
return [exist_jacs[j-1] for j in jacs]
def get_cart_jacobs(self, jacs):
"""
Gets the specified Cartesian->Internal Jacobians
:param jacs:
:type jacs:
:return:
:rtype:
"""
intcds = self.internal_coordinates
ccoords = self.coords
carts = ccoords.system
internals = intcds.system
if self.molecule not in self._cached_jacobians:
self._cached_jacobians[self.molecule] = {
'int': [],
'cart': []
}
exist_jacs = self._cached_jacobians[self.molecule]['cart']
max_jac = max(jacs)
need_jacs = [x+1 for x in range(0, max_jac) if x >= len(exist_jacs) or exist_jacs[x] is None]
if len(need_jacs) > 0:
stencil = (max(need_jacs) + 2 + (1+max(need_jacs))%2) if self.cartesian_fd_stencil is None else self.cartesian_fd_stencil
# odd behaves better
with Parallelizer.lookup(self.parallelizer) as par:
new_jacs = [
x.squeeze() if isinstance(x, np.ndarray) else x
for x in ccoords.jacobian(internals, need_jacs,
mesh_spacing=self.cartesian_fd_mesh_spacing,
stencil=stencil,
# all_numerical=True,
analytic_deriv_order=self.cartesian_analytic_deriv_order,
converter_options=dict(strip_dummies=self.strip_dummies),
parallelizer=par
)
]
if need_jacs[0] > self.cartesian_analytic_deriv_order:
new_jacs = new_jacs[self.cartesian_analytic_deriv_order:]
for j, v in zip(need_jacs, new_jacs):
for d in range(j - len(exist_jacs)):
exist_jacs.append(None)
exist_jacs[j - 1] = v
return [exist_jacs[j-1] for j in jacs]
@property
def inertial_frame(self):
"""
Provides the inertial axis frame
:return:
:rtype:
"""
if self._inert_frame is None:
# Need to put B in Hartree?
# I've got moments of inertia in amu * bohr^2 at the moment
# So we convert (amu * bohr^2) to (m_e * bohr^2) since hb^2/(m_e bohr^2) == E_h
mom_i, eigs = self.molecule.inertial_eigensystem
B_e = 1 / (2 * mom_i)# * UnitsData.convert("AtomicMassUnits", "AtomicUnitOfMass"))
# print(B_e * UnitsData.convert("Hartrees", "Wavenumbers") )
self._inert_frame = B_e, eigs
return self._inert_frame
def inertial_frame_derivatives(self):
if self.strip_dummies:
real_pos = self.masses > 0
mass = self.masses[real_pos]
crds = self.molecule.coords[real_pos, :]
else:
mass = self.masses.copy()
mass[mass < 0] = self.zero_mass_term
crds = self.molecule.coords
mass = np.sqrt(mass)
carts = mass[:, np.newaxis] * crds # mass-weighted Cartesian coordinates
### compute basic inertia tensor derivatives
# first derivs are computed as a full (nAt, 3, I_rows (3), I_cols (3)) tensor
# and then reshaped to (nAt * 3, I_rows, I_cols)
eyeXeye = np.eye(9).reshape(3, 3, 3, 3).transpose((2, 0, 1, 3))
I0Y_1 = np.tensordot(carts, eyeXeye, axes=[1, 0])
nAt = carts.shape[0]
nY = nAt * 3
I0Y_21 = (
np.reshape(np.eye(3), (9,))[np.newaxis, :, np.newaxis]
* carts[:, np.newaxis, :]
) # a flavor of outer product
I0Y_21 = I0Y_21.reshape((nAt, 3, 3, 3))
I0Y_2 = (I0Y_21 + I0Y_21.transpose((0, 1, 3, 2)))
I0Y = 2 * I0Y_1 - I0Y_2
I0Y = I0Y.reshape((nY, 3, 3))
        # second derivatives are 100% independent of coordinates
# only the diagonal blocks are non-zero, so we compute that block
# and then tile appropriately
keyXey = np.eye(9).reshape(3, 3, 3, 3)
I0YY_nn = 2 * eyeXeye - (keyXey + keyXey.transpose((0, 1, 3, 2)))
I0YY = np.zeros((nAt, 3, nAt, 3, 3, 3))
for n in range(nAt):
I0YY[n, :, n, :, :, :] = I0YY_nn
I0YY = I0YY.reshape((nY, nY, 3, 3))
return [I0Y, I0YY]
def moment_of_inertia_derivs(self, order):
B_e, _ = self.inertial_frame
YQ = self.modes.inverse # derivatives of Q with respect to the Cartesians
        u_0 = 2 * np.diag(B_e)  # inverse inertia tensor (diagonal in the principal axis frame)
if order > 0:
IdY, _ = self.inertial_frame_derivatives() # only ever two of these
IdQ = np.tensordot(YQ, IdY, axes=[1, 0])
A = np.tensordot(IdQ, u_0, axes=[2, 0])
all_derivs = [u_0]
for i in range(order):
# take original term and multiply in a . u_0
u = all_derivs[-1]
u = np.moveaxis(np.tensordot(u, A, axes=[1, 1]), -1, 1)
all_derivs.append(u)
        # then add in the binomial expansion terms
for i,u in enumerate(all_derivs):
all_derivs[i] = (-1)**i * (i+1) / (2**i) * u
return all_derivs
_cached_transforms = {}
def get_coordinate_transforms(self,
internal_by_cartesian_order=None,
cartesian_by_internal_order=None,
current_cache=None
):
if internal_by_cartesian_order is None:
internal_by_cartesian_order = self.internal_by_cartesian_order
if cartesian_by_internal_order is None:
cartesian_by_internal_order = self.cartesian_by_internal_order
if current_cache is None and self.molecule in self._cached_transforms:
current_cache = self._cached_transforms[self.molecule]
if (
current_cache is None
or len(current_cache[JacobianKeys.CartesiansByInternals]) < cartesian_by_internal_order
or len(current_cache[JacobianKeys.InternalsByCartesians]) < internal_by_cartesian_order
):
if current_cache is None:
current_cache = {}
if self.logger is not None:
self.logger.log_print(
[
"Getting coordinate transforms for {m}",
"Embedding axes: {a}"
],
m=self.molecule,
a=self.internal_coordinates.system.converter_options["axes_labels"]
)
# fill out
if (
JacobianKeys.CartesiansByInternals not in current_cache
or len(current_cache[JacobianKeys.CartesiansByInternals]) < cartesian_by_internal_order
):
# For speed reasons we've introduced class-level caching of these terms
if self.logger is not None:
start = time.time()
self.logger.log_print(
"Getting d^nX/dR^n up to order {o}...",
o=cartesian_by_internal_order
)
cart_by_internal_jacobs = self.get_int_jacobs(list(range(1, cartesian_by_internal_order+1)))
if self.logger is not None:
end = time.time()
self.logger.log_print(
"took {t}s",
t=round(end-start, 3)
)
# The finite difference preserves too much shape by default
_contract_dim = DumbTensor._contract_dim
_ = []
for i,x in enumerate(cart_by_internal_jacobs):
if isinstance(x, int) or x.ndim == 2+i:
_.append(x)
elif x.ndim > 2+i:
_.append(_contract_dim(x, 2+i))
else:
raise ValueError("bad shape for Cartesian by internal jacobian {} ({})".format(
i, x.shape
))
cart_by_internal_jacobs = _
# we'll strip off the embedding coords just in case
if self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3*self.num_atoms), embedding_coords)
for i, x in enumerate(cart_by_internal_jacobs):
bad_spots = np.where(np.abs(x) > self.jacobian_warning_threshold)
bad_bad_spots = bad_spots # so we don't lose it
                    if len(bad_spots) > 0: # np.where returns a tuple of index arrays
bad_spots = bad_spots[0]
if len(bad_spots) > 0:
m = np.max(np.abs(x[bad_bad_spots]))
self.logger.log_print('WARNING: maximum d^{i}X/dR^{i} term is {m}. '
'This will likely mess up G-matrix terms and is probably coming from a planar structure. '
                                              'Setting to zero, but `jacobian_warning_threshold` can be increased if this is expected. '
'All terms >{t} (base shape:{s}): {b}',
i=i+1,
m=m,
s=x.shape,
b=np.array(bad_bad_spots).T.tolist(),
t=self.jacobian_warning_threshold
)
x[bad_bad_spots] = 0.
# raise Exception(";_;")
# Need to then mass weight
masses = self.masses
mass_conv = np.sqrt(self._tripmass(masses))
# mass weight the derivs w.r.t internals
internal_weighting = mass_conv
_ = []
for i, x in enumerate(cart_by_internal_jacobs):
internal_weighting = np.expand_dims(internal_weighting, 0)
if isinstance(x, int):
_.append(x)
else:
x = x * internal_weighting
if self.strip_embedding:
for j in range(i+1):
x = np.take(x, good_coords, axis=j)
_.append(x)
cart_by_internal_jacobs = _
current_cache[JacobianKeys.CartesiansByInternals] = cart_by_internal_jacobs
else:
cart_by_internal_jacobs = current_cache[JacobianKeys.CartesiansByInternals]
if (
JacobianKeys.InternalsByCartesians not in current_cache
or len(current_cache[JacobianKeys.InternalsByCartesians]) < internal_by_cartesian_order
):
if self.logger is not None:
start = time.time()
self.logger.log_print(
"Getting d^nR/dX^n up to order {o}...",
o=internal_by_cartesian_order
)
int_by_cartesian_jacobs = self.get_cart_jacobs(list(range(1, internal_by_cartesian_order + 1)))
# m = np.max([np.max(np.abs(x)) for x in int_by_cartesian_jacobs])
if self.logger is not None:
end = time.time()
self.logger.log_print(
"took {t}s",
t=round(end-start, 3)
)
# raise Exception([x.shape for x in int_by_cartesian_jacobs])
_contract_dim = DumbTensor._contract_dim
_ = []
for i,x in enumerate(int_by_cartesian_jacobs):
if isinstance(x, int) or x.ndim == 2+i:
_.append(x)
elif x.ndim > 2+i:
_.append(_contract_dim(x, 2+i))
else:
raise ValueError("bad shape for internal by Cartesian jacobian {} ({})".format(
i, x.shape
))
int_by_cartesian_jacobs = _
# we'll strip off the embedding coords just in case
if self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3*self.num_atoms), embedding_coords)
for i,x in enumerate(int_by_cartesian_jacobs):
bad_spots = np.where(np.abs(x) > self.jacobian_warning_threshold)
bad_bad_spots = bad_spots # so we don't lose it
                    if len(bad_spots) > 0: # np.where returns a tuple of index arrays
bad_spots = bad_spots[0]
if len(bad_spots) > 0:
m = np.max(np.abs(x[bad_bad_spots]))
self.logger.log_print('WARNING: maximum d^{i}R/dX^{i} term is {m}. '
'This will likely mess up G-matrix terms and is probably coming from a planar structure. '
'Setting to zero, but `jacobian_warning_threshold` can be increased if this is expected. '
'All terms >{t} (base shape:{s}): {b}',
i=i+1,
m=m,
s=x.shape,
b=np.array(bad_bad_spots).T,
t=self.jacobian_warning_threshold
)
x[bad_bad_spots] = 0.
# Need to then mass weight
masses = self.masses
mass_conv = np.sqrt(self._tripmass(masses))
# mass weight the derivs w.r.t cartesians
cartesian_weighting = mass_conv
mc = mass_conv
_ = []
for i, x in enumerate(int_by_cartesian_jacobs):
cartesian_weighting = np.expand_dims(cartesian_weighting, -1)#[..., np.newaxis]
if isinstance(x, int):
_.append(x)
else:
x = x / cartesian_weighting
if self.strip_embedding:
x = np.take(x, good_coords, axis=-1)
_.append(x)
mc = np.expand_dims(mc, 0)
cartesian_weighting = cartesian_weighting * mc
int_by_cartesian_jacobs = _
current_cache[JacobianKeys.InternalsByCartesians] = int_by_cartesian_jacobs
else:
int_by_cartesian_jacobs = current_cache[JacobianKeys.InternalsByCartesians]
if self._check_internal_modes():
self.use_internal_modes = True
QR = self.modes.matrix # derivatives of Q with respect to the internals
# we need to add zeros for the orientation coordinates
if not self.strip_embedding and QR.shape[0] != 3*self.num_atoms:
_QR = QR
QR = np.zeros((3*self.num_atoms, _QR.shape[1]))
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3*self.num_atoms), embedding_coords)
QR[good_coords, :] = _QR
self.modes.matrix = QR
RQ = self.modes.inverse # derivatives of internals with respect to Q
if not self.strip_embedding and RQ.shape[1] != 3 * self.num_atoms:
_RQ = RQ
# we need to add zeros for the orientation coordinates
RQ = np.zeros((_RQ.shape[0], 3*self.num_atoms))
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3*self.num_atoms), embedding_coords)
RQ[:, good_coords] = _RQ
self.modes.inverse = RQ
if (
JacobianKeys.CartesiansByInternalModes not in current_cache
or len(current_cache[JacobianKeys.CartesiansByInternalModes]) < len(cart_by_internal_jacobs)
):
x_derivs = cart_by_internal_jacobs#(YR, YRR, YRRR, YRRRR)
Q_derivs = [RQ] + [0]*(len(cart_by_internal_jacobs) - 1)
YQ_derivs = TensorDerivativeConverter(Q_derivs, x_derivs,
jacobians_name='Q',
values_name='X'
).convert(order=len(cart_by_internal_jacobs))#, check_arrays=True)
current_cache[JacobianKeys.CartesiansByInternalModes] = YQ_derivs
if (
JacobianKeys.InternalModesByCartesians not in current_cache
or len(current_cache[JacobianKeys.InternalModesByCartesians]) < len(int_by_cartesian_jacobs)
):
QY_derivs = TensorDerivativeConverter(int_by_cartesian_jacobs,
[QR] + [0]*(len(int_by_cartesian_jacobs) - 1)
).convert(order=len(int_by_cartesian_jacobs))#, check_arrays=True)
current_cache[JacobianKeys.InternalModesByCartesians] = QY_derivs
else:
# tr_modes = self.molecule.translation_rotation_modes[1].T
QY = self.modes.matrix # derivatives of Q with respect to the Cartesians
YQ = self.modes.inverse # derivatives of Cartesians with respect to Q
# YQ = np.concatenate([
# tr_modes,
# YQ # derivatives of Cartesians with respect to Q
# ], axis=0)
# QY = np.concatenate([
# tr_modes.T,
# QY # derivatives of Cartesians with respect to Q
# ], axis=1)
if (
JacobianKeys.InternalsByCartesianModes not in current_cache
or len(current_cache[JacobianKeys.InternalsByCartesianModes]) < internal_by_cartesian_order
):
RQ_derivs = TensorDerivativeConverter(
[YQ] + [0]*(len(int_by_cartesian_jacobs) - 1),
int_by_cartesian_jacobs
).convert(order=len(int_by_cartesian_jacobs))#, check_arrays=True)
current_cache[JacobianKeys.InternalsByCartesianModes] = RQ_derivs
else:
RQ_derivs = current_cache[JacobianKeys.InternalsByCartesianModes]
if (
JacobianKeys.CartesianModesByInternals not in current_cache
or len(current_cache[JacobianKeys.CartesianModesByInternals]) < cartesian_by_internal_order
):
QR_derivs = TensorDerivativeConverter(
cart_by_internal_jacobs,
[QY] + [0]*(len(cart_by_internal_jacobs) - 1)
).convert(order=len(cart_by_internal_jacobs))
current_cache[JacobianKeys.CartesianModesByInternals] = QR_derivs
else:
QR_derivs = current_cache[JacobianKeys.CartesianModesByInternals]
if (
JacobianKeys.CartesiansByInternalModes not in current_cache
or len(current_cache[JacobianKeys.CartesiansByInternalModes]) < len(cart_by_internal_jacobs)
):
x_derivs = cart_by_internal_jacobs#(YR, YRR, YRRR, YRRRR)
Q_derivs = RQ_derivs[:1] + [0]*(len(cart_by_internal_jacobs) - 1)
YQ_derivs = TensorDerivativeConverter(Q_derivs, x_derivs,
jacobians_name='Q',
values_name='X'
).convert(order=len(cart_by_internal_jacobs))#, check_arrays=True)
# self._get_tensor_derivs(
# YQ_derivs, (QY, 0, 0, 0),
# mixed_XQ=False
# )
current_cache[JacobianKeys.CartesiansByInternalModes] = YQ_derivs
if (
JacobianKeys.CartesianModesByInternalModes not in current_cache
or len(current_cache[JacobianKeys.CartesianModesByInternalModes]) < len(cart_by_internal_jacobs)
):
# modes = self.molecule.translation_rotation_modes[1]
# # raise Exception(
# # modes @ modes.T,
# # modes.T @ modes,
# # self.molecule.translation_rotation_modes[1].shape,
# # self.modes.inverse.shape
# # )
# YQ2 = np.concatenate([
# modes,
# self.modes.inverse # derivatives of Cartesians with respect to Q
# ], axis=0)
YQ_derivs = current_cache[JacobianKeys.CartesiansByInternalModes]
qQ_derivs = TensorDerivativeConverter(YQ_derivs,
[QY] + [0] * (len(cart_by_internal_jacobs) - 1),
jacobians_name='YQ',
values_name='qY'
).convert(order=len(cart_by_internal_jacobs))#, check_arrays=True)
# raise Exception(qQ_derivs[0][6:, 6:])
current_cache[JacobianKeys.CartesianModesByInternalModes] = qQ_derivs
if (
JacobianKeys.InternalModesByCartesians not in current_cache
or len(current_cache[JacobianKeys.InternalModesByCartesians]) < len(int_by_cartesian_jacobs)
):
QR = QR_derivs[0]
QY_derivs = TensorDerivativeConverter(int_by_cartesian_jacobs,
[QR] + [0]*(len(int_by_cartesian_jacobs) - 1)
).convert(order=len(int_by_cartesian_jacobs))#, check_arrays=True)
current_cache[JacobianKeys.InternalModesByCartesians] = QY_derivs
if (
JacobianKeys.InternalModesByCartesianModes not in current_cache
or len(current_cache[JacobianKeys.InternalModesByCartesianModes]) < len(int_by_cartesian_jacobs)
):
RQ_derivs = current_cache[JacobianKeys.InternalsByCartesianModes]
QR = QR_derivs[0]
Qq_derivs = TensorDerivativeConverter(RQ_derivs,
[QR] + [0] * (len(RQ_derivs) - 1),
jacobians_name='Rq',
values_name='qR'
).convert(order=len(RQ_derivs))
current_cache[JacobianKeys.InternalModesByCartesianModes] = Qq_derivs
if (
JacobianKeys.CartesianModesByCartesians not in current_cache
or len(current_cache[JacobianKeys.CartesianModesByCartesians]) < len(cart_by_internal_jacobs)
):
current_cache[JacobianKeys.CartesianModesByCartesians] = [self.modes.matrix] + [0]*(len(cart_by_internal_jacobs)-1)
if (
JacobianKeys.CartesiansByCartesianModes not in current_cache
or len(current_cache[JacobianKeys.CartesiansByCartesianModes]) < len(cart_by_internal_jacobs)
):
current_cache[JacobianKeys.CartesiansByCartesianModes] = [self.modes.inverse] + [0] * (len(cart_by_internal_jacobs) - 1)
if (
JacobianKeys.InternalModesByInternals not in current_cache
or len(current_cache[JacobianKeys.InternalModesByInternals]) < len(int_by_cartesian_jacobs)
):
YR = current_cache[JacobianKeys.CartesiansByInternals][0]
QY = current_cache[JacobianKeys.InternalModesByCartesians][0]
current_cache[JacobianKeys.InternalModesByInternals] = [YR@QY] + [0]*(len(int_by_cartesian_jacobs)-1)
if (
JacobianKeys.InternalsByInternalModes not in current_cache
or len(current_cache[JacobianKeys.InternalsByInternalModes]) < len(int_by_cartesian_jacobs)
):
RY = current_cache[JacobianKeys.InternalsByCartesians][0]
YQ = current_cache[JacobianKeys.CartesiansByInternalModes][0]
current_cache[JacobianKeys.InternalsByInternalModes] = [YQ@RY] + [0]*(len(int_by_cartesian_jacobs)-1)
self._cached_transforms[self.molecule] = current_cache
with self.checkpointer:
try:
self.checkpointer['coordinate_transforms'] = {k.value:v for k,v in current_cache.items()}
except (OSError, KeyError):
pass
return current_cache#self._cached_transforms[self.molecule]
@property
def cartesian_L_matrix(self):
return self.get_cartesians_by_cartesian_modes(1)[0]
def get_cartesians_by_cartesian_modes(self, order=None):
# print(dict(
# cartesian_by_internal_order=order,
# internal_by_cartesian_order=min(order, self.internal_by_cartesian_order)
# ))
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.CartesiansByCartesianModes]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesiansByInternalModes',
len(base),
order
))
base = base[:order]
return base
@property
def cartesian_L_inverse(self):
return self.get_cartesian_modes_by_cartesians(1)[0]
def get_cartesian_modes_by_cartesians(self, order=None):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.CartesianModesByCartesians]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesiansByInternalModes',
len(base),
order
))
base = base[:order]
return base
@property
def internal_L_matrix(self):
return self.get_internal_modes_by_internals(1)[0]
def get_internal_modes_by_internals(self, order=None, strip_embedding=True):
# print(dict(
# cartesian_by_internal_order=order,
# internal_by_cartesian_order=min(order, self.internal_by_cartesian_order)
# ))
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.InternalModesByInternals]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'InternalModesByInternal',
len(base),
order
))
base = base[:order]
if strip_embedding and not self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
base = [t[good_coords,] if not isinstance(t, int) else t for t in base]
return base
@property
def internal_L_inverse(self):
return self.get_internals_by_internal_modes(1)[0]
def get_internals_by_internal_modes(self, order=None, strip_embedding=True):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.InternalsByInternalModes]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesiansByInternalModes',
len(base),
order
))
base = base[:order]
if strip_embedding and not self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
base = [t[..., good_coords] if not isinstance(t, int) else t for t in base]
return base
@property
def cartesians_by_modes(self):
return self.get_cartesians_by_modes()
def get_cartesians_by_modes(self, order=None):
# print(dict(
# cartesian_by_internal_order=order,
# internal_by_cartesian_order=min(order, self.internal_by_cartesian_order)
# ))
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.CartesiansByInternalModes]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesiansByInternalModes',
len(base),
order
))
base = base[:order]
return base
@property
def modes_by_cartesians(self):
return self.get_coordinate_transforms()[JacobianKeys.InternalModesByCartesians]
def get_modes_by_cartesians(self, order=None, strip_embedding=True):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=None if order is None else min(order, self.cartesian_by_internal_order),
internal_by_cartesian_order=order
)[JacobianKeys.InternalModesByCartesians]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'InternalModesByCartesians',
len(base),
order
))
base = base[:order]
return base
@property
def cartesians_by_internals(self):
return self.get_coordinate_transforms()[JacobianKeys.CartesiansByInternals]
def get_cartesians_by_internals(self, order=None, strip_embedding=False):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.CartesiansByInternals]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesiansByInternals',
len(base),
order
))
base = base[:order]
if strip_embedding and not self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
base = [t[np.ix_(*((good_coords,)*(t.ndim-1)))] for t in base]
return base
@property
def internals_by_cartesians(self):
return self.get_coordinate_transforms()[JacobianKeys.InternalsByCartesians]
def get_internals_by_cartesians(self, order=None, strip_embedding=False):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=None if order is None else min(order, self.cartesian_by_internal_order),
internal_by_cartesian_order=order
)[JacobianKeys.InternalsByCartesians]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'InternalsByCartesians',
len(base),
order
))
base = base[:order]
if strip_embedding and not self.strip_embedding:
embedding_coords = [0, 1, 2, 4, 5, 8]
good_coords = np.setdiff1d(np.arange(3 * self.num_atoms), embedding_coords)
base = [t[..., good_coords] for t in base]
return base
@property
def cartesian_modes_by_internal_modes(self):
return self.get_coordinate_transforms()[JacobianKeys.CartesianModesByInternalModes]
def get_cartesian_modes_by_internal_modes(self, order=None):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=order,
internal_by_cartesian_order=None if order is None else min(order, self.internal_by_cartesian_order)
)[JacobianKeys.CartesianModesByInternalModes]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'CartesianModesByInternalModes',
len(base),
order
))
base = base[:order]
return base
@property
def internal_modes_by_cartesian_modes(self):
return self.get_coordinate_transforms()[JacobianKeys.InternalModesByCartesianModes]
def get_internal_modes_by_cartesian_modes(self, order=None):
base = self.get_coordinate_transforms(
cartesian_by_internal_order=None if order is None else min(order, self.cartesian_by_internal_order),
internal_by_cartesian_order=order
)[JacobianKeys.InternalModesByCartesianModes]
if order is not None:
if len(base) < order:
raise ValueError("insufficient {} (have {} but expected {})".format(
'InternalModesByCartesianModes',
len(base),
order
))
base = base[:order]
return base
class PotentialTerms(ExpansionTerms):
"""
A helper class that can transform the derivatives of the potential from Cartesian to normal coordinates
"""
__props__ = ExpansionTerms.__props__ + (
"potential_derivatives",
"check_input_force_constants",
"hessian_tolerance",
"grad_tolerance",
"freq_tolerance"
)
def __init__(self,
molecule,
mixed_derivs=None,
modes=None,
potential_derivatives=None,
mode_selection=None,
logger=None,
parallelizer=None,
checkpointer=None,
check_input_force_constants=True,
allow_higher_potential_terms=False,
hessian_tolerance=1.0e-4,
grad_tolerance=1.0e-4,
freq_tolerance=2e-3,
**opts
):
"""
:param molecule: the molecule that will supply the potential derivatives
:type molecule: Molecule
        :param mixed_derivs: whether or not the pulled derivatives are partial derivatives along the normal coordinates
:type mixed_derivs: bool
:param modes: the normal modes to use when doing calculations
:type modes: None | MolecularVibrations
:param mode_selection: the subset of normal modes to use
:type mode_selection: None | Iterable[int]
"""
super().__init__(molecule, modes, mode_selection=mode_selection,
logger=logger, parallelizer=parallelizer, checkpointer=checkpointer,
**opts
)
self.check_input_force_constants=check_input_force_constants
self.hessian_tolerance = hessian_tolerance
self.grad_tolerance = grad_tolerance
self.freq_tolerance = freq_tolerance
self.mixed_derivs = mixed_derivs # we can figure this out from the shape in the future
self._input_derivs = potential_derivatives
self._v_derivs = None #
self.allow_higher_potential_terms=allow_higher_potential_terms
@property
def v_derivs(self):
if self._v_derivs is None:
if self._input_derivs is None:
self._input_derivs = self.molecule.potential_surface.derivatives
self._v_derivs = self._canonicalize_derivs(self.freqs, self.masses, self._input_derivs)
return self._v_derivs
@v_derivs.setter
def v_derivs(self, v):
self._v_derivs = v
def _check_mode_terms(self, derivs=None):
modes_n = len(self.modes.freqs)
if derivs is None:
derivs = self.v_derivs
for d in derivs:
if d.shape != (modes_n,) * len(d.shape):
return False
return True
def _canonicalize_derivs(self, freqs, masses, derivs):
if self._check_mode_terms(derivs):
return derivs
if len(derivs) == 3:
grad, fcs, fds = derivs
try:
fcs = fcs.array
except AttributeError:
fcs, thirds, fourths = derivs
grad = None
else:
thirds = fds.third_deriv_array
fourths = fds.fourth_deriv_array
elif len(derivs) == 4:
grad, fcs, thirds, fourths = derivs
else:
grad = derivs[0]
fcs = derivs[1]
thirds = derivs[2] if len(derivs) > 2 else None
fourths = derivs[3] if len(derivs) > 3 else None
n = self.num_atoms
modes_n = len(self.modes.freqs)
internals_n = 3 * n - 6
coord_n = 3 * n
if len(derivs) > 2 and self.mode_sel is not None and thirds.shape[0] == internals_n:
thirds = thirds[(self.mode_sel,)]
if len(derivs) > 3 and self.mode_sel is not None and fourths.shape[0] == internals_n:
if not isinstance(self.mode_sel, slice):
fourths = fourths[
|
np.ix_(self.mode_sel, self.mode_sel)
|
numpy.ix_
|
#####################################################################################
# CLASSICS - CalcuLAtionS of Self Interaction Cross Sections #
# by <NAME>, <NAME>, <NAME>, <NAME> and <NAME> #
#####################################################################################
# Requirements: python3, numpy, scipy
#
# This code provides the following functions:
#
# sigma(kappa, beta, mode, sign):
# Returns approximate analytical cross sections for the classical regime (kappa > 1) for given arguments
# kappa: Dimensionless momentum in the centre-of-mass frame, given by kappa = m_\chi v / (2 m_\phi)
# beta: Rescaled strength of the Yukawa potential, given by beta = 2 \alpha_\chi m_\phi / (m_\chi v^2)
# mode: Can take one of the following values:
# 'T': Returns the momentum transfer cross section for distinguishable particles
# 'V': Returns the viscosity cross section for distinguishable particles
# 'even': Returns the viscosity cross section for identical particles with even spatial wave function
# 'odd': Returns the viscosity cross section for identical particles with odd spatial wave function
# 'scalar: Returns the viscosity cross section for identical scalar particles
# 'fermion': Returns the viscosity cross section for identical fermions (averaged over initial spins)
# 'vector': Returns the viscosity cross section for identical vector particles (averaged over initial spins)
# If no mode is specified, the default option is 'T'
# sign: Can take one of the following values:
# 'attractive': Attractive Yukawa potential
# 'repulsive': Repulsive Yukawa potential
# If no sign is specified, the default option is 'attractive'
#
# sigma_Hulthen(kappa, beta, mode, sign, eps):
# Returns approximate analytical cross sections for the quantum regime (kappa < 1) for S-wave-only scattering under the Hulthen approximation, following Tulin, Yu & Zurek (arXiv:1302.3898)
# The arguments are the same as above, with the addition of
# eps: Numerical constant with default value 1.6.
#
# sigma_combined(kappa, beta, mode, sign):
# Returns the appropriate cross section depending on kappa, i.e. sigma for kappa > 1 and sigma_Hulthen for kappa < 0.4.
# To ensure continuity, the code linearly interpolates between the two different regimes between kappa = 0.4 and kappa = 1.
# The arguments are the same as above.
#
# averagedsigma(kappa0, beta0, mode, sign):
# Returns the averaged cross section for a Maxwell-Boltzmann distribution with velocity dispersion v0 based on pre-calculated tables.
# The arguments are the same as above with kappa0 = kappa(v = v0) and beta0 = beta(v = v0).
#
# IMPORTANT: The return values of all functions are dimensionless and need to be multiplied with (pi / m_phi^2) in order to obtain actual cross sections.
#
# Note: The option "approximate_eta" below determines whether the code should use approximate asymptotic expressions of the modified Bessel functions for large argument
# approximate_eta = True is slightly faster but inaccurate for small kappa
# approximate_eta = False is slightly slower but gives the best accuracy
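#
# Illustrative usage sketch (not part of the original release; 'm_phi' is a placeholder
# for the mediator mass in whatever units you work in):
#
#   import numpy as np
#   kappa, beta = 5.0, 0.3
#   dimensionless = sigma_combined(kappa, beta, mode='V', sign='attractive')
#   sigma_physical = dimensionless * np.pi / m_phi**2   # apply the (pi / m_phi^2) prefactor
#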
import numpy as np
from numpy import sqrt, pi, sin, cos, log, exp, euler_gamma
from scipy.special import kn, gamma, loggamma
from scipy.interpolate import RectBivariateSpline
approximate_eta = False
# Definition of auxiliary functions
lmin = lambda beta, kappa: max(1./2.,beta*kappa)
lminp = lambda beta, kappa: max(1.,2.*beta*kappa)
turn = lambda beta, betalow, a: exp(-(max(beta, betalow) - betalow)*a)
if approximate_eta:
eta = lambda x: -2.*log(x/2.)-1-2.*euler_gamma+(1-euler_gamma-log(x/2.))*x**2.
else:
eta = lambda x: x**2 * (- kn(1,x)**2 + kn(2,x)*kn(0,x))
zeta = lambda kappa, beta, lmin: (max(lmin, beta*kappa)**2 - lmin**2)/(2*kappa**2*beta**2) + eta(max(lmin, beta*kappa)/kappa)
lambdaT = (1.+cos(2.)+2*sin(2.))/2.
lambdaV = (9.-cos(4.)-4.*sin(4.))/16.
sigmaT_smallbeta = lambda beta, kappa: 2. * beta**2. * zeta(kappa, beta, 0.5)
sigmaV_smallbeta = lambda beta, kappa, lmin: 4. * beta**2. * zeta(kappa, 2.*beta, lmin)
def sigmaTatt(beta, kappa):
if beta < 1: return sigmaT_smallbeta(beta,kappa)*turn(beta,0.2,-0.64)
elif beta > 50: return 2. * log(beta) * (log(log(beta)) + 1)
else: return 4.7*log(beta + 0.82)
def sigmaTrep(beta, kappa):
if beta <1: return sigmaT_smallbeta(beta,kappa)*turn(beta,0.2,0.53)
elif beta > 50: return lambdaT * (log(2.*beta)-log(log(2.*beta)))**2.
else: return 2.9*log(beta + 0.47)
def sigmaVatt(beta, kappa, lmin):
if beta < 0.5: return sigmaV_smallbeta(beta,kappa,lmin)*turn(beta,0.1,-0.67)
elif beta > 25: return (1 + log(beta)- 1/(2.*log(beta)))**2/2.
else: return 2.5*log(beta + 1.05)
def sigmaVrep(beta, kappa, lmin):
if beta < 0.5: return sigmaV_smallbeta(beta,kappa,lmin)*turn(beta,0.1,0.370562)
elif beta > 25: return log(2. * beta) * (lambdaV * log(2. * beta) - (2.*lambdaV - 1) * log(log(2.*beta)))
else: return 2.8*log(beta + 0.80)
# Reading tabulated grids
modes = ['T','V','even','odd','scalar','fermion','vector']
signs = ['attractive','repulsive']
mode_factor = {'T': 1, 'V': 2/3., 'even': 4/3., 'odd': 0, 'scalar': 4/3., 'fermion': 1/3., 'vector': 8/9.}
beta0grid = np.logspace(-5,5, 101, endpoint=True)
kappa0grid = np.logspace(-3,3, 61, endpoint=True)
averagedsigmainterdict = {}
#averagedsigmadict = {}
for mode in modes:
for sign in signs:
outputname_data = 'sigma'+mode+'list_'+sign+'.txt'
averagedsigmagrid = np.loadtxt(outputname_data)
averagedsigmaarray = np.array(averagedsigmagrid)[:,2].reshape((len(kappa0grid),len(beta0grid))) + 1e-100
averagedsigmainterdict[mode+sign] = RectBivariateSpline(np.log10(kappa0grid),
|
np.log10(beta0grid)
|
numpy.log10
|
from collections import Counter
import numpy as np
import copy
TTA_COUNT = 11
CLASSES = [
"HTC-1-M7",
"LG-Nexus-5x",
"Motorola-Droid-Maxx",
"Motorola-Nexus-6",
"Motorola-X",
"Samsung-Galaxy-Note3",
"Samsung-Galaxy-S4",
"Sony-NEX-7",
"iPhone-4s",
"iPhone-6"
]
def _geometric_mean(list_preds):
result =
|
np.ones((2640, 10))
|
numpy.ones
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import sys
sys.path.append("../")
from op_test import OpTest
import paddle.fluid as fluid
import paddle.fluid.core as core
class TestSequencePadOp(OpTest):
def set_attr(self):
self.x_shape = [12, 10]
self.x_len_lod = [[2, 3, 4, 3]]
self.pad_value = [1.0]
self.padded_length = -1
self.dtype = 'float64'
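        # Worked example of these defaults: X holds 4 sequences of lengths 2, 3, 4 and 3
        # (2 + 3 + 4 + 3 = 12 rows of width 10), and padded_length == -1 means "pad every
        # sequence with pad_value up to the longest length", i.e. up to 4 here.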
def set_data(self):
x_data = np.random.uniform(0.1, 0.5, self.x_shape).astype(self.dtype)
pad_value_data = np.array(self.pad_value).astype(self.dtype)
self.inputs = {
'X': (x_data, self.x_len_lod),
'PadValue': pad_value_data
}
self.attrs = {'padded_length': self.padded_length}
def compute(self):
# get padded length
padded_length = self.padded_length
x_len_lod_0 = self.x_len_lod[0]
if padded_length == -1:
max_seq_len = 0
for l in x_len_lod_0:
max_seq_len = max(max_seq_len, l)
padded_length = max_seq_len
# do padding
x_data = self.inputs['X'][0]
pad_value_data = self.inputs['PadValue']
if pad_value_data.shape == (1, ):
pad_value_data = np.broadcast_to(
pad_value_data, shape=x_data.shape[1:])
padded_sequences = []
start_idx = 0
for l in x_len_lod_0:
end_idx = start_idx + l
seq = x_data[start_idx:end_idx]
to_pad_len = padded_length - l
for _ in range(to_pad_len):
seq = np.append(seq, pad_value_data[np.newaxis, :], axis=0)
padded_sequences.append(seq)
start_idx = end_idx
out_data = np.array(padded_sequences)
length =
|
np.array(self.x_len_lod[0])
|
numpy.array
|
# Copyright 2021 UChicago Argonne, LLC
# Author:
# - <NAME> and <NAME>, Argonne National Laboratory
# - <NAME>, Idaho National Laboratory
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0.txt
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Author: <NAME>
Date : 07/30/2020
"""
from __future__ import division, print_function , unicode_literals, absolute_import
#External Modules---------------------------------------------------------------
import numpy as np
import os
import math
from scipy import signal
from scipy import io
from scipy.interpolate import interp1d
from datetime import datetime
import csv
import sys
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
#External Modules End-----------------------------------------------------------
#Internal Modules---------------------------------------------------------------
from PluginBaseClasses.ExternalModelPluginBase import ExternalModelPluginBase
#Internal Modules End-----------------------------------------------------------
class RefGov_unparameterized_SIMO(ExternalModelPluginBase):
# External Model plugin class, Reference Governor
#################################
#### RAVEN API methods BEGIN ####
#################################
def _readMoreXML(self, container, xmlNode):
"""
Method to read the portion of the XML that belongs to this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, xmlNode, xml.etree.ElementTree.Element, XML node that needs to be read
@ Out, None
"""
""" Initialization of 4 entries """
container.constants = {}
# Extract the output Variable name: container.outputVariables = ['V1', 'V1min', 'V1max']
outputVarsNode = xmlNode.find("outputVariables")
# print(outputVarsNode)
if outputVarsNode is None: # if cannot find the "outputVariables" tag, return error
raise IOError("RG Plugin: <outputVariables> XML block must be inputted!")
# container.outputVariables = outputVarsNode.text.strip()
container.outputVariables = [var.strip() for var in outputVarsNode.text.split(",")]
# print(container.outputVariables) # ['V1', 'V1min', 'V1max']
for child in xmlNode:
# print(child.tag)
# xmlNode is the Nodes within the section of <ExternalModel>.
# child.tag are the strings containing each node name. child.tag == child.tag.strip()
if child.tag.strip() == "variables":
# get verbosity if it exists
# Extract Variable names: container.variables = ['Vi', 'Pi']
container.variables = [var.strip() for var in child.text.split(",")]
# print(container.variables) # ['V1', 'V1min', 'V1max', 'P1']
# if container.outputVariable not in container.variables:
# raise IOError("RG Plug-in: "+container.outputVariable+" variable MUST be present in the <variables> definition!")
container.constants['Sys_State_x']=[] # place holder
if child.tag.strip() == "constant":
# Extract the constant names and their values: container.constants = {'TimeInterval': 3600.0}
# varName has to be provided in the <constant> entry
if "varName" not in child.attrib:
raise IOError("RG Plug-in: attribute varName must be present in <coefficient> XML node!")
# extract the system state variable, the only vector
if child.attrib['varName'] == "Sys_State_x":
container.constants['Sys_State_x'] = [float(var.strip()) for var in child.text.split(",")]
else:
container.constants[child.attrib['varName']] = float(child.text)
# print(container.constants) # {'TimeInterval': 3600.0}
# print(a)
Min_counter = 0; Max_counter = 0
for key, value in container.constants.items(): # count the inputs
if key.startswith('Min_Target'):
Min_counter += 1
# print(Min_counter,key)
elif key.startswith('Max_Target'):
Max_counter += 1
# print(Max_counter, key)
# print(Min_counter, Max_counter)
container.RG_Min_Targets = []
container.RG_Max_Targets = []
if Min_counter ==0 or Max_counter ==0: # check if Min/Max entry exists
raise IOError("RG Plug-in: Missing 'Min_Target' or 'Max_Target' inputs!")
else:
if Min_counter != Max_counter: # check if Min and Max have the same length
raise IOError("RG Plug-in: 'Min_Target' and 'Max_Target' are different in size!")
else:
for i in range(0,Min_counter):
try:
container.RG_Min_Targets.append(container.constants['Min_Target%d' % (i+1)])
except:
raise IOError("RG Plug-in: 'Min_Target%d' does not exist!" % (i+1))
try:
container.RG_Max_Targets.append(container.constants['Max_Target%d' % (i+1)])
except:
raise IOError("RG Plug-in: 'Max_Target%d' does not exist!" % (i+1))
# print(container.RG_Min_Targets)
# print(container.RG_Max_Targets)
# print(a)
# check if yMin < yMax is satisfied
a = np.asarray(container.RG_Max_Targets)-np.asarray(container.RG_Min_Targets)
# print(a)
if any(n<=0 for n in a):
# print("negative found")
raise IOError("RG Plug-in: 'Min_Targets < Max_Targets' is not satisfied. Check the <ExternalModel> node!")
inputvariables = set(container.variables)-set(container.outputVariables)
container.variables = inputvariables
# print(container.variables) # {'P1'}
def initialize(self, container, runInfoDict, inputFiles):
"""
Method to initialize this plugin
@ In, container, object, self-like object where all the variables can be stored
@ In, runInfoDict, dict, dictionary containing all the RunInfo parameters (XML node <RunInfo>)
@ In, inputFiles, list, list of input files (if any)
@ Out, None
"""
# print("\n###############################################\n")
# # print(runInfoDict['WorkingDir'])
# print(inputFiles[1].__dict__)
# print("\n###############################################\n")
# initialization: ensure each var has an initial value
# for var in container.variables:
# if var not in container.coefficients:
# container.coefficients[var] = 1.0
# print("ExamplePlugin: not found coefficient for variable "+var+". Default value is 1.0!")
# container.stepSize = (container.endValue - container.startValue)/float(container.numberPoints)
def createNewInput(self, container, inputs, samplerType, **Kwargs):
# Extract the matrix file name
for item in inputs:
# print(item)
if 'UserGenerated' in item.__class__.__name__: # look for the file input that contains the path to XML file
# Assemble the filename
MatrixFileName = item.__dict__['_File__path']+item.__dict__['_File__base']+'.'+item.__dict__['_File__ext']
# print(MatrixFileName)
f = open("MatrixFilePath.txt","w")
f.write(MatrixFileName)
f.close()
# Remove this item from inputs list
inputs.remove(item)
if 'MatrixFileName' not in locals():
f = open("MatrixFilePath.txt","r")
MatrixFileName = f.read()
f.close()
# print(MatrixFileName)
# Load the XML file containing the ABC matrices
container.Tss, container.n, container.m, container.p, container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array = read_unparameterized_XML(MatrixFileName)
# Tss is the sampling period of discrete A,B,C matrices
if len(container.RG_Min_Targets)!=container.p or len(container.RG_Max_Targets)!=container.p:
sys.exit('ERROR: Check the size of "Min_Target" ({}) or "Max_Target" ({}). \n\tBoth should contain {} items.\n'.format(len(container.RG_Min_Targets), len(container.RG_Max_Targets), container.p))
""" Keep only the profiles with YNorm within the [y_min, y_max] range """
container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array = check_YNorm_within_Range(
container.RG_Min_Targets, container.RG_Max_Targets, container.para_array, container.UNorm_list, container.XNorm_list, container.XLast_list, container.YNorm_list, container.A_list, container.B_list, container.C_list, container.eig_A_array)
if container.YNorm_list == []:
sys.exit('ERROR: No proper linearization point (YNorm) found in Matrix File. \n\tPlease provide a state space profile linearized within the [Min_Target, Max_Target] range\n')
max_eigA_id = container.eig_A_array.argmax()
container.A_m = container.A_list[max_eigA_id]; container.B_m = container.B_list[max_eigA_id]; container.C_m = container.C_list[max_eigA_id]; container.D_m = np.zeros((container.p,container.m)) # all zero D matrix
# print(container.eig_A_array)
# print(max_eigA_id)
# print("\n###############################################\n")
return Kwargs['SampledVars']
def run(self, container, Inputs):
"""
      Run method of the Reference Governor plugin: it selects the linearized state-space
      profile closest to the requested setpoint, builds the maximal output admissible set
      (MOAS), and returns a governed setpoint that keeps the outputs within [Min_Target, Max_Target].
@ In, container, object, self-like object where all the variables can be stored
@ In, Inputs, dict, dictionary of inputs from RAVEN
"""
""" Process the input from XML file """
# extract the power setpoint from Inputs, type == <class 'float'>
for var in container.variables:
r_value = Inputs[var]
# print("\n###############################################\n")
# print("r_value=", r_value, type(r_value))
""" MOAS steps Limit """
g = int(container.constants['MOASsteps']) # numbers of steps to look forward
""" Select the correct profile with ABCD matrices """
# Find the correct profile according to r_value
profile_id = (np.abs(container.para_array - r_value)).argmin()
# print(profile_id)
    # Retrieve the correct A, B, C matrices
A_d = container.A_list[profile_id]; B_d = container.B_list[profile_id]; C_d = container.C_list[profile_id]; D_d = np.zeros((container.p,container.m)) # all zero D matrix
    # Retrieve the correct y_0, r_0 and X
y_0 = container.YNorm_list[profile_id]; r_0 = float(container.UNorm_list[profile_id]);
xLast=container.XLast_list[profile_id]; xNorm=container.XNorm_list[profile_id]
# print(type(r_0))
""" XLast and r_value """
if container.constants['Sys_State_x']==[]: # if user didn't supply the final system state vector
X_Last_RG = np.asarray(xLast - xNorm)
else:
X_Last_RG = np.asarray(container.constants['Sys_State_x']) - np.asarray(xNorm)
# print("X_Last_RG=", X_Last_RG, type(X_Last_RG))
# print(a)
r_value_RG = float(r_value) - r_0
""" Calculate Maximal Output Admissible Set (MOAS) """
s = [] # type == <class 'list'>
for i in range(0,container.p):
s.append([abs(container.RG_Max_Targets[i] - y_0[i])])
s.append([abs(y_0[i] - container.RG_Min_Targets[i])])
# print(s)
H, h = fun_MOAS_noinf(A_d, B_d, C_d, D_d, s, g) # H and h, type = <class 'numpy.ndarray'>
# print("H:\n", H); print("h:\n", h)
""" Call the Reference Governor to mild the r_value """
v_RG = fun_RG_SISO(0, X_Last_RG, r_value_RG, H, h, container.p) # v_RG: type == <class 'numpy.ndarray'>
""" 2nd adjustment """
# MOAS for the steps "g+1" - step "2g"
Hm, hm = fun_MOAS_noinf(container.A_m, container.B_m, container.C_m, container.D_m, s, g)
    # Calculate the max/min for v, ensuring that hm - Hxm*x(g+1) stays positive for the next g steps.
v_max, v_min = fun_2nd_gstep_calc(X_Last_RG, Hm, hm, container.A_m, container.B_m, g)
if v_RG < v_min:
v_RG = v_min
elif v_RG > v_max:
v_RG = v_max
# Provide the Output variable Vi with value
container.__dict__[container.outputVariables[0]] = v_RG + r_0
container.__dict__[container.outputVariables[1]] = v_min + r_0
container.__dict__[container.outputVariables[2]] = v_max + r_0
###############################
#### RAVEN API methods END ####
###############################
##################################
#### Sub Functions Definition ####
##################################
def read_unparameterized_XML(MatrixFileName):
tree = ET.parse(MatrixFileName)
root = tree.getroot()
para_array=[]; UNorm_list = []; XNorm_list = []; XLast_list = []; YNorm_list =[]
A_Re_list = []; B_Re_list = []; C_Re_list = []; A_Im_list = []; B_Im_list = []; C_Im_list = []
for child1 in root:
# print(' ',child1.tag) # DMDrom
for child2 in child1:
# print(' > ', child2.tag) # ROM, DMDcModel
for child3 in child2:
# print(' > > ', child3.tag) # dmdTimeScale, UNorm, XNorm, XLast, Atilde, Btilde, Ctilde
if child3.tag == 'dmdTimeScale':
# print(child3.text)
Temp_txtlist = child3.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
TimeScale = np.asarray(Temp_floatlist)
TimeInterval = TimeScale[1]-TimeScale[0]
# print(TimeInterval) #10.0
if child3.tag == 'UNorm':
for child4 in child3:
# print(' > > > ', child4.tag)
# print(' > > > ', child4.attrib)
para_array.append(0)
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
UNorm_list.append(np.asarray(Temp_floatlist))
para_array = np.asarray(para_array)
# print(para_array)
# print(UNorm_list)
# print(np.shape(self.UNorm))
if child3.tag == 'XNorm':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
XNorm_list.append(np.asarray(Temp_floatlist))
# print(XNorm_list)
# print(np.shape(self.XNorm))
if child3.tag == 'XLast':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
XLast_list.append(np.asarray(Temp_floatlist))
# print(XLast_list)
# print(np.shape(self.XLast))
if child3.tag == 'YNorm':
for child4 in child3:
Temp_txtlist = child4.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
YNorm_list.append(np.asarray(Temp_floatlist))
# print(YNorm_list)
# print(YNorm_list[0])
# print(np.shape(YNorm_list))
# print(np.shape(self.YNorm))
for child4 in child3:
for child5 in child4:
# print(' > > > ', child5.tag) # real, imaginary, matrixShape, formatNote
if child5.tag == 'real':
Temp_txtlist = child5.text.split(' ')
Temp_floatlist = [float(item) for item in Temp_txtlist]
# print(Temp_txtlist)
# print(Temp_floatlist)
if child3.tag == 'Atilde':
A_Re_list.append(
|
np.asarray(Temp_floatlist)
|
numpy.asarray
|
from xml.dom import minidom
import pyperclip
import numpy as np
import json
import tqdm
import multiprocessing
import lief
import re
class ImportsFeatures(object):
dim = 1000
APIsList = []
def FindApi(self,ApiName,ApiList):
for x in range(len(ApiList)) :
if ApiList[x] == ApiName:
return x
return -1
def GetAPIList(self):
f = open("MalNet\\apis.txt", "r")
data = f.readlines()
return [x.strip() for x in data]
def VectorizeFromJson(self,y):
APis_Features = np.zeros(shape=(len(self.APIsList)))
for lib in y["imports"]:
for api in y["imports"][lib]:
apiindex = self.FindApi(api,self.APIsList)
if apiindex != -1:
APis_Features[apiindex] = 1
return APis_Features
def VectorizeFromRaw(self,PE):
APis_Features = np.zeros(shape=(len(self.APIsList)))
for lib in PE.imports:
for api in lib.entries:
apiindex = self.FindApi(api.name,self.APIsList)
if apiindex != -1:
APis_Features[apiindex] = 1
return APis_Features
def __init__(self):
self.APIsList = self.GetAPIList()
class ExtraFeatures(object):
dim = 11
def FindEntryChar(self,PE):
try:
return [str(char).split(".")[1] for char in PE.section_from_rva(PE.entrypoint-PE.optional_header.imagebase).characteristics_lists]
except lief.not_found:
return None
def FindSection(self,y,names):
for x in y["section"]["sections"]:
for i in names:
if i == x["name"]:
return x
return ""
def checklist(self,list,value):
for x in list:
if x == value:
return 1
return 0
def VectorizeFromJson(self,y):
is_EXECUTABLE_IMAGE = self.checklist(y["header"]["coff"]["characteristics"],"EXECUTABLE_IMAGE")
is_DLL = self.checklist(y["header"]["coff"]["characteristics"],"DLL")
is_reloc_stripped = self.checklist(y["header"]["coff"]["characteristics"],"RELOCS_STRIPPED")
is_LARGE_ADDRESS_AWARE = self.checklist(y["header"]["coff"]["characteristics"],"LARGE_ADDRESS_AWARE")
is_Win_GUI = self.checklist(y["header"]["optional"]["subsystem"],"WINDOWS_GUI")
is_HIGH_ENTROPY_VA = self.checklist(y["header"]["optional"]["dll_characteristics"],"HIGH_ENTROPY_VA")
is_NX_COMPAT = self.checklist(y["header"]["optional"]["dll_characteristics"],"NX_COMPAT")
is_DYNAMIC_BASE = self.checklist(y["header"]["optional"]["dll_characteristics"],"DYNAMIC_BASE")
is_GUARD_CF = self.checklist(y["header"]["optional"]["dll_characteristics"],"GUARD_CF")
is_EP_in_writable_section = 0
if y["section"]["entry"] != "":
is_EP_in_writable_section = self.checklist(self.FindSection(y,[y["section"]["entry"]])["props"],"MEM_WRITE")
has_writable_executable_section = 0
for x in y["section"]["sections"]:
if self.checklist(x,"MEM_WRITE") and self.checklist(x,"MEM_EXECUTE") :
has_writable_executable_section = 1
return [is_EXECUTABLE_IMAGE,is_DLL,is_reloc_stripped,is_LARGE_ADDRESS_AWARE,is_Win_GUI,is_HIGH_ENTROPY_VA,
is_NX_COMPAT,is_DYNAMIC_BASE,is_GUARD_CF,is_EP_in_writable_section,has_writable_executable_section]
def VectorizeFromRaw(self,PE):
exe_char = [str(c).split('.')[1] for c in PE.header.characteristics_list]
is_EXECUTABLE_IMAGE = self.checklist(exe_char,"EXECUTABLE_IMAGE")
is_DLL = self.checklist(exe_char,"DLL")
is_reloc_stripped = self.checklist(exe_char,"RELOCS_STRIPPED")
is_LARGE_ADDRESS_AWARE = self.checklist(exe_char,"LARGE_ADDRESS_AWARE")
is_Win_GUI = self.checklist([str(PE.optional_header.subsystem).split('.')[1]],"WINDOWS_GUI")
dll_char = [str(c).split('.')[1] for c in PE.optional_header.dll_characteristics_lists]
is_HIGH_ENTROPY_VA = self.checklist(dll_char,"HIGH_ENTROPY_VA")
is_NX_COMPAT = self.checklist(dll_char,"NX_COMPAT")
is_DYNAMIC_BASE = self.checklist(dll_char,"DYNAMIC_BASE")
is_GUARD_CF = self.checklist(dll_char,"GUARD_CF")
is_EP_in_writable_section = 0
Entry_char = self.FindEntryChar(PE)
if Entry_char != None:
is_EP_in_writable_section = self.checklist(Entry_char,"MEM_WRITE")
has_writable_executable_section = 0
for x in PE.sections:
section_char = [str(char).split(".")[1] for char in x.characteristics_lists]
if self.checklist(section_char,"MEM_WRITE") and self.checklist(section_char,"MEM_EXECUTE"):
has_writable_executable_section = 1
break
return [is_EXECUTABLE_IMAGE,is_DLL,is_reloc_stripped,is_LARGE_ADDRESS_AWARE,is_Win_GUI,is_HIGH_ENTROPY_VA,
is_NX_COMPAT,is_DYNAMIC_BASE,is_GUARD_CF,is_EP_in_writable_section,has_writable_executable_section]
class SectionsFeatures(object):
dim = 21
def checklist(self,list,value):
for x in list:
if x == value:
return 1
return 0
def FindSection(self,y,names):
for x in y["section"]["sections"]:
for i in names:
if i == x["name"]:
return x
return ""
def FindSectionRaw(self,PEsections,names):
for x in PEsections:
for i in names:
if x.name == i:
return x
return None
def VectorizeFromJson(self,y):
VSectionsFeautues = []
code = self.FindSection(y,[".text","CODE"])
if code != "":
VSectionsFeautues[0:6] = [code["size"],code["entropy"],code["vsize"],self.checklist(code["props"],"CNT_CODE"),self.checklist(code["props"],"MEM_EXECUTE"),self.checklist(code["props"],"MEM_READ"),self.checklist(code["props"],"MEM_WRITE")]
else:
VSectionsFeautues[0:6] = [0,0,0,0,0,0,0]
rsrc = self.FindSection(y,[".rsrc"])
if rsrc != "":
VSectionsFeautues[7:13] = [rsrc["size"],rsrc["entropy"],rsrc["vsize"],self.checklist(rsrc["props"],"CNT_CODE"),self.checklist(rsrc["props"],"MEM_EXECUTE"),self.checklist(rsrc["props"],"MEM_READ"),self.checklist(rsrc["props"],"MEM_WRITE")]
else:
VSectionsFeautues[7:13] = [0,0,0,0,0,0,0]
data = self.FindSection(y,[".data","DATA"])
if data != "":
VSectionsFeautues[14:20] = [data["size"],data["entropy"],data["vsize"],self.checklist(data["props"],"CNT_CODE"),self.checklist(data["props"],"MEM_EXECUTE"),self.checklist(data["props"],"MEM_READ"),self.checklist(data["props"],"MEM_WRITE")]
else:
VSectionsFeautues[14:20] = [0,0,0,0,0,0,0]
return VSectionsFeautues
def GetSectionFeaturesRaw(self,PESections,SectionNames):
code = self.FindSectionRaw(PESections,SectionNames)
if code is not None:
code_char = [str(char).split(".")[1] for char in code.characteristics_lists]
return [code.size,code.entropy,code.virtual_size,self.checklist(code_char,"CNT_CODE"),self.checklist(code_char,"MEM_EXECUTE"),self.checklist(code_char,"MEM_READ"),self.checklist(code_char,"MEM_WRITE")]
else:
return [0,0,0,0,0,0,0]
def VectorizeFromRaw(self,PE):
VSectionsFeautues = []
VSectionsFeautues[0:6] = self.GetSectionFeaturesRaw(PE.sections,[".text","CODE"])
VSectionsFeautues[7:13] = self.GetSectionFeaturesRaw(PE.sections,[".rsrc"])
VSectionsFeautues[14:20] = self.GetSectionFeaturesRaw(PE.sections,[".data","DATA"])
return VSectionsFeautues
class DataDirectoryFeatures(object):
dim = 15 * 2
def VectorizeFromJson(self,y):
VDataDirectory = []
for x in y["datadirectories"]:
VDataDirectory.append(x["size"])
VDataDirectory.append(x["virtual_address"])
if len(VDataDirectory) == 0:
return [0] * 30
return VDataDirectory
def VectorizeFromRaw(self,PE):
VDataDirectory = []
for data_directory in PE.data_directories:
d = data_directory.type
VDataDirectory.append(data_directory.size)
VDataDirectory.append(data_directory.rva)
return VDataDirectory
class HeadersFeatures(object):
dim = 29
def GetEntropy(self,allstrings):
# map printable characters 0x20 - 0x7f to an int array consisting of
# 0-95, inclusive
as_shifted_string = [b - ord(b'\x20') for b in b''.join(allstrings)]
c = np.bincount(as_shifted_string, minlength=96) # histogram count
# distribution of characters in printable strings
csum = c.sum()
p = c.astype(np.float32) / csum
wh = np.where(c)[0]
H = np.sum(-p[wh] * np.log2(p[wh])) # entropy
return H
def VectorizeFromJson(self,y):
return [y["strings"]["numstrings"],y["strings"]["avlength"],y["strings"]["printables"],y["strings"]["entropy"],y["strings"]["paths"],y["strings"]["urls"],y["strings"]["registry"],y["strings"]["MZ"],
y["general"]["size"], y["general"]["vsize"], y["general"]["has_debug"], y["general"]["exports"], y["general"]["imports"], y["general"]["has_relocations"], y["general"]["has_resources"], y["general"]["has_signature"], y["general"]["has_tls"], y["general"]["symbols"],
y["header"]["optional"]["major_image_version"],y["header"]["optional"]["minor_image_version"],y["header"]["optional"]["major_linker_version"],y["header"]["optional"]["minor_linker_version"],y["header"]["optional"]["major_operating_system_version"],y["header"]["optional"]["minor_operating_system_version"],y["header"]["optional"]["major_subsystem_version"],y["header"]["optional"]["minor_subsystem_version"],
y["header"]["optional"]["sizeof_code"],y["header"]["optional"]["sizeof_headers"],y["header"]["optional"]["sizeof_heap_commit"]]
def VectorizeFromRaw(self,Bytes,PE):
allstrings = re.compile(b'[\x20-\x7f]{5,}').findall(Bytes)
string_lengths = [len(s) for s in allstrings]
paths = len(re.compile(b'c:\\\\', re.IGNORECASE).findall(Bytes))
urls = len(re.compile(b'https?://', re.IGNORECASE).findall(Bytes))
registry = len(re.compile(b'HKEY_').findall(Bytes))
MZ = len(re.compile(b'MZ').findall(Bytes))
return [len(allstrings),sum(string_lengths) / len(allstrings),len(allstrings),
self.GetEntropy(allstrings),paths,urls,registry,MZ,
len(Bytes),int(PE.virtual_size),int(PE.has_debug),len(PE.exported_functions),len(PE.imported_functions),
int(PE.has_relocations),int(PE.has_resources),int(PE.has_signature),int(PE.has_tls),len(PE.symbols),
PE.optional_header.major_image_version,PE.optional_header.minor_image_version,PE.optional_header.major_linker_version,PE.optional_header.minor_linker_version,PE.optional_header.major_operating_system_version,PE.optional_header.minor_operating_system_version,PE.optional_header.major_subsystem_version,PE.optional_header.minor_subsystem_version,
PE.optional_header.sizeof_code,PE.optional_header.sizeof_headers,PE.optional_header.sizeof_heap_commit]
class EntropyFeatures(object):
dim = 256
step = 1024
window = 2048
def _entropy_bin_counts(self, block):
# coarse histogram, 16 bytes per bin
c = np.bincount(block >> 4, minlength=16) # 16-bin histogram
p = c.astype(np.float32) / self.window
wh = np.where(c)[0]
H = np.sum(-p[wh] * np.log2(p[wh])) * 2 # * x2 b.c. we reduced information by half: 256 bins (8 bits) to 16 bins (4
# bits)
Hbin = int(H * 2) # up to 16 bins (max entropy is 8 bits)
if Hbin == 16: # handle entropy = 8.0 bits
Hbin = 15
return Hbin, c
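    # Hedged note: given window=2048 and step=1024 above, the raw vectorizer (completed
    # below) presumably slides a window-sized block over the file in step-sized strides,
    # calls _entropy_bin_counts on each block, and adds each count vector c into row Hbin
    # of a 16x16 (entropy bin x coarse byte-value bin) histogram, which is why dim = 256.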
def VectorizeFromJson(self,y):
return y["byteentropy"]
def VectorizeFromRaw(self, bytes):
output =
|
np.zeros((16, 16), dtype=np.int)
|
numpy.zeros
|
"""Tests for pixel alogrithms"""
# pylint: disable=no-name-in-module,redefined-outer-name,no-value-for-parameter
# pyright: reportGeneralTypeIssues=false
import numpy as np
import pytest
from seasmon_xr.ops import (
autocorr,
autocorr_1d,
autocorr_tyx,
lroo,
ws2dgu,
ws2doptv,
ws2doptvp,
ws2doptvplc,
ws2dpgu,
)
from seasmon_xr.ops.spi import brentq, gammafit, spifun
@pytest.fixture
def ts():
"""Testdata"""
np.random.seed(42)
x = np.random.gamma(1, size=10)
return x
def test_lroo(ts):
x_lroo = lroo(np.array((ts > 0.9) * 1, dtype="uint8"))
assert x_lroo == 3
def pearson_reference(X, Y):
return ((X - X.mean()) * (Y - Y.mean())).mean() / (X.std() * Y.std())
def autocorr_1d_reference(x, nodata=None):
if nodata is not None:
_x = x.astype("float64")
_x[x == nodata] = np.nan
x = _x
X = x[:-1]
Y = x[1:]
if np.isnan(x).any():
X, Y = X.copy(), Y.copy()
X[np.isnan(X)] = np.nanmean(X)
Y[np.isnan(Y)] = np.nanmean(Y)
return pearson_reference(X, Y)
def test_autocorr(ts):
ac = autocorr(ts.reshape(1, 1, -1))
np.testing.assert_almost_equal(ac, 0.00398337)
np.testing.assert_almost_equal(autocorr_1d(ts), 0.00398337)
np.testing.assert_almost_equal(autocorr_1d_reference(ts), 0.00398337)
np.testing.assert_almost_equal(autocorr_tyx(ts.reshape(-1, 1, 1)), 0.00398337)
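# Note (illustration, not part of the original test module): for a 1-D series x, the lag-1
# autocorrelation computed by autocorr_1d_reference(x) equals np.corrcoef(x[:-1], x[1:])[0, 1],
# i.e. the Pearson correlation between the series and its one-step shift.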
def test_autocorr_nodata(ts_ndvi):
nodata, ts = ts_ndvi
rr = autocorr_1d(ts, nodata)
rr_ref = autocorr_1d_reference(ts, nodata)
assert rr == pytest.approx(rr_ref, rel=1e-3)
def test_brentq():
x = brentq(xa=0.6446262296476516, xb=1.5041278691778537, s=0.5278852360624721)
assert x == pytest.approx(1.083449238500003)
def test_gammafit(ts):
parameters = gammafit(ts)
assert parameters == pytest.approx((1.083449238500003, 0.9478709674697126))
def test_spi(ts):
xspi = spifun(ts.reshape(1, 1, -1))
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[-382.0, 1654.0, 588.0, 207.0, -1097.0, -1098.0, -1677.0, 1094.0, 213.0, 514.0],
)
def test_spi_nofit(ts):
xspi = spifun(ts.reshape(1, 1, -1), a=1, b=2)
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[
-809.0,
765.0,
-44.0,
-341.0,
-1396.0,
-1396.0,
-1889.0,
343.0,
-336.0,
-101.0,
],
)
def test_spi_selfit(ts):
xspi = spifun(ts.reshape(1, 1, -1), cal_start=0, cal_stop=3)
assert xspi.shape == (1, 1, 10)
np.testing.assert_array_equal(
xspi[0, 0, :],
[
-1211.0,
1236.0,
-32.0,
-492.0,
-2099.0,
-2099.0,
-2833.0,
572.0,
-484.0,
-120.0,
],
)
def test_spi_selfit_2(ts):
cal_start = 2
cal_stop = 8
a, b = gammafit(ts[cal_start:cal_stop])
xspi_ref = spifun(ts.reshape(1, 1, -1), a=a, b=b)
xspi = spifun(ts.reshape(1, 1, -1), cal_start=cal_start, cal_stop=cal_stop)
np.testing.assert_equal(xspi, xspi_ref)
def test_ws2dgu(ts):
_ts = ts * 10
z = ws2dgu(_ts, 10, 0)
np.testing.assert_array_equal(z, [15, 14, 12, 9, 8, 7, 7, 9, 10, 12])
def test_ws2dpgu(ts):
_ts = ts * 10
z = ws2dpgu(_ts, 10, 0, 0.9)
np.testing.assert_array_equal(z, [26, 24, 22, 20, 18, 17, 16, 15, 15, 14])
def test_ws2doptv(ts):
_ts = ts * 10
z, l = ws2doptv(_ts, 0, np.arange(-2, 2))
np.testing.assert_array_equal(z, [10, 21, 16, 9, 3, 2, 5, 13, 12, 12])
assert l == pytest.approx(0.31622776601683794)
def test_ws2doptvp(ts):
_ts = ts * 10
z, l = ws2doptvp(_ts, 0, 0.9,
np.arange(-2, 2)
numpy.arange
import math
import numpy as np
# activation function
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# generate a sine wave of length 200 -- np.arange(200) supplies the sample indices.
# the resulting array has shape (200,)
sin_wave = np.array([math.sin(x) for x in np.arange(200)])
# arrays holding our data
X = []
Y = []
seq_len = 50
# we get 150 num_records
num_records = len(sin_wave) - seq_len
# we build 100 training windows: X gets the slices [0:50], [1:51], [2:52], ... [99:149]
# (the end index of each slice is excluded, as usual for Python slicing).
# the element just after each window (index 50 for the first window) goes into Y, because that is the value we want to predict.
for i in range(num_records - 50):
X.append(sin_wave[i:i + seq_len])
Y.append(sin_wave[i + seq_len])
X = np.array(X)
# this function adds brackets at a certain level of the array.
# axis = 0 is for outer brackets. So it adds [ array ].
# axis = 1 is for some inner brackets. So for a 1D array, it adds brackets around all indexes in that 1D array. [ [array[0]], [array[1]],...]
# axis = 2 is for some deeper inner brackets. It needs to be at least a 2D array, so that it puts it on each of the indexes of that 2D array.
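# For example (illustration only): a 1-D array a of shape (3,) becomes shape (1, 3) with
# np.expand_dims(a, axis=0) and shape (3, 1) with axis=1. Below, axis=2 turns X from
# (100, 50) into (100, 50, 1), i.e. one feature per timestep.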
X = np.expand_dims(X, axis=2)
Y = np.array(Y)
Y = np.expand_dims(Y, axis=1)
# making the validation sets. Same thing as before, using the next possible 50 sets we can create out of the sin wave.
X_val = []
Y_val = []
for i in range(num_records - 50, num_records):
X_val.append(sin_wave[i:i + seq_len])
Y_val.append(sin_wave[i + seq_len])
X_val = np.array(X_val)
X_val = np.expand_dims(X_val, axis=2)
Y_val = np.array(Y_val)
Y_val = np.expand_dims(Y_val, axis=1)
# learning rate of the algorithm
learning_rate = 0.0001
# nb of epochs for the machine to learn
nepoch = 25
# length of sequence
T = 50
# dimension of the hidden layer
hidden_dim = 100
# dimension of the output layer
output_dim = 1
# back propagation through time value
bptt_truncate = 5
# gradient value clipping: gradients below -10 are set to min_clip_value, and gradients above 10 are set to max_clip_value
min_clip_value = -10
max_clip_value = 10
# different weights layers
# input-to-hidden weights with shape (hidden_dim, T), matching the sequence length
U = np.random.uniform(0, 1, (hidden_dim, T))
# hidden_dim x hidden_dim for the hidden layer
W = np.random.uniform(0, 1, (hidden_dim, hidden_dim))
# output layer weights.
V = np.random.uniform(0, 1, (output_dim, hidden_dim))
# epochs for the machine to learn
for epoch in range(nepoch):
# LOSS ON TRAINING DATA
# check loss on train
loss = 0.0
# do a forward pass to get prediction
for i in range(Y.shape[0]):
x, y = X[i], Y[i] # get input, output values of each record
# here, prev-s is the value of the previous activation of hidden layer; which is initialized as all zeros
prev_s = np.zeros((hidden_dim, 1))
for t in range(T):
# this is my sequence. I am going to take a single value of the sequence.
new_input = np.zeros(x.shape) # we then do a forward pass for every timestep in the sequence
new_input[t] = x[t] # for this, we define a single input for that timestep
# now we multiply our value by our weights of the first level
mulu = np.dot(U, new_input)
# we then multiply our weights of the second level by the previous activation of the hidden layer.
mulw = np.dot(W, prev_s)
# we add those two matrices
add = mulw + mulu
# and we activate that result.
s = sigmoid(add)
# we then process the result with our weights of the level V.
mulv = np.dot(V, s)
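# Shape check (illustration): U is (100, 50) and new_input is (50, 1), so mulu and mulw are
# (100, 1); V is (1, 100), so mulv is the (1, 1) prediction for this timestep.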
# so this is the activation result of the mulu + mulw result.
prev_s = s
# squared-error loss between the target y and the final prediction mulv from the loop above
loss_per_record = (y - mulv) ** 2 / 2
loss += loss_per_record
loss = loss / float(y.shape[0])
# LOSS ON VALIDATION DATA
# (Same thing as was done on training data)
# check loss on val
val_loss = 0.0
for i in range(Y_val.shape[0]):
x, y = X_val[i], Y_val[i]
prev_s = np.zeros((hidden_dim, 1))
for t in range(T):
new_input = np.zeros(x.shape)
new_input[t] = x[t]
mulu =
np.dot(U, new_input)
numpy.dot
#!/usr/local/bin/python3
import sklearn
import sklearn.model_selection as skMS
import sklearn.preprocessing as skPP
import numpy as np
import pandas as pd
def createDataframe():
"""
Reads csv file containing names and ethnicities
Returns:
pandas dataframe with cols [['name', 'ethnicity']]
"""
return pd.read_csv("names.txt")
def encodeNames(df):
"""
Converts an Nx2 dataframe (N samples) into an Nx(D+1) dataframe,
where the D columns hold the per-character encoding of each name, zero-padded
to the maximum name length, and the final column holds the label encoding
Input:
dataframe: pandas dataframe with cols [['name', 'ethnicity']]
Returns:
encodedDataFrame: pandas dataframe with dims NxD with all characters encoded
labelDataFrame: pandas dataframe with dims Nx1 with labels
nameEncoder: scikit name encoding model
labelEncoder: scikit label encoding model
labelBinarizer: Transforms ints to one hot encodings
maxCharCount: number of chars per name with padding
"""
# Get all characters that need to be encoded
# Get max name character count
charSet = set()
maxCharCount = 0
for name in df['name']:
for char in name:
charSet.add(char)
if len(name) > maxCharCount:
maxCharCount = len(name)
chars = list(charSet)
# Name Encoding
nameEncoder = skPP.LabelEncoder() # char based
nameEncoder.fit(chars)
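# Illustration (assumed sklearn LabelEncoder behaviour, not part of the original script): after
# fitting on e.g. ['a', 'b', 'c'], nameEncoder.transform(['b', 'a']) returns array([1, 0]) --
# each character maps to a stable integer index, which is presumably what fills encodedNames below.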
# Encode names and expand to list of size maxCharCount
# Can probably do this using pandas apply more efficiently
encodedNames = -1*
np.ones([df.shape[0], maxCharCount])
numpy.ones
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2021/1/18 10:23 AM
# @Author : <NAME>
import rospy
import tf
from tf.transformations import quaternion_matrix
import numpy as np
import geometry_msgs.msg
# Listen for TF transforms, obtain the translation and quaternion-based rotation between the generalized coordinate frames, and broadcast them
class GetTransformationMatrix(object):
def __init__(self):
self.all_frame_list = ['uams/base_link', 'link1', 'link2',
'link3', 'link4', 'end_effector_link']
# Map each joint coordinate frame to a numeric index
self.frame_dict = {
0: 'uams/base_link',
1: 'link1',
2: 'link2',
3: 'link3',
4: 'link4',
5: 'end_effector_link'}
sorted(self.frame_dict.keys())
self.listener = tf.TransformListener()
self.rate = rospy.Rate(1.0)
self.broad = tf.TransformBroadcaster()
self.temp = geometry_msgs.msg.TransformStamped()
def is_interlinked_frame(self, link_a, link_b):
# Check whether the two frames are connected in the TF tree
if self.listener.canTransform(link_a, link_b, rospy.Time(0)):
return True
else:
return False
def compute_jacobian_mat(self,mat_list):
mat_list = [np.transpose(x) for x in mat_list]
def _stitch_by_row(mat,r_index):
zero = np.array([[0], [0], [0], [0], [0], [0]])
I = np.eye(6)
z = np.array([[0], [0], [0], [0], [0], [1]])
def _dot(a,b):
c = np.dot(a,b)
return c
if r_index == 1:
row_temp = I
for i in range(4):
row_temp = np.hstack((row_temp,zero))
elif r_index == 2:
row_temp = np.hstack((mat[0], z))
for i in range(3):
row_temp = np.hstack((row_temp, zero))
elif r_index == 3:
row_temp = np.hstack((mat[1], _dot(mat[5],z)))
row_temp = np.hstack((row_temp,z))
for i in range(2):
row_temp = np.hstack((row_temp, zero))
elif r_index == 4:
row_temp = np.hstack((mat[2], _dot(mat[6], z)))
row_temp = np.hstack((row_temp, _dot(mat[9], z)))
row_temp = np.hstack((row_temp, z))
row_temp = np.hstack((row_temp, zero))
elif r_index ==5:
row_temp = np.hstack((mat[3], _dot(mat[7], z)))
row_temp = np.hstack((row_temp, _dot(mat[10], z)))
row_temp = np.hstack((row_temp, _dot(mat[12], z)))
row_temp =
np.hstack((row_temp, z))
numpy.hstack
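# Illustrative sketch (separate from the node above): the Jacobian rows in compute_jacobian_mat
# are built with np.hstack, which concatenates along columns, so chaining a 6x6 block with 6x1
# column vectors yields one 6x(6+k) row. A minimal check:
import numpy as np

I6 = np.eye(6)
z = np.zeros((6, 1))
row = np.hstack((I6, z, z))
print(row.shape)  # (6, 8)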
import unittest
import numpy as np
from random import uniform
try:
from dataUncert.variable import variable
except ModuleNotFoundError:
from variable import variable
class test(unittest.TestCase):
def testSingleNumber(self):
A = variable(1.3, 'm')
B = variable(2.0, 'm', 0.01)
C = variable([1.0, 1.3], 'L/min', np.array([20, 30]))
D = variable(np.array([11, 1111]), 'L/min', [2.1, 3.9])
self.assertEqual(A.value, 1.3)
self.assertEqual(A.unit, 'm')
self.assertEqual(A.uncert, 0)
self.assertEqual(B.value, 2.0)
self.assertEqual(B.unit, 'm')
self.assertEqual(B.uncert, 0.01)
np.testing.assert_equal(C.value, [1.0, 1.3])
self.assertEqual(C.unit, 'L/min')
np.testing.assert_equal(C.uncert, [20, 30])
np.testing.assert_equal(D.value, [11.0, 1111.0])
self.assertEqual(D.unit, 'L/min')
np.testing.assert_equal(D.uncert, [2.1, 3.9])
with self.assertRaises(Exception) as context:
variable(1.3, 'm', 'hej')
self.assertTrue("could not convert string to float: 'hej'" in str(context.exception))
with self.assertRaises(Exception) as context:
variable('med', 'm', 1.0)
self.assertTrue("could not convert string to float: 'med'" in str(context.exception))
with self.assertRaises(Exception) as context:
variable(1.3, 'm', [1.0, 2.3])
self.assertTrue("The value is a number but the uncertanty is a <class 'list'>" in str(context.exception))
with self.assertRaises(Exception) as context:
variable(1.3, 'm', np.array([1.0, 2.3]))
self.assertTrue("The value is a number but the uncertanty is a <class 'numpy.ndarray'>" in str(context.exception))
with self.assertRaises(Exception) as context:
variable(np.array([1.0, 2.3]), 'm', 1.5)
self.assertTrue("The value is a list-like object but the uncertanty is a number" in str(context.exception))
with self.assertRaises(Exception) as context:
variable([1.0, 2.3], 'm', 1.5)
self.assertTrue("The value is a list-like object but the uncertanty is a number" in str(context.exception))
def test_add(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'L/min', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'L/min', uncert=[53.9, 24.75, 6.4])
C = A + B
self.assertAlmostEqual(C.value, 12.3 + 745.1)
self.assertEqual(C.unit, 'L/min')
self.assertAlmostEqual(C.uncert, np.sqrt((1 * 2.6)**2 + (1 * 53.9)**2))
C.convert('m3/s')
self.assertAlmostEqual(C.value, (12.3 + 745.1) / 1000 / 60)
self.assertEqual(C.unit, 'm3/s')
self.assertAlmostEqual(C.uncert, np.sqrt((1 * 2.6 / 1000 / 60)**2 + (1 * 53.9 / 1000 / 60)**2))
C_vec = A_vec + B_vec
np.testing.assert_array_equal(C_vec.value, np.array([12.3 + 745.1, 54.3 + 496.13, 91.3 + 120.54]))
self.assertEqual(C_vec.unit, 'L/min')
np.testing.assert_array_equal(
C_vec.uncert,
np.array([
np.sqrt((1 * 2.6)**2 + (1 * 53.9)**2),
np.sqrt((1 * 5.4)**2 + (1 * 24.75)**2),
np.sqrt((1 * 10.56)**2 + (1 * 6.4)**2),
]))
C_vec.convert('mL/h')
np.testing.assert_almost_equal(C_vec.value, np.array([(12.3 + 745.1) * 1000 * 60, (54.3 + 496.13) * 1000 * 60, (91.3 + 120.54) * 1000 * 60]))
self.assertEqual(C_vec.unit, 'mL/h')
np.testing.assert_almost_equal(
C_vec.uncert,
np.array([
np.sqrt((1 * 2.6 * 1000 * 60)**2 + (1 * 53.9 * 1000 * 60)**2),
np.sqrt((1 * 5.4 * 1000 * 60)**2 + (1 * 24.75 * 1000 * 60)**2),
np.sqrt((1 * 10.56 * 1000 * 60)**2 + (1 * 6.4 * 1000 * 60)**2),
]))
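# Note (illustration, not part of the original tests): the expected uncertainties above follow
# first-order Gaussian error propagation; for C = A + B both partial derivatives are 1, so
# uncert(C) = sqrt(uncert(A)**2 + uncert(B)**2), e.g. np.sqrt(2.6**2 + 53.9**2) in the scalar case.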
def test_sub(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'L/min', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'L/min', uncert=[53.9, 24.75, 6.4])
C = A - B
self.assertAlmostEqual(C.value, 12.3 - 745.1)
self.assertEqual(C.unit, 'L/min')
self.assertAlmostEqual(C.uncert, np.sqrt((1 * 2.6)**2 + (1 * 53.9)**2))
C.convert('kL/s')
self.assertAlmostEqual(C.value, (12.3 - 745.1) / 1000 / 60)
self.assertEqual(C.unit, 'kL/s')
self.assertAlmostEqual(C.uncert, np.sqrt((1 * 2.6 / 1000 / 60)**2 + (1 * 53.9 / 1000 / 60)**2))
C_vec = A_vec - B_vec
np.testing.assert_array_equal(C_vec.value, np.array([12.3 - 745.1, 54.3 - 496.13, 91.3 - 120.54]))
self.assertEqual(C_vec.unit, 'L/min')
np.testing.assert_array_equal(
C_vec.uncert,
np.array([
np.sqrt((1 * 2.6)**2 + (1 * 53.9)**2),
np.sqrt((1 * 5.4)**2 + (1 * 24.75)**2),
np.sqrt((1 * 10.56)**2 + (1 * 6.4)**2),
]))
C_vec.convert('mm3 / h')
np.testing.assert_almost_equal(C_vec.value, np.array([12.3 - 745.1, 54.3 - 496.13, 91.3 - 120.54]) * 1000000 * 60, decimal=5)
self.assertEqual(C_vec.unit, 'mm3/h')
np.testing.assert_almost_equal(
C_vec.uncert,
np.array([
np.sqrt((1 * 2.6 * 1000000 * 60)**2 + (1 * 53.9 * 1000000 * 60)**2),
np.sqrt((1 * 5.4 * 1000000 * 60)**2 + (1 * 24.75 * 1000000 * 60)**2),
np.sqrt((1 * 10.56 * 1000000 * 60)**2 + (1 * 6.4 * 1000000 * 60)**2),
]), decimal=5)
with self.assertRaises(Exception) as context:
A.convert('m')
self.assertTrue('You cannot convert from [L/min] to [m]' in str(context.exception))
def test_add_with_different_units(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'm', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm', uncert=[53.9, 24.75, 6.4])
with self.assertRaises(Exception) as context:
A + B
self.assertTrue('You tried to add a variable in [L/min] to a variable in [m], but the units does not match' in str(context.exception))
with self.assertRaises(Exception) as context:
A_vec + B_vec
self.assertTrue('You tried to add a variable in [L/min] to a variable in [m], but the units does not match' in str(context.exception))
with self.assertRaises(Exception) as context:
A.convert('m')
self.assertTrue('You cannot convert from [L/min] to [m]' in str(context.exception))
def test_sub_with_different_units(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'm', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm', uncert=[53.9, 24.75, 6.4])
with self.assertRaises(Exception) as context:
A - B
self.assertTrue('You tried to subtract a variable in [m] from a variable in [L/min], but the units does not match' in str(context.exception))
with self.assertRaises(Exception) as context:
A_vec - B_vec
self.assertTrue('You tried to subtract a variable in [m] from a variable in [L/min], but the units does not match' in str(context.exception))
with self.assertRaises(Exception) as context:
A.convert('m')
self.assertTrue('You cannot convert from [L/min] to [m]' in str(context.exception))
def test_multiply(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'm', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm', uncert=[53.9, 24.75, 6.4])
C = A * B
self.assertAlmostEqual(C.value, 12.3 * 745.1)
self.assertEqual(C.unit, 'L-m/min')
self.assertAlmostEqual(C.uncert, np.sqrt((745.1 * 2.6)**2 + (12.3 * 53.9)**2))
C_vec = A_vec * B_vec
np.testing.assert_array_equal(C_vec.value, np.array([12.3 * 745.1, 54.3 * 496.13, 91.3 * 120.54]))
self.assertEqual(C_vec.unit, 'L-m/min')
np.testing.assert_array_equal(
C_vec.uncert,
np.array([
np.sqrt((745.1 * 2.6)**2 + (12.3 * 53.9)**2),
np.sqrt((496.13 * 5.4)**2 + (54.3 * 24.75)**2),
np.sqrt((120.54 * 10.56)**2 + (91.3 * 6.4)**2),
]))
C_vec.convert('m3-km / s')
np.testing.assert_array_equal(C_vec.value, np.array([12.3 * 745.1, 54.3 * 496.13, 91.3 * 120.54]) / 1000 / 1000 / 60)
self.assertEqual(C_vec.unit, 'm3-km/s')
np.testing.assert_almost_equal(
C_vec.uncert,
np.array([
np.sqrt((745.1 / 1000 * 2.6 / 1000 / 60)**2 + (12.3 / 1000 / 60 * 53.9 / 1000)**2),
np.sqrt((496.13 / 1000 * 5.4 / 1000 / 60)**2 + (54.3 / 1000 / 60 * 24.75 / 1000)**2),
np.sqrt((120.54 / 1000 * 10.56 / 1000 / 60)**2 + (91.3 / 1000 / 60 * 6.4 / 1000)**2),
]), decimal=7)
def test_divide(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'm', uncert=53.9)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm', uncert=[53.9, 24.75, 6.4])
C = A / B
self.assertAlmostEqual(C.value, 12.3 / 745.1)
self.assertEqual(C.unit, 'L/min-m')
self.assertAlmostEqual(C.uncert, np.sqrt((1 / 745.1 * 2.6)**2 + (12.3 / (745.1**2) * 53.9)**2))
C.convert('m3/h-mm')
self.assertAlmostEqual(C.value, 12.3 / 745.1 / 1000 * 60 / 1000)
self.assertEqual(C.unit, 'm3/h-mm')
self.assertAlmostEqual(C.uncert, np.sqrt((1 / (745.1 * 1000) * 2.6 / 1000 * 60)**2 + (12.3 / ((745.1)**2) * 53.9 / 1000 * 60 / 1000)**2))
C_vec = A_vec / B_vec
np.testing.assert_array_equal(C_vec.value, np.array([12.3 / 745.1, 54.3 / 496.13, 91.3 / 120.54]))
self.assertEqual(C_vec.unit, 'L/min-m')
np.testing.assert_array_equal(
C_vec.uncert,
np.array([
np.sqrt((1 / 745.1 * 2.6)**2 + (12.3 / (745.1)**2 * 53.9)**2),
np.sqrt((1 / 496.13 * 5.4)**2 + (54.3 / (496.13)**2 * 24.75)**2),
np.sqrt((1 / 120.54 * 10.56)**2 + (91.3 / (120.54)**2 * 6.4)**2),
]))
C_vec.convert('m3 / h -mm')
np.testing.assert_almost_equal(C_vec.value, np.array([12.3 / 745.1, 54.3 / 496.13, 91.3 / 120.54]) / 1000 * 60 / 1000)
self.assertEqual(C_vec.unit, 'm3/h-mm')
np.testing.assert_almost_equal(
C_vec.uncert,
np.array([
np.sqrt((1 / 745.1 * 2.6 / 1000 * 60 / 1000)**2 + (12.3 / (745.1)**2 * 53.9 / 1000 * 60 / 1000)**2),
np.sqrt((1 / 496.13 * 5.4 / 1000 * 60 / 1000)**2 + (54.3 / (496.13)**2 * 24.75 / 1000 * 60 / 1000)**2),
np.sqrt((1 / 120.54 * 10.56 / 1000 * 60 / 1000)**2 + (91.3 / (120.54)**2 * 6.4 / 1000 * 60 / 1000)**2),
]))
def test_add_unit_order(self):
A = variable(10, 'm-K')
B = variable(3, 'K-m')
A_vec = variable([12.3, 54.3, 91.3], 'K-m', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm-K', uncert=[53.9, 24.75, 6.4])
C = A + B
C_vec = A_vec + B_vec
def test_sub_unit_order(self):
A = variable(10, 'm-K')
B = variable(3, 'K-m')
A_vec = variable([12.3, 54.3, 91.3], 'K-m', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm-K', uncert=[53.9, 24.75, 6.4])
C = A - B
C_vec = A_vec - B_vec
def test_pow(self):
A = variable(12.3, 'L/min', uncert=2.6)
B = variable(745.1, 'm', uncert=53.9)
C = variable(745.1, '1', uncert=53.9)
D = variable(0.34, '1', uncert=0.01)
A_vec = variable([12.3, 54.3, 91.3], 'L/min', uncert=[2.6, 5.4, 10.56])
B_vec = variable([745.1, 496.13, 120.54], 'm', uncert=[53.9, 24.75, 6.4])
C_vec = variable([745.1, 496.13, 120.54], '1', uncert=[53.9, 24.75, 6.4])
D_vec = variable([0.34, 0.64, 0.87], '1', uncert=[0.01, 0.084, 0.12])
with self.assertRaises(Exception) as context:
A ** B
self.assertTrue('The exponent can not have a unit' in str(context.exception))
with self.assertRaises(Exception) as context:
A_vec ** B_vec
self.assertTrue('The exponent can not have a unit' in str(context.exception))
E = C**D
self.assertAlmostEqual(E.value, 745.1**0.34)
self.assertEqual(E.unit, '1')
self.assertAlmostEqual(E.uncert, np.sqrt((0.34 * 745.1**(0.34 - 1) * 53.9)**2 + (745.1**0.34 * np.log(745.1) * 0.01)**2))
E_vec = C_vec**D_vec
np.testing.assert_array_equal(E_vec.value, np.array([745.1**0.34, 496.13**0.64, 120.54**0.87]))
self.assertEqual(E_vec.unit, '1')
np.testing.assert_array_equal(
E_vec.uncert,
np.array([
np.sqrt((0.34 * 745.1**(0.34 - 1) * 53.9)**2 + (745.1**0.34 * np.log(745.1) * 0.01)**2),
np.sqrt((0.64 * 496.13**(0.64 - 1) * 24.75)**2 + (496.13**0.64 * np.log(496.13) * 0.084)**2),
np.sqrt((0.87 * 120.54**(0.87 - 1) * 6.4)**2 + (120.54**0.87 * np.log(120.54) * 0.12)**2)
]))
F = A**2
self.assertAlmostEqual(F.value, (12.3)**2)
self.assertEqual(F.unit, 'L2/min2')
self.assertAlmostEqual(F.uncert, np.sqrt((2 * 12.3**(2 - 1) * 2.6)**2))
F.convert('m6/s2')
self.assertAlmostEqual(F.value, (12.3 / 1000 / 60)**2)
self.assertEqual(F.unit, 'm6/s2')
self.assertAlmostEqual(F.uncert, np.sqrt((2 * (12.3 / 1000 / 60)**(2 - 1) * 2.6 / 1000 / 60)**2))
F_vec = A_vec**2
np.testing.assert_array_equal(F_vec.value, np.array([(12.3)**2, 54.3**2, 91.3**2]))
self.assertEqual(F_vec.unit, 'L2/min2')
np.testing.assert_array_equal(
F_vec.uncert,
np.array([
np.sqrt((2 * 12.3**(2 - 1) * 2.6)**2),
np.sqrt((2 * 54.3**(2 - 1) * 5.4)**2),
np.sqrt((2 * 91.3**(2 - 1) * 10.56)**2)
]))
F_vec.convert('m6 / s2')
np.testing.assert_almost_equal(F_vec.value, np.array([(12.3 / 1000 / 60)**2, (54.3 / 1000 / 60)**2, (91.3 / 1000 / 60)**2]))
self.assertEqual(F_vec.unit, 'm6/s2')
np.testing.assert_almost_equal(
F_vec.uncert,
np.array([
np.sqrt((2 * 12.3 / 1000 / 60**(2 - 1) * 2.6 / 1000 / 60)**2),
np.sqrt((2 * 54.3 / 1000 / 60**(2 - 1) * 5.4 / 1000 / 60)**2),
np.sqrt((2 * 91.3 / 1000 / 60**(2 - 1) * 10.56 / 1000 / 60)**2)
numpy.sqrt
# -*- coding: utf-8 -*-
"""
"""
from __future__ import division
import sys
sys.path.insert(0,'../../')
sys.path.insert(0,'..')
import numpy as np
#import mayavi.mlab as mlab
#from scipy.stats import norm
#import matplotlib as plt
from mpl_toolkits.mplot3d import Axes3D
from bayes_opt import BayesOpt
from bayes_opt.batchBO.batch_pvrs import BatchPVRS
#from bayes_opt import PradaBayOptBatch
import matplotlib.patches as patches
import matplotlib.pyplot as plt
from matplotlib import gridspec
from sklearn.metrics.pairwise import euclidean_distances
from bayes_opt.acquisition_maximization import acq_max
from scipy.stats import norm as norm_dist
import random
from bayes_opt.acquisition_functions import AcquisitionFunction, unique_rows
import os
from pylab import *
cdict = {'red': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.7),
(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),
(0.5, 1.0, 0.0),
(1.0, 0.5, 1.0))}
#my_cmap = matplotlib.colors.LinearSegmentedColormap('my_colormap',cdict,256)
#my_cmap = plt.get_cmap('cubehelix')
my_cmap = plt.get_cmap('Blues')
counter = 0
#class Visualization(object):
#def __init__(self,bo):
#self.plot_gp=0
#self.posterior=0
#self.myBo=bo
def plot_bo(bo):
if bo.dim==1:
plot_bo_1d(bo)
if bo.dim==2:
plot_bo_2d(bo)
def plot_acq_bo_1d(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(10, 10))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(8, 1, height_ratios=[3, 1,1,1,1,1,1,1])
axis = plt.subplot(gs[0])
acq_UCB = plt.subplot(gs[1])
acq_EI = plt.subplot(gs[2])
acq_POI = plt.subplot(gs[3])
#acq_TS2 = plt.subplot(gs[5])
acq_ES = plt.subplot(gs[4])
acq_PES = plt.subplot(gs[5])
acq_MRS = plt.subplot(gs[6])
acq_Consensus = plt.subplot(gs[7])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
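# (Clarifying note, illustration of the intent): concatenating x forwards with x reversed, and the
# lower confidence bound with the reversed upper bound, traces the closed outline of the shaded
# 95% interval polygon that axis.fill draws below.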
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_UCB.plot(x_original, utility, label='Utility Function', color='purple')
acq_UCB.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
#acq_UCB.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_UCB.set_xlim((np.min(x_original), np.max(x_original)))
acq_UCB.set_ylabel('UCB', fontdict={'size':16})
acq_UCB.set_xlabel('x', fontdict={'size':16})
# EI
acq_func={}
acq_func['name']='ei'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_EI.plot(x_original, utility, label='Utility Function', color='purple')
acq_EI.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_EI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_EI.set_xlim((np.min(x_original), np.max(x_original)))
acq_EI.set_ylabel('EI', fontdict={'size':16})
acq_EI.set_xlabel('x', fontdict={'size':16})
# POI
acq_func={}
acq_func['name']='poi'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_POI.plot(x_original, utility, label='Utility Function', color='purple')
acq_POI.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_POI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_POI.set_xlim((np.min(x_original), np.max(x_original)))
acq_POI.set_ylabel('POI', fontdict={'size':16})
acq_POI.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_EI.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_MRS.plot(x_original, utility, label='Utility Function', color='purple')
acq_MRS.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MRS.set_xlim((np.min(x_original), np.max(x_original)))
acq_MRS.set_ylabel('MRS', fontdict={'size':16})
acq_MRS.set_xlabel('x', fontdict={'size':16})
# PES
acq_func={}
acq_func['name']='pes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_PES.plot(x_original, utility, label='Utility Function', color='purple')
acq_PES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_PES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_PES.set_xlim((np.min(x_original), np.max(x_original)))
acq_PES.set_ylabel('PES', fontdict={'size':16})
acq_PES.set_xlabel('x', fontdict={'size':16})
# TS1
acq_func={}
acq_func['name']='consensus'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_Consensus.plot(x_original, utility, label='Utility Function', color='purple')
temp=np.asarray(myacq.object.xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_Consensus.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
#acq_TS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_Consensus.set_xlim((np.min(x_original), np.max(x_original)))
#acq_TS.set_ylim((np.min(utility)*0.9, np.max(utility)*1.1))
acq_Consensus.set_ylabel('Consensus', fontdict={'size':16})
acq_Consensus.set_xlabel('x', fontdict={'size':16})
# ES
acq_func={}
acq_func['name']='es'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_ES.plot(x_original, utility, label='Utility Function', color='purple')
acq_ES.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
#acq_ES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_ES.set_xlim((np.min(x_original), np.max(x_original)))
acq_ES.set_ylabel('ES', fontdict={'size':16})
acq_ES.set_xlabel('x', fontdict={'size':16})
strFileName="{:d}_GP_acquisition_functions.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_1d_vrs(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(10, 11))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(8, 1, height_ratios=[2, 1,1,1,1,1,1,1])
axis = plt.subplot(gs[0])
acq_UCB = plt.subplot(gs[1])
acq_EI = plt.subplot(gs[2])
#acq_POI = plt.subplot(gs[3])
#acq_TS2 = plt.subplot(gs[5])
acq_MES = plt.subplot(gs[3])
acq_ES = plt.subplot(gs[4])
acq_MRS = plt.subplot(gs[5])
acq_PES = plt.subplot(gs[6])
acq_Consensus = plt.subplot(gs[7])
mu, sigma = bo.posterior(x)
# get maximum of mu function
mu_max=mu.max()
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
axis.plot(x_original, y_original, linewidth=3, label='f(x)')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'o', markersize=8, label=u'Data X', color='g')
axis.plot(x_original, mu_original, '--', color='k', label='$\mu(x)$')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.3, fc='c', ec='None', label='$\sigma(x)$')
axis.get_xaxis().set_visible(False)
axis.set_yticklabels([])
axis.set_xticklabels([])
axis.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
axis.legend(loc='center left', bbox_to_anchor=(0.01, 1.15),prop={'size':16},ncol=6)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_UCB.plot(x_original, utility, label='Utility Function', color='purple')
acq_UCB.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.set_yticklabels([])
acq_UCB.set_xticklabels([])
#acq_UCB.get_yaxis().set_visible(False)
#acq_UCB.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_UCB.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_UCB.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_UCB.set_ylabel('UCB', fontdict={'size':16})
acq_UCB.set_xlabel('x', fontdict={'size':16})
# EI
acq_func={}
acq_func['name']='ei'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq_EI.plot(x_original, utility, label='Utility Function', color='purple')
acq_EI.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_EI.get_xaxis().set_visible(False)
#acq_EI.get_yaxis().set_visible(False)
#acq_EI.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_EI.set_yticklabels([])
acq_EI.set_xticklabels([])
acq_EI.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_EI.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_EI.set_ylabel('EI', fontdict={'size':16})
#acq_EI.set_xlabel('x', fontdict={'size':16})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_EI.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
xstars=[]
ystars=[]
# TS1
# finding the xt of Thompson Sampling
numXtar=100
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
xstars.append(xt_TS)
yt_TS=acq_mu.acq_kind(xt_TS,bo.gp,y_max=np.max(bo.Y))
if yt_TS>mu_max:
ystars.append(yt_TS)
if not ystars:
ystars.append([mu_max])
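# (Clarifying note, not in the original): each loop iteration maximises one Thompson sample of the
# GP posterior; the argmax goes into xstars, and its value goes into ystars only when it exceeds the
# current GP-mean maximum, with mu_max used as a fallback so ystars is never empty.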
temp=np.asarray(xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
# MRS
acq_func={}
acq_func['name']='mrs'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_MRS.plot(x_original, utility, label='Utility Function', color='purple')
acq_MRS.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_MRS.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_MRS.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
#label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_MRS.get_xaxis().set_visible(False)
acq_MRS.set_yticklabels([])
acq_MRS.set_xticklabels([])
#acq_MRS.get_yaxis().set_visible(False)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MRS.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_MRS.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_MRS.set_ylabel('MRS', fontdict={'size':16})
#acq_MRS.set_xlabel('x', fontdict={'size':16})
# MES
acq_func={}
acq_func['name']='mes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
acq_func['ystars']=ystars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_MES.plot(x_original, utility, label='Utility Function', color='purple')
acq_MES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_MRS.plot(xt_suggestion_original, [np.max(utility)]*xt_suggestion_original.shape[0], 's', markersize=15,
#label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_MES.get_xaxis().set_visible(False)
acq_MES.set_yticklabels([])
acq_MES.set_xticklabels([])
#acq_MES.get_yaxis().set_visible(False)
#acq_MRS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_MES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_MES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_MES.set_ylabel('MES', fontdict={'size':16})
#acq_MES.set_xlabel('x', fontdict={'size':16})
# PES
acq_func={}
acq_func['name']='pes'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_PES.plot(x_original, utility, label='Utility Function', color='purple')
acq_PES.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_PES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Selected point $x_t$', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_PES.get_xaxis().set_visible(False)
acq_PES.set_yticklabels([])
acq_PES.set_xticklabels([])
#acq_PES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_PES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_PES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_PES.set_ylabel('PES', fontdict={'size':16})
acq_PES.set_xlabel('x', fontdict={'size':16})
#acq_PES.get_yaxis().set_visible(False)
### VRS
acq_func={}
acq_func['name']='vrs_of_ts'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#mytest=np.vstack((x.reshape(-1,1),bo.gp.X))
#utility_existing_X = myacq.acq_kind(mytest, bo.gp, np.max(bo.Y))
#utility=0-utility
temp=np.asarray(myacq.object.xstars)
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Consensus.plot(x_original, utility, label=r'$\alpha(x)$', color='purple')
#acq_Consensus.plot(x_original, [np.asscalar(myacq.object.average_predvar)]*len(x_original), label=r'threshold', color='black')
#print np.asscalar(myacq.object.average_predvar)
#print np.min(utility)
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_Consensus.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label='$x^*$ samples', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
max_point=np.max(utility)
acq_Consensus.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Selected point $x_t$', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#acq_Consensus.get_yaxis().set_visible(False)
#acq_TS.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq_Consensus.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_Consensus.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_Consensus.set_yticklabels([])
#acq_TS.set_ylim((np.min(utility)*0.9, np.max(utility)*1.1))
acq_Consensus.set_ylabel('PVRS', fontdict={'size':16})
acq_Consensus.set_xlabel('x', fontdict={'size':16})
acq_Consensus.legend(loc='center left', bbox_to_anchor=(0.01, -1.1),prop={'size':16},ncol=3)
#acq_ES.get_xaxis().set_visible(False)
# ES
acq_func={}
acq_func['name']='es'
acq_func['dim']=1
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
#temp=np.asarray(myacq.object.xstars)
#xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
mymean=np.percentile([np.min(utility),np.max(utility)],20)
acq_ES.plot(x_original, utility, label='Utility Function', color='purple')
acq_ES.plot(xt_suggestion_original, [mymean]*xt_suggestion_original.shape[0], '*', markersize=12,
label=u'Next Best Guess', markerfacecolor='yellow', markeredgecolor='k', markeredgewidth=1)
acq_ES.plot(x_original[np.argmax(utility)], np.max(utility), 's', markersize=10,
label=u'Next Best Guess', markerfacecolor='red', markeredgecolor='k', markeredgewidth=1)
#max_point=np.max(utility)
#acq_ES.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
#label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
#acq_ES.get_yaxis().set_visible(False)
acq_ES.get_xaxis().set_visible(False)
acq_ES.set_yticklabels([])
acq_ES.set_xticklabels([])
acq_ES.set_xlim((np.min(x_original)-0.05, np.max(x_original)+0.05))
acq_ES.set_ylim((np.min(utility), 1.2*np.max(utility)))
acq_ES.set_ylabel('ES', fontdict={'size':16})
#acq_ES.set_xlabel('x', fontdict={'size':16})
strFileName="{:d}_GP_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d(bo):
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
fig=plt.figure(figsize=(8, 5))
fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1])
axis = plt.subplot(gs[0])
acq = plt.subplot(gs[1])
mu, sigma = bo.posterior(x)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
axis.plot(x_original, y_original, linewidth=3, label='Real Function')
axis.plot(bo.X_original.flatten(), bo.Y_original, 'D', markersize=8, label=u'Observations', color='r')
axis.plot(x_original, mu_original, '--', color='k', label='GP mean')
#samples*bo.max_min_gap+bo.bounds[:,0]
temp_xaxis=np.concatenate([x_original, x_original[::-1]])
#temp_xaxis=temp*bo.max_min_gap+bo.bounds[:,0]
temp_yaxis_original=np.concatenate([mu_original - 1.9600 * sigma_original, (mu_original + 1.9600 * sigma_original)[::-1]])
temp_yaxis=np.concatenate([mu - 1.9600 * sigma, (mu + 1.9600 * sigma)[::-1]])
temp_yaxis_original2=temp_yaxis*np.std(bo.Y_original)+np.mean(bo.Y_original)
axis.fill(temp_xaxis, temp_yaxis_original2,alpha=.6, fc='c', ec='None', label='95% CI')
axis.set_xlim((np.min(x_original), np.max(x_original)))
#axis.set_ylim((None, None))
axis.set_ylabel('f(x)', fontdict={'size':16})
axis.set_xlabel('x', fontdict={'size':16})
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
acq.plot(x_original, utility, label='Utility Function', color='purple')
acq.plot(x_original[np.argmax(utility)], np.max(utility), '*', markersize=15,
label=u'Next Best Guess', markerfacecolor='gold', markeredgecolor='k', markeredgewidth=1)
# check batch BO
try:
nSelectedPoints=int(bo.NumPoints[-1])
except:
nSelectedPoints=1
max_point=np.max(utility)
acq.plot(bo.X_original[-nSelectedPoints:], max_point.repeat(nSelectedPoints), 'v', markersize=15,
label=u'Previous Selection', markerfacecolor='green', markeredgecolor='k', markeredgewidth=1)
acq.set_xlim((np.min(x_original), np.max(x_original)))
#acq.set_ylim((0, np.max(utility) + 0.5))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
acq.set_ylabel('Acq', fontdict={'size':16})
acq.set_xlabel('x', fontdict={'size':16})
axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
acq.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_bo_1d_variance(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 1000)
x_original=x*bo.max_min_gap+bo.bounds[:,0]
y_original = func(x_original)
#y = func(x)
#y_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#fig=plt.figure(figsize=(8, 5))
fig, ax1 = plt.subplots(figsize=(8.5, 4))
mu, sigma = bo.posterior(x)
mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
sigma_original=sigma*(np.max(bo.Y_original)-np.min(bo.Y_original))
utility = bo.acq_func.acq_kind(x.reshape((-1, 1)), bo.gp, np.max(bo.Y))
def distance_function(x,X):
Euc_dist=euclidean_distances(x,X)
dist=Euc_dist.min(axis=1)
return dist
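# Illustration (assumed intent): for each candidate x this returns the Euclidean distance to the
# nearest observed point, e.g. with X = [[0.0], [1.0]] and x = [[0.25]] the result is array([0.25]).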
utility_distance=distance_function(x.reshape((-1, 1)),bo.X)
idxMaxVar=np.argmax(utility)
#idxMaxVar=[idx for idx,val in enumerate(utility) if val>=0.995]
ax1.plot(x_original, utility, label='GP $\sigma(x)$', color='purple')
ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], marker='s',label='x=argmax $\sigma(x)$', color='blue',linewidth=2)
#ax1.scatter(x_original[idxMaxVar], utility[idxMaxVar], label='$||x-[x]||$', color='blue',linewidth=2)
ax1.plot(bo.X_original.flatten(), [0]*len(bo.X_original.flatten()), 'D', markersize=10, label=u'Observations', color='r')
idxMaxDE=np.argmax(utility_distance)
ax2 = ax1.twinx()
ax2.plot(x_original, utility_distance, label='$d(x)=||x-[x]||^2$', color='black')
ax2.plot(x_original[idxMaxDE], utility_distance[idxMaxDE], 'o',label='x=argmax d(x)', color='black',markersize=10)
ax2.set_ylim((0, 0.45))
ax1.set_xlim((np.min(x_original)-0.01, 0.01+np.max(x_original)))
ax1.set_ylim((-0.02, np.max(utility) + 0.05))
#acq.set_ylim((np.min(utility), 1.1*np.max(utility)))
ax1.set_ylabel(r'$\sigma(x)$', fontdict={'size':18})
ax2.set_ylabel('d(x)', fontdict={'size':18})
ax1.set_xlabel('x', fontdict={'size':18})
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#ax1.legend(loc=2, bbox_to_anchor=(1.1, 1), borderaxespad=0.,fontsize=14)
#ax2.legend(loc=2, bbox_to_anchor=(1.1, 0.3), borderaxespad=0.,fontsize=14)
plt.title('Exploration by GP variance vs distance',fontsize=22)
ax1.legend(loc=3, bbox_to_anchor=(0.05,-0.32,1, -0.32), borderaxespad=0.,fontsize=14,ncol=4)
ax2.legend(loc=3, bbox_to_anchor=(0.05,-0.46,1, -0.46), borderaxespad=0.,fontsize=14,ncol=2)
#plt.legend(fontsize=14)
strFolder="P:\\03.Research\\05.BayesianOptimization\\PradaBayesianOptimization\\demo_geometric"
strFileName="{:d}_var_DE.eps".format(counter)
strPath=os.path.join(strFolder,strFileName)
fig.savefig(strPath, bbox_inches='tight')
def plot_acq_bo_2d_vrs(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 50)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 50)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 50)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 50)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(14, 20))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
nRows=6
axis_mean2d = fig.add_subplot(nRows, 2, 1)
axis_variance2d = fig.add_subplot(nRows, 2, 2)
acq_UCB = fig.add_subplot(nRows, 2, 3)
#acq_EI =fig.add_subplot(nRows, 2,4)
#acq_POI = plt.subplot(gs[3])
acq_ES = fig.add_subplot(nRows, 2, 4)
acq_PES = fig.add_subplot(nRows, 2, 5)
acq_MRS = fig.add_subplot(nRows, 2, 6)
#acq_ydist = fig.add_subplot(nRows, 2, 8)
acq_VRS = fig.add_subplot(nRows, 2, 7)
acq_Batch_VRS_B_2 = fig.add_subplot(nRows, 2, 8)
acq_Batch_VRS_B_3 = fig.add_subplot(nRows, 2, 9)
acq_Batch_VRS_B_4 = fig.add_subplot(nRows, 2, 10)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# get maximum of mu function
mu_max=mu.max()
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title('Gaussian Process Mean $\mu(x)$',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_mean2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title('Gaussian Process Variance $\sigma(x)$',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_UCB.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
"""
# EI
acq_func={}
acq_func['name']='ei'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_EI.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_EI.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_EI.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_EI.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_EI=X[idxBest,:]
acq_EI.set_title('EI',fontsize=16)
acq_EI.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_EI.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_EI, shrink=0.9)
"""
# ==================================================================================
# finding the xt of Thompson Sampling then use for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# MRS
acq_func={}
acq_func['name']='mes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['ystars']=y_stars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_MRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_MRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_MRS.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_suggestion_original=xstars*bo.max_min_gap+bo.bounds[:,0]
acq_MRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_MRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
acq_MRS.set_title('MES',fontsize=16)
acq_MRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_MRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_MRS, shrink=0.9)
acq_MRS.get_xaxis().set_visible(False)
acq_MRS.get_yaxis().set_visible(False)
"""
# plot distribution of y_star
mu_ydist, std_ydist = norm_dist.fit(y_stars)
# Plot the histogram.
acq_ydist.hist(y_stars,bins=20,normed=True,alpha=.6,color='g',label=ur'Histogram of $y^*$')
# Plot the PDF.
x = np.linspace(np.min(y_stars), np.max(y_stars), 100)
p = norm_dist.pdf(x, mu_ydist, std_ydist)
acq_ydist.plot(x,p,'k', linewidth=2,label='Gaussian curve')
acq_ydist.legend()
acq_ydist.set_title(ur'Distribution of $y^*$',fontsize=16)
"""
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
#acq_func['xstars']=xstars
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_PES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_PES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
acq_PES.get_xaxis().set_visible(False)
acq_PES.get_yaxis().set_visible(False)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
#temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_ES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_ES.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
acq_ES.get_xaxis().set_visible(False)
acq_ES.get_yaxis().set_visible(False)
#xstars.append(xt_UCB)
#xstars.append(xt_EI)
#xstars.append(xt_ES)
#xstars.append(xt_PES)
# Variance Reduction Search
acq_func={}
acq_func['name']='pvrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_VRS.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('Batch PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=3
acq_Batch_VRS_B_3.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=3)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_3.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_3.set_title('Batch PVRS B=3',fontsize=16)
acq_Batch_VRS_B_3.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
acq_Batch_VRS_B_3.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_3, shrink=0.9)
acq_Batch_VRS_B_3.get_xaxis().set_visible(False)
acq_Batch_VRS_B_3.get_yaxis().set_visible(False)
acq_Batch_VRS_B_3.legend(loc='center left', bbox_to_anchor=(0.01, -0.2),prop={'size':20},ncol=3)
# Batch Variance Reduction Search B=4
acq_Batch_VRS_B_4.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=4)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_4.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_4.set_title('Batch PVRS B=4',fontsize=16)
acq_Batch_VRS_B_4.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_4.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_4, shrink=0.9)
acq_Batch_VRS_B_4.get_xaxis().set_visible(False)
acq_Batch_VRS_B_4.get_yaxis().set_visible(False)
strFileName="{:d}_GP2d_acquisition_functions_vrs.eps".format(counter)
fig.savefig(strFileName, bbox_inches='tight')
#axis.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
#acq_TS.legend(loc=2, bbox_to_anchor=(1.01, 1), borderaxespad=0.)
def plot_acq_bo_2d_vrs_3x2(bo):
global counter
counter=counter+1
func=bo.f
#x_original = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 100)
x1 = np.linspace(bo.scalebounds[0,0], bo.scalebounds[0,1], 50)
x2 = np.linspace(bo.scalebounds[1,0], bo.scalebounds[1,1], 50)
x1g,x2g=np.meshgrid(x1,x2)
X=np.c_[x1g.flatten(), x2g.flatten()]
x1_ori = np.linspace(bo.bounds[0,0], bo.bounds[0,1], 50)
x2_ori = np.linspace(bo.bounds[1,0], bo.bounds[1,1], 50)
x1g_ori,x2g_ori=np.meshgrid(x1_ori,x2_ori)
X_ori=np.c_[x1g_ori.flatten(), x2g_ori.flatten()]
fig=plt.figure(figsize=(14, 16))
#fig.suptitle('Gaussian Process and Utility Function After {} Points'.format(len(bo.X)), fontdict={'size':18})
#gs = gridspec.GridSpec(7, 1, height_ratios=[1,1,1,1,1,1,1])
nRows=4
axis_mean2d = fig.add_subplot(nRows, 2, 1)
axis_variance2d = fig.add_subplot(nRows, 2, 2)
acq_UCB = fig.add_subplot(nRows, 2, 3)
acq_ES = fig.add_subplot(nRows, 2, 4)
acq_PES = fig.add_subplot(nRows, 2, 5)
acq_VRS = fig.add_subplot(nRows, 2, 6)
acq_Batch_VRS_B_2 = fig.add_subplot(nRows, 2, 7)
acq_Batch_VRS_B_3 = fig.add_subplot(nRows, 2, 8)
#acq_Batch_VRS_B_4 = fig.add_subplot(nRows, 2, 10)
mu, sigma = bo.posterior(X)
#mu_original=mu*(np.max(y_original)-np.min(y_original))+np.mean(y_original)
#mu_original=mu*np.std(bo.Y_original)+np.mean(bo.Y_original)
#sigma_original=sigma*np.std(bo.Y_original)+np.mean(bo.Y_original)**2
# get maximum of mu function
mu_max=mu.max()
# mean
CS=axis_mean2d.contourf(x1g_ori,x2g_ori,mu.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_mean2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_mean2d.set_title(r'Gaussian Process Mean $\mu(x)$',fontsize=16)
axis_mean2d.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
axis_mean2d.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS, ax=axis_mean2d, shrink=0.9)
axis_mean2d.get_xaxis().set_visible(False)
axis_mean2d.get_yaxis().set_visible(False)
# variance
CS=axis_variance2d.contourf(x1g_ori,x2g_ori,sigma.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2 = plt.contour(CS, levels=CS.levels[::2],colors='r',origin='lower',hold='on')
axis_variance2d.scatter(bo.X_original[:,0],bo.X_original[:,1], label=u'Observations', color='g')
axis_variance2d.set_title(r'Gaussian Process Variance $\sigma(x)$',fontsize=16)
axis_variance2d.set_xlim(bo.bounds[0,0], bo.bounds[0,1])
axis_variance2d.set_ylim(bo.bounds[1,0], bo.bounds[1,1])
fig.colorbar(CS, ax=axis_variance2d, shrink=0.9)
axis_variance2d.get_xaxis().set_visible(False)
axis_variance2d.get_yaxis().set_visible(False)
# UCB
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_UCB.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_UCB.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_UCB.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_UCB.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
xt_UCB=X[idxBest,:]
acq_UCB.set_title('UCB',fontsize=16)
acq_UCB.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_UCB.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_UCB, shrink=0.9)
acq_UCB.get_xaxis().set_visible(False)
acq_UCB.get_yaxis().set_visible(False)
# ==================================================================================
# finding the xt of Thompson Sampling then use for PES, ES and VRS
y_max=np.max(bo.Y)
xstars=[]
y_stars=[]
xstars_VRS=[]
numXtar=25*bo.dim
for ii in range(numXtar):
mu_acq={}
mu_acq['name']='thompson'
mu_acq['dim']=bo.dim
mu_acq['scalebounds']=bo.scalebounds
acq_mu=AcquisitionFunction(mu_acq)
xt_TS = acq_max(ac=acq_mu.acq_kind,gp=bo.gp,bounds=bo.scalebounds ,opt_toolbox='scipy')
y_xt_TS=acq_mu.acq_kind(xt_TS,bo.gp)
#if y_xt_TS>mu_max:
y_stars.append(y_xt_TS)
xstars.append(xt_TS)
#if y_xt_TS>=y_max:
xstars_VRS.append(xt_TS)
# ES
acq_func={}
acq_func['name']='es'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_ES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_ES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
#temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_ES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_ES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#xt_ES=X[idxBest,:]
acq_ES.set_title('ES',fontsize=16)
acq_ES.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_ES.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_ES, shrink=0.9)
acq_ES.get_xaxis().set_visible(False)
acq_ES.get_yaxis().set_visible(False)
#xstars.append(xt_UCB)
#xstars.append(xt_EI)
#xstars.append(xt_ES)
#xstars.append(xt_PES)
# PES
acq_func={}
acq_func['name']='pes'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
#acq_func['xstars']=xstars
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_PES.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
acq_PES.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_PES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
temp=np.asarray(myacq.object.x_stars)
temp=temp.reshape(-1,2)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_PES.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_PES.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
xt_PES=X[idxBest,:]
acq_PES.set_title('PES',fontsize=16)
acq_PES.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_PES.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_PES, shrink=0.9)
acq_PES.get_xaxis().set_visible(False)
acq_PES.get_yaxis().set_visible(False)
"""
# plot distribution of y_star
mu_ydist, std_ydist = norm_dist.fit(y_stars)
# Plot the histogram.
acq_ydist.hist(y_stars,bins=20,normed=True,alpha=.6,color='g',label=ur'Histogram of $y^*$')
# Plot the PDF.
x = np.linspace(np.min(y_stars), np.max(y_stars), 100)
p = norm_dist.pdf(x, mu_ydist, std_ydist)
acq_ydist.plot(x,p,'k', linewidth=2,label='Gaussian curve')
acq_ydist.legend()
acq_ydist.set_title(ur'Distribution of $y^*$',fontsize=16)
"""
# Variance Reduction Search
acq_func={}
acq_func['name']='vrs'
acq_func['kappa']=2
acq_func['n_xstars_x_dim']=50
acq_func['dim']=2
acq_func['scalebounds']=bo.scalebounds
acq_func['xstars']=xstars_VRS
myacq=AcquisitionFunction(acq_func)
utility = myacq.acq_kind(X, bo.gp)
CS_acq=acq_VRS.contourf(x1g_ori,x2g_ori,utility.reshape(x1g.shape),cmap=my_cmap,origin='lower')
#CS2_acq = plt.contour(CS_acq, levels=CS_acq.levels[::2],colors='r',origin='lower',hold='on')
idxBest=np.argmax(utility)
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_VRS.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label='xstars')
acq_VRS.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Data')
acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='s',color='r',s=100,label='Peak')
#acq2d.scatter(bo.X_original[-1,0],bo.X_original[-1,1],color='r',s=30,label='Previous Selection')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_VRS.set_title('PVRS',fontsize=16)
acq_VRS.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_VRS.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_VRS, shrink=0.9)
acq_VRS.get_xaxis().set_visible(False)
acq_VRS.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=2
acq_Batch_VRS_B_2.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=np.asarray(myacq.object.xstars)
# convert from scale data points to original data points
xt_suggestion_original=temp*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(xt_suggestion_original[:,0],xt_suggestion_original[:,1],marker='*',color='y',s=150,label=r'$x^*$ samples')
#acq_VRS.scatter(X_ori[idxBest,0],X_ori[idxBest,1],marker='*',color='r',s=300,label='Peak')
acq_func={}
acq_func['name']='ucb'
acq_func['kappa']=2
acq_func['dim']=2
acq_func['xstars']=xstars_VRS
acq_params={}
acq_params['acq_func']=acq_func
acq_params['optimize_gp']=1
acq_params['n_xstars']=100
func_params={}
func_params['bounds']=bo.bounds
func_params['f']=func
gp_params = {'lengthscale':0.1*2,'noise_delta':0.00000001}
bo2=BatchPVRS(gp_params,func_params, acq_params)
bo2.init_with_data(bo.X_original,bo.Y_original)
#new_X=bo2.maximize_batch_sequential_greedy_PVRS(gp_params,B=3)
new_X,temp=bo2.maximize_batch_PVRS_iterative_greedy(gp_params,B=2)
new_X_original=new_X*bo.max_min_gap+bo.bounds[:,0]
acq_Batch_VRS_B_2.scatter(new_X_original[:,0],new_X_original[:,1],marker='s',color='r',s=100,label='Selected point $x_t$')
#acq_ES.scatter(bo.X_original[-1,0],bo.X_original[-1,1],marker='*', color='green',s=100,label='Selected')
acq_Batch_VRS_B_2.set_title('B-PVRS B=2',fontsize=16)
acq_Batch_VRS_B_2.set_xlim(bo.bounds[0,0]-0.1, bo.bounds[0,1]+0.1)
acq_Batch_VRS_B_2.set_ylim(bo.bounds[1,0]-0.1, bo.bounds[1,1]+0.1)
fig.colorbar(CS_acq, ax=acq_Batch_VRS_B_2, shrink=0.9)
acq_Batch_VRS_B_2.get_xaxis().set_visible(False)
acq_Batch_VRS_B_2.get_yaxis().set_visible(False)
# Batch Variance Reduction Search B=3
acq_Batch_VRS_B_3.scatter(bo.X_original[:,0],bo.X_original[:,1],color='g',label='Existing data X')
temp=
|
np.asarray(myacq.object.xstars)
|
numpy.asarray
|
"""
Functions to train RNN using modified version of FORCE (i.e. temporally restricted error kernel).
Functions to test, train RNN using original FORCE, save data, training/test performance plots, evaluate error rate
written in Python 3.8.3
@ Elham
"""
print('train_force is executing\n')
import time
import pickle
import numpy as np
from scipy import sparse
from matplotlib import pyplot as plt
from drawnow import *
from SPM_task import *
def initialize_net(params, dist='Gauss'):
'''
initialize network states and initial params
Args:
params: Dictionary containing all parameters
dist: Distribution for initialization of weights and states -- can be either 'Gauss' or 'Uniform'
'''
net_prs = params['network']
train_prs = params['train']
msc_prs = params['msc']
N = net_prs['N']
rng = np.random.RandomState(msc_prs['seed'])
Pw = np.eye(N)/train_prs['alpha_w'] #inverse correlation matrix
Pd = np.eye(N)/train_prs['alpha_d']
std = 1/np.sqrt(net_prs['pg'] * N)
J = std * sparse.random(N, N, density=net_prs['pg'],
random_state=msc_prs['seed'], data_rvs=rng.randn).toarray() #connectivity matrix
if dist == 'Gauss':
x = 0.1 * rng.randn(N, 1)
wf = (1. * rng.randn(N, net_prs['d_output'])) / net_prs['fb_var']
wi = (1. * rng.randn(N, net_prs['d_input'])) / net_prs['input_var']
wfd = (1. * rng.randn(N, net_prs['d_input'])) / net_prs['fb_var']
elif dist == 'Uniform':
print('initialization is uniform')
x = 0.1 * (2 * rng.rand(N, 1) -1)
wf = (2 * rng.rand(N, net_prs['d_output']) - 1) / net_prs['fb_var']
wi = (2 * rng.rand(N, net_prs['d_input']) - 1) / net_prs['input_var']
wfd = (2 * rng.rand(N, net_prs['d_input']) - 1) / net_prs['fb_var']
wo = np.zeros([N, net_prs['d_output']])
#wfd = wfd * 0.
wd = np.zeros([N, net_prs['d_input']])
return Pw, Pd, J, x, wf, wo, wfd, wd, wi
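# Illustrative usage sketch (not part of the original module; assumes a `params` dict
# with the 'network', 'train' and 'msc' sections read above):
# Pw, Pd, J, x, wf, wo, wfd, wd, wi = initialize_net(params, dist='Gauss')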
def zero_fat_mats(params, t_trial, is_train=True):
'''
initialize zero matrices for recording outputs, dummy outputs, states, rates and weight-update norms
'''
if is_train:
total_size = params['n_train'] + params['n_train_ext']
elif not is_train:
total_size = params['n_test']
total_steps = int(total_size * t_trial / params['dt'])
z_mat = np.zeros(total_steps)
zd_mat = np.zeros([params['d_input'], total_steps])
x_mat = np.zeros([params['N'], total_steps])
r_mat = np.zeros([params['N'], total_steps])
wo_dot = np.zeros([total_steps, params['d_output']])
wd_dot = np.zeros([total_steps, params['d_input']])
return z_mat, zd_mat, x_mat, r_mat, wo_dot, wd_dot
def train(network, task_prs, exp_mat, target_mat, dummy_mat, input_digits, dist='Gauss'):
"""
Main function to implement training using modified FORCE algorithm
exp_mat: sequence of trials (d_input X trial_len*trial_size)
target_mat: target signal for training network output zo (d_output X trial_len*trial_size)
dummy_mat: target signal for training dummy outputs (memory encoding) zd (d_input X trial_len*trial_size)
input_digits: digits in trials
Return: x (final state) and params (updated with trained weights)
"""
tic = time.time()
params = network.params
train_steps = int((params['n_train'] + params['n_train_ext'])* task_prs['t_trial'] / params['dt'])
time_steps = np.arange(0, train_steps, 1)
# initialization
wo, wd = params['wo'], params['wd']
x = network.x
r = np.tanh(x)
z = np.matmul(wo.T, r)
zd = np.matmul(wd.T, r)
z_mat, zd_mat, x_mat, r_mat, wo_dot, wd_dot = zero_fat_mats(network.params, task_prs['t_trial'], is_train=True)
# start training
for i in range(train_steps):
z_mat[i] = z
zd_mat[:, i] = zd.reshape(-1)
x_mat[:, i] = x.reshape(-1)
r_mat[:, i] = r.reshape(-1)
z, zd = network.memory_trial(exp_mat[:, i])
wo_dot[i], wd_dot[i,:] = network.update_weights(i, dummy_mat[:,i], target_mat[:,i])
toc = time.time()
#print('\n', 'train time = ' , (toc-tic)/60)
#print('read out norm = ', np.linalg.norm(network.params['wo']))
#print('dummy norm = ', np.linalg.norm(network.params['wd'], axis=0, keepdims=True))
task_prs['counter'] = i
return network, task_prs
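# Illustrative call sequence (sketch only; exp_mat, target_mat, dummy_mat and
# input_digits are assumed to come from the task generators imported from SPM_task):
# network, task_prs = train(network, task_prs, exp_mat, target_mat, dummy_mat, input_digits)
# test(network, task_prs, exp_mat, target_mat, dummy_mat, input_digits)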
def test(network, task_prs, exp_mat, target_mat, dummy_mat, input_digits):
"""
Visualize whether the network has learned the task and collect the trial-end state as the initial condition of the next trial
network: trained network object; network.x holds the final state from the training phase
Return: initial state and activity
"""
params = network.params
wo, wd = params['wo'], params['wd']
wf, wfd, wi = params['wf'], params['wfd'], params['wi']
JT, J = params['JT'], params['J']
dt, tau, g = params['dt'], params['tau'], params['g']
test_steps = int(params['n_test'] * task_prs['t_trial'] / dt)
time_steps = np.arange(0, test_steps, 1)
counter = task_prs['counter']
exp_mat = exp_mat[:, counter+1:]
target_mat = target_mat[:, counter+1:]
dummy_mat = dummy_mat[:, counter+1:]
test_digits = input_digits[params['n_train']+ params['n_train_ext']:]
output = task_prs['output_encoding']
correct = 0
i00, i01, i10, i11 = 0, 0, 0, 0
i02, i20, i22, i12, i21 = 0, 0, 0, 0, 0
x = network.x
r = np.tanh(x)
z = np.matmul(wo.T, r)
zd =
|
np.matmul(wd.T, r)
|
numpy.matmul
|
from __future__ import print_function
import numpy as np
from scipy.spatial.distance import cdist
def is_iterable(obj):
try:
some_object_iterator = iter(obj)
return True
except TypeError as te:
return False
class fisheye():
"""A class for fisheye transformations
Parameters
----------
R : float -- radius of the fisheye region
mode : str -- one of 'Sarkar', 'default' or 'root'
d : float -- magnification factor (d > 0; d >= 1 for mode 'root')
xw : float -- relative width of the demagnification zone, in [0, 1]
focus : scalar or array-like -- focus point of the transformation
"""
def __init__(self,R,mode='default',d=4,xw=0.25,focus=None):
assert(d > 0)
assert(xw >= 0.0 and xw<=1.0)
assert(mode in ('Sarkar', 'default', 'root'))
self.R = R
self.d = d
self.set_focus(focus)
if mode == 'Sarkar':
self.xw = 0
else:
self.xw = xw
if mode == 'root':
assert(d > 1)
self.mode = mode
self.old_xw = None
self._compute_parameters()
def _compute_parameters(self):
d = self.d
xw = self.xw
if self.mode in ('default', 'Sarkar'):
self.f2 = lambda x: (x+self.d*x) / (self.d*x + self.A2)
self.f2_inverse = lambda x: self.A2 * x / (self.d * (1-x) + 1)
elif self.mode == 'root':
self.f2 = lambda x: (self.d/self.A2*x)**(1./self.d)
self.f2_inverse = lambda x: self.A2 / self.d * x**self.d
self.f1 = lambda x: 1 - (-1./self.A1 + np.sqrt(1/self.A1**2 + 2*(1-x)/self.A1))
self.f1_inverse = lambda x: x - self.A1/2 * (1-x)**2
if xw == 0.0:
self.A1 = 0
if self.mode == 'root':
self.A2 = d
else:
self.A2 = 1
elif xw == 1.0:
self.A2 = 1
self.A1 = 0
else:
if self.mode == 'default':
X = np.array([[ xw**2/2., 1 - ((d+1)*xw / (d*xw+1)) ],
[ xw, - (d+1) / (d*xw+1)**2 ]])
elif self.mode == 'root':
X = np.array([[ xw**2/2, ((1-xw)**d)/d],
[xw, -(1-xw)**(d-1)]])
b = -np.array([xw-1,1])
self.A1, self.A2 = np.linalg.inv(X).dot(b)
xc = self.A1/2 * xw**2 + xw
self.xc = 1 - xc
def set_magnification(self,d):
assert(d > 0)
if self.mode == 'root':
assert(d>=1)
self.d = d
self._compute_parameters()
def set_demagnification_width(self,xw):
assert(xw >= 0.0 and xw<=1.0)
if self.mode == 'Sarkar':
self.xw = 0
else:
self.xw = xw
self._compute_parameters()
def set_radius(self,R):
self.R = R
def set_mode(self,mode):
assert(mode in ('Sarkar', 'default', 'root'))
if mode == 'Sarkar' and self.mode != 'Sarkar':
self.old_xw = self.xw
self.xw = 0.0
if mode != 'Sarkar' and self.old_xw is not None:
self.xw = self.old_xw
self.old_xw = None
self.mode = mode
if self.mode == 'root':
assert(self.d>=1)
self._compute_parameters()
def set_focus(self,focus):
if not is_iterable(focus):
focus = np.array([focus])
if not type(focus) == np.ndarray:
focus = np.array(focus)
self.focus = focus
def radial(self,pos):
is_scalar = not is_iterable(pos)
if is_scalar:
pos = np.array([[pos]])
else:
if len(pos.shape) == 1:
pos = pos.reshape((len(pos.shape),1))
def fisheye_function(self,r):
result = np.copy(r)
if self.xc > 0 and self.xc < 1:
result[r<=self.xc] = self.f2(r[r<=self.xc])
ndcs = np.logical_and(r>self.xc, r<1)
result[ndcs] = self.f1(r[ndcs])
elif self.xc == 1:
result[r<1] = self.f2(r[r<1])
return result
def fisheye_function_inverse(self,r):
result = np.copy(r)
if self.xw > 0 and self.xw < 1:
result[r<=1-self.xw] = self.f2_inverse(r[r<=1-self.xw])
ndcs = np.logical_and(r>1-self.xw, r<1)
result[ndcs] = self.f1_inverse(r[ndcs])
elif self.xw == 0:
result[r<1] = self.f2_inverse(r[r<1])
return result
def cartesian(self,pos,inverse=False):
if not type(pos) == np.ndarray:
pos = np.array(pos)
original_shape = pos.shape
if len(pos.shape) == 1:
pos = pos.reshape((1,pos.shape[0]))
newpos = np.empty_like(pos)
for dim in range(pos.shape[1]):
r = pos[:,dim] - self.focus[dim]
x = np.abs(r) / self.R
theta = np.sign(r)
if not inverse:
newx = self.fisheye_function(x)
else:
newx = self.fisheye_function_inverse(x)
newpos[:,dim] = self.focus[dim] + theta * self.R * newx
newpos = newpos.reshape(original_shape)
return newpos
def inverse_cartesian(self,pos):
return self.cartesian(pos, inverse=True)
def radial_2D(self,pos,inverse=False):
if not type(pos) == np.ndarray:
pos = np.array(pos)
original_shape = pos.shape
if len(pos.shape) == 1:
pos = pos.reshape((1,pos.shape[0]))
theta = np.arctan2(pos[:,1]-self.focus[1], pos[:,0]-self.focus[0])
x = cdist(pos, self.focus.reshape(1,len(self.focus))).flatten() / self.R
if not inverse:
newx = self.fisheye_function(x)
else:
newx = self.fisheye_function_inverse(x)
newpos = np.empty_like(pos)
newpos[:,0] = self.focus[0] + np.cos(theta) * self.R * newx
newpos[:,1] = self.focus[1] + np.sin(theta) * self.R * newx
newpos = newpos.reshape(original_shape)
return newpos
def inverse_radial_2D(self,pos):
return self.radial_2D(pos, inverse=True)
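# Usage sketch (illustrative only): warp and un-warp 2D points around a focus.
# f = fisheye(R=1.0, mode='default', d=4, xw=0.25, focus=[0.5, 0.5])
# warped = f.radial_2D(points)  # points: (N, 2) array
# points_back = f.inverse_radial_2D(warped)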
def scale_radial_2D(self,pos,radii,inverse=False):
if not type(pos) == np.ndarray:
pos = np.array(pos)
original_shape = pos.shape
if len(pos.shape) == 1:
pos = pos.reshape((1,pos.shape[0]))
theta = np.arctan2(pos[:,1]-self.focus[1], pos[:,0]-self.focus[0])
x = cdist(pos, self.focus.reshape(1,len(self.focus))).flatten() / self.R
x2 = x + radii / self.R
if not inverse:
newx = self.fisheye_function(x)
newx2 = self.fisheye_function(x2)
else:
newx = self.fisheye_function_inverse(x)
newx2 = self.fisheye_function_inverse(x2)
newpos = np.empty_like(pos)
newpos2 = np.empty_like(pos)
newpos[:,0] = self.focus[0] + np.cos(theta) * self.R * newx
newpos[:,1] = self.focus[1] + np.sin(theta) * self.R * newx
newpos2[:,0] = self.focus[0] + np.cos(theta) * self.R * newx2
newpos2[:,1] = self.focus[1] + np.sin(theta) * self.R * newx2
radii =
|
np.linalg.norm(newpos2 - newpos,axis=1)
|
numpy.linalg.norm
|
"""
the main class for gpu accelerated bpm propagation
<EMAIL>
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from gputools import OCLArray, OCLImage, OCLProgram, get_device, OCLMultiReductionKernel
from gputools import fft, fft_plan
import gputools
# gputools.init_device(useDevice = 0)
# from gputools import OCLReductionKernel
from .focus_field_cylindrical import focus_field_cylindrical, focus_field_cylindrical_plane
from .focus_field_beam import focus_field_beam, focus_field_beam_plane
from .focus_field_lattice import focus_field_lattice, focus_field_lattice_plane
from six.moves import range
from six.moves import zip
def absPath(myPath):
import sys
import os
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
return os.path.join(base_path, os.path.basename(myPath))
except Exception:
base_path = os.path.abspath(os.path.dirname(__file__))
return os.path.join(base_path, myPath)
def _next_power_of_2(n):
return int(2**np.ceil(np.log2(n)))
class Bpm3d(object):
"""
the main class for gpu accelerated bpm propagation
"""
_real_type = np.float32
_complex_type = np.complex64
def __init__(self, size=None,
shape=None,
units=None,
dn=None,
lam=.5,
n0=1.,
simul_xy=None,
simul_z=1,
n_volumes=1,
enforce_subsampled=False,
fftplan_kwargs={}):
"""
Parameters
----------
size: (Sx,Sy,Sz)
the size of the geometry in microns (Sx,Sy,Sz)
shape: (Nx,Ny,Nz)
the shape of the geometry in pixels (Nx,Ny,Nz)
units: (dx,dy,dz)
the voxelsizes in microns (dx,dy,dz)
dn: ndarray (float32|complex64)
refractive index distribution; dn.shape must be (Nz,Ny,Nx)
lam: float
the wavelength in microns
n0: float
the refractive index of the surrounding media
simul_xy: (Nx,Ny,Nz), optional
the shape of the 2d computational geometry in pixels (Nx,Ny)
(e.g. subsampling in xy)
simul_z: int, optional
the subsampling factor along z
n_volumes: int
splits the domain into chunks if GPU memory is not
large enough (will be set automatically)
Example
-------
>>> m = Bpm3d(size = (10,10,10),shape = (256,256,256),lam = 0.488,n0 = 1.33)
"""
if shape is None and dn is None:
raise ValueError("either shape or dn have to be given!")
if not (shape is None or dn is None) and dn.shape!=shape[::-1]:
raise ValueError("shape != dn.shape")
if size is None and units is None:
raise ValueError("either size or units has to be given!")
if not (size is None or units is None):
raise ValueError("either give size or units but not both!")
assert n_volumes>0
if shape is None:
shape = dn.shape[::-1]
if not units is None:
size = [(s-1.)*u for s, u in zip(shape, units)]
self.n_volumes = n_volumes
self.fftplan_kwargs = fftplan_kwargs
if simul_xy is None:
simul_xy = tuple([_next_power_of_2(n) for n in shape[:2]])
self._setup(shape=shape, size=size, lam=lam, n0=n0,
simul_xy=simul_xy,
simul_z=simul_z,
enforce_subsampled=enforce_subsampled)
self._setup_dn(dn)
def _copy_arr_with_correct_type(self, arr):
"""if arr is of the acceptable types, returns arr, else
returns a copy with the acceptable type"""
return arr.astype(Bpm3d._complex_type, copy=False) if np.iscomplexobj(arr) else arr.astype(Bpm3d._real_type,
copy=False)
def _setup(self, shape, size, lam, n0,
simul_xy, simul_z, enforce_subsampled):
"""
sets up the internal variables e.g. propagators etc...
:param shape: the shape of the geometry in pixels (Nx,Ny,Nz)
:param size: the size of the geometry in microns (Sx,Sy,Sz)
:param lam: the wavelength of light in microns
:param n0: the refractive index of the surrounding media
:param simul_xy: the shape of the 2d computational geometry in pixels (Nx,Ny)
:param simul_z: the subsampling factor along z
:param enforce_subsampled: if True, always use the subsampled code path
"""
self.shape = shape
self.size = size
self.units = tuple([s/(n-1.) for s, n in zip(size, shape)])
self.lam = lam
self.simul_xy = simul_xy
self.simul_z = simul_z
self.dx, self.dy = 1.*np.array(self.size[:2])/(np.array(self.simul_xy)-1)
self.dz = 1.*self.size[-1]/(self.shape[-1]-1)/self.simul_z
# self.dz = 1.*self.size[-1]/self.shape[-1]/self.simul_z
self.n0 = n0
self.k0 = 2.*np.pi/self.lam
self._is_subsampled = enforce_subsampled or ((self.shape[:2]!=self.simul_xy) or (simul_z>1))
self.maxNz = int(np.ceil(1.*self.shape[-1]/self.n_volumes))+1
self._setup_gpu()
def _setup_dn(self, dn):
if dn is None:
self.dn = None
else:
if dn.shape!=self.shape[::-1]:
raise ValueError("shape != dn.shape")
self.dn = dn
if self.n_volumes==1:
self._transfer_dn(dn)
# FIXME: this is still stupid
if not self.dn is None:
self.dn_mean = np.mean(np.real(self.dn), axis=(1, 2))
def _transfer_dn(self, dn):
if self._is_subsampled:
self._im_dn = OCLImage.from_array(self._copy_arr_with_correct_type(dn))
else:
self._buf_dn = OCLArray.from_array(self._copy_arr_with_correct_type(dn))
def _setup_gpu(self):
dev = get_device()
self._queue = dev.queue
self._ctx = dev.context
prog = OCLProgram(absPath("kernels/bpm_3d_kernels.cl"))
# the buffers/ images
Nx, Ny = self.simul_xy
Nx0, Ny0 = self.shape[:2]
self._plan = fft_plan((Ny, Nx), **self.fftplan_kwargs)
self._buf_plane = OCLArray.empty((Ny, Nx), np.complex64)
self._buf_H = OCLArray.empty((Ny, Nx), np.complex64)
self._img_xy = OCLImage.empty((Ny, Nx), dtype=np.float32, num_channels=2)
# buffer for the weighted dn average
self.intens_g = OCLArray.empty((1, Ny, Nx), dtype=Bpm3d._real_type)
self.intens_dn_g = OCLArray.empty((1, Ny, Nx), dtype=Bpm3d._real_type)
self.intens_sum_g = OCLArray.zeros((), dtype=Bpm3d._real_type)
self.intens_dn_sum_g = OCLArray.zeros((), dtype=Bpm3d._real_type)
# the kernels
self._kernel_compute_propagator = prog.compute_propagator
self._kernel_compute_propagator.set_scalar_arg_dtypes((None,)+(np.float32,)*5)
self._kernel_compute_propagator_buf = prog.compute_propagator_buf
self._kernel_compute_propagator_buf.set_scalar_arg_dtypes((None,)+(np.float32,)*5+(None,)*2)
self._kernel_mult_complex = prog.mult
self._kernel_im_to_buf_field = prog.img_to_buf_field
self._kernel_im_to_buf_intensity = prog.img_to_buf_intensity
self._kernel_im_to_im_intensity = prog.img_to_img_intensity
self._kernel_buf_to_buf_field = prog.buf_to_buf_field
self._kernel_buf_to_buf_intensity = prog.buf_to_buf_intensity
self._kernel_mult_dn_img_float = prog.mult_dn_image
self._kernel_mult_dn_buf_float = prog.mult_dn
self._kernel_mult_dn_img_complex = prog.mult_dn_image_complex
self._kernel_mult_dn_buf_complex = prog.mult_dn_complex
self._kernel_mult_dn_img_float_local = prog.mult_dn_image_local
self._kernel_mult_dn_buf_float_local = prog.mult_dn_local
self._kernel_mult_dn_img_complex_local = prog.mult_dn_image_complex_local
self._kernel_mult_dn_buf_complex_local = prog.mult_dn_complex_local
self._kernel_reduction = OCLMultiReductionKernel(np.float32,
neutral="0", reduce_expr="a+b",
map_exprs=["a[i]", "b[i]"],
arguments="__global float *a, __global float *b")
self._fill_propagator(self.n0)
def _mult_dn(self, buf, zPos, dn0):
if (self._is_subsampled and self.dn.dtype==Bpm3d._complex_type) or \
(not self._is_subsampled and self._buf_dn.dtype==Bpm3d._complex_type):
self._mult_dn_complex(buf, zPos, dn0)
else:
self._mult_dn_float(buf, zPos, dn0)
def _mult_dn_float(self, buf, zPos, dn0):
if self._is_subsampled:
self._kernel_mult_dn_img_float(self._queue, self.simul_xy, None,
buf.data, self._im_dn,
np.float32(self.k0*self.dz),
np.float32(dn0),
np.float32(zPos/(self.shape[-1]-1.)))
else:
Nx, Ny = self.shape[:2]
self._kernel_mult_dn_buf_float(self._queue, self.shape[:2], None,
buf.data, self._buf_dn.data,
np.float32(self.k0*self.dz),
np.float32(dn0),
np.int32(zPos*Nx*Ny))
def _mult_dn_complex(self, buf, zPos, dn0):
if self._is_subsampled:
self._kernel_mult_dn_img_complex(self._queue, self.simul_xy, None,
buf.data, self._im_dn,
np.float32(self.k0*self.dz),
np.float32(dn0),
np.float32(zPos/(self.shape[-1]-1.)))
else:
Nx, Ny = self.shape[:2]
self._kernel_mult_dn_buf_complex(self._queue, self.shape[:2], None,
buf.data, self._buf_dn.data,
np.float32(self.k0*self.dz),
np.float32(dn0),
np.int32(zPos*Nx*Ny))
def _mult_dn_local(self, buf, zPos, buf_g_sum, buf_dng_sum, buf_sum1, buf_sum2):
if (self._is_subsampled and self.dn.dtype==Bpm3d._complex_type) or \
(not self._is_subsampled and self._buf_dn.dtype==Bpm3d._complex_type):
self._mult_dn_complex_local(buf, zPos, buf_g_sum, buf_dng_sum, buf_sum1, buf_sum2)
else:
self._mult_dn_float_local(buf, zPos, buf_g_sum, buf_dng_sum, buf_sum1, buf_sum2)
def _mult_dn_float_local(self, buf, zPos, buf_g_sum, buf_dng_sum, buf_sum1, buf_sum2):
if self._is_subsampled:
self._kernel_mult_dn_img_float_local(self._queue, self.simul_xy, None,
buf.data, self._im_dn,
np.float32(self.k0*self.dz),
buf_g_sum.data,
buf_dng_sum.data,
np.float32(zPos/(self.shape[-1]-1.)),
buf_sum1.data,
buf_sum2.data,
)
else:
Nx, Ny = self.shape[:2]
self._kernel_mult_dn_buf_float_local(self._queue, self.shape[:2], None,
buf.data, self._buf_dn.data,
np.float32(self.k0*self.dz),
buf_g_sum.data,
buf_dng_sum.data,
np.int32(zPos*Nx*Ny),
buf_sum1.data,
buf_sum2.data, )
def _mult_dn_complex_local(self, buf, zPos, buf_g_sum, buf_dng_sum, buf_sum1, buf_sum2):
if self._is_subsampled:
self._kernel_mult_dn_img_complex_local(self._queue, self.simul_xy, None,
buf.data, self._im_dn,
np.float32(self.k0*self.dz),
buf_g_sum.data,
buf_dng_sum.data,
np.float32(zPos/(self.shape[-1]-1.)),
buf_sum1.data,
buf_sum2.data, )
else:
Nx, Ny = self.shape[:2]
self._kernel_mult_dn_buf_complex_local(self._queue, self.shape[:2], None,
buf.data, self._buf_dn.data,
np.float32(self.k0*self.dz),
buf_g_sum.data,
buf_dng_sum.data,
np.int32(zPos*Nx*Ny),
buf_sum1.data,
buf_sum2.data, )
def _copy_down_img(self, im, buf, offset):
Nx, Ny = self.shape[:2]
if buf.dtype.type==Bpm3d._complex_type:
self._kernel_im_to_buf_field(self._queue, (Nx, Ny), None,
im, buf.data,
np.int32(offset))
elif buf.dtype.type==Bpm3d._real_type:
self._kernel_im_to_buf_intensity(self._queue, (Nx, Ny), None,
im, buf.data,
np.int32(offset))
else:
assert False
def _copy_down_buf(self, buf1, buf2, offset):
Nx, Ny = self.shape[:2]
if buf2.dtype.type==Bpm3d._complex_type:
self._kernel_buf_to_buf_field(self._queue, (Nx*Ny,), None,
buf1.data, buf2.data,
np.int32(offset))
elif buf2.dtype.type==Bpm3d._real_type:
self._kernel_buf_to_buf_intensity(self._queue, (Nx*Ny,), None,
buf1.data, buf2.data,
np.int32(offset))
else:
assert False
def _fill_propagator(self, n0):
self._kernel_compute_propagator(self._queue, self.simul_xy, None,
self._buf_H.data,
n0, self.k0, self.dx, self.dy, self.dz)
def _fill_propagator_buf(self, n0, buf1, buf2):
"""the same as _fill_propagator(n0+buf1[0]/buf2[0])"""
self._kernel_compute_propagator_buf(self._queue, self.simul_xy, None,
self._buf_H.data,
n0, self.k0, self.dx, self.dy, self.dz,
buf1.data, buf2.data)
def _mult_complex(self, buf1, buf2):
"""buf1 *= buf2"""
Nx, Ny = self.simul_xy
self._kernel_mult_complex(self._queue, (Nx*Ny,), None,
buf1.data, buf2.data)
# the predefined initial fields
def u0_plane(self, phi=0):
return np.exp(1.j*phi)*
|
np.ones(self.simul_xy[::-1], np.complex64)
|
numpy.ones
|
# your ID here
import time
import sys
import argparse
from collections import defaultdict, Counter
import numpy as np
DNA = dict(A='T',T='A',G='C',C='G',N='N')
DEFAULT_COST = {(k1,k2): 1 if k2 != k1 else -1 for k1 in DNA for k2 in DNA}
DEFAULT_COST.update({('N',k): 0 for k in DNA})
DEFAULT_COST.update({(k,'N'): 0 for k in DNA})
GAP = '-'
def fasta_iter(file):
with open(file) as fasta:
hdr, seq = '', ''
for line in fasta:
line = line.strip()
if line.startswith('>'):
if seq: yield hdr, seq
hdr = line[1:]
seq = ''
else:
seq += line
if seq: yield hdr, seq # yield last sequence
def revcomp(seq):
return ''.join(DNA[c] for c in seq[::-1])
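# e.g. revcomp('ATGC') -> 'GCAT' (reverse the sequence, then complement each base)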
def align(a, b, pcost=None, gcost=1, overlap=True):
"""
:param a: a DNA string (with potentially unknown bases "N"s)
:param b: a DNA string (with potentially unknown bases "N"s)
:param pcost: a cost associated with every base pairing, default is 1 per mismatch (N's excluded) and -1 per match
:param gcost: gap cost
:param overlap: whether looking for an overlap alignment (i.e. no penalty for flanking gaps)
:return: the alignment score, and a gapped string pair
"""
if pcost is None: pcost = DEFAULT_COST
A, B = len(a) + 1, len(b) + 1
s, p = np.zeros((A, B)), {}
s[0, :], s[:, 0] =
|
np.arange(B)
|
numpy.arange
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
general image utils
"""
import cv2
import numpy as np
def get_transform(center, scale, res, ratio=200, rot=0):
"""
generate transform matrix
"""
h = ratio * scale
t =
|
np.zeros((3, 3))
|
numpy.zeros
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, OneHotEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from imblearn.over_sampling import RandomOverSampler
def preprocess_raw_data(test_size=0.2):
"""
Read the raw data file in ../data/raw/ and preprocess that data.
Columns
-------
LIMIT_BAL: Amount of given credit in NT dollars
SEX: Gender (1=male, 2=female)
EDUCATION: (1=graduate school, 2=university, 3=high school,
4=others, 5=unknown, 6=unknown)
MARRIAGE: Marital status (1=married, 2=single, 3=others)
AGE: Age in years
PAY_0: Repayment status in September, 2005 (-1=pay duly,
1=payment delay for one month,
2=payment delay for two months, ...
8=payment delay for eight months,
9=payment delay for nine months and above)
PAY_2: Repayment status in August, 2005 (scale same as above)
PAY_3: Repayment status in July, 2005 (scale same as above)
PAY_4: Repayment status in June, 2005 (scale same as above)
PAY_5: Repayment status in May, 2005 (scale same as above)
PAY_6: Repayment status in April, 2005 (scale same as above)
BILL_AMT1: Amount of bill statement in September, 2005 (NT dollar)
BILL_AMT2: Amount of bill statement in August, 2005 (NT dollar)
BILL_AMT3: Amount of bill statement in July, 2005 (NT dollar)
BILL_AMT4: Amount of bill statement in June, 2005 (NT dollar)
BILL_AMT5: Amount of bill statement in May, 2005 (NT dollar)
BILL_AMT6: Amount of bill statement in April, 2005 (NT dollar)
PAY_AMT1: Amount of previous payment in September, 2005 (NT dollar)
PAY_AMT2: Amount of previous payment in August, 2005 (NT dollar)
PAY_AMT3: Amount of previous payment in July, 2005 (NT dollar)
PAY_AMT4: Amount of previous payment in June, 2005 (NT dollar)
PAY_AMT5: Amount of previous payment in May, 2005 (NT dollar)
PAY_AMT6: Amount of previous payment in April, 2005 (NT dollar)
default.payment.next.month: Default payment (1=yes, 0=no)
Parameters
----------
test_size : float
    Fraction of the data reserved for the test split (default 0.2).
Returns
-------
None
Save train and test data in separate files in ../data/processed/
"""
print("\nPreprocessing\n")
# Raw data
dataframe = pd.read_excel("./data/raw/defaulted_cc-clients.xls")
# 0th row and column are dataframe headers and indices
data = dataframe.to_numpy()[1:, 1:]
N = data.shape[0]
# ----- Remove Outliers -----
# Identify indices with correct values, then reassign the data
# Gender [1, 2]
correct_gender = np.logical_or(data[:, 1] == 1, data[:, 1] == 2)
data = data[correct_gender]
# Education [1, 2, 3, 4]
correct_education = np.logical_and(data[:, 2] >= 1, data[:, 2] <= 4)
data = data[correct_education]
# Marital status [1, 2, 3]
correct_marrital_status = np.logical_and(data[:, 3] >= 1, data[:, 3] <= 3)
data = data[correct_marrital_status]
# Age, reasonable to assume in range [10, 110]
correct_age = np.logical_and(data[:, 4] > 10, data[:, 4] < 110)
data = data[correct_age]
# Repayment status for 6 previous months [-2, -1, ... , 8 , 9]
for i in range(5, 11):
repayment_status = np.logical_and(data[:, i] >= -2, data[:, i] <= 9)
data = data[repayment_status]
X_repayment_status_minus_two = np.zeros((data.shape[0], 6))
X_repayment_status_minus_one = np.zeros((data.shape[0], 6))
X_repayment_status_minus_two[data[:, 5:11] == -2] = 1.0
X_repayment_status_minus_one[data[:, 5:11] == -1] = 1.0
data[:, 5:11][np.logical_or(data[:, 5:11] == -2, data[:, 5:11] == -1)] = 0
print(f"\t{N -data.shape[0]} outliers removed.")
# ----- Split data set -----
X_categorical = data[:, 1:4]
X_continuous = np.concatenate((data[:, 0:1], data[:, 4:-1]), axis=1)
y = data[:, -1]
# ----- One Hot Encoding for categorical columns -----
# categories = [[1, 2], [1, 2, 3, 4], [1, 2, 3]]
enc = OneHotEncoder(handle_unknown="error", categories="auto")
preprocessor = ColumnTransformer(transformers=[("onehot", enc, [0, 1, 2])])
X_one_hot_encoded = preprocessor.fit_transform(X_categorical)
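# X_one_hot_encoded stacks the one-hot encodings of the three categorical columns
# (SEX, EDUCATION, MARRIAGE) described in the docstring above.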
X = np.concatenate(
(
X_one_hot_encoded,
X_repayment_status_minus_two,
X_repayment_status_minus_one,
X_continuous,
),
axis=1,
)
p = X_one_hot_encoded.shape[1]
p += X_repayment_status_minus_one.shape[1]
p += X_repayment_status_minus_two.shape[
1
] # index that separates OneHot and continuous columns
# ----- Split of training and test -----
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
# ----- Scaling of continuous columns -----
scl = MinMaxScaler(feature_range=(0.0, 1.0))
# --- Scale both train/test according to training set:
scl.fit(X_train[:, p:].astype("float64")) # only continuous columns
X_train[:, p:] = scl.transform(X_train[:, p:].astype("float64"))
X_test[:, p:] = scl.transform(X_test[:, p:].astype("float64"))
# ----- Upscale training data -----
# fraction (yi=1 / yi=0) = 1.0, this is achieved by
# randomly resampling the rows with least occurence
upscale = RandomOverSampler(sampling_strategy=1.0)
X_train, y_train = upscale.fit_resample(X_train, y_train.astype("int"))
# ----- Ensure that the datatype is float -----
X_train = X_train.astype("float64")
X_test = X_test.astype("float64")
y_train = y_train.astype("float64")
y_test = y_test.astype("float64")
# ----- Save new files-----
|
np.savez("./data/processed/train_data.npz", X=X_train, y=y_train)
|
numpy.savez
|
import math
import numpy as np
import scipy.misc as msc
import scipy.ndimage as img
from PIL import Image
import matplotlib.pyplot as plt
def calculate_product_vector(initial_vector):
result = np.array([1])
for i in initial_vector:
second_half = i * result
result = np.append(result, second_half)
return result
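# Illustrative trace: calculate_product_vector(np.array([2, 3])) grows
# [1] -> [1, 2] -> [1, 2, 3, 6], i.e. the products over all subsets of the input.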
if __name__ == "__main__":
rule110 = np.array([-1, 1, 1, -1, 1, 1, 1, -1])
rule126 =
|
np.array([-1, 1, 1, 1, 1, 1, 1, -1])
|
numpy.array
|
"""
Copyright 2020 inzapp Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"),
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import sys
from glob import glob
from time import time
import cv2
import numpy as np
import tensorflow as tf
from sbd_box_colors import colors
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
img_type = cv2.IMREAD_GRAYSCALE
train_image_path = r'.'
test_img_path = r'.'
sys.setrecursionlimit(100000)
lr = 0.01
momentum = 0.95
batch_size = 2
epoch = 50
validation_ratio = 0.2
input_shape = (256, 256)
output_shape = (32, 32)
bbox_percentage_threshold = 0.25
font_scale = 0.4
img_channels = 3 if img_type == cv2.IMREAD_COLOR else 1
live_view_previous_time = time()
total_image_paths = []
total_image_count = 0
class_names = []
class_count = 0
x_min = 0
y_min = 0
x_max = 0
y_max = 0
class SbdDataGenerator(tf.keras.utils.Sequence):
"""
Custom data generator for SBD model.
Usage:
generator = SbdDataGenerator(image_paths=train_image_paths, augmentation=True)
"""
def __init__(self, image_paths, augmentation):
self.init_label()
self.image_paths = image_paths
self.augmentation = augmentation
self.random_indexes = np.arange(len(self.image_paths))
np.random.shuffle(self.random_indexes)
def __getitem__(self, index):
global img_type, batch_size, input_shape, img_channels, class_count
batch_x = []
batch_y = []
start_index = index * batch_size
for i in range(start_index, start_index + batch_size):
cur_img_path = self.image_paths[self.random_indexes[i]]
x = cv2.imread(cur_img_path, img_type)
if x.shape[1] > input_shape[1] or x.shape[0] > input_shape[0]:
x = cv2.resize(x, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA)
else:
x = cv2.resize(x, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_LINEAR)
with open(f'{cur_img_path[:-4]}.txt', mode='rt') as file:
label_lines = file.readlines()
y = [np.zeros(input_shape, dtype=np.uint8) for _ in range(class_count)]
for label_line in label_lines:
class_index, cx, cy, w, h = list(map(float, label_line.split(' ')))
x1, y1, x2, y2 = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
x1, y1, x2, y2 = int(x1 * input_shape[1]), int(y1 * input_shape[0]), int(x2 * input_shape[1]), int(y2 * input_shape[0])
cv2.rectangle(y[int(class_index)], (x1, y1), (x2, y2), (255, 255, 255), -1)
if self.augmentation:
if random.choice([0, 1]) == 1:
if img_channels == 1:
x = cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
hsv = cv2.cvtColor(x, cv2.COLOR_BGR2HSV)
hsv = np.asarray(hsv).astype('float32')
hsv = np.moveaxis(hsv, -1, 0)
if random.choice([0, 1]) == 1:
hsv[1] *= random.uniform(0.25, 1.75)
if random.choice([0, 1]) == 1:
hsv[2] *= random.uniform(0.75, 1.25)
hsv = np.moveaxis(hsv, 0, -1)
hsv = np.clip(hsv, 0, 255).astype('uint8')
x = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
x = np.asarray(x).astype('float32')
x = np.clip(x, 0, 255).astype('uint8')
if img_channels == 1:
x = cv2.cvtColor(x, cv2.COLOR_BGR2GRAY)
if random.choice([0, 1]) == 1:
top_padding = random.randrange(0, int(input_shape[0] * 0.15), 1)
bottom_padding = random.randrange(0, int(input_shape[0] * 0.15), 1)
left_padding = random.randrange(0, int(input_shape[1] * 0.15), 1)
right_padding = random.randrange(0, int(input_shape[1] * 0.15), 1)
x = cv2.copyMakeBorder(
src=x,
top=top_padding,
bottom=bottom_padding,
left=left_padding,
right=right_padding,
borderType=cv2.BORDER_CONSTANT,
value=0
)
x = cv2.resize(x, (input_shape[1], input_shape[0]), interpolation=cv2.INTER_AREA)
for j in range(len(y)):
y[j] = cv2.copyMakeBorder(
src=y[j],
top=top_padding,
bottom=bottom_padding,
left=left_padding,
right=right_padding,
borderType=cv2.BORDER_CONSTANT,
value=0
)
if random.choice([0, 1]) == 1:
value = random.choice([0.0, 127.0, 255.0])
x += ((value - np.asarray(x)) * 0.1).astype('uint8')
for j in range(len(y)):
y[j] = self.compress(y[j])
x = np.asarray(x).reshape((input_shape[0], input_shape[1], img_channels)).astype('float32') / 255.
y = np.moveaxis(np.asarray(y), 0, -1)
y = np.asarray(y).reshape((output_shape[0], output_shape[1], class_count)).astype('float32')
batch_x.append(x)
batch_y.append(y)
batch_x = np.asarray(batch_x)
batch_y = np.asarray(batch_y)
return batch_x, batch_y
def __len__(self):
global batch_size
return int(np.floor(len(self.image_paths) / batch_size))
def on_epoch_end(self):
np.random.shuffle(self.random_indexes)
@staticmethod
def compress(y):
"""
Compress a binary SBD label mask into per-grid-cell scores between 0 and 1.
:param y: masked sbd label to be compressed.
"""
global input_shape, output_shape
assert input_shape[1] % output_shape[1] == 0
assert input_shape[0] % output_shape[0] == 0
grid_width = int(input_shape[1] / output_shape[1])
grid_height = int(input_shape[0] / output_shape[0])
grid_area = float(grid_width * grid_height)
compressed_y = []
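# Each output cell stores the fraction of labelled (non-zero) pixels inside the
# corresponding grid_width x grid_height patch of the input mask.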
for grid_y in range(0, input_shape[0], grid_height):
row = []
for grid_x in range(0, input_shape[1], grid_width):
grid = y[grid_y:grid_y + grid_height, grid_x:grid_x + grid_width]
score = cv2.countNonZero(grid) / grid_area
row.append(score)
compressed_y.append(row)
return
|
np.asarray(compressed_y)
|
numpy.asarray
|
# Modules normally used
import numpy as np
import cv2
def applyFilter(img, filter):
newimg = np.float64(img.copy())
rows, columns = img.shape
f_rows, f_columns = filter.shape
f_rows_half = np.uint8(f_rows / 2)
f_columns_half = np.uint8(f_columns / 2)
for x in range(0, rows):
for y in range(0, columns):
submat = img[max(0, x-f_rows_half):min(rows, x+f_rows_half+1), max(0, y-f_columns_half):min(columns, y+f_columns_half+1)]
f_submat = filter[max(f_rows_half-x, 0):f_rows-max(0, x+f_rows_half-rows+1), max(f_columns_half-y, 0):f_columns-max(0, y+f_columns_half-columns+1)]
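# submat / f_submat are the overlapping windows of the image and the filter,
# clipped at the borders so the effective kernel shrinks near the image edges.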
newimg[x, y] =
|
np.sum(submat*f_submat)
|
numpy.sum
|
"""Functions that can be used to visualize bed files and annotate TADs"""
# own libaries
# from .classifier import Classifier
# third party libraries
import numpy as np
import pathlib
import pathlib
import pandas as pd
from sklearn.metrics import confusion_matrix
from itertools import permutations
from sklearn import preprocessing
from sklearn import impute
# plotting
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.ticker import StrMethodFormatter
from matplotlib.font_manager import fontManager, FontProperties
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
# if not title:
# if normalize:
# title = 'Normalized confusion matrix'
# else:
# title = 'Confusion matrix, without normalization'
# Compute confusion matrix
plt.rcParams.update({'font.size': 25, 'axes.labelsize': 25,'xtick.labelsize':25,'ytick.labelsize':25})
cm = confusion_matrix(y_true, y_pred)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig, ax = plt.subplots(figsize=(12,10))
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(xticks=
|
np.arange(cm.shape[1])
|
numpy.arange
|
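A hedged sketch of the completed ax.set(...) call: np.arange over the confusion-matrix shape simply yields one tick position per class; the 3-class matrix and labels below are invented for illustration.
import numpy as np
import matplotlib.pyplot as plt

# illustrative only: tick positions for a hypothetical 3x3 confusion matrix
cm = np.array([[5, 1, 0],
               [0, 4, 2],
               [1, 0, 6]])
fig, ax = plt.subplots()
ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
ax.set(xticks=np.arange(cm.shape[1]),   # the completion shown in the row above
       yticks=np.arange(cm.shape[0]),
       xticklabels=['a', 'b', 'c'],
       yticklabels=['a', 'b', 'c'])
plt.close(fig)  # shape check only; nothing is displayed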
import os
import pysam
import numpy
import glob
def write_css(outfile):
with open(outfile, 'w') as o:
o.write('''@import url(https://fonts.googleapis.com/css?family=Poppins:300,400,700);
:root {
--blue: #007bff;
--indigo: #6610f2;
--purple: #6f42c1;
--pink: #e83e8c;
--red: #dc3545;
--orange: #fd7e14;
--yellow: #ffc107;
--green: #28a745;
--teal: #20c997;
--cyan: #17a2b8;
--white: #fff;
--gray: #6c757d;
--gray-dark: #343a40;
--primary: #512479;
--secondary: #333;
--success: #28a745;
--info: #17a2b8;
--warning: #ffc107;
--danger: #dc3545;
--light: #f8f9fa;
--dark: #343a40;
--breakpoint-xs: 0;
--breakpoint-sm: 576px;
--breakpoint-md: 768px;
--breakpoint-lg: 992px;
--breakpoint-xl: 1200px;
--font-family-sans-serif: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";
--font-family-monospace: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; }
*,
*::before,
*::after {
box-sizing: border-box; }
.toggle-vis {
color: green !important;
cursor: pointer;
}
.greened {
color: red !important;
}
html {
font-family: sans-serif;
line-height: 1.15;
-webkit-text-size-adjust: 100%;
-ms-text-size-adjust: 100%;
-ms-overflow-style: scrollbar;
-webkit-tap-highlight-color: rgba(0, 0, 0, 0); }
@-ms-viewport {
width: device-width; }
article, aside, dialog, figcaption, figure, footer, header, hgroup, main, nav, section {
display: block; }
body {
margin: 0;
font-family: "Poppins", sans-serif;
font-size: 1rem;
font-weight: 300;
line-height: 2.4;
color: #999;
text-align: left;
background-color: #fff; }
[tabindex="-1"]:focus {
outline: 0 !important; }
hr {
box-sizing: content-box;
height: 0;
overflow: visible; }
h1, h2, h3, h4, h5, h6 {
margin-top: 0;
margin-bottom: 0.5rem; }
p {
margin-top: 0;
margin-bottom: 1rem; }
abbr[title],
abbr[data-original-title] {
text-decoration: underline;
text-decoration: underline dotted;
cursor: help;
border-bottom: 0; }
address {
margin-bottom: 1rem;
font-style: normal;
line-height: inherit; }
ol,
ul,
dl {
margin-top: 0;
margin-bottom: 1rem; }
ol ol,
ul ul,
ol ul,
ul ol {
margin-bottom: 0; }
dt {
font-weight: 700; }
dd {
margin-bottom: .5rem;
margin-left: 0; }
blockquote {
margin: 0 0 1rem; }
dfn {
font-style: italic; }
b,
strong {
font-weight: bolder; }
small {
font-size: 80%; }
sub,
sup {
position: relative;
font-size: 75%;
line-height: 0;
vertical-align: baseline; }
sub {
bottom: -.25em; }
sup {
top: -.5em; }
a {
color: #512479;
text-decoration: none;
background-color: transparent;
-webkit-text-decoration-skip: objects; }
a:hover {
color: #523047;
text-decoration: underline; }
a:not([href]):not([tabindex]) {
color: inherit;
text-decoration: none; }
a:not([href]):not([tabindex]):hover, a:not([href]):not([tabindex]):focus {
color: inherit;
text-decoration: none; }
a:not([href]):not([tabindex]):focus {
outline: 0; }
pre,
code,
kbd,
samp {
font-family: monospace, monospace;
font-size: 1em; }
pre {
margin-top: 0;
margin-bottom: 1rem;
overflow: auto;
-ms-overflow-style: scrollbar; }
figure {
margin: 0 0 1rem; }
img {
vertical-align: middle;
border-style: none; }
svg:not(:root) {
overflow: hidden; }
table {
border-collapse: collapse; }
caption {
padding-top: 0.75rem;
padding-bottom: 0.75rem;
color: #6c757d;
text-align: left;
caption-side: bottom; }
th {
text-align: inherit; }
label {
display: inline-block;
margin-bottom: .5rem; }
button {
border-radius: 0; }
button:focus {
outline: 1px dotted;
outline: 5px auto -webkit-focus-ring-color; }
input,
button,
select,
optgroup,
textarea {
margin: 0;
font-family: inherit;
font-size: inherit;
line-height: inherit; }
button,
input {
overflow: visible; }
button,
select {
text-transform: none; }
button,
html [type="button"],
[type="reset"],
[type="submit"] {
-webkit-appearance: button; }
button::-moz-focus-inner,
[type="button"]::-moz-focus-inner,
[type="reset"]::-moz-focus-inner,
[type="submit"]::-moz-focus-inner {
padding: 0;
border-style: none; }
input[type="radio"],
input[type="checkbox"] {
box-sizing: border-box;
padding: 0; }
input[type="date"],
input[type="time"],
input[type="datetime-local"],
input[type="month"] {
-webkit-appearance: listbox; }
textarea {
overflow: auto;
resize: vertical; }
fieldset {
min-width: 0;
padding: 0;
margin: 0;
border: 0; }
legend {
display: block;
width: 100%;
max-width: 100%;
padding: 0;
margin-bottom: .5rem;
font-size: 1.5rem;
line-height: inherit;
color: inherit;
white-space: normal; }
progress {
vertical-align: baseline; }
[type="number"]::-webkit-inner-spin-button,
[type="number"]::-webkit-outer-spin-button {
height: auto; }
[type="search"] {
outline-offset: -2px;
-webkit-appearance: none; }
[type="search"]::-webkit-search-cancel-button,
[type="search"]::-webkit-search-decoration {
-webkit-appearance: none; }
::-webkit-file-upload-button {
font: inherit;
-webkit-appearance: button; }
output {
display: inline-block; }
summary {
display: list-item;
cursor: pointer; }
template {
display: none; }
[hidden] {
display: none !important; }
h1, h2, h3, h4, h5, h6,
.h1, .h2, .h3, .h4, .h5, .h6 {
margin-bottom: 0.5rem;
font-family: inherit;
font-weight: 500;
line-height: 1.2;
color: inherit; }
h1, .h1 {
font-size: 2.5rem; }
h2, .h2 {
font-size: 2rem; }
h3, .h3 {
font-size: 1.75rem; }
h4, .h4 {
font-size: 1.5rem; }
h5, .h5 {
font-size: 1.25rem; }
h6, .h6 {
font-size: 1rem; }
.lead {
font-size: 1.25rem;
font-weight: 300; }
.display-1 {
font-size: 6rem;
font-weight: 300;
line-height: 1.2; }
.display-2 {
font-size: 5.5rem;
font-weight: 300;
line-height: 1.2; }
.display-3 {
font-size: 4.5rem;
font-weight: 300;
line-height: 1.2; }
.display-4 {
font-size: 3.5rem;
font-weight: 300;
line-height: 1.2; }
hr {
margin-top: 1rem;
margin-bottom: 1rem;
border: 0;
border-top: 1px solid rgba(0, 0, 0, 0.1); }
small,
.small {
font-size: 80%;
font-weight: 400; }
mark,
.mark {
padding: 0.2em;
background-color: #fcf8e3; }
.list-unstyled {
padding-left: 0;
list-style: none; }
.list-inline {
padding-left: 0;
list-style: none; }
.list-inline-item {
display: inline-block; }
.list-inline-item:not(:last-child) {
margin-right: 0.5rem; }
.initialism {
font-size: 90%;
text-transform: uppercase; }
.blockquote {
margin-bottom: 1rem;
font-size: 1.25rem; }
.blockquote-footer {
display: block;
font-size: 80%;
color: #6c757d; }
.blockquote-footer::before {
content: "\2014 \00A0"; }
.img-fluid {
max-width: 100%;
height: auto; }
.img-thumbnail {
padding: 0.25rem;
background-color: #fff;
border: 1px solid #dee2e6;
border-radius: 0px;
max-width: 100%;
height: auto; }
.figure {
display: inline-block; }
.figure-img {
margin-bottom: 0.5rem;
line-height: 1; }
.figure-caption {
font-size: 90%;
color: #6c757d; }
code,
kbd,
pre,
samp {
font-family: SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace; }
code {
font-size: 87.5%;
color: #e83e8c;
word-break: break-word; }
a > code {
color: inherit; }
kbd {
padding: 0.2rem 0.4rem;
font-size: 87.5%;
color: #fff;
background-color: #212529;
border-radius: 0px; }
kbd kbd {
padding: 0;
font-size: 100%;
font-weight: 700; }
pre {
display: block;
font-size: 87.5%;
color: #212529; }
pre code {
font-size: inherit;
color: inherit;
word-break: normal; }
.pre-scrollable {
max-height: 340px;
overflow-y: scroll; }
.container {
width: 100%;
padding-right: 15px;
padding-left: 15px;
margin-right: auto;
margin-left: auto; }
@media (min-width: 576px) {
.container {
max-width: 740px; } }
@media (min-width: 768px) {
.container {
max-width: 960px; } }
@media (min-width: 992px) {
.container {
max-width: 1060px; } }
@media (min-width: 1200px) {
.container {
max-width: 1280px; } }
.container-fluid {
width: 100%;
padding-right: 15px;
padding-left: 15px;
margin-right: auto;
margin-left: auto; }
.row {
display: flex;
flex-wrap: wrap;
margin-right: -15px;
margin-left: -15px; }
.no-gutters {
margin-right: 0;
margin-left: 0; }
.no-gutters > .col,
.no-gutters > [class*="col-"] {
padding-right: 0;
padding-left: 0; }
.col-1, .col-2, .col-3, .col-4, .col-5, .col-6, .col-7, .col-8, .col-9, .col-10, .col-11, .col-12, .col,
.col-auto, .col-sm-1, .col-sm-2, .col-sm-3, .col-sm-4, .col-sm-5, .col-sm-6, .col-sm-7, .col-sm-8, .col-sm-9, .col-sm-10, .col-sm-11, .col-sm-12, .col-sm,
.col-sm-auto, .col-md-1, .col-md-2, .col-md-3, .col-md-4, .col-md-5, .col-md-6, .col-md-7, .col-md-8, .col-md-9, .col-md-10, .col-md-11, .col-md-12, .col-md,
.col-md-auto, .col-lg-1, .col-lg-2, .col-lg-3, .col-lg-4, .col-lg-5, .col-lg-6, .col-lg-7, .col-lg-8, .col-lg-9, .col-lg-10, .col-lg-11, .col-lg-12, .col-lg,
.col-lg-auto, .col-xl-1, .col-xl-2, .col-xl-3, .col-xl-4, .col-xl-5, .col-xl-6, .col-xl-7, .col-xl-8, .col-xl-9, .col-xl-10, .col-xl-11, .col-xl-12, .col-xl,
.col-xl-auto {
position: relative;
width: 100%;
min-height: 1px;
padding-right: 15px;
padding-left: 15px; }
.col {
flex-basis: 0;
flex-grow: 1;
max-width: 100%; }
.col-auto {
flex: 0 0 auto;
width: auto;
max-width: none; }
.col-1 {
flex: 0 0 8.3333333333%;
max-width: 8.3333333333%; }
.col-2 {
flex: 0 0 16.6666666667%;
max-width: 16.6666666667%; }
.col-3 {
flex: 0 0 25%;
max-width: 25%; }
.col-4 {
flex: 0 0 33.3333333333%;
max-width: 33.3333333333%; }
.col-5 {
flex: 0 0 41.6666666667%;
max-width: 41.6666666667%; }
.col-6 {
flex: 0 0 50%;
max-width: 50%; }
.col-7 {
flex: 0 0 58.3333333333%;
max-width: 58.3333333333%; }
.col-8 {
flex: 0 0 66.6666666667%;
max-width: 66.6666666667%; }
.col-9 {
flex: 0 0 75%;
max-width: 75%; }
.col-10 {
flex: 0 0 83.3333333333%;
max-width: 83.3333333333%; }
.col-11 {
flex: 0 0 91.6666666667%;
max-width: 91.6666666667%; }
.col-12 {
flex: 0 0 100%;
max-width: 100%; }
.order-first {
order: -1; }
.order-last {
order: 13; }
.order-0 {
order: 0; }
.order-1 {
order: 1; }
.order-2 {
order: 2; }
.order-3 {
order: 3; }
.order-4 {
order: 4; }
.order-5 {
order: 5; }
.order-6 {
order: 6; }
.order-7 {
order: 7; }
.order-8 {
order: 8; }
.order-9 {
order: 9; }
.order-10 {
order: 10; }
.order-11 {
order: 11; }
.order-12 {
order: 12; }
.offset-1 {
margin-left: 8.3333333333%; }
.offset-2 {
margin-left: 16.6666666667%; }
.offset-3 {
margin-left: 25%; }
.offset-4 {
margin-left: 33.3333333333%; }
.offset-5 {
margin-left: 41.6666666667%; }
.offset-6 {
margin-left: 50%; }
.offset-7 {
margin-left: 58.3333333333%; }
.offset-8 {
margin-left: 66.6666666667%; }
.offset-9 {
margin-left: 75%; }
.offset-10 {
margin-left: 83.3333333333%; }
.offset-11 {
margin-left: 91.6666666667%; }
@media (min-width: 576px) {
.col-sm {
flex-basis: 0;
flex-grow: 1;
max-width: 100%; }
.col-sm-auto {
flex: 0 0 auto;
width: auto;
max-width: none; }
.col-sm-1 {
flex: 0 0 8.3333333333%;
max-width: 8.3333333333%; }
.col-sm-2 {
flex: 0 0 16.6666666667%;
max-width: 16.6666666667%; }
.col-sm-3 {
flex: 0 0 25%;
max-width: 25%; }
.col-sm-4 {
flex: 0 0 33.3333333333%;
max-width: 33.3333333333%; }
.col-sm-5 {
flex: 0 0 41.6666666667%;
max-width: 41.6666666667%; }
.col-sm-6 {
flex: 0 0 50%;
max-width: 50%; }
.col-sm-7 {
flex: 0 0 58.3333333333%;
max-width: 58.3333333333%; }
.col-sm-8 {
flex: 0 0 66.6666666667%;
max-width: 66.6666666667%; }
.col-sm-9 {
flex: 0 0 75%;
max-width: 75%; }
.col-sm-10 {
flex: 0 0 83.3333333333%;
max-width: 83.3333333333%; }
.col-sm-11 {
flex: 0 0 91.6666666667%;
max-width: 91.6666666667%; }
.col-sm-12 {
flex: 0 0 100%;
max-width: 100%; }
.order-sm-first {
order: -1; }
.order-sm-last {
order: 13; }
.order-sm-0 {
order: 0; }
.order-sm-1 {
order: 1; }
.order-sm-2 {
order: 2; }
.order-sm-3 {
order: 3; }
.order-sm-4 {
order: 4; }
.order-sm-5 {
order: 5; }
.order-sm-6 {
order: 6; }
.order-sm-7 {
order: 7; }
.order-sm-8 {
order: 8; }
.order-sm-9 {
order: 9; }
.order-sm-10 {
order: 10; }
.order-sm-11 {
order: 11; }
.order-sm-12 {
order: 12; }
.offset-sm-0 {
margin-left: 0; }
.offset-sm-1 {
margin-left: 8.3333333333%; }
.offset-sm-2 {
margin-left: 16.6666666667%; }
.offset-sm-3 {
margin-left: 25%; }
.offset-sm-4 {
margin-left: 33.3333333333%; }
.offset-sm-5 {
margin-left: 41.6666666667%; }
.offset-sm-6 {
margin-left: 50%; }
.offset-sm-7 {
margin-left: 58.3333333333%; }
.offset-sm-8 {
margin-left: 66.6666666667%; }
.offset-sm-9 {
margin-left: 75%; }
.offset-sm-10 {
margin-left: 83.3333333333%; }
.offset-sm-11 {
margin-left: 91.6666666667%; } }
@media (min-width: 768px) {
.col-md {
flex-basis: 0;
flex-grow: 1;
max-width: 100%; }
.col-md-auto {
flex: 0 0 auto;
width: auto;
max-width: none; }
.col-md-1 {
flex: 0 0 8.3333333333%;
max-width: 8.3333333333%; }
.col-md-2 {
flex: 0 0 16.6666666667%;
max-width: 16.6666666667%; }
.col-md-3 {
flex: 0 0 25%;
max-width: 25%; }
.col-md-4 {
flex: 0 0 33.3333333333%;
max-width: 33.3333333333%; }
.col-md-5 {
flex: 0 0 41.6666666667%;
max-width: 41.6666666667%; }
.col-md-6 {
flex: 0 0 50%;
max-width: 50%; }
.col-md-7 {
flex: 0 0 58.3333333333%;
max-width: 58.3333333333%; }
.col-md-8 {
flex: 0 0 66.6666666667%;
max-width: 66.6666666667%; }
.col-md-9 {
flex: 0 0 75%;
max-width: 75%; }
.col-md-10 {
flex: 0 0 83.3333333333%;
max-width: 83.3333333333%; }
.col-md-11 {
flex: 0 0 91.6666666667%;
max-width: 91.6666666667%; }
.col-md-12 {
flex: 0 0 100%;
max-width: 100%; }
.order-md-first {
order: -1; }
.order-md-last {
order: 13; }
.order-md-0 {
order: 0; }
.order-md-1 {
order: 1; }
.order-md-2 {
order: 2; }
.order-md-3 {
order: 3; }
.order-md-4 {
order: 4; }
.order-md-5 {
order: 5; }
.order-md-6 {
order: 6; }
.order-md-7 {
order: 7; }
.order-md-8 {
order: 8; }
.order-md-9 {
order: 9; }
.order-md-10 {
order: 10; }
.order-md-11 {
order: 11; }
.order-md-12 {
order: 12; }
.offset-md-0 {
margin-left: 0; }
.offset-md-1 {
margin-left: 8.3333333333%; }
.offset-md-2 {
margin-left: 16.6666666667%; }
.offset-md-3 {
margin-left: 25%; }
.offset-md-4 {
margin-left: 33.3333333333%; }
.offset-md-5 {
margin-left: 41.6666666667%; }
.offset-md-6 {
margin-left: 50%; }
.offset-md-7 {
margin-left: 58.3333333333%; }
.offset-md-8 {
margin-left: 66.6666666667%; }
.offset-md-9 {
margin-left: 75%; }
.offset-md-10 {
margin-left: 83.3333333333%; }
.offset-md-11 {
margin-left: 91.6666666667%; } }
@media (min-width: 992px) {
.col-lg {
flex-basis: 0;
flex-grow: 1;
max-width: 100%; }
.col-lg-auto {
flex: 0 0 auto;
width: auto;
max-width: none; }
.col-lg-1 {
flex: 0 0 8.3333333333%;
max-width: 8.3333333333%; }
.col-lg-2 {
flex: 0 0 16.6666666667%;
max-width: 16.6666666667%; }
.col-lg-3 {
flex: 0 0 25%;
max-width: 25%; }
.col-lg-4 {
flex: 0 0 33.3333333333%;
max-width: 33.3333333333%; }
.col-lg-5 {
flex: 0 0 41.6666666667%;
max-width: 41.6666666667%; }
.col-lg-6 {
flex: 0 0 50%;
max-width: 50%; }
.col-lg-7 {
flex: 0 0 58.3333333333%;
max-width: 58.3333333333%; }
.col-lg-8 {
flex: 0 0 66.6666666667%;
max-width: 66.6666666667%; }
.col-lg-9 {
flex: 0 0 75%;
max-width: 75%; }
.col-lg-10 {
flex: 0 0 83.3333333333%;
max-width: 83.3333333333%; }
.col-lg-11 {
flex: 0 0 91.6666666667%;
max-width: 91.6666666667%; }
.col-lg-12 {
flex: 0 0 100%;
max-width: 100%; }
.order-lg-first {
order: -1; }
.order-lg-last {
order: 13; }
.order-lg-0 {
order: 0; }
.order-lg-1 {
order: 1; }
.order-lg-2 {
order: 2; }
.order-lg-3 {
order: 3; }
.order-lg-4 {
order: 4; }
.order-lg-5 {
order: 5; }
.order-lg-6 {
order: 6; }
.order-lg-7 {
order: 7; }
.order-lg-8 {
order: 8; }
.order-lg-9 {
order: 9; }
.order-lg-10 {
order: 10; }
.order-lg-11 {
order: 11; }
.order-lg-12 {
order: 12; }
.offset-lg-0 {
margin-left: 0; }
.offset-lg-1 {
margin-left: 8.3333333333%; }
.offset-lg-2 {
margin-left: 16.6666666667%; }
.offset-lg-3 {
margin-left: 25%; }
.offset-lg-4 {
margin-left: 33.3333333333%; }
.offset-lg-5 {
margin-left: 41.6666666667%; }
.offset-lg-6 {
margin-left: 50%; }
.offset-lg-7 {
margin-left: 58.3333333333%; }
.offset-lg-8 {
margin-left: 66.6666666667%; }
.offset-lg-9 {
margin-left: 75%; }
.offset-lg-10 {
margin-left: 83.3333333333%; }
.offset-lg-11 {
margin-left: 91.6666666667%; } }
@media (min-width: 1200px) {
.col-xl {
flex-basis: 0;
flex-grow: 1;
max-width: 100%; }
.col-xl-auto {
flex: 0 0 auto;
width: auto;
max-width: none; }
.col-xl-1 {
flex: 0 0 8.3333333333%;
max-width: 8.3333333333%; }
.col-xl-2 {
flex: 0 0 16.6666666667%;
max-width: 16.6666666667%; }
.col-xl-3 {
flex: 0 0 25%;
max-width: 25%; }
.col-xl-4 {
flex: 0 0 33.3333333333%;
max-width: 33.3333333333%; }
.col-xl-5 {
flex: 0 0 41.6666666667%;
max-width: 41.6666666667%; }
.col-xl-6 {
flex: 0 0 50%;
max-width: 50%; }
.col-xl-7 {
flex: 0 0 58.3333333333%;
max-width: 58.3333333333%; }
.col-xl-8 {
flex: 0 0 66.6666666667%;
max-width: 66.6666666667%; }
.col-xl-9 {
flex: 0 0 75%;
max-width: 75%; }
.col-xl-10 {
flex: 0 0 83.3333333333%;
max-width: 83.3333333333%; }
.col-xl-11 {
flex: 0 0 91.6666666667%;
max-width: 91.6666666667%; }
.col-xl-12 {
flex: 0 0 100%;
max-width: 100%; }
.order-xl-first {
order: -1; }
.order-xl-last {
order: 13; }
.order-xl-0 {
order: 0; }
.order-xl-1 {
order: 1; }
.order-xl-2 {
order: 2; }
.order-xl-3 {
order: 3; }
.order-xl-4 {
order: 4; }
.order-xl-5 {
order: 5; }
.order-xl-6 {
order: 6; }
.order-xl-7 {
order: 7; }
.order-xl-8 {
order: 8; }
.order-xl-9 {
order: 9; }
.order-xl-10 {
order: 10; }
.order-xl-11 {
order: 11; }
.order-xl-12 {
order: 12; }
.offset-xl-0 {
margin-left: 0; }
.offset-xl-1 {
margin-left: 8.3333333333%; }
.offset-xl-2 {
margin-left: 16.6666666667%; }
.offset-xl-3 {
margin-left: 25%; }
.offset-xl-4 {
margin-left: 33.3333333333%; }
.offset-xl-5 {
margin-left: 41.6666666667%; }
.offset-xl-6 {
margin-left: 50%; }
.offset-xl-7 {
margin-left: 58.3333333333%; }
.offset-xl-8 {
margin-left: 66.6666666667%; }
.offset-xl-9 {
margin-left: 75%; }
.offset-xl-10 {
margin-left: 83.3333333333%; }
.offset-xl-11 {
margin-left: 91.6666666667%; } }
.table {
width: 100%;
max-width: 100%;
margin-bottom: 1rem;
font-size: 14px;
color: #000000;
background-color: transparent; }
.table th,
.table td {
padding: 0.75rem;
vertical-align: top;
border-top: 1px solid #dee2e6; }
.table thead th {
vertical-align: bottom;
border-bottom: 2px solid #dee2e6; }
.table tbody + tbody {
border-top: 2px solid #dee2e6; }
.table .table {
background-color: #fff; }
tr:hover {background-color: #f0f0f0;}
@media (max-width: 575.98px) {
.table-responsive-sm {
display: block;
width: 100%;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
-ms-overflow-style: -ms-autohiding-scrollbar; }
.table-responsive-sm > .table-bordered {
border: 0; } }
@media (max-width: 767.98px) {
.table-responsive-md {
display: block;
width: 100%;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
-ms-overflow-style: -ms-autohiding-scrollbar; }
.table-responsive-md > .table-bordered {
border: 0; } }
@media (max-width: 991.98px) {
.table-responsive-lg {
display: block;
width: 100%;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
-ms-overflow-style: -ms-autohiding-scrollbar; }
.table-responsive-lg > .table-bordered {
border: 0; } }
@media (max-width: 1199.98px) {
.table-responsive-xl {
display: block;
width: 100%;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
-ms-overflow-style: -ms-autohiding-scrollbar; }
.table-responsive-xl > .table-bordered {
border: 0; } }
.table-responsive {
display: block;
width: 100%;
overflow-x: auto;
-webkit-overflow-scrolling: touch;
-ms-overflow-style: -ms-autohiding-scrollbar; }
.table-responsive > .table-bordered {
border: 0; }
.form-control {
display: block;
width: 100%;
padding: 0.375rem 0.75rem;
font-size: 1rem;
line-height: 2.4;
color: #495057;
background-color: #fff;
background-clip: padding-box;
border: 1px solid #ced4da;
border-radius: 0px;
transition: border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; }
.form-control::-ms-expand {
background-color: transparent;
border: 0; }
.form-control:focus {
color: #495057;
background-color: #fff;
border-color: #737373;
outline: 0;
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.25); }
.form-control::placeholder {
color: #6c757d;
opacity: 1; }
.form-control:disabled, .form-control[readonly] {
background-color: #e9ecef;
opacity: 1; }
select.form-control:not([size]):not([multiple]) {
height: calc(3.15rem + 2px); }
select.form-control:focus::-ms-value {
color: #495057;
background-color: #fff; }
.form-control-file,
.form-control-range {
display: block;
width: 100%; }
.col-form-label {
padding-top: calc(0.375rem + 1px);
padding-bottom: calc(0.375rem + 1px);
margin-bottom: 0;
font-size: inherit;
line-height: 2.4; }
.col-form-label-lg {
padding-top: calc(0.5rem + 1px);
padding-bottom: calc(0.5rem + 1px);
font-size: 1.25rem;
line-height: 1.5; }
.col-form-label-sm {
padding-top: calc(0.25rem + 1px);
padding-bottom: calc(0.25rem + 1px);
font-size: 0.875rem;
line-height: 1.5; }
.form-control-plaintext {
display: block;
width: 100%;
padding-top: 0.375rem;
padding-bottom: 0.375rem;
margin-bottom: 0;
line-height: 2.4;
background-color: transparent;
border: solid transparent;
border-width: 1px 0; }
.form-control-plaintext.form-control-sm, .input-group-sm > .form-control-plaintext.form-control,
.input-group-sm > .input-group-prepend > .form-control-plaintext.input-group-text,
.input-group-sm > .input-group-append > .form-control-plaintext.input-group-text,
.input-group-sm > .input-group-prepend > .form-control-plaintext.btn,
.input-group-sm > .input-group-append > .form-control-plaintext.btn, .form-control-plaintext.form-control-lg, .input-group-lg > .form-control-plaintext.form-control,
.input-group-lg > .input-group-prepend > .form-control-plaintext.input-group-text,
.input-group-lg > .input-group-append > .form-control-plaintext.input-group-text,
.input-group-lg > .input-group-prepend > .form-control-plaintext.btn,
.input-group-lg > .input-group-append > .form-control-plaintext.btn {
padding-right: 0;
padding-left: 0; }
.form-control-sm, .input-group-sm > .form-control,
.input-group-sm > .input-group-prepend > .input-group-text,
.input-group-sm > .input-group-append > .input-group-text,
.input-group-sm > .input-group-prepend > .btn,
.input-group-sm > .input-group-append > .btn {
padding: 0.25rem 0.5rem;
font-size: 0.875rem;
line-height: 1.5;
border-radius: 0px; }
select.form-control-sm:not([size]):not([multiple]), .input-group-sm > select.form-control:not([size]):not([multiple]),
.input-group-sm > .input-group-prepend > select.input-group-text:not([size]):not([multiple]),
.input-group-sm > .input-group-append > select.input-group-text:not([size]):not([multiple]),
.input-group-sm > .input-group-prepend > select.btn:not([size]):not([multiple]),
.input-group-sm > .input-group-append > select.btn:not([size]):not([multiple]) {
height: calc(1.8125rem + 2px); }
.form-control-lg, .input-group-lg > .form-control,
.input-group-lg > .input-group-prepend > .input-group-text,
.input-group-lg > .input-group-append > .input-group-text,
.input-group-lg > .input-group-prepend > .btn,
.input-group-lg > .input-group-append > .btn {
padding: 0.5rem 1rem;
font-size: 1.25rem;
line-height: 1.5;
border-radius: 0px; }
select.form-control-lg:not([size]):not([multiple]), .input-group-lg > select.form-control:not([size]):not([multiple]),
.input-group-lg > .input-group-prepend > select.input-group-text:not([size]):not([multiple]),
.input-group-lg > .input-group-append > select.input-group-text:not([size]):not([multiple]),
.input-group-lg > .input-group-prepend > select.btn:not([size]):not([multiple]),
.input-group-lg > .input-group-append > select.btn:not([size]):not([multiple]) {
height: calc(2.875rem + 2px); }
.form-group {
margin-bottom: 1rem; }
.form-text {
display: block;
margin-top: 0.25rem; }
.form-row {
display: flex;
flex-wrap: wrap;
margin-right: -5px;
margin-left: -5px; }
.form-row > .col,
.form-row > [class*="col-"] {
padding-right: 5px;
padding-left: 5px; }
.form-check {
position: relative;
display: block;
padding-left: 1.25rem; }
.form-check-input {
position: absolute;
margin-top: 0.3rem;
margin-left: -1.25rem; }
.form-check-input:disabled ~ .form-check-label {
color: #6c757d; }
.form-check-label {
margin-bottom: 0; }
.form-check-inline {
display: inline-flex;
align-items: center;
padding-left: 0;
margin-right: 0.75rem; }
.form-check-inline .form-check-input {
position: static;
margin-top: 0;
margin-right: 0.3125rem;
margin-left: 0; }
.valid-feedback {
display: none;
width: 100%;
margin-top: 0.25rem;
font-size: 80%;
color: #28a745; }
.valid-tooltip {
position: absolute;
top: 100%;
z-index: 5;
display: none;
max-width: 100%;
padding: .5rem;
margin-top: .1rem;
font-size: .875rem;
line-height: 1;
color: #fff;
background-color: rgba(40, 167, 69, 0.8);
border-radius: .2rem; }
.was-validated .form-control:valid, .form-control.is-valid,
.was-validated .custom-select:valid,
.custom-select.is-valid {
border-color: #28a745; }
.was-validated .form-control:valid:focus, .form-control.is-valid:focus,
.was-validated .custom-select:valid:focus,
.custom-select.is-valid:focus {
border-color: #28a745;
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); }
.was-validated .form-control:valid ~ .valid-feedback,
.was-validated .form-control:valid ~ .valid-tooltip, .form-control.is-valid ~ .valid-feedback,
.form-control.is-valid ~ .valid-tooltip,
.was-validated .custom-select:valid ~ .valid-feedback,
.was-validated .custom-select:valid ~ .valid-tooltip,
.custom-select.is-valid ~ .valid-feedback,
.custom-select.is-valid ~ .valid-tooltip {
display: block; }
.was-validated .form-check-input:valid ~ .form-check-label, .form-check-input.is-valid ~ .form-check-label {
color: #28a745; }
.was-validated .form-check-input:valid ~ .valid-feedback,
.was-validated .form-check-input:valid ~ .valid-tooltip, .form-check-input.is-valid ~ .valid-feedback,
.form-check-input.is-valid ~ .valid-tooltip {
display: block; }
.was-validated .custom-control-input:valid ~ .custom-control-label, .custom-control-input.is-valid ~ .custom-control-label {
color: #28a745; }
.was-validated .custom-control-input:valid ~ .custom-control-label::before, .custom-control-input.is-valid ~ .custom-control-label::before {
background-color: #71dd8a; }
.was-validated .custom-control-input:valid ~ .valid-feedback,
.was-validated .custom-control-input:valid ~ .valid-tooltip, .custom-control-input.is-valid ~ .valid-feedback,
.custom-control-input.is-valid ~ .valid-tooltip {
display: block; }
.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before, .custom-control-input.is-valid:checked ~ .custom-control-label::before {
background-color: #34ce57; }
.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before, .custom-control-input.is-valid:focus ~ .custom-control-label::before {
box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(40, 167, 69, 0.25); }
.was-validated .custom-file-input:valid ~ .custom-file-label, .custom-file-input.is-valid ~ .custom-file-label {
border-color: #28a745; }
.was-validated .custom-file-input:valid ~ .custom-file-label::before, .custom-file-input.is-valid ~ .custom-file-label::before {
border-color: inherit; }
.was-validated .custom-file-input:valid ~ .valid-feedback,
.was-validated .custom-file-input:valid ~ .valid-tooltip, .custom-file-input.is-valid ~ .valid-feedback,
.custom-file-input.is-valid ~ .valid-tooltip {
display: block; }
.was-validated .custom-file-input:valid:focus ~ .custom-file-label, .custom-file-input.is-valid:focus ~ .custom-file-label {
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.25); }
.invalid-feedback {
display: none;
width: 100%;
margin-top: 0.25rem;
font-size: 80%;
color: #dc3545; }
.invalid-tooltip {
position: absolute;
top: 100%;
z-index: 5;
display: none;
max-width: 100%;
padding: .5rem;
margin-top: .1rem;
font-size: .875rem;
line-height: 1;
color: #fff;
background-color: rgba(220, 53, 69, 0.8);
border-radius: .2rem; }
.was-validated .form-control:invalid, .form-control.is-invalid,
.was-validated .custom-select:invalid,
.custom-select.is-invalid {
border-color: #dc3545; }
.was-validated .form-control:invalid:focus, .form-control.is-invalid:focus,
.was-validated .custom-select:invalid:focus,
.custom-select.is-invalid:focus {
border-color: #dc3545;
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); }
.was-validated .form-control:invalid ~ .invalid-feedback,
.was-validated .form-control:invalid ~ .invalid-tooltip, .form-control.is-invalid ~ .invalid-feedback,
.form-control.is-invalid ~ .invalid-tooltip,
.was-validated .custom-select:invalid ~ .invalid-feedback,
.was-validated .custom-select:invalid ~ .invalid-tooltip,
.custom-select.is-invalid ~ .invalid-feedback,
.custom-select.is-invalid ~ .invalid-tooltip {
display: block; }
.was-validated .form-check-input:invalid ~ .form-check-label, .form-check-input.is-invalid ~ .form-check-label {
color: #dc3545; }
.was-validated .form-check-input:invalid ~ .invalid-feedback,
.was-validated .form-check-input:invalid ~ .invalid-tooltip, .form-check-input.is-invalid ~ .invalid-feedback,
.form-check-input.is-invalid ~ .invalid-tooltip {
display: block; }
.was-validated .custom-control-input:invalid ~ .custom-control-label, .custom-control-input.is-invalid ~ .custom-control-label {
color: #dc3545; }
.was-validated .custom-control-input:invalid ~ .custom-control-label::before, .custom-control-input.is-invalid ~ .custom-control-label::before {
background-color: #efa2a9; }
.was-validated .custom-control-input:invalid ~ .invalid-feedback,
.was-validated .custom-control-input:invalid ~ .invalid-tooltip, .custom-control-input.is-invalid ~ .invalid-feedback,
.custom-control-input.is-invalid ~ .invalid-tooltip {
display: block; }
.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before, .custom-control-input.is-invalid:checked ~ .custom-control-label::before {
background-color: #e4606d; }
.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before, .custom-control-input.is-invalid:focus ~ .custom-control-label::before {
box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(220, 53, 69, 0.25); }
.was-validated .custom-file-input:invalid ~ .custom-file-label, .custom-file-input.is-invalid ~ .custom-file-label {
border-color: #dc3545; }
.was-validated .custom-file-input:invalid ~ .custom-file-label::before, .custom-file-input.is-invalid ~ .custom-file-label::before {
border-color: inherit; }
.was-validated .custom-file-input:invalid ~ .invalid-feedback,
.was-validated .custom-file-input:invalid ~ .invalid-tooltip, .custom-file-input.is-invalid ~ .invalid-feedback,
.custom-file-input.is-invalid ~ .invalid-tooltip {
display: block; }
.was-validated .custom-file-input:invalid:focus ~ .custom-file-label, .custom-file-input.is-invalid:focus ~ .custom-file-label {
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.25); }
.form-inline {
display: flex;
flex-flow: row wrap;
align-items: center; }
.form-inline .form-check {
width: 100%; }
@media (min-width: 576px) {
.form-inline label {
display: flex;
align-items: center;
justify-content: center;
margin-bottom: 0; }
.form-inline .form-group {
display: flex;
flex: 0 0 auto;
flex-flow: row wrap;
align-items: center;
margin-bottom: 0; }
.form-inline .form-control {
display: inline-block;
width: auto;
vertical-align: middle; }
.form-inline .form-control-plaintext {
display: inline-block; }
.form-inline .input-group {
width: auto; }
.form-inline .form-check {
display: flex;
align-items: center;
justify-content: center;
width: auto;
padding-left: 0; }
.form-inline .form-check-input {
position: relative;
margin-top: 0;
margin-right: 0.25rem;
margin-left: 0; }
.form-inline .custom-control {
align-items: center;
justify-content: center; }
.form-inline .custom-control-label {
margin-bottom: 0; } }
.btn {
display: inline-block;
font-weight: 400;
text-align: center;
white-space: nowrap;
vertical-align: middle;
user-select: none;
border: 1px solid transparent;
padding: 0.375rem 0.75rem;
font-size: 1rem;
line-height: 2.4;
border-radius: 0px;
transition: color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out; }
.btn:hover, .btn:focus {
text-decoration: none; }
.btn:focus, .btn.focus {
outline: 0;
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.25); }
.btn.disabled, .btn:disabled {
opacity: 0.65; }
.btn:not(:disabled):not(.disabled) {
cursor: pointer; }
.btn:not(:disabled):not(.disabled):active, .btn:not(:disabled):not(.disabled).active {
background-image: none; }
a.btn.disabled,
fieldset:disabled a.btn {
pointer-events: none; }
.btn-primary {
color: #fff;
background-color: #512479;
border-color: #512479; }
.btn-primary:hover {
color: #fff;
background-color: #6a3e5c;
border-color: #49075e; }
.btn-primary:focus, .btn-primary.focus {
box-shadow: 0 0 0 0.2rem rgba(130, 76, 113, 0.5); }
.btn-primary.disabled, .btn-primary:disabled {
color: #fff;
background-color: #512479;
border-color: #512479; }
.btn-primary:not(:disabled):not(.disabled):active, .btn-primary:not(:disabled):not(.disabled).active, .show > .btn-primary.dropdown-toggle {
color: #fff;
background-color: #49075e;
border-color: #5a344e; }
.btn-primary:not(:disabled):not(.disabled):active:focus, .btn-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-primary.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(130, 76, 113, 0.5); }
.btn-secondary {
color: #fff;
background-color: #333;
border-color: #333; }
.btn-secondary:hover {
color: #fff;
background-color: #202020;
border-color: #1a1a1a; }
.btn-secondary:focus, .btn-secondary.focus {
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.5); }
.btn-secondary.disabled, .btn-secondary:disabled {
color: #fff;
background-color: #333;
border-color: #333; }
.btn-secondary:not(:disabled):not(.disabled):active, .btn-secondary:not(:disabled):not(.disabled).active, .show > .btn-secondary.dropdown-toggle {
color: #fff;
background-color: #1a1a1a;
border-color: #131313; }
.btn-secondary:not(:disabled):not(.disabled):active:focus, .btn-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-secondary.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.5); }
.btn-success {
color: #fff;
background-color: #28a745;
border-color: #28a745; }
.btn-success:hover {
color: #fff;
background-color: #218838;
border-color: #1e7e34; }
.btn-success:focus, .btn-success.focus {
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); }
.btn-success.disabled, .btn-success:disabled {
color: #fff;
background-color: #28a745;
border-color: #28a745; }
.btn-success:not(:disabled):not(.disabled):active, .btn-success:not(:disabled):not(.disabled).active, .show > .btn-success.dropdown-toggle {
color: #fff;
background-color: #1e7e34;
border-color: #1c7430; }
.btn-success:not(:disabled):not(.disabled):active:focus, .btn-success:not(:disabled):not(.disabled).active:focus, .show > .btn-success.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); }
.btn-info {
color: #fff;
background-color: #17a2b8;
border-color: #17a2b8; }
.btn-info:hover {
color: #fff;
background-color: #138496;
border-color: #117a8b; }
.btn-info:focus, .btn-info.focus {
box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); }
.btn-info.disabled, .btn-info:disabled {
color: #fff;
background-color: #17a2b8;
border-color: #17a2b8; }
.btn-info:not(:disabled):not(.disabled):active, .btn-info:not(:disabled):not(.disabled).active, .show > .btn-info.dropdown-toggle {
color: #fff;
background-color: #117a8b;
border-color: #10707f; }
.btn-info:not(:disabled):not(.disabled):active:focus, .btn-info:not(:disabled):not(.disabled).active:focus, .show > .btn-info.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); }
.btn-warning {
color: #212529;
background-color: #ffc107;
border-color: #ffc107; }
.btn-warning:hover {
color: #212529;
background-color: #e0a800;
border-color: #d39e00; }
.btn-warning:focus, .btn-warning.focus {
box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); }
.btn-warning.disabled, .btn-warning:disabled {
color: #212529;
background-color: #ffc107;
border-color: #ffc107; }
.btn-warning:not(:disabled):not(.disabled):active, .btn-warning:not(:disabled):not(.disabled).active, .show > .btn-warning.dropdown-toggle {
color: #212529;
background-color: #d39e00;
border-color: #c69500; }
.btn-warning:not(:disabled):not(.disabled):active:focus, .btn-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-warning.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); }
.btn-danger {
color: #fff;
background-color: #dc3545;
border-color: #dc3545; }
.btn-danger:hover {
color: #fff;
background-color: #c82333;
border-color: #bd2130; }
.btn-danger:focus, .btn-danger.focus {
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); }
.btn-danger.disabled, .btn-danger:disabled {
color: #fff;
background-color: #dc3545;
border-color: #dc3545; }
.btn-danger:not(:disabled):not(.disabled):active, .btn-danger:not(:disabled):not(.disabled).active, .show > .btn-danger.dropdown-toggle {
color: #fff;
background-color: #bd2130;
border-color: #b21f2d; }
.btn-danger:not(:disabled):not(.disabled):active:focus, .btn-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-danger.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); }
.btn-light {
color: #212529;
background-color: #f8f9fa;
border-color: #f8f9fa; }
.btn-light:hover {
color: #212529;
background-color: #e2e6ea;
border-color: #dae0e5; }
.btn-light:focus, .btn-light.focus {
box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); }
.btn-light.disabled, .btn-light:disabled {
color: #212529;
background-color: #f8f9fa;
border-color: #f8f9fa; }
.btn-light:not(:disabled):not(.disabled):active, .btn-light:not(:disabled):not(.disabled).active, .show > .btn-light.dropdown-toggle {
color: #212529;
background-color: #dae0e5;
border-color: #d3d9df; }
.btn-light:not(:disabled):not(.disabled):active:focus, .btn-light:not(:disabled):not(.disabled).active:focus, .show > .btn-light.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); }
.btn-dark {
color: #fff;
background-color: #343a40;
border-color: #343a40; }
.btn-dark:hover {
color: #fff;
background-color: #23272b;
border-color: #1d2124; }
.btn-dark:focus, .btn-dark.focus {
box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); }
.btn-dark.disabled, .btn-dark:disabled {
color: #fff;
background-color: #343a40;
border-color: #343a40; }
.btn-dark:not(:disabled):not(.disabled):active, .btn-dark:not(:disabled):not(.disabled).active, .show > .btn-dark.dropdown-toggle {
color: #fff;
background-color: #1d2124;
border-color: #171a1d; }
.btn-dark:not(:disabled):not(.disabled):active:focus, .btn-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-dark.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); }
.btn-outline-primary {
color: #512479;
background-color: transparent;
background-image: none;
border-color: #512479; }
.btn-outline-primary:hover {
color: #fff;
background-color: #512479;
border-color: #512479; }
.btn-outline-primary:focus, .btn-outline-primary.focus {
box-shadow: 0 0 0 0.2rem rgba(130, 76, 113, 0.5); }
.btn-outline-primary.disabled, .btn-outline-primary:disabled {
color: #512479;
background-color: transparent; }
.btn-outline-primary:not(:disabled):not(.disabled):active, .btn-outline-primary:not(:disabled):not(.disabled).active, .show > .btn-outline-primary.dropdown-toggle {
color: #fff;
background-color: #512479;
border-color: #512479; }
.btn-outline-primary:not(:disabled):not(.disabled):active:focus, .btn-outline-primary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-primary.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(130, 76, 113, 0.5); }
.btn-outline-secondary {
color: #333;
background-color: transparent;
background-image: none;
border-color: #333; }
.btn-outline-secondary:hover {
color: #fff;
background-color: #333;
border-color: #333; }
.btn-outline-secondary:focus, .btn-outline-secondary.focus {
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.5); }
.btn-outline-secondary.disabled, .btn-outline-secondary:disabled {
color: #333;
background-color: transparent; }
.btn-outline-secondary:not(:disabled):not(.disabled):active, .btn-outline-secondary:not(:disabled):not(.disabled).active, .show > .btn-outline-secondary.dropdown-toggle {
color: #fff;
background-color: #333;
border-color: #333; }
.btn-outline-secondary:not(:disabled):not(.disabled):active:focus, .btn-outline-secondary:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-secondary.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.5); }
.btn-outline-success {
color: #28a745;
background-color: transparent;
background-image: none;
border-color: #28a745; }
.btn-outline-success:hover {
color: #fff;
background-color: #28a745;
border-color: #28a745; }
.btn-outline-success:focus, .btn-outline-success.focus {
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); }
.btn-outline-success.disabled, .btn-outline-success:disabled {
color: #28a745;
background-color: transparent; }
.btn-outline-success:not(:disabled):not(.disabled):active, .btn-outline-success:not(:disabled):not(.disabled).active, .show > .btn-outline-success.dropdown-toggle {
color: #fff;
background-color: #28a745;
border-color: #28a745; }
.btn-outline-success:not(:disabled):not(.disabled):active:focus, .btn-outline-success:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-success.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(40, 167, 69, 0.5); }
.btn-outline-info {
color: #17a2b8;
background-color: transparent;
background-image: none;
border-color: #17a2b8; }
.btn-outline-info:hover {
color: #fff;
background-color: #17a2b8;
border-color: #17a2b8; }
.btn-outline-info:focus, .btn-outline-info.focus {
box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); }
.btn-outline-info.disabled, .btn-outline-info:disabled {
color: #17a2b8;
background-color: transparent; }
.btn-outline-info:not(:disabled):not(.disabled):active, .btn-outline-info:not(:disabled):not(.disabled).active, .show > .btn-outline-info.dropdown-toggle {
color: #fff;
background-color: #17a2b8;
border-color: #17a2b8; }
.btn-outline-info:not(:disabled):not(.disabled):active:focus, .btn-outline-info:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-info.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(23, 162, 184, 0.5); }
.btn-outline-warning {
color: #ffc107;
background-color: transparent;
background-image: none;
border-color: #ffc107; }
.btn-outline-warning:hover {
color: #212529;
background-color: #ffc107;
border-color: #ffc107; }
.btn-outline-warning:focus, .btn-outline-warning.focus {
box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); }
.btn-outline-warning.disabled, .btn-outline-warning:disabled {
color: #ffc107;
background-color: transparent; }
.btn-outline-warning:not(:disabled):not(.disabled):active, .btn-outline-warning:not(:disabled):not(.disabled).active, .show > .btn-outline-warning.dropdown-toggle {
color: #212529;
background-color: #ffc107;
border-color: #ffc107; }
.btn-outline-warning:not(:disabled):not(.disabled):active:focus, .btn-outline-warning:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-warning.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(255, 193, 7, 0.5); }
.btn-outline-danger {
color: #dc3545;
background-color: transparent;
background-image: none;
border-color: #dc3545; }
.btn-outline-danger:hover {
color: #fff;
background-color: #dc3545;
border-color: #dc3545; }
.btn-outline-danger:focus, .btn-outline-danger.focus {
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); }
.btn-outline-danger.disabled, .btn-outline-danger:disabled {
color: #dc3545;
background-color: transparent; }
.btn-outline-danger:not(:disabled):not(.disabled):active, .btn-outline-danger:not(:disabled):not(.disabled).active, .show > .btn-outline-danger.dropdown-toggle {
color: #fff;
background-color: #dc3545;
border-color: #dc3545; }
.btn-outline-danger:not(:disabled):not(.disabled):active:focus, .btn-outline-danger:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-danger.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(220, 53, 69, 0.5); }
.btn-outline-light {
color: #f8f9fa;
background-color: transparent;
background-image: none;
border-color: #f8f9fa; }
.btn-outline-light:hover {
color: #212529;
background-color: #f8f9fa;
border-color: #f8f9fa; }
.btn-outline-light:focus, .btn-outline-light.focus {
box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); }
.btn-outline-light.disabled, .btn-outline-light:disabled {
color: #f8f9fa;
background-color: transparent; }
.btn-outline-light:not(:disabled):not(.disabled):active, .btn-outline-light:not(:disabled):not(.disabled).active, .show > .btn-outline-light.dropdown-toggle {
color: #212529;
background-color: #f8f9fa;
border-color: #f8f9fa; }
.btn-outline-light:not(:disabled):not(.disabled):active:focus, .btn-outline-light:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-light.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(248, 249, 250, 0.5); }
.btn-outline-dark {
color: #343a40;
background-color: transparent;
background-image: none;
border-color: #343a40; }
.btn-outline-dark:hover {
color: #fff;
background-color: #343a40;
border-color: #343a40; }
.btn-outline-dark:focus, .btn-outline-dark.focus {
box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); }
.btn-outline-dark.disabled, .btn-outline-dark:disabled {
color: #343a40;
background-color: transparent; }
.btn-outline-dark:not(:disabled):not(.disabled):active, .btn-outline-dark:not(:disabled):not(.disabled).active, .show > .btn-outline-dark.dropdown-toggle {
color: #fff;
background-color: #343a40;
border-color: #343a40; }
.btn-outline-dark:not(:disabled):not(.disabled):active:focus, .btn-outline-dark:not(:disabled):not(.disabled).active:focus, .show > .btn-outline-dark.dropdown-toggle:focus {
box-shadow: 0 0 0 0.2rem rgba(52, 58, 64, 0.5); }
.btn-link {
font-weight: 400;
color: #512479;
background-color: transparent; }
.btn-link:hover {
color: #523047;
text-decoration: underline;
background-color: transparent;
border-color: transparent; }
.btn-link:focus, .btn-link.focus {
text-decoration: underline;
border-color: transparent;
box-shadow: none; }
.btn-link:disabled, .btn-link.disabled {
color: #6c757d; }
.btn-lg, .btn-group-lg > .btn {
padding: 0.5rem 1rem;
font-size: 1.25rem;
line-height: 1.5;
border-radius: 0px; }
.btn-sm, .btn-group-sm > .btn {
padding: 0.25rem 0.5rem;
font-size: 0.875rem;
line-height: 1.5;
border-radius: 0px; }
.btn-block {
display: block;
width: 100%; }
.btn-block + .btn-block {
margin-top: 0.5rem; }
input[type="submit"].btn-block,
input[type="reset"].btn-block,
input[type="button"].btn-block {
width: 100%; }
.fade {
opacity: 0;
transition: opacity 0.15s linear; }
.fade.show {
opacity: 1; }
.collapse {
display: none; }
.collapse.show {
display: block; }
tr.collapse.show {
display: table-row; }
tbody.collapse.show {
display: table-row-group; }
.collapsing {
position: relative;
height: 0;
overflow: hidden;
transition: height 0.35s ease; }
.dropup,
.dropdown {
position: relative; }
.dropdown-toggle::after {
display: inline-block;
width: 0;
height: 0;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid;
border-right: 0.3em solid transparent;
border-bottom: 0;
border-left: 0.3em solid transparent; }
.dropdown-toggle:empty::after {
margin-left: 0; }
.dropdown-menu {
position: absolute;
top: 100%;
left: 0;
z-index: 1000;
display: none;
float: left;
min-width: 10rem;
padding: 0.5rem 0;
margin: 0.125rem 0 0;
font-size: 1rem;
color: #999;
text-align: left;
list-style: none;
background-color: #fff;
background-clip: padding-box;
border: 1px solid rgba(0, 0, 0, 0.15);
border-radius: 0px; }
.dropup .dropdown-menu {
margin-top: 0;
margin-bottom: 0.125rem; }
.dropup .dropdown-toggle::after {
display: inline-block;
width: 0;
height: 0;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0;
border-right: 0.3em solid transparent;
border-bottom: 0.3em solid;
border-left: 0.3em solid transparent; }
.dropup .dropdown-toggle:empty::after {
margin-left: 0; }
.dropright .dropdown-menu {
margin-top: 0;
margin-left: 0.125rem; }
.dropright .dropdown-toggle::after {
display: inline-block;
width: 0;
height: 0;
margin-left: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid transparent;
border-bottom: 0.3em solid transparent;
border-left: 0.3em solid; }
.dropright .dropdown-toggle:empty::after {
margin-left: 0; }
.dropright .dropdown-toggle::after {
vertical-align: 0; }
.dropleft .dropdown-menu {
margin-top: 0;
margin-right: 0.125rem; }
.dropleft .dropdown-toggle::after {
display: inline-block;
width: 0;
height: 0;
margin-left: 0.255em;
vertical-align: 0.255em;
content: ""; }
.dropleft .dropdown-toggle::after {
display: none; }
.dropleft .dropdown-toggle::before {
display: inline-block;
width: 0;
height: 0;
margin-right: 0.255em;
vertical-align: 0.255em;
content: "";
border-top: 0.3em solid transparent;
border-right: 0.3em solid;
border-bottom: 0.3em solid transparent; }
.dropleft .dropdown-toggle:empty::after {
margin-left: 0; }
.dropleft .dropdown-toggle::before {
vertical-align: 0; }
.dropdown-divider {
height: 0;
margin: 0.5rem 0;
overflow: hidden;
border-top: 1px solid #e9ecef; }
.dropdown-item {
display: block;
width: 100%;
padding: 0.25rem 1.5rem;
clear: both;
font-weight: 400;
color: #212529;
text-align: inherit;
white-space: nowrap;
background-color: transparent;
border: 0; }
.dropdown-item:hover, .dropdown-item:focus {
color: #16181b;
text-decoration: none;
background-color: #f8f9fa; }
.dropdown-item.active, .dropdown-item:active {
color: #fff;
text-decoration: none;
background-color: #333; }
.dropdown-item.disabled, .dropdown-item:disabled {
color: #6c757d;
background-color: transparent; }
.dropdown-menu.show {
display: block; }
.dropdown-header {
display: block;
padding: 0.5rem 1.5rem;
margin-bottom: 0;
font-size: 0.875rem;
color: #6c757d;
white-space: nowrap; }
.btn-group,
.btn-group-vertical {
position: relative;
display: inline-flex;
vertical-align: middle; }
.btn-group > .btn,
.btn-group-vertical > .btn {
position: relative;
flex: 0 1 auto; }
.btn-group > .btn:hover,
.btn-group-vertical > .btn:hover {
z-index: 1; }
.btn-group > .btn:focus, .btn-group > .btn:active, .btn-group > .btn.active,
.btn-group-vertical > .btn:focus,
.btn-group-vertical > .btn:active,
.btn-group-vertical > .btn.active {
z-index: 1; }
.btn-group .btn + .btn,
.btn-group .btn + .btn-group,
.btn-group .btn-group + .btn,
.btn-group .btn-group + .btn-group,
.btn-group-vertical .btn + .btn,
.btn-group-vertical .btn + .btn-group,
.btn-group-vertical .btn-group + .btn,
.btn-group-vertical .btn-group + .btn-group {
margin-left: -1px; }
.btn-toolbar {
display: flex;
flex-wrap: wrap;
justify-content: flex-start; }
.btn-toolbar .input-group {
width: auto; }
.btn-group > .btn:first-child {
margin-left: 0; }
.btn-group > .btn:not(:last-child):not(.dropdown-toggle),
.btn-group > .btn-group:not(:last-child) > .btn {
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.btn-group > .btn:not(:first-child),
.btn-group > .btn-group:not(:first-child) > .btn {
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.dropdown-toggle-split {
padding-right: 0.5625rem;
padding-left: 0.5625rem; }
.dropdown-toggle-split::after {
margin-left: 0; }
.btn-sm + .dropdown-toggle-split, .btn-group-sm > .btn + .dropdown-toggle-split {
padding-right: 0.375rem;
padding-left: 0.375rem; }
.btn-lg + .dropdown-toggle-split, .btn-group-lg > .btn + .dropdown-toggle-split {
padding-right: 0.75rem;
padding-left: 0.75rem; }
.btn-group-vertical {
flex-direction: column;
align-items: flex-start;
justify-content: center; }
.btn-group-vertical .btn,
.btn-group-vertical .btn-group {
width: 100%; }
.btn-group-vertical > .btn + .btn,
.btn-group-vertical > .btn + .btn-group,
.btn-group-vertical > .btn-group + .btn,
.btn-group-vertical > .btn-group + .btn-group {
margin-top: -1px;
margin-left: 0; }
.btn-group-vertical > .btn:not(:last-child):not(.dropdown-toggle),
.btn-group-vertical > .btn-group:not(:last-child) > .btn {
border-bottom-right-radius: 0;
border-bottom-left-radius: 0; }
.btn-group-vertical > .btn:not(:first-child),
.btn-group-vertical > .btn-group:not(:first-child) > .btn {
border-top-left-radius: 0;
border-top-right-radius: 0; }
.btn-group-toggle > .btn,
.btn-group-toggle > .btn-group > .btn {
margin-bottom: 0; }
.btn-group-toggle > .btn input[type="radio"],
.btn-group-toggle > .btn input[type="checkbox"],
.btn-group-toggle > .btn-group > .btn input[type="radio"],
.btn-group-toggle > .btn-group > .btn input[type="checkbox"] {
position: absolute;
clip: rect(0, 0, 0, 0);
pointer-events: none; }
.input-group {
position: relative;
display: flex;
flex-wrap: wrap;
align-items: stretch;
width: 100%; }
.input-group > .form-control,
.input-group > .custom-select,
.input-group > .custom-file {
position: relative;
flex: 1 1 auto;
width: 1%;
margin-bottom: 0; }
.input-group > .form-control:focus,
.input-group > .custom-select:focus,
.input-group > .custom-file:focus {
z-index: 3; }
.input-group > .form-control + .form-control,
.input-group > .form-control + .custom-select,
.input-group > .form-control + .custom-file,
.input-group > .custom-select + .form-control,
.input-group > .custom-select + .custom-select,
.input-group > .custom-select + .custom-file,
.input-group > .custom-file + .form-control,
.input-group > .custom-file + .custom-select,
.input-group > .custom-file + .custom-file {
margin-left: -1px; }
.input-group > .form-control:not(:last-child),
.input-group > .custom-select:not(:last-child) {
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.input-group > .form-control:not(:first-child),
.input-group > .custom-select:not(:first-child) {
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.input-group > .custom-file {
display: flex;
align-items: center; }
.input-group > .custom-file:not(:last-child) .custom-file-label, .input-group > .custom-file:not(:last-child) .custom-file-label::before {
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.input-group > .custom-file:not(:first-child) .custom-file-label, .input-group > .custom-file:not(:first-child) .custom-file-label::before {
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.input-group-prepend,
.input-group-append {
display: flex; }
.input-group-prepend .btn,
.input-group-append .btn {
position: relative;
z-index: 2; }
.input-group-prepend .btn + .btn,
.input-group-prepend .btn + .input-group-text,
.input-group-prepend .input-group-text + .input-group-text,
.input-group-prepend .input-group-text + .btn,
.input-group-append .btn + .btn,
.input-group-append .btn + .input-group-text,
.input-group-append .input-group-text + .input-group-text,
.input-group-append .input-group-text + .btn {
margin-left: -1px; }
.input-group-prepend {
margin-right: -1px; }
.input-group-append {
margin-left: -1px; }
.input-group-text {
display: flex;
align-items: center;
padding: 0.375rem 0.75rem;
margin-bottom: 0;
font-size: 1rem;
font-weight: 400;
line-height: 2.4;
color: #495057;
text-align: center;
white-space: nowrap;
background-color: #e9ecef;
border: 1px solid #ced4da;
border-radius: 0px; }
.input-group-text input[type="radio"],
.input-group-text input[type="checkbox"] {
margin-top: 0; }
.input-group > .input-group-prepend > .btn,
.input-group > .input-group-prepend > .input-group-text,
.input-group > .input-group-append:not(:last-child) > .btn,
.input-group > .input-group-append:not(:last-child) > .input-group-text,
.input-group > .input-group-append:last-child > .btn:not(:last-child):not(.dropdown-toggle),
.input-group > .input-group-append:last-child > .input-group-text:not(:last-child) {
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.input-group > .input-group-append > .btn,
.input-group > .input-group-append > .input-group-text,
.input-group > .input-group-prepend:not(:first-child) > .btn,
.input-group > .input-group-prepend:not(:first-child) > .input-group-text,
.input-group > .input-group-prepend:first-child > .btn:not(:first-child),
.input-group > .input-group-prepend:first-child > .input-group-text:not(:first-child) {
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.custom-control {
position: relative;
display: block;
min-height: 2.4rem;
padding-left: 1.5rem; }
.custom-control-inline {
display: inline-flex;
margin-right: 1rem; }
.custom-control-input {
position: absolute;
z-index: -1;
opacity: 0; }
.custom-control-input:checked ~ .custom-control-label::before {
color: #fff;
background-color: #333; }
.custom-control-input:focus ~ .custom-control-label::before {
box-shadow: 0 0 0 1px #fff, 0 0 0 0.2rem rgba(51, 51, 51, 0.25); }
.custom-control-input:active ~ .custom-control-label::before {
color: #fff;
background-color: #8c8c8c; }
.custom-control-input:disabled ~ .custom-control-label {
color: #6c757d; }
.custom-control-input:disabled ~ .custom-control-label::before {
background-color: #e9ecef; }
.custom-control-label {
margin-bottom: 0; }
.custom-control-label::before {
position: absolute;
top: 0.7rem;
left: 0;
display: block;
width: 1rem;
height: 1rem;
pointer-events: none;
content: "";
user-select: none;
background-color: #dee2e6; }
.custom-control-label::after {
position: absolute;
top: 0.7rem;
left: 0;
display: block;
width: 1rem;
height: 1rem;
content: "";
background-repeat: no-repeat;
background-position: center center;
background-size: 50% 50%; }
.custom-checkbox .custom-control-label::before {
border-radius: 0px; }
.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before {
background-color: #333; }
.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E"); }
.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before {
background-color: #333; }
.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E"); }
.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before {
background-color: rgba(130, 76, 113, 0.5); }
.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before {
background-color: rgba(130, 76, 113, 0.5); }
.custom-radio .custom-control-label::before {
border-radius: 50%; }
.custom-radio .custom-control-input:checked ~ .custom-control-label::before {
background-color: #333; }
.custom-radio .custom-control-input:checked ~ .custom-control-label::after {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 -4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E"); }
.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before {
background-color: rgba(130, 76, 113, 0.5); }
.custom-select {
display: inline-block;
width: 100%;
height: calc(3.15rem + 2px);
padding: 0.375rem 1.75rem 0.375rem 0.75rem;
line-height: 2.4;
color: #495057;
vertical-align: middle;
background: #fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23343a40' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center;
background-size: 8px 10px;
border: 1px solid #ced4da;
border-radius: 0px;
appearance: none; }
.custom-select:focus {
border-color: #737373;
outline: 0;
box-shadow: inset 0 1px 2px rgba(0, 0, 0, 0.075), 0 0 5px rgba(115, 115, 115, 0.5); }
.custom-select:focus::-ms-value {
color: #495057;
background-color: #fff; }
.custom-select[multiple], .custom-select[size]:not([size="1"]) {
height: auto;
padding-right: 0.75rem;
background-image: none; }
.custom-select:disabled {
color: #6c757d;
background-color: #e9ecef; }
.custom-select::-ms-expand {
opacity: 0; }
.custom-select-sm {
height: calc(1.8125rem + 2px);
padding-top: 0.375rem;
padding-bottom: 0.375rem;
font-size: 75%; }
.custom-select-lg {
height: calc(2.875rem + 2px);
padding-top: 0.375rem;
padding-bottom: 0.375rem;
font-size: 125%; }
.custom-file {
position: relative;
display: inline-block;
width: 100%;
height: calc(3.15rem + 2px);
margin-bottom: 0; }
.custom-file-input {
position: relative;
z-index: 2;
width: 100%;
height: calc(3.15rem + 2px);
margin: 0;
opacity: 0; }
.custom-file-input:focus ~ .custom-file-control {
border-color: #737373;
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.25); }
.custom-file-input:focus ~ .custom-file-control::before {
border-color: #737373; }
.custom-file-input:lang(en) ~ .custom-file-label::after {
content: "Browse"; }
.custom-file-label {
position: absolute;
top: 0;
right: 0;
left: 0;
z-index: 1;
height: calc(3.15rem + 2px);
padding: 0.375rem 0.75rem;
line-height: 2.4;
color: #495057;
background-color: #fff;
border: 1px solid #ced4da;
border-radius: 0px; }
.custom-file-label::after {
position: absolute;
top: 0;
right: 0;
bottom: 0;
z-index: 3;
display: block;
height: calc(calc(3.15rem + 2px) - 1px * 2);
padding: 0.375rem 0.75rem;
line-height: 2.4;
color: #495057;
content: "Browse";
background-color: #e9ecef;
border-left: 1px solid #ced4da;
border-radius: 0 0px 0px 0; }
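/* Navs: base .nav list plus .nav-tabs and .nav-pills variants, with
   .tab-content panes toggled via the .active class. */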
.nav {
display: flex;
flex-wrap: wrap;
padding-left: 0;
margin-bottom: 0;
list-style: none; }
.nav-link {
display: block;
padding: 0.5rem 1rem; }
.nav-link:hover, .nav-link:focus {
text-decoration: none; }
.nav-link.disabled {
color: #6c757d; }
.nav-tabs {
border-bottom: 1px solid #dee2e6; }
.nav-tabs .nav-item {
margin-bottom: -1px; }
.nav-tabs .nav-link {
border: 1px solid transparent;
border-top-left-radius: 0px;
border-top-right-radius: 0px; }
.nav-tabs .nav-link:hover, .nav-tabs .nav-link:focus {
border-color: #e9ecef #e9ecef #dee2e6; }
.nav-tabs .nav-link.disabled {
color: #6c757d;
background-color: transparent;
border-color: transparent; }
.nav-tabs .nav-link.active,
.nav-tabs .nav-item.show .nav-link {
color: #495057;
background-color: #fff;
border-color: #dee2e6 #dee2e6 #fff; }
.nav-tabs .dropdown-menu {
margin-top: -1px;
border-top-left-radius: 0;
border-top-right-radius: 0; }
.nav-pills .nav-link {
border-radius: 0px; }
.nav-pills .nav-link.active,
.nav-pills .show > .nav-link {
color: #fff;
background-color: #333; }
.nav-fill .nav-item {
flex: 1 1 auto;
text-align: center; }
.nav-justified .nav-item {
flex-basis: 0;
flex-grow: 1;
text-align: center; }
.tab-content > .tab-pane {
display: none; }
.tab-content > .active {
display: block; }
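/* Navbar: wrapper, brand, nav links, toggler and collapsible content. */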
.navbar {
position: relative;
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between;
padding: 0.5rem 1rem; }
.navbar > .container,
.navbar > .container-fluid {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between; }
.navbar-brand {
display: inline-block;
padding-top: 0.2rem;
padding-bottom: 0.2rem;
margin-right: 1rem;
font-size: 1.25rem;
line-height: inherit;
white-space: nowrap; }
.navbar-brand:hover, .navbar-brand:focus {
text-decoration: none; }
.navbar-nav {
display: flex;
flex-direction: column;
padding-left: 0;
margin-bottom: 0;
list-style: none; }
.navbar-nav .nav-link {
padding-right: 0;
padding-left: 0; }
.navbar-nav .dropdown-menu {
position: static;
float: none; }
.navbar-text {
display: inline-block;
padding-top: 0.5rem;
padding-bottom: 0.5rem; }
.navbar-collapse {
flex-basis: 100%;
flex-grow: 1;
align-items: center; }
.navbar-toggler {
padding: 0.25rem 0.75rem;
font-size: 1.25rem;
line-height: 1;
background-color: transparent;
border: 1px solid transparent;
border-radius: 0px; }
.navbar-toggler:hover, .navbar-toggler:focus {
text-decoration: none; }
.navbar-toggler:not(:disabled):not(.disabled) {
cursor: pointer; }
.navbar-toggler-icon {
display: inline-block;
width: 1.5em;
height: 1.5em;
vertical-align: middle;
content: "";
background: no-repeat center center;
background-size: 100% 100%; }
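/* Responsive navbar expansion: one block per breakpoint (-sm, -md, -lg, -xl),
   switching from the collapsed toggler layout to a horizontal nav at that width. */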
@media (max-width: 575.98px) {
.navbar-expand-sm > .container,
.navbar-expand-sm > .container-fluid {
padding-right: 0;
padding-left: 0; } }
@media (min-width: 576px) {
.navbar-expand-sm {
flex-flow: row nowrap;
justify-content: flex-start; }
.navbar-expand-sm .navbar-nav {
flex-direction: row; }
.navbar-expand-sm .navbar-nav .dropdown-menu {
position: absolute; }
.navbar-expand-sm .navbar-nav .dropdown-menu-right {
right: 0;
left: auto; }
.navbar-expand-sm .navbar-nav .nav-link {
padding-right: 0.5rem;
padding-left: 0.5rem; }
.navbar-expand-sm > .container,
.navbar-expand-sm > .container-fluid {
flex-wrap: nowrap; }
.navbar-expand-sm .navbar-collapse {
display: flex !important;
flex-basis: auto; }
.navbar-expand-sm .navbar-toggler {
display: none; }
.navbar-expand-sm .dropup .dropdown-menu {
top: auto;
bottom: 100%; } }
@media (max-width: 767.98px) {
.navbar-expand-md > .container,
.navbar-expand-md > .container-fluid {
padding-right: 0;
padding-left: 0; } }
@media (min-width: 768px) {
.navbar-expand-md {
flex-flow: row nowrap;
justify-content: flex-start; }
.navbar-expand-md .navbar-nav {
flex-direction: row; }
.navbar-expand-md .navbar-nav .dropdown-menu {
position: absolute; }
.navbar-expand-md .navbar-nav .dropdown-menu-right {
right: 0;
left: auto; }
.navbar-expand-md .navbar-nav .nav-link {
padding-right: 0.5rem;
padding-left: 0.5rem; }
.navbar-expand-md > .container,
.navbar-expand-md > .container-fluid {
flex-wrap: nowrap; }
.navbar-expand-md .navbar-collapse {
display: flex !important;
flex-basis: auto; }
.navbar-expand-md .navbar-toggler {
display: none; }
.navbar-expand-md .dropup .dropdown-menu {
top: auto;
bottom: 100%; } }
@media (max-width: 991.98px) {
.navbar-expand-lg > .container,
.navbar-expand-lg > .container-fluid {
padding-right: 0;
padding-left: 0; } }
@media (min-width: 992px) {
.navbar-expand-lg {
flex-flow: row nowrap;
justify-content: flex-start; }
.navbar-expand-lg .navbar-nav {
flex-direction: row; }
.navbar-expand-lg .navbar-nav .dropdown-menu {
position: absolute; }
.navbar-expand-lg .navbar-nav .dropdown-menu-right {
right: 0;
left: auto; }
.navbar-expand-lg .navbar-nav .nav-link {
padding-right: 0.5rem;
padding-left: 0.5rem; }
.navbar-expand-lg > .container,
.navbar-expand-lg > .container-fluid {
flex-wrap: nowrap; }
.navbar-expand-lg .navbar-collapse {
display: flex !important;
flex-basis: auto; }
.navbar-expand-lg .navbar-toggler {
display: none; }
.navbar-expand-lg .dropup .dropdown-menu {
top: auto;
bottom: 100%; } }
@media (max-width: 1199.98px) {
.navbar-expand-xl > .container,
.navbar-expand-xl > .container-fluid {
padding-right: 0;
padding-left: 0; } }
@media (min-width: 1200px) {
.navbar-expand-xl {
flex-flow: row nowrap;
justify-content: flex-start; }
.navbar-expand-xl .navbar-nav {
flex-direction: row; }
.navbar-expand-xl .navbar-nav .dropdown-menu {
position: absolute; }
.navbar-expand-xl .navbar-nav .dropdown-menu-right {
right: 0;
left: auto; }
.navbar-expand-xl .navbar-nav .nav-link {
padding-right: 0.5rem;
padding-left: 0.5rem; }
.navbar-expand-xl > .container,
.navbar-expand-xl > .container-fluid {
flex-wrap: nowrap; }
.navbar-expand-xl .navbar-collapse {
display: flex !important;
flex-basis: auto; }
.navbar-expand-xl .navbar-toggler {
display: none; }
.navbar-expand-xl .dropup .dropdown-menu {
top: auto;
bottom: 100%; } }
.navbar-expand {
flex-flow: row nowrap;
justify-content: flex-start; }
.navbar-expand > .container,
.navbar-expand > .container-fluid {
padding-right: 0;
padding-left: 0; }
.navbar-expand .navbar-nav {
flex-direction: row; }
.navbar-expand .navbar-nav .dropdown-menu {
position: absolute; }
.navbar-expand .navbar-nav .dropdown-menu-right {
right: 0;
left: auto; }
.navbar-expand .navbar-nav .nav-link {
padding-right: 0.5rem;
padding-left: 0.5rem; }
.navbar-expand > .container,
.navbar-expand > .container-fluid {
flex-wrap: nowrap; }
.navbar-expand .navbar-collapse {
display: flex !important;
flex-basis: auto; }
.navbar-expand .navbar-toggler {
display: none; }
.navbar-expand .dropup .dropdown-menu {
top: auto;
bottom: 100%; }
.navbar-light .navbar-brand {
color: rgba(0, 0, 0, 0.9); }
.navbar-light .navbar-brand:hover, .navbar-light .navbar-brand:focus {
color: rgba(0, 0, 0, 0.9); }
.navbar-light .navbar-nav .nav-link {
color: rgba(0, 0, 0, 0.5); }
.navbar-light .navbar-nav .nav-link:hover, .navbar-light .navbar-nav .nav-link:focus {
color: rgba(0, 0, 0, 0.7); }
.navbar-light .navbar-nav .nav-link.disabled {
color: rgba(0, 0, 0, 0.3); }
.navbar-light .navbar-nav .show > .nav-link,
.navbar-light .navbar-nav .active > .nav-link,
.navbar-light .navbar-nav .nav-link.show,
.navbar-light .navbar-nav .nav-link.active {
color: rgba(0, 0, 0, 0.9); }
.navbar-light .navbar-toggler {
color: rgba(0, 0, 0, 0.5);
border-color: rgba(0, 0, 0, 0.1); }
.navbar-light .navbar-toggler-icon {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); }
.navbar-light .navbar-text {
color: rgba(0, 0, 0, 0.5); }
.navbar-light .navbar-text a {
color: rgba(0, 0, 0, 0.9); }
.navbar-light .navbar-text a:hover, .navbar-light .navbar-text a:focus {
color: rgba(0, 0, 0, 0.9); }
.navbar-dark .navbar-brand {
color: #fff; }
.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus {
color: #fff; }
.navbar-dark .navbar-nav .nav-link {
color: rgba(255, 255, 255, 0.5); }
.navbar-dark .navbar-nav .nav-link:hover, .navbar-dark .navbar-nav .nav-link:focus {
color: rgba(255, 255, 255, 0.75); }
.navbar-dark .navbar-nav .nav-link.disabled {
color: rgba(255, 255, 255, 0.25); }
.navbar-dark .navbar-nav .show > .nav-link,
.navbar-dark .navbar-nav .active > .nav-link,
.navbar-dark .navbar-nav .nav-link.show,
.navbar-dark .navbar-nav .nav-link.active {
color: #fff; }
.navbar-dark .navbar-toggler {
color: rgba(255, 255, 255, 0.5);
border-color: rgba(255, 255, 255, 0.1); }
.navbar-dark .navbar-toggler-icon {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E"); }
.navbar-dark .navbar-text {
color: rgba(255, 255, 255, 0.5); }
.navbar-dark .navbar-text a {
color: #fff; }
.navbar-dark .navbar-text a:hover, .navbar-dark .navbar-text a:focus {
color: #fff; }
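/* Cards: flexbox panel with optional header, body, footer and images, plus
   deck, group and column layouts. Corner radius is flattened to 0 by this theme. */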
.card {
position: relative;
display: flex;
flex-direction: column;
min-width: 0;
word-wrap: break-word;
background-color: #fff;
background-clip: border-box;
border: 1px solid rgba(0, 0, 0, 0.125);
border-radius: 0px; }
.card > hr {
margin-right: 0;
margin-left: 0; }
.card > .list-group:first-child .list-group-item:first-child {
border-top-left-radius: 0px;
border-top-right-radius: 0px; }
.card > .list-group:last-child .list-group-item:last-child {
border-bottom-right-radius: 0px;
border-bottom-left-radius: 0px; }
.card-body {
flex: 1 1 auto;
padding: 1.25rem; }
.card-title {
margin-bottom: 0.75rem; }
.card-subtitle {
margin-top: -0.375rem;
margin-bottom: 0; }
.card-text:last-child {
margin-bottom: 0; }
.card-link:hover {
text-decoration: none; }
.card-link + .card-link {
margin-left: 1.25rem; }
.card-header {
padding: 0.75rem 1.25rem;
margin-bottom: 0;
background-color: rgba(0, 0, 0, 0.03);
border-bottom: 1px solid rgba(0, 0, 0, 0.125); }
.card-header:first-child {
border-radius: calc(0px - 1px) calc(0px - 1px) 0 0; }
.card-header + .list-group .list-group-item:first-child {
border-top: 0; }
.card-footer {
padding: 0.75rem 1.25rem;
background-color: rgba(0, 0, 0, 0.03);
border-top: 1px solid rgba(0, 0, 0, 0.125); }
.card-footer:last-child {
border-radius: 0 0 calc(0px - 1px) calc(0px - 1px); }
.card-header-tabs {
margin-right: -0.625rem;
margin-bottom: -0.75rem;
margin-left: -0.625rem;
border-bottom: 0; }
.card-header-pills {
margin-right: -0.625rem;
margin-left: -0.625rem; }
.card-img-overlay {
position: absolute;
top: 0;
right: 0;
bottom: 0;
left: 0;
padding: 1.25rem; }
.card-img {
width: 100%;
border-radius: calc(0px - 1px); }
.card-img-top {
width: 100%;
border-top-left-radius: calc(0px - 1px);
border-top-right-radius: calc(0px - 1px); }
.card-img-bottom {
width: 100%;
border-bottom-right-radius: calc(0px - 1px);
border-bottom-left-radius: calc(0px - 1px); }
.card-deck {
display: flex;
flex-direction: column; }
.card-deck .card {
margin-bottom: 15px; }
@media (min-width: 576px) {
.card-deck {
flex-flow: row wrap;
margin-right: -15px;
margin-left: -15px; }
.card-deck .card {
display: flex;
flex: 1 0 0%;
flex-direction: column;
margin-right: 15px;
margin-bottom: 0;
margin-left: 15px; } }
.card-group {
display: flex;
flex-direction: column; }
.card-group > .card {
margin-bottom: 15px; }
@media (min-width: 576px) {
.card-group {
flex-flow: row wrap; }
.card-group > .card {
flex: 1 0 0%;
margin-bottom: 0; }
.card-group > .card + .card {
margin-left: 0;
border-left: 0; }
.card-group > .card:first-child {
border-top-right-radius: 0;
border-bottom-right-radius: 0; }
.card-group > .card:first-child .card-img-top,
.card-group > .card:first-child .card-header {
border-top-right-radius: 0; }
.card-group > .card:first-child .card-img-bottom,
.card-group > .card:first-child .card-footer {
border-bottom-right-radius: 0; }
.card-group > .card:last-child {
border-top-left-radius: 0;
border-bottom-left-radius: 0; }
.card-group > .card:last-child .card-img-top,
.card-group > .card:last-child .card-header {
border-top-left-radius: 0; }
.card-group > .card:last-child .card-img-bottom,
.card-group > .card:last-child .card-footer {
border-bottom-left-radius: 0; }
.card-group > .card:only-child {
border-radius: 0px; }
.card-group > .card:only-child .card-img-top,
.card-group > .card:only-child .card-header {
border-top-left-radius: 0px;
border-top-right-radius: 0px; }
.card-group > .card:only-child .card-img-bottom,
.card-group > .card:only-child .card-footer {
border-bottom-right-radius: 0px;
border-bottom-left-radius: 0px; }
.card-group > .card:not(:first-child):not(:last-child):not(:only-child) {
border-radius: 0; }
.card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-top,
.card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom,
.card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-header,
.card-group > .card:not(:first-child):not(:last-child):not(:only-child) .card-footer {
border-radius: 0; } }
.card-columns .card {
margin-bottom: 0.75rem; }
@media (min-width: 576px) {
.card-columns {
column-count: 3;
column-gap: 1.25rem; }
.card-columns .card {
display: inline-block;
width: 100%; } }
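/* Breadcrumb: typically an <ol class="breadcrumb"> of .breadcrumb-item entries,
   separated by a generated "/" divider. */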
.breadcrumb {
display: flex;
flex-wrap: wrap;
padding: 0.75rem 1rem;
margin-bottom: 1rem;
list-style: none;
background-color: #e9ecef;
border-radius: 0px; }
.breadcrumb-item + .breadcrumb-item::before {
display: inline-block;
padding-right: 0.5rem;
padding-left: 0.5rem;
color: #6c757d;
content: "/"; }
  .breadcrumb-item + .breadcrumb-item:hover::before {
    text-decoration: none; }
.breadcrumb-item.active {
color: #6c757d; }
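/* Pagination: .page-item > .page-link inside a .pagination list, with
   .pagination-lg and .pagination-sm size variants. */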
.pagination {
display: flex;
padding-left: 0;
list-style: none;
border-radius: 0px; }
.page-link {
position: relative;
display: block;
padding: 0.5rem 0.75rem;
margin-left: -1px;
line-height: 1.25;
color: #512479;
background-color: #fff;
border: 1px solid #dee2e6; }
.page-link:hover {
color: #523047;
text-decoration: none;
background-color: #e9ecef;
border-color: #dee2e6; }
.page-link:focus {
z-index: 2;
outline: 0;
box-shadow: 0 0 0 0.2rem rgba(51, 51, 51, 0.25); }
.page-link:not(:disabled):not(.disabled) {
cursor: pointer; }
.page-item:first-child .page-link {
margin-left: 0;
border-top-left-radius: 0px;
border-bottom-left-radius: 0px; }
.page-item:last-child .page-link {
border-top-right-radius: 0px;
border-bottom-right-radius: 0px; }
.page-item.active .page-link {
z-index: 1;
color: #fff;
background-color: #333;
border-color: #333; }
.page-item.disabled .page-link {
color: #6c757d;
pointer-events: none;
cursor: auto;
background-color: #fff;
border-color: #dee2e6; }
.pagination-lg .page-link {
padding: 0.75rem 1.5rem;
font-size: 1.25rem;
line-height: 1.5; }
.pagination-lg .page-item:first-child .page-link {
border-top-left-radius: 0px;
border-bottom-left-radius: 0px; }
.pagination-lg .page-item:last-child .page-link {
border-top-right-radius: 0px;
border-bottom-right-radius: 0px; }
.pagination-sm .page-link {
padding: 0.25rem 0.5rem;
font-size: 0.875rem;
line-height: 1.5; }
.pagination-sm .page-item:first-child .page-link {
border-top-left-radius: 0px;
border-bottom-left-radius: 0px; }
.pagination-sm .page-item:last-child .page-link {
border-top-right-radius: 0px;
border-bottom-right-radius: 0px; }
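/* Badges: inline labels with contextual color variants and a .badge-pill shape. */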
.badge {
display: inline-block;
padding: 0.25em 0.4em;
font-size: 75%;
font-weight: 700;
line-height: 1;
text-align: center;
white-space: nowrap;
vertical-align: baseline;
border-radius: 0px; }
.badge:empty {
display: none; }
.btn .badge {
position: relative;
top: -1px; }
.badge-pill {
padding-right: 0.6em;
padding-left: 0.6em;
border-radius: 10rem; }
.badge-primary {
color: #fff;
background-color: #512479; }
.badge-primary[href]:hover, .badge-primary[href]:focus {
color: #fff;
text-decoration: none;
background-color: #49075e; }
.badge-secondary {
color: #fff;
background-color: #333; }
.badge-secondary[href]:hover, .badge-secondary[href]:focus {
color: #fff;
text-decoration: none;
background-color: #1a1a1a; }
.badge-success {
color: #fff;
background-color: #28a745; }
.badge-success[href]:hover, .badge-success[href]:focus {
color: #fff;
text-decoration: none;
background-color: #1e7e34; }
.badge-info {
color: #fff;
background-color: #17a2b8; }
.badge-info[href]:hover, .badge-info[href]:focus {
color: #fff;
text-decoration: none;
background-color: #117a8b; }
.badge-warning {
color: #212529;
background-color: #ffc107; }
.badge-warning[href]:hover, .badge-warning[href]:focus {
color: #212529;
text-decoration: none;
background-color: #d39e00; }
.badge-danger {
color: #fff;
background-color: #dc3545; }
.badge-danger[href]:hover, .badge-danger[href]:focus {
color: #fff;
text-decoration: none;
background-color: #bd2130; }
.badge-light {
color: #212529;
background-color: #f8f9fa; }
.badge-light[href]:hover, .badge-light[href]:focus {
color: #212529;
text-decoration: none;
background-color: #dae0e5; }
.badge-dark {
color: #fff;
background-color: #343a40; }
.badge-dark[href]:hover, .badge-dark[href]:focus {
color: #fff;
text-decoration: none;
background-color: #1d2124; }
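/* Jumbotron: large callout panel. */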
.jumbotron {
padding: 2rem 1rem;
margin-bottom: 2rem;
background-color: #e9ecef;
border-radius: 0px; }
@media (min-width: 576px) {
.jumbotron {
padding: 4rem 2rem; } }
.jumbotron-fluid {
padding-right: 0;
padding-left: 0;
border-radius: 0; }
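/* Alerts: contextual message boxes; .alert-dismissible reserves space for a
   .close button on the right. */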
.alert {
position: relative;
padding: 0.75rem 1.25rem;
margin-bottom: 1rem;
border: 1px solid transparent;
border-radius: 0px; }
.alert-heading {
color: inherit; }
.alert-link {
font-weight: 700; }
.alert-dismissible {
padding-right: 4rem; }
.alert-dismissible .close {
position: absolute;
top: 0;
right: 0;
padding: 0.75rem 1.25rem;
color: inherit; }
.alert-primary {
color: #44283b;
background-color: #e6dbe3;
border-color: #dccdd7; }
.alert-primary hr {
border-top-color: #d2becb; }
.alert-primary .alert-link {
color: #24151f; }
.alert-secondary {
color: #1b1b1b;
background-color: #d6d6d6;
border-color: #c6c6c6; }
.alert-secondary hr {
border-top-color: #b9b9b9; }
.alert-secondary .alert-link {
color: #020202; }
.alert-success {
color: #155724;
background-color: #d4edda;
border-color: #c3e6cb; }
.alert-success hr {
border-top-color: #b1dfbb; }
.alert-success .alert-link {
color: #0b2e13; }
.alert-info {
color: #0c5460;
background-color: #d1ecf1;
border-color: #bee5eb; }
.alert-info hr {
border-top-color: #abdde5; }
.alert-info .alert-link {
color: #062c33; }
.alert-warning {
color: #856404;
background-color: #fff3cd;
border-color: #ffeeba; }
.alert-warning hr {
border-top-color: #ffe8a1; }
.alert-warning .alert-link {
color: #533f03; }
.alert-danger {
color: #721c24;
background-color: #f8d7da;
border-color: #f5c6cb; }
.alert-danger hr {
border-top-color: #f1b0b7; }
.alert-danger .alert-link {
color: #491217; }
.alert-light {
color: #818182;
background-color: #fefefe;
border-color: #fdfdfe; }
.alert-light hr {
border-top-color: #ececf6; }
.alert-light .alert-link {
color: #686868; }
.alert-dark {
color: #1b1e21;
background-color: #d6d8d9;
border-color: #c6c8ca; }
.alert-dark hr {
border-top-color: #b9bbbe; }
.alert-dark .alert-link {
color: #040505; }
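/* Progress bars: a .progress track containing one or more .progress-bar
   elements, with striped and animated variants. */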
@keyframes progress-bar-stripes {
from {
background-position: 1rem 0; }
to {
background-position: 0 0; } }
.progress {
display: flex;
height: 1rem;
overflow: hidden;
font-size: 0.75rem;
background-color: #e9ecef;
border-radius: 0px; }
.progress-bar {
display: flex;
flex-direction: column;
justify-content: center;
color: #fff;
text-align: center;
background-color: #512479;
transition: width 0.6s ease; }
.progress-bar-striped {
background-image: linear-gradient(45deg, rgba(255, 255, 255, 0.15) 25%, transparent 25%, transparent 50%, rgba(255, 255, 255, 0.15) 50%, rgba(255, 255, 255, 0.15) 75%, transparent 75%, transparent);
background-size: 1rem 1rem; }
.progress-bar-animated {
animation: progress-bar-stripes 1s linear infinite; }
.media {
display: flex;
align-items: flex-start; }
.media-body {
flex: 1; }
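/* List group: stacked .list-group-item elements with actionable, flush and
   contextual variants. */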
.list-group {
display: flex;
flex-direction: column;
padding-left: 0;
margin-bottom: 0; }
.list-group-item-action {
width: 100%;
color: #495057;
text-align: inherit; }
.list-group-item-action:hover, .list-group-item-action:focus {
color: #495057;
text-decoration: none;
background-color: #f8f9fa; }
.list-group-item-action:active {
color: #999;
background-color: #e9ecef; }
.list-group-item {
position: relative;
display: block;
padding: 0.75rem 1.25rem;
margin-bottom: -1px;
background-color: #fff;
border: 1px solid rgba(0, 0, 0, 0.125); }
.list-group-item:first-child {
border-top-left-radius: 0px;
border-top-right-radius: 0px; }
.list-group-item:last-child {
margin-bottom: 0;
border-bottom-right-radius: 0px;
border-bottom-left-radius: 0px; }
.list-group-item:hover, .list-group-item:focus {
z-index: 1;
text-decoration: none; }
.list-group-item.disabled, .list-group-item:disabled {
color: #6c757d;
background-color: #fff; }
.list-group-item.active {
z-index: 2;
color: #fff;
background-color: #333;
border-color: #333; }
.list-group-flush .list-group-item {
border-right: 0;
border-left: 0;
border-radius: 0; }
.list-group-flush:first-child .list-group-item:first-child {
border-top: 0; }
.list-group-flush:last-child .list-group-item:last-child {
border-bottom: 0; }
.list-group-item-primary {
color: #44283b;
background-color: #dccdd7; }
.list-group-item-primary.list-group-item-action:hover, .list-group-item-primary.list-group-item-action:focus {
color: #44283b;
background-color: #d2becb; }
.list-group-item-primary.list-group-item-action.active {
color: #fff;
background-color: #44283b;
border-color: #44283b; }
.list-group-item-secondary {
color: #1b1b1b;
background-color: #c6c6c6; }
.list-group-item-secondary.list-group-item-action:hover, .list-group-item-secondary.list-group-item-action:focus {
color: #1b1b1b;
background-color: #b9b9b9; }
.list-group-item-secondary.list-group-item-action.active {
color: #fff;
background-color: #1b1b1b;
border-color: #1b1b1b; }
.list-group-item-success {
color: #155724;
background-color: #c3e6cb; }
.list-group-item-success.list-group-item-action:hover, .list-group-item-success.list-group-item-action:focus {
color: #155724;
background-color: #b1dfbb; }
.list-group-item-success.list-group-item-action.active {
color: #fff;
background-color: #155724;
border-color: #155724; }
.list-group-item-info {
color: #0c5460;
background-color: #bee5eb; }
.list-group-item-info.list-group-item-action:hover, .list-group-item-info.list-group-item-action:focus {
color: #0c5460;
background-color: #abdde5; }
.list-group-item-info.list-group-item-action.active {
color: #fff;
background-color: #0c5460;
border-color: #0c5460; }
.list-group-item-warning {
color: #856404;
background-color: #ffeeba; }
.list-group-item-warning.list-group-item-action:hover, .list-group-item-warning.list-group-item-action:focus {
color: #856404;
background-color: #ffe8a1; }
.list-group-item-warning.list-group-item-action.active {
color: #fff;
background-color: #856404;
border-color: #856404; }
.list-group-item-danger {
color: #721c24;
background-color: #f5c6cb; }
.list-group-item-danger.list-group-item-action:hover, .list-group-item-danger.list-group-item-action:focus {
color: #721c24;
background-color: #f1b0b7; }
.list-group-item-danger.list-group-item-action.active {
color: #fff;
background-color: #721c24;
border-color: #721c24; }
.list-group-item-light {
color: #818182;
background-color: #fdfdfe; }
.list-group-item-light.list-group-item-action:hover, .list-group-item-light.list-group-item-action:focus {
color: #818182;
background-color: #ececf6; }
.list-group-item-light.list-group-item-action.active {
color: #fff;
background-color: #818182;
border-color: #818182; }
.list-group-item-dark {
color: #1b1e21;
background-color: #c6c8ca; }
.list-group-item-dark.list-group-item-action:hover, .list-group-item-dark.list-group-item-action:focus {
color: #1b1e21;
background-color: #b9bbbe; }
.list-group-item-dark.list-group-item-action.active {
color: #fff;
background-color: #1b1e21;
border-color: #1b1e21; }
.close {
float: right;
font-size: 1.5rem;
font-weight: 700;
line-height: 1;
color: #000;
text-shadow: 0 1px 0 #fff;
opacity: .5; }
.close:hover, .close:focus {
color: #000;
text-decoration: none;
opacity: .75; }
.close:not(:disabled):not(.disabled) {
cursor: pointer; }
button.close {
padding: 0;
background-color: transparent;
border: 0;
-webkit-appearance: none; }
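/* Modal: typical markup nests .modal > .modal-dialog > .modal-content with
   header, body and footer; .modal-backdrop dims the page behind it. */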
.modal-open {
overflow: hidden; }
.modal {
position: fixed;
top: 0;
right: 0;
bottom: 0;
left: 0;
z-index: 1050;
display: none;
overflow: hidden;
outline: 0; }
.modal-open .modal {
overflow-x: hidden;
overflow-y: auto; }
.modal-dialog {
position: relative;
width: auto;
margin: 0.5rem;
pointer-events: none; }
.modal.fade .modal-dialog {
transition: transform 0.3s ease-out;
transform: translate(0, -25%); }
.modal.show .modal-dialog {
transform: translate(0, 0); }
.modal-dialog-centered {
display: flex;
align-items: center;
min-height: calc(100% - (0.5rem * 2)); }
.modal-content {
position: relative;
display: flex;
flex-direction: column;
width: 100%;
pointer-events: auto;
background-color: #fff;
background-clip: padding-box;
border: 1px solid rgba(0, 0, 0, 0.2);
border-radius: 0px;
outline: 0; }
.modal-backdrop {
position: fixed;
top: 0;
right: 0;
bottom: 0;
left: 0;
z-index: 1040;
background-color: #000; }
.modal-backdrop.fade {
opacity: 0; }
.modal-backdrop.show {
opacity: 0.5; }
.modal-header {
display: flex;
align-items: flex-start;
justify-content: space-between;
padding: 1rem;
border-bottom: 1px solid #e9ecef;
border-top-left-radius: 0px;
border-top-right-radius: 0px; }
.modal-header .close {
padding: 1rem;
margin: -1rem -1rem -1rem auto; }
.modal-title {
margin-bottom: 0;
line-height: 2.4; }
.modal-body {
position: relative;
flex: 1 1 auto;
padding: 1rem; }
.modal-footer {
display: flex;
align-items: center;
justify-content: flex-end;
padding: 1rem;
border-top: 1px solid #e9ecef; }
.modal-footer > :not(:first-child) {
margin-left: .25rem; }
.modal-footer > :not(:last-child) {
margin-right: .25rem; }
.modal-scrollbar-measure {
position: absolute;
top: -9999px;
width: 50px;
height: 50px;
overflow: scroll; }
@media (min-width: 576px) {
.modal-dialog {
max-width: 500px;
margin: 1.75rem auto; }
.modal-dialog-centered {
min-height: calc(100% - (1.75rem * 2)); }
.modal-sm {
max-width: 300px; } }
@media (min-width: 992px) {
.modal-lg {
max-width: 800px; } }
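/* Tooltips: positioned via the .bs-tooltip-{top,right,bottom,left} placement classes. */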
.tooltip {
position: absolute;
z-index: 1070;
display: block;
margin: 0;
font-family: "Poppins", sans-serif;
font-style: normal;
font-weight: 400;
line-height: 2.4;
text-align: left;
text-align: start;
text-decoration: none;
text-shadow: none;
text-transform: none;
letter-spacing: normal;
word-break: normal;
word-spacing: normal;
white-space: normal;
line-break: auto;
font-size: 0.875rem;
word-wrap: break-word;
opacity: 0; }
.tooltip.show {
opacity: 0.9; }
.tooltip .arrow {
position: absolute;
display: block;
width: 0.8rem;
height: 0.4rem; }
.tooltip .arrow::before {
position: absolute;
content: "";
border-color: transparent;
border-style: solid; }
.bs-tooltip-top, .bs-tooltip-auto[x-placement^="top"] {
padding: 0.4rem 0; }
.bs-tooltip-top .arrow, .bs-tooltip-auto[x-placement^="top"] .arrow {
bottom: 0; }
.bs-tooltip-top .arrow::before, .bs-tooltip-auto[x-placement^="top"] .arrow::before {
top: 0;
border-width: 0.4rem 0.4rem 0;
border-top-color: #000; }
.bs-tooltip-right, .bs-tooltip-auto[x-placement^="right"] {
padding: 0 0.4rem; }
.bs-tooltip-right .arrow, .bs-tooltip-auto[x-placement^="right"] .arrow {
left: 0;
width: 0.4rem;
height: 0.8rem; }
.bs-tooltip-right .arrow::before, .bs-tooltip-auto[x-placement^="right"] .arrow::before {
right: 0;
border-width: 0.4rem 0.4rem 0.4rem 0;
border-right-color: #000; }
.bs-tooltip-bottom, .bs-tooltip-auto[x-placement^="bottom"] {
padding: 0.4rem 0; }
.bs-tooltip-bottom .arrow, .bs-tooltip-auto[x-placement^="bottom"] .arrow {
top: 0; }
.bs-tooltip-bottom .arrow::before, .bs-tooltip-auto[x-placement^="bottom"] .arrow::before {
bottom: 0;
border-width: 0 0.4rem 0.4rem;
border-bottom-color: #000; }
.bs-tooltip-left, .bs-tooltip-auto[x-placement^="left"] {
padding: 0 0.4rem; }
.bs-tooltip-left .arrow, .bs-tooltip-auto[x-placement^="left"] .arrow {
right: 0;
width: 0.4rem;
height: 0.8rem; }
.bs-tooltip-left .arrow::before, .bs-tooltip-auto[x-placement^="left"] .arrow::before {
left: 0;
border-width: 0.4rem 0 0.4rem 0.4rem;
border-left-color: #000; }
.tooltip-inner {
max-width: 200px;
padding: 0.25rem 0.5rem;
color: #fff;
text-align: center;
background-color: #000;
border-radius: 0px; }
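/* Popovers: like tooltips but with a .popover-header and .popover-body. */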
.popover {
position: absolute;
top: 0;
left: 0;
z-index: 1060;
display: block;
max-width: 276px;
font-family: "Poppins", sans-serif;
font-style: normal;
font-weight: 400;
line-height: 2.4;
text-align: left;
text-align: start;
text-decoration: none;
text-shadow: none;
text-transform: none;
letter-spacing: normal;
word-break: normal;
word-spacing: normal;
white-space: normal;
line-break: auto;
font-size: 0.875rem;
word-wrap: break-word;
background-color: #fff;
background-clip: padding-box;
border: 1px solid rgba(0, 0, 0, 0.2);
border-radius: 0px; }
.popover .arrow {
position: absolute;
display: block;
width: 1rem;
height: 0.5rem;
margin: 0 0px; }
.popover .arrow::before, .popover .arrow::after {
position: absolute;
display: block;
content: "";
border-color: transparent;
border-style: solid; }
.bs-popover-top, .bs-popover-auto[x-placement^="top"] {
margin-bottom: 0.5rem; }
.bs-popover-top .arrow, .bs-popover-auto[x-placement^="top"] .arrow {
bottom: calc((0.5rem + 1px) * -1); }
.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before,
.bs-popover-top .arrow::after,
.bs-popover-auto[x-placement^="top"] .arrow::after {
border-width: 0.5rem 0.5rem 0; }
.bs-popover-top .arrow::before, .bs-popover-auto[x-placement^="top"] .arrow::before {
bottom: 0;
border-top-color: rgba(0, 0, 0, 0.25); }
.bs-popover-top .arrow::after, .bs-popover-auto[x-placement^="top"] .arrow::after {
bottom: 1px;
border-top-color: #fff; }
.bs-popover-right, .bs-popover-auto[x-placement^="right"] {
margin-left: 0.5rem; }
.bs-popover-right .arrow, .bs-popover-auto[x-placement^="right"] .arrow {
left: calc((0.5rem + 1px) * -1);
width: 0.5rem;
height: 1rem;
margin: 0px 0; }
.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before,
.bs-popover-right .arrow::after,
.bs-popover-auto[x-placement^="right"] .arrow::after {
border-width: 0.5rem 0.5rem 0.5rem 0; }
.bs-popover-right .arrow::before, .bs-popover-auto[x-placement^="right"] .arrow::before {
left: 0;
border-right-color: rgba(0, 0, 0, 0.25); }
.bs-popover-right .arrow::after, .bs-popover-auto[x-placement^="right"] .arrow::after {
left: 1px;
border-right-color: #fff; }
.bs-popover-bottom, .bs-popover-auto[x-placement^="bottom"] {
margin-top: 0.5rem; }
.bs-popover-bottom .arrow, .bs-popover-auto[x-placement^="bottom"] .arrow {
top: calc((0.5rem + 1px) * -1); }
.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before,
.bs-popover-bottom .arrow::after,
.bs-popover-auto[x-placement^="bottom"] .arrow::after {
border-width: 0 0.5rem 0.5rem 0.5rem; }
.bs-popover-bottom .arrow::before, .bs-popover-auto[x-placement^="bottom"] .arrow::before {
top: 0;
border-bottom-color: rgba(0, 0, 0, 0.25); }
.bs-popover-bottom .arrow::after, .bs-popover-auto[x-placement^="bottom"] .arrow::after {
top: 1px;
border-bottom-color: #fff; }
.bs-popover-bottom .popover-header::before, .bs-popover-auto[x-placement^="bottom"] .popover-header::before {
position: absolute;
top: 0;
left: 50%;
display: block;
width: 1rem;
margin-left: -0.5rem;
content: "";
border-bottom: 1px solid #f7f7f7; }
.bs-popover-left, .bs-popover-auto[x-placement^="left"] {
margin-right: 0.5rem; }
.bs-popover-left .arrow, .bs-popover-auto[x-placement^="left"] .arrow {
right: calc((0.5rem + 1px) * -1);
width: 0.5rem;
height: 1rem;
margin: 0px 0; }
.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before,
.bs-popover-left .arrow::after,
.bs-popover-auto[x-placement^="left"] .arrow::after {
border-width: 0.5rem 0 0.5rem 0.5rem; }
.bs-popover-left .arrow::before, .bs-popover-auto[x-placement^="left"] .arrow::before {
right: 0;
border-left-color: rgba(0, 0, 0, 0.25); }
.bs-popover-left .arrow::after, .bs-popover-auto[x-placement^="left"] .arrow::after {
right: 1px;
border-left-color: #fff; }
.popover-header {
padding: 0.5rem 0.75rem;
margin-bottom: 0;
font-size: 1rem;
color: inherit;
background-color: #f7f7f7;
border-bottom: 1px solid #ebebeb;
border-top-left-radius: calc(0px - 1px);
border-top-right-radius: calc(0px - 1px); }
.popover-header:empty {
display: none; }
.popover-body {
padding: 0.5rem 0.75rem;
color: #999; }
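/* Carousel: .carousel-inner holds .carousel-item slides; controls and
   indicators are tinted with this theme's primary color (#512479). */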
.carousel {
position: relative; }
.carousel-inner {
position: relative;
width: 100%;
overflow: hidden; }
.carousel-item {
position: relative;
display: none;
align-items: center;
width: 100%;
transition: transform 0.6s ease;
backface-visibility: hidden;
perspective: 1000px; }
.carousel-item.active,
.carousel-item-next,
.carousel-item-prev {
display: block; }
.carousel-item-next,
.carousel-item-prev {
position: absolute;
top: 0; }
.carousel-item-next.carousel-item-left,
.carousel-item-prev.carousel-item-right {
transform: translateX(0); }
@supports (transform-style: preserve-3d) {
.carousel-item-next.carousel-item-left,
.carousel-item-prev.carousel-item-right {
transform: translate3d(0, 0, 0); } }
.carousel-item-next,
.active.carousel-item-right {
transform: translateX(100%); }
@supports (transform-style: preserve-3d) {
.carousel-item-next,
.active.carousel-item-right {
transform: translate3d(100%, 0, 0); } }
.carousel-item-prev,
.active.carousel-item-left {
transform: translateX(-100%); }
@supports (transform-style: preserve-3d) {
.carousel-item-prev,
.active.carousel-item-left {
transform: translate3d(-100%, 0, 0); } }
.carousel-control-prev,
.carousel-control-next {
position: absolute;
top: 0;
bottom: 0;
display: flex;
align-items: center;
justify-content: center;
width: 2%;
color: #512479;
text-align: center;
opacity: 1; }
.carousel-control-prev:hover, .carousel-control-prev:focus,
.carousel-control-next:hover,
.carousel-control-next:focus {
color: #512479;
text-decoration: none;
outline: 0;
opacity: .9; }
.carousel-control-prev {
left: 0; }
.carousel-control-next {
right: 0; }
.carousel-control-prev-icon,
.carousel-control-next-icon {
display: inline-block;
width: 20px;
height: 20px;
background: transparent no-repeat center center;
background-size: 100% 100%; }
.carousel-control-prev-icon {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23512479' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E"); }
.carousel-control-next-icon {
background-image: url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23512479' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 4-4-4-4z'/%3E%3C/svg%3E"); }
.carousel-indicators {
position: absolute;
right: 0;
bottom: 10px;
left: 0;
z-index: 15;
display: flex;
justify-content: center;
padding-left: 0;
margin-right: 2%;
margin-left: 2%;
list-style: none; }
.carousel-indicators li {
position: relative;
flex: 0 1 auto;
width: 30px;
height: 3px;
margin-right: 3px;
margin-left: 3px;
text-indent: -999px;
background-color: rgba(255, 255, 255, 0.5); }
.carousel-indicators li::before {
position: absolute;
top: -10px;
left: 0;
display: inline-block;
width: 100%;
height: 10px;
content: ""; }
.carousel-indicators li::after {
position: absolute;
bottom: -10px;
left: 0;
display: inline-block;
width: 100%;
height: 10px;
content: ""; }
.carousel-indicators .active {
background-color: #fff; }
.carousel-caption {
position: absolute;
right: 15%;
bottom: 20px;
left: 15%;
z-index: 10;
padding-top: 20px;
padding-bottom: 20px;
color: #fff;
text-align: center; }
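/* Utility classes: alignment, background, border, display, embed, flex, float,
   position, sizing and spacing helpers, many with responsive -sm/-md/-lg/-xl variants. */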
.align-baseline {
vertical-align: baseline !important; }
.align-top {
vertical-align: top !important; }
.align-middle {
vertical-align: middle !important; }
.align-bottom {
vertical-align: bottom !important; }
.align-text-bottom {
vertical-align: text-bottom !important; }
.align-text-top {
vertical-align: text-top !important; }
.bg-primary {
background-color: #512479 !important; }
a.bg-primary:hover, a.bg-primary:focus,
button.bg-primary:hover,
button.bg-primary:focus {
background-color: #49075e !important; }
.bg-secondary {
background-color: #333 !important; }
a.bg-secondary:hover, a.bg-secondary:focus,
button.bg-secondary:hover,
button.bg-secondary:focus {
background-color: #1a1a1a !important; }
.bg-success {
background-color: #28a745 !important; }
a.bg-success:hover, a.bg-success:focus,
button.bg-success:hover,
button.bg-success:focus {
background-color: #1e7e34 !important; }
.bg-info {
background-color: #17a2b8 !important; }
a.bg-info:hover, a.bg-info:focus,
button.bg-info:hover,
button.bg-info:focus {
background-color: #117a8b !important; }
.bg-warning {
background-color: #ffc107 !important; }
a.bg-warning:hover, a.bg-warning:focus,
button.bg-warning:hover,
button.bg-warning:focus {
background-color: #d39e00 !important; }
.bg-danger {
background-color: #dc3545 !important; }
a.bg-danger:hover, a.bg-danger:focus,
button.bg-danger:hover,
button.bg-danger:focus {
background-color: #bd2130 !important; }
.bg-light {
background-color: #f8f9fa !important; }
a.bg-light:hover, a.bg-light:focus,
button.bg-light:hover,
button.bg-light:focus {
background-color: #dae0e5 !important; }
.bg-dark {
background-color: #343a40 !important; }
a.bg-dark:hover, a.bg-dark:focus,
button.bg-dark:hover,
button.bg-dark:focus {
background-color: #1d2124 !important; }
.bg-white {
background-color: #fff !important; }
.bg-transparent {
background-color: transparent !important; }
.border {
border: 1px solid #dee2e6 !important; }
.border-top {
border-top: 1px solid #dee2e6 !important; }
.border-right {
border-right: 1px solid #dee2e6 !important; }
.border-bottom {
border-bottom: 1px solid #dee2e6 !important; }
.border-left {
border-left: 1px solid #dee2e6 !important; }
.border-0 {
border: 0 !important; }
.border-top-0 {
border-top: 0 !important; }
.border-right-0 {
border-right: 0 !important; }
.border-bottom-0 {
border-bottom: 0 !important; }
.border-left-0 {
border-left: 0 !important; }
.border-primary {
border-color: #512479 !important; }
.border-secondary {
border-color: #333 !important; }
.border-success {
border-color: #28a745 !important; }
.border-info {
border-color: #17a2b8 !important; }
.border-warning {
border-color: #ffc107 !important; }
.border-danger {
border-color: #dc3545 !important; }
.border-light {
border-color: #f8f9fa !important; }
.border-dark {
border-color: #343a40 !important; }
.border-white {
border-color: #fff !important; }
.rounded {
border-radius: 0px !important; }
.rounded-top {
border-top-left-radius: 0px !important;
border-top-right-radius: 0px !important; }
.rounded-right {
border-top-right-radius: 0px !important;
border-bottom-right-radius: 0px !important; }
.rounded-bottom {
border-bottom-right-radius: 0px !important;
border-bottom-left-radius: 0px !important; }
.rounded-left {
border-top-left-radius: 0px !important;
border-bottom-left-radius: 0px !important; }
.rounded-circle {
border-radius: 50% !important; }
.rounded-0 {
border-radius: 0 !important; }
.clearfix::after {
display: block;
clear: both;
content: ""; }
.d-none {
display: none !important; }
.d-inline {
display: inline !important; }
.d-inline-block {
display: inline-block !important; }
.d-block {
display: block !important; }
.d-table {
display: table !important; }
.d-table-row {
display: table-row !important; }
.d-table-cell {
display: table-cell !important; }
.d-flex {
display: flex !important; }
.d-inline-flex {
display: inline-flex !important; }
@media (min-width: 576px) {
.d-sm-none {
display: none !important; }
.d-sm-inline {
display: inline !important; }
.d-sm-inline-block {
display: inline-block !important; }
.d-sm-block {
display: block !important; }
.d-sm-table {
display: table !important; }
.d-sm-table-row {
display: table-row !important; }
.d-sm-table-cell {
display: table-cell !important; }
.d-sm-flex {
display: flex !important; }
.d-sm-inline-flex {
display: inline-flex !important; } }
@media (min-width: 768px) {
.d-md-none {
display: none !important; }
.d-md-inline {
display: inline !important; }
.d-md-inline-block {
display: inline-block !important; }
.d-md-block {
display: block !important; }
.d-md-table {
display: table !important; }
.d-md-table-row {
display: table-row !important; }
.d-md-table-cell {
display: table-cell !important; }
.d-md-flex {
display: flex !important; }
.d-md-inline-flex {
display: inline-flex !important; } }
@media (min-width: 992px) {
.d-lg-none {
display: none !important; }
.d-lg-inline {
display: inline !important; }
.d-lg-inline-block {
display: inline-block !important; }
.d-lg-block {
display: block !important; }
.d-lg-table {
display: table !important; }
.d-lg-table-row {
display: table-row !important; }
.d-lg-table-cell {
display: table-cell !important; }
.d-lg-flex {
display: flex !important; }
.d-lg-inline-flex {
display: inline-flex !important; } }
@media (min-width: 1200px) {
.d-xl-none {
display: none !important; }
.d-xl-inline {
display: inline !important; }
.d-xl-inline-block {
display: inline-block !important; }
.d-xl-block {
display: block !important; }
.d-xl-table {
display: table !important; }
.d-xl-table-row {
display: table-row !important; }
.d-xl-table-cell {
display: table-cell !important; }
.d-xl-flex {
display: flex !important; }
.d-xl-inline-flex {
display: inline-flex !important; } }
@media print {
.d-print-none {
display: none !important; }
.d-print-inline {
display: inline !important; }
.d-print-inline-block {
display: inline-block !important; }
.d-print-block {
display: block !important; }
.d-print-table {
display: table !important; }
.d-print-table-row {
display: table-row !important; }
.d-print-table-cell {
display: table-cell !important; }
.d-print-flex {
display: flex !important; }
.d-print-inline-flex {
display: inline-flex !important; } }
.embed-responsive {
position: relative;
display: block;
width: 100%;
padding: 0;
overflow: hidden; }
.embed-responsive::before {
display: block;
content: ""; }
.embed-responsive .embed-responsive-item,
.embed-responsive iframe,
.embed-responsive embed,
.embed-responsive object,
.embed-responsive video {
position: absolute;
top: 0;
bottom: 0;
left: 0;
width: 100%;
height: 100%;
border: 0; }
.embed-responsive-21by9::before {
padding-top: 42.8571428571%; }
.embed-responsive-16by9::before {
padding-top: 56.25%; }
.embed-responsive-4by3::before {
padding-top: 75%; }
.embed-responsive-1by1::before {
padding-top: 100%; }
.flex-row {
flex-direction: row !important; }
.flex-column {
flex-direction: column !important; }
.flex-row-reverse {
flex-direction: row-reverse !important; }
.flex-column-reverse {
flex-direction: column-reverse !important; }
.flex-wrap {
flex-wrap: wrap !important; }
.flex-nowrap {
flex-wrap: nowrap !important; }
.flex-wrap-reverse {
flex-wrap: wrap-reverse !important; }
.justify-content-start {
justify-content: flex-start !important; }
.justify-content-end {
justify-content: flex-end !important; }
.justify-content-center {
justify-content: center !important; }
.justify-content-between {
justify-content: space-between !important; }
.justify-content-around {
justify-content: space-around !important; }
.align-items-start {
align-items: flex-start !important; }
.align-items-end {
align-items: flex-end !important; }
.align-items-center {
align-items: center !important; }
.align-items-baseline {
align-items: baseline !important; }
.align-items-stretch {
align-items: stretch !important; }
.align-content-start {
align-content: flex-start !important; }
.align-content-end {
align-content: flex-end !important; }
.align-content-center {
align-content: center !important; }
.align-content-between {
align-content: space-between !important; }
.align-content-around {
align-content: space-around !important; }
.align-content-stretch {
align-content: stretch !important; }
.align-self-auto {
align-self: auto !important; }
.align-self-start {
align-self: flex-start !important; }
.align-self-end {
align-self: flex-end !important; }
.align-self-center {
align-self: center !important; }
.align-self-baseline {
align-self: baseline !important; }
.align-self-stretch {
align-self: stretch !important; }
@media (min-width: 576px) {
.flex-sm-row {
flex-direction: row !important; }
.flex-sm-column {
flex-direction: column !important; }
.flex-sm-row-reverse {
flex-direction: row-reverse !important; }
.flex-sm-column-reverse {
flex-direction: column-reverse !important; }
.flex-sm-wrap {
flex-wrap: wrap !important; }
.flex-sm-nowrap {
flex-wrap: nowrap !important; }
.flex-sm-wrap-reverse {
flex-wrap: wrap-reverse !important; }
.justify-content-sm-start {
justify-content: flex-start !important; }
.justify-content-sm-end {
justify-content: flex-end !important; }
.justify-content-sm-center {
justify-content: center !important; }
.justify-content-sm-between {
justify-content: space-between !important; }
.justify-content-sm-around {
justify-content: space-around !important; }
.align-items-sm-start {
align-items: flex-start !important; }
.align-items-sm-end {
align-items: flex-end !important; }
.align-items-sm-center {
align-items: center !important; }
.align-items-sm-baseline {
align-items: baseline !important; }
.align-items-sm-stretch {
align-items: stretch !important; }
.align-content-sm-start {
align-content: flex-start !important; }
.align-content-sm-end {
align-content: flex-end !important; }
.align-content-sm-center {
align-content: center !important; }
.align-content-sm-between {
align-content: space-between !important; }
.align-content-sm-around {
align-content: space-around !important; }
.align-content-sm-stretch {
align-content: stretch !important; }
.align-self-sm-auto {
align-self: auto !important; }
.align-self-sm-start {
align-self: flex-start !important; }
.align-self-sm-end {
align-self: flex-end !important; }
.align-self-sm-center {
align-self: center !important; }
.align-self-sm-baseline {
align-self: baseline !important; }
.align-self-sm-stretch {
align-self: stretch !important; } }
@media (min-width: 768px) {
.flex-md-row {
flex-direction: row !important; }
.flex-md-column {
flex-direction: column !important; }
.flex-md-row-reverse {
flex-direction: row-reverse !important; }
.flex-md-column-reverse {
flex-direction: column-reverse !important; }
.flex-md-wrap {
flex-wrap: wrap !important; }
.flex-md-nowrap {
flex-wrap: nowrap !important; }
.flex-md-wrap-reverse {
flex-wrap: wrap-reverse !important; }
.justify-content-md-start {
justify-content: flex-start !important; }
.justify-content-md-end {
justify-content: flex-end !important; }
.justify-content-md-center {
justify-content: center !important; }
.justify-content-md-between {
justify-content: space-between !important; }
.justify-content-md-around {
justify-content: space-around !important; }
.align-items-md-start {
align-items: flex-start !important; }
.align-items-md-end {
align-items: flex-end !important; }
.align-items-md-center {
align-items: center !important; }
.align-items-md-baseline {
align-items: baseline !important; }
.align-items-md-stretch {
align-items: stretch !important; }
.align-content-md-start {
align-content: flex-start !important; }
.align-content-md-end {
align-content: flex-end !important; }
.align-content-md-center {
align-content: center !important; }
.align-content-md-between {
align-content: space-between !important; }
.align-content-md-around {
align-content: space-around !important; }
.align-content-md-stretch {
align-content: stretch !important; }
.align-self-md-auto {
align-self: auto !important; }
.align-self-md-start {
align-self: flex-start !important; }
.align-self-md-end {
align-self: flex-end !important; }
.align-self-md-center {
align-self: center !important; }
.align-self-md-baseline {
align-self: baseline !important; }
.align-self-md-stretch {
align-self: stretch !important; } }
@media (min-width: 992px) {
.flex-lg-row {
flex-direction: row !important; }
.flex-lg-column {
flex-direction: column !important; }
.flex-lg-row-reverse {
flex-direction: row-reverse !important; }
.flex-lg-column-reverse {
flex-direction: column-reverse !important; }
.flex-lg-wrap {
flex-wrap: wrap !important; }
.flex-lg-nowrap {
flex-wrap: nowrap !important; }
.flex-lg-wrap-reverse {
flex-wrap: wrap-reverse !important; }
.justify-content-lg-start {
justify-content: flex-start !important; }
.justify-content-lg-end {
justify-content: flex-end !important; }
.justify-content-lg-center {
justify-content: center !important; }
.justify-content-lg-between {
justify-content: space-between !important; }
.justify-content-lg-around {
justify-content: space-around !important; }
.align-items-lg-start {
align-items: flex-start !important; }
.align-items-lg-end {
align-items: flex-end !important; }
.align-items-lg-center {
align-items: center !important; }
.align-items-lg-baseline {
align-items: baseline !important; }
.align-items-lg-stretch {
align-items: stretch !important; }
.align-content-lg-start {
align-content: flex-start !important; }
.align-content-lg-end {
align-content: flex-end !important; }
.align-content-lg-center {
align-content: center !important; }
.align-content-lg-between {
align-content: space-between !important; }
.align-content-lg-around {
align-content: space-around !important; }
.align-content-lg-stretch {
align-content: stretch !important; }
.align-self-lg-auto {
align-self: auto !important; }
.align-self-lg-start {
align-self: flex-start !important; }
.align-self-lg-end {
align-self: flex-end !important; }
.align-self-lg-center {
align-self: center !important; }
.align-self-lg-baseline {
align-self: baseline !important; }
.align-self-lg-stretch {
align-self: stretch !important; } }
@media (min-width: 1200px) {
.flex-xl-row {
flex-direction: row !important; }
.flex-xl-column {
flex-direction: column !important; }
.flex-xl-row-reverse {
flex-direction: row-reverse !important; }
.flex-xl-column-reverse {
flex-direction: column-reverse !important; }
.flex-xl-wrap {
flex-wrap: wrap !important; }
.flex-xl-nowrap {
flex-wrap: nowrap !important; }
.flex-xl-wrap-reverse {
flex-wrap: wrap-reverse !important; }
.justify-content-xl-start {
justify-content: flex-start !important; }
.justify-content-xl-end {
justify-content: flex-end !important; }
.justify-content-xl-center {
justify-content: center !important; }
.justify-content-xl-between {
justify-content: space-between !important; }
.justify-content-xl-around {
justify-content: space-around !important; }
.align-items-xl-start {
align-items: flex-start !important; }
.align-items-xl-end {
align-items: flex-end !important; }
.align-items-xl-center {
align-items: center !important; }
.align-items-xl-baseline {
align-items: baseline !important; }
.align-items-xl-stretch {
align-items: stretch !important; }
.align-content-xl-start {
align-content: flex-start !important; }
.align-content-xl-end {
align-content: flex-end !important; }
.align-content-xl-center {
align-content: center !important; }
.align-content-xl-between {
align-content: space-between !important; }
.align-content-xl-around {
align-content: space-around !important; }
.align-content-xl-stretch {
align-content: stretch !important; }
.align-self-xl-auto {
align-self: auto !important; }
.align-self-xl-start {
align-self: flex-start !important; }
.align-self-xl-end {
align-self: flex-end !important; }
.align-self-xl-center {
align-self: center !important; }
.align-self-xl-baseline {
align-self: baseline !important; }
.align-self-xl-stretch {
align-self: stretch !important; } }
.float-left {
float: left !important; }
.float-right {
float: right !important; }
.float-none {
float: none !important; }
@media (min-width: 576px) {
.float-sm-left {
float: left !important; }
.float-sm-right {
float: right !important; }
.float-sm-none {
float: none !important; } }
@media (min-width: 768px) {
.float-md-left {
float: left !important; }
.float-md-right {
float: right !important; }
.float-md-none {
float: none !important; } }
@media (min-width: 992px) {
.float-lg-left {
float: left !important; }
.float-lg-right {
float: right !important; }
.float-lg-none {
float: none !important; } }
@media (min-width: 1200px) {
.float-xl-left {
float: left !important; }
.float-xl-right {
float: right !important; }
.float-xl-none {
float: none !important; } }
.position-static {
position: static !important; }
.position-relative {
position: relative !important; }
.position-absolute {
position: absolute !important; }
.position-fixed {
position: fixed !important; }
.position-sticky {
position: sticky !important; }
.fixed-top {
position: fixed;
top: 0;
right: 0;
left: 0;
z-index: 1030; }
.fixed-bottom {
position: fixed;
right: 0;
bottom: 0;
left: 0;
z-index: 1030; }
@supports (position: sticky) {
.sticky-top {
position: sticky;
top: 0;
z-index: 1020; } }
.sr-only {
position: absolute;
width: 1px;
height: 1px;
padding: 0;
overflow: hidden;
clip: rect(0, 0, 0, 0);
white-space: nowrap;
clip-path: inset(50%);
border: 0; }
.sr-only-focusable:active, .sr-only-focusable:focus {
position: static;
width: auto;
height: auto;
overflow: visible;
clip: auto;
white-space: normal;
clip-path: none; }
.w-25 {
width: 25% !important; }
.w-50 {
width: 50% !important; }
.w-75 {
width: 75% !important; }
.w-100 {
width: 100% !important; }
.h-25 {
height: 25% !important; }
.h-50 {
height: 50% !important; }
.h-75 {
height: 75% !important; }
.h-100 {
height: 100% !important; }
.mw-100 {
max-width: 100% !important; }
.mh-100 {
max-height: 100% !important; }
.m-0 {
margin: 0 !important; }
.mt-0,
.my-0 {
margin-top: 0 !important; }
.mr-0,
.mx-0 {
margin-right: 0 !important; }
.mb-0,
.my-0 {
margin-bottom: 0 !important; }
.ml-0,
.mx-0 {
margin-left: 0 !important; }
.m-1 {
margin: 0.25rem !important; }
.mt-1,
.my-1 {
margin-top: 0.25rem !important; }
.mr-1,
.mx-1 {
margin-right: 0.25rem !important; }
.mb-1,
.my-1 {
margin-bottom: 0.25rem !important; }
.ml-1,
.mx-1 {
margin-left: 0.25rem !important; }
.m-2 {
margin: 0.5rem !important; }
.mt-2,
.my-2 {
margin-top: 0.5rem !important; }
.mr-2,
.mx-2 {
margin-right: 0.5rem !important; }
.mb-2,
.my-2 {
margin-bottom: 0.5rem !important; }
.ml-2,
.mx-2 {
margin-left: 0.5rem !important; }
.m-3 {
margin: 1rem !important; }
.mt-3,
.my-3 {
margin-top: 1rem !important; }
.mr-3,
.mx-3 {
margin-right: 1rem !important; }
.mb-3,
.my-3 {
margin-bottom: 1rem !important; }
.ml-3,
.mx-3 {
margin-left: 1rem !important; }
.m-4 {
margin: 1.5rem !important; }
.mt-4,
.my-4 {
margin-top: 1.5rem !important; }
.mr-4,
.mx-4 {
margin-right: 1.5rem !important; }
.mb-4,
.my-4 {
margin-bottom: 1.5rem !important; }
.ml-4,
.mx-4 {
margin-left: 1.5rem !important; }
.m-5 {
margin: 3rem !important; }
.mt-5,
.my-5 {
margin-top: 3rem !important; }
.mr-5,
.mx-5 {
margin-right: 3rem !important; }
.mb-5,
.my-5 {
margin-bottom: 3rem !important; }
.ml-5,
.mx-5 {
margin-left: 3rem !important; }
.p-0 {
padding: 0 !important; }
.pt-0,
.py-0 {
padding-top: 0 !important; }
.pr-0,
.px-0 {
padding-right: 0 !important; }
.pb-0,
.py-0 {
padding-bottom: 0 !important; }
.pl-0,
.px-0 {
padding-left: 0 !important; }
.p-1 {
padding: 0.25rem !important; }
.pt-1,
.py-1 {
padding-top: 0.25rem !important; }
.pr-1,
.px-1 {
padding-right: 0.25rem !important; }
.pb-1,
.py-1 {
padding-bottom: 0.25rem !important; }
.pl-1,
.px-1 {
padding-left: 0.25rem !important; }
.p-2 {
padding: 0.5rem !important; }
.pt-2,
.py-2 {
padding-top: 0.5rem !important; }
.pr-2,
.px-2 {
padding-right: 0.5rem !important; }
.pb-2,
.py-2 {
padding-bottom: 0.5rem !important; }
.pl-2,
.px-2 {
padding-left: 0.5rem !important; }
.p-3 {
padding: 1rem !important; }
.pt-3,
.py-3 {
padding-top: 1rem !important; }
.pr-3,
.px-3 {
padding-right: 1rem !important; }
.pb-3,
.py-3 {
padding-bottom: 1rem !important; }
.pl-3,
.px-3 {
padding-left: 1rem !important; }
.p-4 {
padding: 1.5rem !important; }
.pt-4,
.py-4 {
padding-top: 1.5rem !important; }
.pr-4,
.px-4 {
padding-right: 1.5rem !important; }
.pb-4,
.py-4 {
padding-bottom: 1.5rem !important; }
.pl-4,
.px-4 {
padding-left: 1.5rem !important; }
.p-5 {
padding: 3rem !important; }
.pt-5,
.py-5 {
padding-top: 3rem !important; }
.pr-5,
.px-5 {
padding-right: 3rem !important; }
.pb-5,
.py-5 {
padding-bottom: 3rem !important; }
.pl-5,
.px-5 {
padding-left: 3rem !important; }
.m-auto {
margin: auto !important; }
.mt-auto,
.my-auto {
margin-top: auto !important; }
.mr-auto,
.mx-auto {
margin-right: auto !important; }
.mb-auto,
.my-auto {
margin-bottom: auto !important; }
.ml-auto,
.mx-auto {
margin-left: auto !important; }
@media (min-width: 576px) {
.m-sm-0 {
margin: 0 !important; }
.mt-sm-0,
.my-sm-0 {
margin-top: 0 !important; }
.mr-sm-0,
.mx-sm-0 {
margin-right: 0 !important; }
.mb-sm-0,
.my-sm-0 {
margin-bottom: 0 !important; }
.ml-sm-0,
.mx-sm-0 {
margin-left: 0 !important; }
.m-sm-1 {
margin: 0.25rem !important; }
.mt-sm-1,
.my-sm-1 {
margin-top: 0.25rem !important; }
.mr-sm-1,
.mx-sm-1 {
margin-right: 0.25rem !important; }
.mb-sm-1,
.my-sm-1 {
margin-bottom: 0.25rem !important; }
.ml-sm-1,
.mx-sm-1 {
margin-left: 0.25rem !important; }
.m-sm-2 {
margin: 0.5rem !important; }
.mt-sm-2,
.my-sm-2 {
margin-top: 0.5rem !important; }
.mr-sm-2,
.mx-sm-2 {
margin-right: 0.5rem !important; }
.mb-sm-2,
.my-sm-2 {
margin-bottom: 0.5rem !important; }
.ml-sm-2,
.mx-sm-2 {
margin-left: 0.5rem !important; }
.m-sm-3 {
margin: 1rem !important; }
.mt-sm-3,
.my-sm-3 {
margin-top: 1rem !important; }
.mr-sm-3,
.mx-sm-3 {
margin-right: 1rem !important; }
.mb-sm-3,
.my-sm-3 {
margin-bottom: 1rem !important; }
.ml-sm-3,
.mx-sm-3 {
margin-left: 1rem !important; }
.m-sm-4 {
margin: 1.5rem !important; }
.mt-sm-4,
.my-sm-4 {
margin-top: 1.5rem !important; }
.mr-sm-4,
.mx-sm-4 {
margin-right: 1.5rem !important; }
.mb-sm-4,
.my-sm-4 {
margin-bottom: 1.5rem !important; }
.ml-sm-4,
.mx-sm-4 {
margin-left: 1.5rem !important; }
.m-sm-5 {
margin: 3rem !important; }
.mt-sm-5,
.my-sm-5 {
margin-top: 3rem !important; }
.mr-sm-5,
.mx-sm-5 {
margin-right: 3rem !important; }
.mb-sm-5,
.my-sm-5 {
margin-bottom: 3rem !important; }
.ml-sm-5,
.mx-sm-5 {
margin-left: 3rem !important; }
.p-sm-0 {
padding: 0 !important; }
.pt-sm-0,
.py-sm-0 {
padding-top: 0 !important; }
.pr-sm-0,
.px-sm-0 {
padding-right: 0 !important; }
.pb-sm-0,
.py-sm-0 {
padding-bottom: 0 !important; }
.pl-sm-0,
.px-sm-0 {
padding-left: 0 !important; }
.p-sm-1 {
padding: 0.25rem !important; }
.pt-sm-1,
.py-sm-1 {
padding-top: 0.25rem !important; }
.pr-sm-1,
.px-sm-1 {
padding-right: 0.25rem !important; }
.pb-sm-1,
.py-sm-1 {
padding-bottom: 0.25rem !important; }
.pl-sm-1,
.px-sm-1 {
padding-left: 0.25rem !important; }
.p-sm-2 {
padding: 0.5rem !important; }
.pt-sm-2,
.py-sm-2 {
padding-top: 0.5rem !important; }
.pr-sm-2,
.px-sm-2 {
padding-right: 0.5rem !important; }
.pb-sm-2,
.py-sm-2 {
padding-bottom: 0.5rem !important; }
.pl-sm-2,
.px-sm-2 {
padding-left: 0.5rem !important; }
.p-sm-3 {
padding: 1rem !important; }
.pt-sm-3,
.py-sm-3 {
padding-top: 1rem !important; }
.pr-sm-3,
.px-sm-3 {
padding-right: 1rem !important; }
.pb-sm-3,
.py-sm-3 {
padding-bottom: 1rem !important; }
.pl-sm-3,
.px-sm-3 {
padding-left: 1rem !important; }
.p-sm-4 {
padding: 1.5rem !important; }
.pt-sm-4,
.py-sm-4 {
padding-top: 1.5rem !important; }
.pr-sm-4,
.px-sm-4 {
padding-right: 1.5rem !important; }
.pb-sm-4,
.py-sm-4 {
padding-bottom: 1.5rem !important; }
.pl-sm-4,
.px-sm-4 {
padding-left: 1.5rem !important; }
.p-sm-5 {
padding: 3rem !important; }
.pt-sm-5,
.py-sm-5 {
padding-top: 3rem !important; }
.pr-sm-5,
.px-sm-5 {
padding-right: 3rem !important; }
.pb-sm-5,
.py-sm-5 {
padding-bottom: 3rem !important; }
.pl-sm-5,
.px-sm-5 {
padding-left: 3rem !important; }
.m-sm-auto {
margin: auto !important; }
.mt-sm-auto,
.my-sm-auto {
margin-top: auto !important; }
.mr-sm-auto,
.mx-sm-auto {
margin-right: auto !important; }
.mb-sm-auto,
.my-sm-auto {
margin-bottom: auto !important; }
.ml-sm-auto,
.mx-sm-auto {
margin-left: auto !important; } }
@media (min-width: 768px) {
.m-md-0 {
margin: 0 !important; }
.mt-md-0,
.my-md-0 {
margin-top: 0 !important; }
.mr-md-0,
.mx-md-0 {
margin-right: 0 !important; }
.mb-md-0,
.my-md-0 {
margin-bottom: 0 !important; }
.ml-md-0,
.mx-md-0 {
margin-left: 0 !important; }
.m-md-1 {
margin: 0.25rem !important; }
.mt-md-1,
.my-md-1 {
margin-top: 0.25rem !important; }
.mr-md-1,
.mx-md-1 {
margin-right: 0.25rem !important; }
.mb-md-1,
.my-md-1 {
margin-bottom: 0.25rem !important; }
.ml-md-1,
.mx-md-1 {
margin-left: 0.25rem !important; }
.m-md-2 {
margin: 0.5rem !important; }
.mt-md-2,
.my-md-2 {
margin-top: 0.5rem !important; }
.mr-md-2,
.mx-md-2 {
margin-right: 0.5rem !important; }
.mb-md-2,
.my-md-2 {
margin-bottom: 0.5rem !important; }
.ml-md-2,
.mx-md-2 {
margin-left: 0.5rem !important; }
.m-md-3 {
margin: 1rem !important; }
.mt-md-3,
.my-md-3 {
margin-top: 1rem !important; }
.mr-md-3,
.mx-md-3 {
margin-right: 1rem !important; }
.mb-md-3,
.my-md-3 {
margin-bottom: 1rem !important; }
.ml-md-3,
.mx-md-3 {
margin-left: 1rem !important; }
.m-md-4 {
margin: 1.5rem !important; }
.mt-md-4,
.my-md-4 {
margin-top: 1.5rem !important; }
.mr-md-4,
.mx-md-4 {
margin-right: 1.5rem !important; }
.mb-md-4,
.my-md-4 {
margin-bottom: 1.5rem !important; }
.ml-md-4,
.mx-md-4 {
margin-left: 1.5rem !important; }
.m-md-5 {
margin: 3rem !important; }
.mt-md-5,
.my-md-5 {
margin-top: 3rem !important; }
.mr-md-5,
.mx-md-5 {
margin-right: 3rem !important; }
.mb-md-5,
.my-md-5 {
margin-bottom: 3rem !important; }
.ml-md-5,
.mx-md-5 {
margin-left: 3rem !important; }
.p-md-0 {
padding: 0 !important; }
.pt-md-0,
.py-md-0 {
padding-top: 0 !important; }
.pr-md-0,
.px-md-0 {
padding-right: 0 !important; }
.pb-md-0,
.py-md-0 {
padding-bottom: 0 !important; }
.pl-md-0,
.px-md-0 {
padding-left: 0 !important; }
.p-md-1 {
padding: 0.25rem !important; }
.pt-md-1,
.py-md-1 {
padding-top: 0.25rem !important; }
.pr-md-1,
.px-md-1 {
padding-right: 0.25rem !important; }
.pb-md-1,
.py-md-1 {
padding-bottom: 0.25rem !important; }
.pl-md-1,
.px-md-1 {
padding-left: 0.25rem !important; }
.p-md-2 {
padding: 0.5rem !important; }
.pt-md-2,
.py-md-2 {
padding-top: 0.5rem !important; }
.pr-md-2,
.px-md-2 {
padding-right: 0.5rem !important; }
.pb-md-2,
.py-md-2 {
padding-bottom: 0.5rem !important; }
.pl-md-2,
.px-md-2 {
padding-left: 0.5rem !important; }
.p-md-3 {
padding: 1rem !important; }
.pt-md-3,
.py-md-3 {
padding-top: 1rem !important; }
.pr-md-3,
.px-md-3 {
padding-right: 1rem !important; }
.pb-md-3,
.py-md-3 {
padding-bottom: 1rem !important; }
.pl-md-3,
.px-md-3 {
padding-left: 1rem !important; }
.p-md-4 {
padding: 1.5rem !important; }
.pt-md-4,
.py-md-4 {
padding-top: 1.5rem !important; }
.pr-md-4,
.px-md-4 {
padding-right: 1.5rem !important; }
.pb-md-4,
.py-md-4 {
padding-bottom: 1.5rem !important; }
.pl-md-4,
.px-md-4 {
padding-left: 1.5rem !important; }
.p-md-5 {
padding: 3rem !important; }
.pt-md-5,
.py-md-5 {
padding-top: 3rem !important; }
.pr-md-5,
.px-md-5 {
padding-right: 3rem !important; }
.pb-md-5,
.py-md-5 {
padding-bottom: 3rem !important; }
.pl-md-5,
.px-md-5 {
padding-left: 3rem !important; }
.m-md-auto {
margin: auto !important; }
.mt-md-auto,
.my-md-auto {
margin-top: auto !important; }
.mr-md-auto,
.mx-md-auto {
margin-right: auto !important; }
.mb-md-auto,
.my-md-auto {
margin-bottom: auto !important; }
.ml-md-auto,
.mx-md-auto {
margin-left: auto !important; } }
@media (min-width: 992px) {
.m-lg-0 {
margin: 0 !important; }
.mt-lg-0,
.my-lg-0 {
margin-top: 0 !important; }
.mr-lg-0,
.mx-lg-0 {
margin-right: 0 !important; }
.mb-lg-0,
.my-lg-0 {
margin-bottom: 0 !important; }
.ml-lg-0,
.mx-lg-0 {
margin-left: 0 !important; }
.m-lg-1 {
margin: 0.25rem !important; }
.mt-lg-1,
.my-lg-1 {
margin-top: 0.25rem !important; }
.mr-lg-1,
.mx-lg-1 {
margin-right: 0.25rem !important; }
.mb-lg-1,
.my-lg-1 {
margin-bottom: 0.25rem !important; }
.ml-lg-1,
.mx-lg-1 {
margin-left: 0.25rem !important; }
.m-lg-2 {
margin: 0.5rem !important; }
.mt-lg-2,
.my-lg-2 {
margin-top: 0.5rem !important; }
.mr-lg-2,
.mx-lg-2 {
margin-right: 0.5rem !important; }
.mb-lg-2,
.my-lg-2 {
margin-bottom: 0.5rem !important; }
.ml-lg-2,
.mx-lg-2 {
margin-left: 0.5rem !important; }
.m-lg-3 {
margin: 1rem !important; }
.mt-lg-3,
.my-lg-3 {
margin-top: 1rem !important; }
.mr-lg-3,
.mx-lg-3 {
margin-right: 1rem !important; }
.mb-lg-3,
.my-lg-3 {
margin-bottom: 1rem !important; }
.ml-lg-3,
.mx-lg-3 {
margin-left: 1rem !important; }
.m-lg-4 {
margin: 1.5rem !important; }
.mt-lg-4,
.my-lg-4 {
margin-top: 1.5rem !important; }
.mr-lg-4,
.mx-lg-4 {
margin-right: 1.5rem !important; }
.mb-lg-4,
.my-lg-4 {
margin-bottom: 1.5rem !important; }
.ml-lg-4,
.mx-lg-4 {
margin-left: 1.5rem !important; }
.m-lg-5 {
margin: 3rem !important; }
.mt-lg-5,
.my-lg-5 {
margin-top: 3rem !important; }
.mr-lg-5,
.mx-lg-5 {
margin-right: 3rem !important; }
.mb-lg-5,
.my-lg-5 {
margin-bottom: 3rem !important; }
.ml-lg-5,
.mx-lg-5 {
margin-left: 3rem !important; }
.p-lg-0 {
padding: 0 !important; }
.pt-lg-0,
.py-lg-0 {
padding-top: 0 !important; }
.pr-lg-0,
.px-lg-0 {
padding-right: 0 !important; }
.pb-lg-0,
.py-lg-0 {
padding-bottom: 0 !important; }
.pl-lg-0,
.px-lg-0 {
padding-left: 0 !important; }
.p-lg-1 {
padding: 0.25rem !important; }
.pt-lg-1,
.py-lg-1 {
padding-top: 0.25rem !important; }
.pr-lg-1,
.px-lg-1 {
padding-right: 0.25rem !important; }
.pb-lg-1,
.py-lg-1 {
padding-bottom: 0.25rem !important; }
.pl-lg-1,
.px-lg-1 {
padding-left: 0.25rem !important; }
.p-lg-2 {
padding: 0.5rem !important; }
.pt-lg-2,
.py-lg-2 {
padding-top: 0.5rem !important; }
.pr-lg-2,
.px-lg-2 {
padding-right: 0.5rem !important; }
.pb-lg-2,
.py-lg-2 {
padding-bottom: 0.5rem !important; }
.pl-lg-2,
.px-lg-2 {
padding-left: 0.5rem !important; }
.p-lg-3 {
padding: 1rem !important; }
.pt-lg-3,
.py-lg-3 {
padding-top: 1rem !important; }
.pr-lg-3,
.px-lg-3 {
padding-right: 1rem !important; }
.pb-lg-3,
.py-lg-3 {
padding-bottom: 1rem !important; }
.pl-lg-3,
.px-lg-3 {
padding-left: 1rem !important; }
.p-lg-4 {
padding: 1.5rem !important; }
.pt-lg-4,
.py-lg-4 {
padding-top: 1.5rem !important; }
.pr-lg-4,
.px-lg-4 {
padding-right: 1.5rem !important; }
.pb-lg-4,
.py-lg-4 {
padding-bottom: 1.5rem !important; }
.pl-lg-4,
.px-lg-4 {
padding-left: 1.5rem !important; }
.p-lg-5 {
padding: 3rem !important; }
.pt-lg-5,
.py-lg-5 {
padding-top: 3rem !important; }
.pr-lg-5,
.px-lg-5 {
padding-right: 3rem !important; }
.pb-lg-5,
.py-lg-5 {
padding-bottom: 3rem !important; }
.pl-lg-5,
.px-lg-5 {
padding-left: 3rem !important; }
.m-lg-auto {
margin: auto !important; }
.mt-lg-auto,
.my-lg-auto {
margin-top: auto !important; }
.mr-lg-auto,
.mx-lg-auto {
margin-right: auto !important; }
.mb-lg-auto,
.my-lg-auto {
margin-bottom: auto !important; }
.ml-lg-auto,
.mx-lg-auto {
margin-left: auto !important; } }
@media (min-width: 1200px) {
.m-xl-0 {
margin: 0 !important; }
.mt-xl-0,
.my-xl-0 {
margin-top: 0 !important; }
.mr-xl-0,
.mx-xl-0 {
margin-right: 0 !important; }
.mb-xl-0,
.my-xl-0 {
margin-bottom: 0 !important; }
.ml-xl-0,
.mx-xl-0 {
margin-left: 0 !important; }
.m-xl-1 {
margin: 0.25rem !important; }
.mt-xl-1,
.my-xl-1 {
margin-top: 0.25rem !important; }
.mr-xl-1,
.mx-xl-1 {
margin-right: 0.25rem !important; }
.mb-xl-1,
.my-xl-1 {
margin-bottom: 0.25rem !important; }
.ml-xl-1,
.mx-xl-1 {
margin-left: 0.25rem !important; }
.m-xl-2 {
margin: 0.5rem !important; }
.mt-xl-2,
.my-xl-2 {
margin-top: 0.5rem !important; }
.mr-xl-2,
.mx-xl-2 {
margin-right: 0.5rem !important; }
.mb-xl-2,
.my-xl-2 {
margin-bottom: 0.5rem !important; }
.ml-xl-2,
.mx-xl-2 {
margin-left: 0.5rem !important; }
.m-xl-3 {
margin: 1rem !important; }
.mt-xl-3,
.my-xl-3 {
margin-top: 1rem !important; }
.mr-xl-3,
.mx-xl-3 {
margin-right: 1rem !important; }
.mb-xl-3,
.my-xl-3 {
margin-bottom: 1rem !important; }
.ml-xl-3,
.mx-xl-3 {
margin-left: 1rem !important; }
.m-xl-4 {
margin: 1.5rem !important; }
.mt-xl-4,
.my-xl-4 {
margin-top: 1.5rem !important; }
.mr-xl-4,
.mx-xl-4 {
margin-right: 1.5rem !important; }
.mb-xl-4,
.my-xl-4 {
margin-bottom: 1.5rem !important; }
.ml-xl-4,
.mx-xl-4 {
margin-left: 1.5rem !important; }
.m-xl-5 {
margin: 3rem !important; }
.mt-xl-5,
.my-xl-5 {
margin-top: 3rem !important; }
.mr-xl-5,
.mx-xl-5 {
margin-right: 3rem !important; }
.mb-xl-5,
.my-xl-5 {
margin-bottom: 3rem !important; }
.ml-xl-5,
.mx-xl-5 {
margin-left: 3rem !important; }
.p-xl-0 {
padding: 0 !important; }
.pt-xl-0,
.py-xl-0 {
padding-top: 0 !important; }
.pr-xl-0,
.px-xl-0 {
padding-right: 0 !important; }
.pb-xl-0,
.py-xl-0 {
padding-bottom: 0 !important; }
.pl-xl-0,
.px-xl-0 {
padding-left: 0 !important; }
.p-xl-1 {
padding: 0.25rem !important; }
.pt-xl-1,
.py-xl-1 {
padding-top: 0.25rem !important; }
.pr-xl-1,
.px-xl-1 {
padding-right: 0.25rem !important; }
.pb-xl-1,
.py-xl-1 {
padding-bottom: 0.25rem !important; }
.pl-xl-1,
.px-xl-1 {
padding-left: 0.25rem !important; }
.p-xl-2 {
padding: 0.5rem !important; }
.pt-xl-2,
.py-xl-2 {
padding-top: 0.5rem !important; }
.pr-xl-2,
.px-xl-2 {
padding-right: 0.5rem !important; }
.pb-xl-2,
.py-xl-2 {
padding-bottom: 0.5rem !important; }
.pl-xl-2,
.px-xl-2 {
padding-left: 0.5rem !important; }
.p-xl-3 {
padding: 1rem !important; }
.pt-xl-3,
.py-xl-3 {
padding-top: 1rem !important; }
.pr-xl-3,
.px-xl-3 {
padding-right: 1rem !important; }
.pb-xl-3,
.py-xl-3 {
padding-bottom: 1rem !important; }
.pl-xl-3,
.px-xl-3 {
padding-left: 1rem !important; }
.p-xl-4 {
padding: 1.5rem !important; }
.pt-xl-4,
.py-xl-4 {
padding-top: 1.5rem !important; }
.pr-xl-4,
.px-xl-4 {
padding-right: 1.5rem !important; }
.pb-xl-4,
.py-xl-4 {
padding-bottom: 1.5rem !important; }
.pl-xl-4,
.px-xl-4 {
padding-left: 1.5rem !important; }
.p-xl-5 {
padding: 3rem !important; }
.pt-xl-5,
.py-xl-5 {
padding-top: 3rem !important; }
.pr-xl-5,
.px-xl-5 {
padding-right: 3rem !important; }
.pb-xl-5,
.py-xl-5 {
padding-bottom: 3rem !important; }
.pl-xl-5,
.px-xl-5 {
padding-left: 3rem !important; }
.m-xl-auto {
margin: auto !important; }
.mt-xl-auto,
.my-xl-auto {
margin-top: auto !important; }
.mr-xl-auto,
.mx-xl-auto {
margin-right: auto !important; }
.mb-xl-auto,
.my-xl-auto {
margin-bottom: auto !important; }
.ml-xl-auto,
.mx-xl-auto {
margin-left: auto !important; } }
.text-justify {
text-align: justify !important; }
.text-nowrap {
white-space: nowrap !important; }
.text-truncate {
overflow: hidden;
text-overflow: ellipsis;
white-space: nowrap; }
.text-left {
text-align: left !important; }
.text-right {
text-align: right !important; }
.text-center {
text-align: center !important; }
@media (min-width: 576px) {
.text-sm-left {
text-align: left !important; }
.text-sm-right {
text-align: right !important; }
.text-sm-center {
text-align: center !important; } }
@media (min-width: 768px) {
.text-md-left {
text-align: left !important; }
.text-md-right {
text-align: right !important; }
.text-md-center {
text-align: center !important; } }
@media (min-width: 992px) {
.text-lg-left {
text-align: left !important; }
.text-lg-right {
text-align: right !important; }
.text-lg-center {
text-align: center !important; } }
@media (min-width: 1200px) {
.text-xl-left {
text-align: left !important; }
.text-xl-right {
text-align: right !important; }
.text-xl-center {
text-align: center !important; } }
.text-lowercase {
text-transform: lowercase !important; }
.text-uppercase {
text-transform: uppercase !important; }
.text-capitalize {
text-transform: capitalize !important; }
.font-weight-light {
font-weight: 300 !important; }
.font-weight-normal {
font-weight: 400 !important; }
.font-weight-bold {
font-weight: 700 !important; }
.font-italic {
font-style: italic !important; }
.text-white {
color: #fff !important; }
.text-primary {
color: #512479 !important; }
a.text-primary:hover, a.text-primary:focus {
color: #49075e !important; }
.text-secondary {
color: #333 !important; }
a.text-secondary:hover, a.text-secondary:focus {
color: #1a1a1a !important; }
.text-success {
color: #28a745 !important; }
a.text-success:hover, a.text-success:focus {
color: #1e7e34 !important; }
.text-info {
color: #17a2b8 !important; }
a.text-info:hover, a.text-info:focus {
color: #117a8b !important; }
.text-warning {
color: #ffc107 !important; }
a.text-warning:hover, a.text-warning:focus {
color: #d39e00 !important; }
.text-danger {
color: #dc3545 !important; }
a.text-danger:hover, a.text-danger:focus {
color: #bd2130 !important; }
.text-light {
color: #f8f9fa !important; }
a.text-light:hover, a.text-light:focus {
color: #dae0e5 !important; }
.text-dark {
color: #343a40 !important; }
a.text-dark:hover, a.text-dark:focus {
color: #1d2124 !important; }
.text-muted {
color: #6c757d !important; }
.text-hide {
font: 0/0 a;
color: transparent;
text-shadow: none;
background-color: transparent;
border: 0; }
.visible {
visibility: visible !important; }
.invisible {
visibility: hidden !important; }
@media print {
*,
*::before,
*::after {
text-shadow: none !important;
box-shadow: none !important; }
a:not(.btn) {
text-decoration: underline; }
abbr[title]::after {
content: " (" attr(title) ")"; }
pre {
white-space: pre-wrap !important; }
pre,
blockquote {
border: 1px solid #999;
page-break-inside: avoid; }
thead {
display: table-header-group; }
tr,
img {
page-break-inside: avoid; }
p,
h2,
h3 {
orphans: 3;
widows: 3; }
h2,
h3 {
page-break-after: avoid; }
@page {
size: a3; }
body {
min-width: 992px !important; }
.container {
min-width: 992px !important; }
.navbar {
display: none; }
.badge {
border: 1px solid #000; }
.table {
border-collapse: collapse !important; }
.table td,
.table th {
background-color: #fff !important; }
.table-bordered th,
.table-bordered td {
border: 1px solid #ddd !important; } }
/********************************************************************
ZYPOP - HTTPS://ZYPOPWEBTEMPLATES.COM
FREE WEB TEMPLATES
********************************************************************/
/**
This file contains the core CSS for this template, built on the Bootstrap framework
Set the variable values in _variables.scss
**/
/**
Page structure and elements
**/
.bg-primary {
background-color: #512479 !important; }
body {
background: #512479; }
main {
padding-top: 1em; }
.text-primary {
color: #512479 !important; }
.container-fluid {
background-color: #fff; }
#sidebar {
width: auto;
padding: 0 15px;
  position: static;
overflow: scroll;
height: 100%; }
.right-sidebar #sidebar {
right: 0; }
#content {
margin-left: 0; }
a, .page-link {
  color: #512479;
font-weight: bold; }
a:hover, a:focus, a:active, .page-link:hover, .page-link:focus, .page-link:active {
color: #49075e; }
.page-item.active .page-link {
border-color: #512479;
background-color: #512479; }
fieldset {
margin-bottom: 1rem;
display: block;
border-top: 1px solid #ccc; }
fieldset legend {
width: auto;
padding-right: 0.5rem;
font-size: 1.1rem;
font-weight: bold; }
table th {
background-color: #512479;
color: #fff;
border-color: #523047 !important;
border-bottom: 1px solid #523047; }
blockquote {
display: block;
border-left: 5px solid #ccc;
padding: 0.5rem;
color: #666;
margin-bottom: 1rem; }
.btn {
font-weight: bold; }
.btn-secondary, .badge-secondary,
.btn-primary, .badge-primary {
color: #fff; }
.btn-outline-primary:hover, .btn-outline-primary:focus, .btn-outline-primary:active {
color: #fff; }
h1, h2, h3, h4, h5, h6, .h1, .h2, .h3, .h4, .h5, .h6 {
color: #333;
font-weight: bold; }
/**
Jumbotron / Slider
**/
.jumbotron-narrow {
padding: 2rem; }
.jumbotron-wrap .container {
padding-bottom: 1rem; }
#mainCarousel, .static-slider {
border-bottom: 5px solid #ebebeb;
border-radius: 0px; }
.jumbotron-wrap .jumbotron {
background: #f8f8f8;
margin-bottom: 0; }
.jumbotron-wrap h1, .jumbotron-wrap .h1 {
color: #5f5f5f; }
/**
Header
**/
header {
margin-top: 3.5rem;
padding: 0 30px 15px; }
header h1 {
color: #2d1927;
font-size: 2.5rem;
text-align: left;
line-height: 2.5rem;
letter-spacing: -0.1rem;
margin-bottom: 0; }
header h1 span {
color: #fff; }
.sidebar-social-icons {
margin-top: 20px; }
.sidebar-social-icons a {
color: white; }
.sidebar-social-icons a:hover, .sidebar-social-icons a:focus, .sidebar-social-icons a:active {
color: #333; }
/**
Navbar
**/
.navbar-toggler {
margin: 0.5rem; }
.navbar, #mainNavbar {
padding: 0;
width: 100%; }
.navbar ul {
padding: 0;
list-style: none; }
.navbar ul.sub-navbar {
padding-left: 20px;
background-color: #2d1927; }
.navbar .active ul.sub-navbar {
background-color: #fff; }
.mobile-header-controls {
display: flex;
flex-wrap: wrap;
align-items: center;
justify-content: space-between; }
#mainNavbar .nav-link {
padding: 15px 30px;
color: #fff;
background-color: #49075e; }
#mainNavbar .nav-link:hover, #mainNavbar .nav-link:focus, #mainNavbar .nav-link:active {
text-decoration: underline; }
#mainNavbar ul.sub-navbar .nav-link {
background-color: #2d1927; }
#mainNavbar .active .nav-link {
color: #512479;
background-color: #fff !important; }
.navbar-dark .navbar-brand {
color: #fff; }
.navbar-dark .navbar-brand:hover, .navbar-dark .navbar-brand:focus, .navbar-dark .navbar-brand:active {
color: #fff; }
.navbar-dark .navbar-brand span {
color: #2d1927; }
/**
Footer
**/
.footer-container {
padding-top: 1rem;
padding-bottom: 1rem; }
footer {
color: #acacac;
font-size: 0.9rem; }
.footer-lists {
background-color: #f8f8f8;
padding: 30px;
margin-bottom: 1rem;
border-top: 5px solid #ebebeb; }
.footer-bottom {
color: #c5c5c5; }
.footer-bottom a {
color: #acacac; }
.footer-bottom a:hover, .footer-bottom a:focus, .footer-bottom a:active {
color: #9f9f9f;
border-bottom-color: #9f9f9f; }
.footer-lists h4 {
color: #929292; }
.social-icons a {
margin-right: 15px;
border-bottom: none; }
.footer-lists ul {
list-style: none;
margin: 0;
padding: 0; }
.footer-lists ul li {
padding: 0.2rem 0; }
footer p {
margin: 0;
padding-bottom: 1rem; }
footer p:last-child {
padding-bottom: 0; }
footer a {
color: #b8b8b8;
border-bottom: 1px solid #b8b8b8;
font-weight: normal; }
footer a:hover, footer a:focus, footer a:active {
text-decoration: none;
border-bottom-color: #5f5f5f;
color: #5f5f5f; }
/**
Articles
**/
article {
margin-bottom: 2rem;
border-bottom: 1px solid #dee2e6;
padding-bottom: 2rem; }
article h2.article-title {
font-size: 2.5rem;
margin-bottom: 0;
color: #333;
letter-spacing: -1px; }
article p.article-meta {
color: #ccc;
font-size: 0.8rem; }
/**
Sidebar
**/
.sidebar-box {
margin-bottom: 2rem; }
.sidebar-box-bg {
padding: 1rem;
background-color: #f8f8f8;
border-radius: 0px; }
/***
Better nesting of list groups
***/
.list-group-item {
border: none;
border-bottom: 3px solid #fff; }
.sidebar-box-bg a, .list-group-item {
color: #5f5f5f; }
.sidebar-box-bg a:hover, .sidebar-box-bg a:focus, .sidebar-box-bg a:active, .list-group-item:hover, .list-group-item:focus, .list-group-item:active {
color: #464646;
text-decoration: underline; }
.list-group .list-group .list-group-item {
padding-left: 2.5rem; }
.list-group .list-group .list-group .list-group-item {
padding-left: 3.75rem; }
.list-group .list-group .list-group .list-group .list-group-item {
padding-left: 5rem; }
.list-group > .list-group .list-group-item:first-child,
.list-group > .list-group .list-group-item:last-child {
border-radius: 0; }
.list-group > .list-group .list-group-item:last-child {
border-bottom: 2px solid #fff; }
.list-group-root {
background-color: #f8f8f8;
padding: 0rem;
border-radius: 0px; }
.list-group-item {
background-color: #f8f8f8; }
.list-group-item.active {
background-color: #512479;
border-color: #fff;
color: #fff;
border-radius: 0px !important; }
.list-group-item.active:hover, .list-group-item.active:focus, .list-group-item.active:active {
color: #fff;
text-decoration: underline; }
/**
Responsive typography
https://getbootstrap.com/docs/4.0/content/typography/#responsive-typography
**/
html {
font-size: 16px; }
.navbar-container {
padding: 0; }
html {
font-size: 12px; }
@media (min-width: 768px) {
html {
font-size: 14px; }
#sidebar {
width: 320px;
position: fixed;
padding: 0; }
#content {
padding-left: 320px; }
.right-sidebar #content {
padding-left: 0 !important;
padding-right: 320px; }
#content #content-wrapper {
background-color: #fff;
padding: 15px; } }
@media (min-width: 992px) {
html {
font-size: 16px; } }''')
def create_header(bin_list, directory, active, long_read_qc_html, short_read_qc_html=None):
main_header = '''<!doctype html>
<html lang="en">
<head>
<title>slamM</title>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<!-- Main CSS -->
<link rel="stylesheet" href="css/style.css">
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/v/bs4/jq-3.3.1/jszip-2.5.0/dt-1.10.18/b-1.5.6/b-html5-1.5.6/cr-1.5.0/datatables.min.css"/>
<script src="https://code.jquery.com/jquery-3.3.1.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js" integrity="<KEY>" crossorigin="anonymous"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.36/pdfmake.min.js"></script>
<script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/pdfmake/0.1.36/vfs_fonts.js"></script>
<script type="text/javascript" src="https://cdn.datatables.net/v/bs4/jq-3.3.1/jszip-2.5.0/dt-1.10.18/b-1.5.6/b-html5-1.5.6/cr-1.5.0/datatables.min.js"></script>
</head>
<body>
<!-- Main navigation -->
<div id="sidebar">
<div class="navbar-expand-md navbar-dark">
<header class="d-none d-md-block">
<h1><span>slam</span>M</h1>
</header>
<!-- Mobile menu toggle and header -->
<div class="mobile-header-controls">
<a class="navbar-brand d-md-none d-lg-none d-xl-none" href="#"><span>slam</span>M</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#SidebarContent" aria-controls="SidebarContent" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
</div>
<div id="SidebarContent" class="collapse flex-column navbar-collapse">
<!-- Main navigation items -->
<nav class="navbar navbar-dark">
<div id="mainNavbar">
<ul class="flex-column mr-auto">
'''
if active == 'index.html':
main_header += ' <li class="nav-item active">\n'
else:
main_header += ' <li class="nav-item">\n'
main_header += ''' <a class="nav-link" href="''' + directory + '''index.html">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="''' + directory + long_read_qc_html + '''">Long read stats</a>
</li>'''
    if short_read_qc_html is not None:
main_header += ''' <li class="nav-item">
<a class="nav-link" href="''' + directory + short_read_qc_html + '''">Short read stats</a>
</li>
'''
main_header += ''' <li class="nav-item dropdown">
<a class="nav-link dropdown-toggle" href="#MenuDropdown" data-toggle="collapse" aria-controls="MenuDropdown" aria-expanded="false">Details of bins</a>
<ul id="MenuDropdown" class="sub-navbar collapse flex-column">'''
for i in bin_list:
if active == i:
main_header += ' <li class="nav-item active"><a class="nav-link" href="%sbin/%s.html">Bin %s</a></li>\n' % (
directory, i, i)
else:
main_header += ' <li class="nav-item"><a class="nav-link" href="%sbin/%s.html">Bin %s</a></li>\n' % (
directory, i, i)
main_header += ''' </ul>
</li>
<li class="nav-item">
<a class="nav-link" href="gtdbtk.html">GTDBtk</a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://github.com/mjsull/SDMass/issues">Help</a>
</li>
</ul>
</div>
</nav>
</div>
</div>
</div>
<div id="content">
<div id="content-wrapper">
'''
return (main_header)
def add_title(header, subheader=''):
return ('''
<!-- Jumbtron / Slider -->
<div class="jumbotron-wrap">
<div class="container-fluid">
<div class="jumbotron static-slider">
<h1 class="text-center">''' + header + \
'''</h1>
<p class="lead text-center">''' + subheader + '''</p>
</div>
</div>
</div>
''')
def add_main(header, text, contigs=None):
if contigs is None:
return (''' <main class="container-fluid">
<div class="row">
<!-- Main content -->
<div class="col-md-12">
<article>
<h2 class="article-title">''' + header + '''</h2>
<p> ''' + text + ''' </p>
''')
else:
html_string = ''' <main class="container-fluid">
<div class="row">
<!-- Sidebar -->
<aside class="col-md-2">
<div class="sidebar-box">
<h4>Contigs</h4>
<div class="list-group list-group-root">
<a class="list-group-item active" href="index.html">Overview</a>
'''
count = 0
for i in contigs:
the_path = "www/contigs/" + i + ".html"
if os.path.exists(the_path):
html_string += ' <a class="list-group-item" href="' + the_path + '">' + i + '</a>\n'
else:
count += 1
html_string += ''' </div>
</div>
<div class="sidebar-box sidebar-box-bg">
<h4>n.b.</h4>
<p> ''' + str(count) + ''' contigs under 100Kbp not shown.</p>
</div>
</aside>
<!-- Main content -->
<div class="col-md-10">
<article>
<h2 class="article-title">''' + header + '''</h2>
<p> ''' + text + ''' </p>
'''
return (html_string)
def end_main():
return ('''
</div>
</div>
</main>
''')
def add_footer():
return (''' <!-- Footer -->
<div class="container-fluid footer-container">
<footer class="footer">
<div class="footer-bottom">
<p class="text-center">Created by <a href="https://mjsull.github.io"</a>mjsull.github.io</p>
<p class="text-center"><a href="#">Back to top</a></p>
</div>
</footer>
</div>
</div>
</div>
    <!-- Bootstrap JavaScript -->
<!-- jQuery first, then Popper.js, then Bootstrap JS -->
</body>
</html>''')
def create_table(headers, list_of_vals, hide_by_default=set(), text_table=None):
    if text_table is not None:
with open(text_table, 'w') as o:
o.write('\t'.join(headers) + '\n')
for i in list_of_vals:
o.write('\t'.join(map(str, i)) + '\n')
out_string = '''<script>
$(document).ready(function() {
var table = $('#thetable').DataTable( {
            dom: 'frtiplB',
"scrollX": true,
buttons: [
'copy', 'csv', 'excel', 'pdf'
]
});
$('a.toggle-vis').on( 'click', function (e) {
e.preventDefault();
$(this).toggleClass('greened');
// Get the column API object
var column = table.column( $(this).attr('data-column') );
// Toggle the visibility
column.visible( ! column.visible() );
} );
'''
for num, head in enumerate(headers):
if head in hide_by_default:
out_string += ' table.column(%d).visible( false );\n' % (num)
out_string += '''
} );
</script>
<div>
Toggle column:
'''
for num, head in enumerate(headers):
if head in hide_by_default:
out_string += '<a class="toggle-vis greened" data-column="%d">%s</a> - ' % (num, head)
else:
out_string += '<a class="toggle-vis" data-column="%d">%s</a> - ' % (num, head)
out_string = out_string[:-3]
out_string += '\n </div>'
out_string += ''' <table class="table" id="thetable" style="width:100%">
<thead>
<tr>
'''
for i in headers:
out_string += ' <th>' + i + '</th>\n'
out_string += ''' </tr>
</thead>
<tbody>
'''
for i in list_of_vals:
out_string += ' <tr>\n'
for j in i:
out_string += ' <td>' + str(j) + '</td>\n'
out_string += ' </tr>\n'
out_string += ''' </tbody>
</table>
'''
return (out_string)
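
# Illustrative composition of the HTML helpers above (hypothetical bin ids and
# file names, not taken from the original pipeline): a report page is built by
# concatenating the header, a title banner, the main article, a table and the
# footer, then writing the result to disk.
#
#     page = create_header(['1', '2'], '', 'index.html', 'long_read_qc.html')
#     page += add_title('Assembly overview', 'Summary of recovered bins')
#     page += add_main('Bins', 'Statistics for each recovered bin.')
#     page += create_table(['Bin', 'Completeness', 'Contamination'],
#                          [['1', 95.2, 1.1], ['2', 88.7, 2.3]])
#     page += end_main() + add_footer()
#     with open('www/index.html', 'w') as o:
#         o.write(page)
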
def get_cov_stats_long(bamfile, contig, bin_size=3000, bin_step=500, buffer=50):
samfile = pysam.AlignmentFile(bamfile, 'rb')
ref_length = samfile.get_reference_length(contig)
bin_num = ref_length // bin_step + 1
coverage_forward = numpy.zeros(bin_num)
coverage_reverse = numpy.zeros(bin_num)
    trimmed_starts = numpy.zeros(bin_num, dtype=int)  # api: numpy.zeros
import numpy as np
from sklearn.base import clone
from ._utils_boot import boot_manual, draw_weights
from ._utils import fit_predict, fit_predict_proba, tune_grid_search
def fit_irm(y, x, d,
learner_g, learner_m, all_smpls, dml_procedure, score,
n_rep=1, g0_params=None, g1_params=None, m_params=None,
trimming_threshold=1e-12):
n_obs = len(y)
thetas = np.zeros(n_rep)
ses = np.zeros(n_rep)
all_g_hat0 = list()
all_g_hat1 = list()
all_m_hat = list()
all_p_hat = list()
for i_rep in range(n_rep):
smpls = all_smpls[i_rep]
g_hat0, g_hat1, m_hat, p_hat = fit_nuisance_irm(y, x, d,
learner_g, learner_m, smpls,
score,
g0_params=g0_params, g1_params=g1_params, m_params=m_params,
trimming_threshold=trimming_threshold)
all_g_hat0.append(g_hat0)
all_g_hat1.append(g_hat1)
all_m_hat.append(m_hat)
all_p_hat.append(p_hat)
if dml_procedure == 'dml1':
thetas[i_rep], ses[i_rep] = irm_dml1(y, x, d,
g_hat0, g_hat1, m_hat, p_hat,
smpls, score)
else:
assert dml_procedure == 'dml2'
thetas[i_rep], ses[i_rep] = irm_dml2(y, x, d,
g_hat0, g_hat1, m_hat, p_hat,
smpls, score)
theta = np.median(thetas)
se = np.sqrt(np.median(np.power(ses, 2) * n_obs + np.power(thetas - theta, 2)) / n_obs)
res = {'theta': theta, 'se': se,
'thetas': thetas, 'ses': ses,
'all_g_hat0': all_g_hat0, 'all_g_hat1': all_g_hat1, 'all_m_hat': all_m_hat, 'all_p_hat': all_p_hat}
return res
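
# Note on the aggregation above: as implemented here, the point estimate is the
# median over the n_rep sample-splitting repetitions, and the reported standard
# error pools each repetition's variance with the squared deviation of its
# estimate from that median,
#     se^2 = median_r( se_r^2 + (theta_r - theta)^2 / n_obs ),
# which is algebraically the same as the expression used in the code.
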
def fit_nuisance_irm(y, x, d, learner_g, learner_m, smpls, score,
g0_params=None, g1_params=None, m_params=None,
trimming_threshold=1e-12):
ml_g0 = clone(learner_g)
ml_g1 = clone(learner_g)
train_cond0 = np.where(d == 0)[0]
g_hat0_list = fit_predict(y, x, ml_g0, g0_params, smpls,
train_cond=train_cond0)
if score == 'ATE':
train_cond1 = np.where(d == 1)[0]
g_hat1_list = fit_predict(y, x, ml_g1, g1_params, smpls,
train_cond=train_cond1)
else:
assert score == 'ATTE'
g_hat1_list = list()
for idx, _ in enumerate(smpls):
            # fill it up, but it's not used further
g_hat1_list.append(np.zeros_like(g_hat0_list[idx], dtype='float64'))
ml_m = clone(learner_m)
m_hat_list = fit_predict_proba(d, x, ml_m, m_params, smpls,
trimming_threshold=trimming_threshold)
p_hat_list = []
for (_, test_index) in smpls:
p_hat_list.append(np.mean(d[test_index]))
return g_hat0_list, g_hat1_list, m_hat_list, p_hat_list
def tune_nuisance_irm(y, x, d, ml_g, ml_m, smpls, score, n_folds_tune,
param_grid_g, param_grid_m):
train_cond0 = np.where(d == 0)[0]
g0_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond0)
if score == 'ATE':
train_cond1 = np.where(d == 1)[0]
g1_tune_res = tune_grid_search(y, x, ml_g, smpls, param_grid_g, n_folds_tune,
train_cond=train_cond1)
g1_best_params = [xx.best_params_ for xx in g1_tune_res]
else:
g1_best_params = None
m_tune_res = tune_grid_search(d, x, ml_m, smpls, param_grid_m, n_folds_tune)
g0_best_params = [xx.best_params_ for xx in g0_tune_res]
m_best_params = [xx.best_params_ for xx in m_tune_res]
return g0_best_params, g1_best_params, m_best_params
def compute_iivm_residuals(y, g_hat0_list, g_hat1_list, m_hat_list, p_hat_list, smpls):
u_hat0 = np.full_like(y, np.nan, dtype='float64')
u_hat1 = np.full_like(y, np.nan, dtype='float64')
g_hat0 = np.full_like(y, np.nan, dtype='float64')
g_hat1 = np.full_like(y, np.nan, dtype='float64')
    m_hat = np.full_like(y, np.nan, dtype='float64')  # api: numpy.full_like
import numpy as np
import pandas as pd
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output, State
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
import statsmodels.api as sm
from statsmodels.tsa.statespace.exponential_smoothing import ExponentialSmoothing
from statsmodels.tsa.statespace.sarimax import SARIMAX
from statsmodels.regression.quantile_regression import QuantReg
from app import app, companies, fin_stmts, colorscheme
from funcs import grid, calc_kpis, add_quarters, spinner_graph
from data_funcs import get_focus, get_quotes
simulation_scheme = [colorscheme[0], 'rgba(180,180,180,0.2)', '#0f0f0f']
macro = pd.read_csv("data/macro.csv")
macro['USD'] = macro['USD_AVG']
focus = get_focus(macro)
def layout(ticker):
arima_marks = {i: str(i) for i in range(4)}
row = companies[companies['BTICKER'].str[:4] == ticker]
cvm_id = row['CD_CVM'].iloc[0]
company_name = row['NM_PREGAO'].iloc[0]
sectors = [
html.Li(row[s].iloc[0], className='breadcrumb-item')
for s in ['SETOR', 'SUBSETOR', 'SEGMENTO']
]
# Prepare fin statements dataset
data = fin_stmts[fin_stmts['CD_CVM'] == cvm_id]
data = data.reset_index()
data = data[1:]
data = calc_kpis(data)
data = data.merge(macro, on="DT_FIM_EXERC")
#
quotes = pd.read_csv('data/tickers.txt', names=['ticker'])
quotes = quotes[quotes['ticker'].str[:4] == ticker]['ticker'].values
quotes = get_quotes(quotes)
quotes['tipo'] = quotes['ticker'].str[4:]
quotes['qtde'] = np.where(
quotes['tipo'] == '3', row['QTDE_ON'],
        row['QTDE_PN'] / np.sum(quotes['tipo'] != '3')  # api: numpy.sum
from unittest import TestCase
import numpy as np
from python_polar_coding.polar_codes.fast_ssc import FastSSCDecoder
class TestFastSSCDecoder(TestCase):
@classmethod
def setUpClass(cls):
cls.received_llr = np.array([
-2.7273,
-8.7327,
0.1087,
1.6463,
0.0506,
-0.0552,
-1.5304,
-2.1233,
])
cls.length = cls.received_llr.size
cls.n = 3
def test_zero_node_decoder(self):
mask = np.zeros(self.length, dtype=np.int8)
decoder = FastSSCDecoder(mask=mask, n=self.n)
self.assertEqual(len(decoder._decoding_tree.leaves), 1)
decoder.decode(self.received_llr)
np.testing.assert_equal(
decoder.result,
np.zeros(self.length, dtype=np.int8)
)
def test_one_node_decoder(self):
mask = np.ones(self.length, dtype=np.int8)
decoder = FastSSCDecoder(mask=mask, n=self.n)
self.assertEqual(len(decoder._decoding_tree.leaves), 1)
decoder.decode(self.received_llr)
np.testing.assert_equal(
decoder.result,
np.array(self.received_llr < 0, dtype=np.int8)
)
def test_spc_node_decoder(self):
        mask = np.array([0, 1, 1, 1, 1, 1, 1, 1], dtype=np.int8)  # api: numpy.array
import copy
import numpy as np
from .grid import Grid, CachedData
def array_at_verts_basic2d(a):
"""
Computes values at cell vertices on 2d array using neighbor averaging.
Parameters
----------
a : ndarray
Array values at cell centers, could be a slice in any orientation.
Returns
-------
averts : ndarray
Array values at cell vertices, shape (a.shape[0]+1, a.shape[1]+1).
"""
assert a.ndim == 2
shape_verts2d = (a.shape[0]+1, a.shape[1]+1)
# create a 3D array of size (nrow+1, ncol+1, 4)
averts3d = np.full(shape_verts2d + (4,), np.nan)
averts3d[:-1, :-1, 0] = a
averts3d[:-1, 1:, 1] = a
averts3d[1:, :-1, 2] = a
averts3d[1:, 1:, 3] = a
# calculate the mean over the last axis, ignoring NaNs
averts = np.nanmean(averts3d, axis=2)
return averts
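
# Illustrative example (not part of the original module): averaging a 2x2 array
# of cell-centre values onto the 3x3 grid of cell vertices.
#
#     >>> a = np.array([[1.0, 2.0],
#     ...               [3.0, 4.0]])
#     >>> array_at_verts_basic2d(a)
#     array([[1. , 1.5, 2. ],
#            [2. , 2.5, 3. ],
#            [3. , 3.5, 4. ]])
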
class StructuredGrid(Grid):
"""
class for a structured model grid
Parameters
----------
delc
delc array
delr
delr array
Properties
----------
nlay
returns the number of model layers
nrow
returns the number of model rows
ncol
returns the number of model columns
delc
returns the delc array
delr
returns the delr array
xyedges
returns x-location points for the edges of the model grid and
y-location points for the edges of the model grid
Methods
----------
get_cell_vertices(i, j)
returns vertices for a single cell at row, column i, j.
"""
def __init__(self, delc=None, delr=None, top=None, botm=None, idomain=None,
lenuni=None, epsg=None, proj4=None, prj=None, xoff=0.0,
yoff=0.0, angrot=0.0, nlay=None, nrow=None, ncol=None,
laycbd=None):
super(StructuredGrid, self).__init__('structured', top, botm, idomain,
lenuni, epsg, proj4, prj, xoff,
yoff, angrot)
if delc is not None:
self.__nrow = len(delc)
self.__delc = delc.astype(float)
else:
self.__nrow = nrow
self.__delc = delc
if delr is not None:
self.__ncol = len(delr)
self.__delr = delr.astype(float)
else:
self.__ncol = ncol
self.__delr = delr
if top is not None:
assert self.__nrow * self.__ncol == len(np.ravel(top))
if botm is not None:
assert self.__nrow * self.__ncol == len(np.ravel(botm[0]))
if nlay is not None:
self.__nlay = nlay
else:
if laycbd is not None:
self.__nlay = len(botm) - np.sum(laycbd>0)
else:
self.__nlay = len(botm)
else:
self.__nlay = nlay
if laycbd is not None:
self.__laycbd = laycbd
else:
self.__laycbd = np.zeros(self.__nlay, dtype=int)
####################
# Properties
####################
@property
def is_valid(self):
if self.__delc is not None and self.__delr is not None:
return True
return False
@property
def is_complete(self):
if self.__delc is not None and self.__delr is not None and \
super(StructuredGrid, self).is_complete:
return True
return False
@property
def nlay(self):
return self.__nlay
@property
def nrow(self):
return self.__nrow
@property
def ncol(self):
return self.__ncol
@property
def nnodes(self):
return self.__nlay * self.__nrow * self.__ncol
@property
def shape(self):
return self.__nlay, self.__nrow, self.__ncol
@property
def extent(self):
self._copy_cache = False
xyzgrid = self.xyzvertices
self._copy_cache = True
return (np.min(xyzgrid[0]), np.max(xyzgrid[0]),
np.min(xyzgrid[1]), np.max(xyzgrid[1]))
@property
def delc(self):
return copy.deepcopy(self.__delc)
@property
def delr(self):
return copy.deepcopy(self.__delr)
@property
def delz(self):
cache_index = 'delz'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
delz = self.top_botm[:-1, :, :] - self.top_botm[1:, :, :]
self._cache_dict[cache_index] = CachedData(delz)
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
@property
def top_botm_withnan(self):
"""
Same as top_botm array but with NaN where idomain==0 both above and
below a cell.
"""
cache_index = 'top_botm_withnan'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
is_inactive_above = np.full(self.top_botm.shape, True)
is_inactive_above[:-1, :, :] = self._idomain==0
is_inactive_below = np.full(self.top_botm.shape, True)
is_inactive_below[1:, :, :] = self._idomain==0
where_to_nan = np.logical_and(is_inactive_above, is_inactive_below)
top_botm_withnan = np.where(where_to_nan, np.nan, self.top_botm)
self._cache_dict[cache_index] = CachedData(top_botm_withnan)
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
@property
def xyzvertices(self):
"""
Method to get all grid vertices in a layer
Returns:
[]
2D array
"""
cache_index = 'xyzgrid'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
xedge = np.concatenate(([0.], np.add.accumulate(self.__delr)))
length_y = np.add.reduce(self.__delc)
yedge = np.concatenate(([length_y], length_y -
np.add.accumulate(self.delc)))
xgrid, ygrid = np.meshgrid(xedge, yedge)
zgrid, zcenter = self._zcoords()
if self._has_ref_coordinates:
# transform x and y
pass
xgrid, ygrid = self.get_coords(xgrid, ygrid)
if zgrid is not None:
self._cache_dict[cache_index] = \
CachedData([xgrid, ygrid, zgrid])
else:
self._cache_dict[cache_index] = \
CachedData([xgrid, ygrid])
if self._copy_cache:
return self._cache_dict[cache_index].data
else:
return self._cache_dict[cache_index].data_nocopy
@property
def xyedges(self):
"""
Return a list of two 1D numpy arrays: one with the cell edge x
coordinate (size = ncol+1) and the other with the cell edge y
coordinate (size = nrow+1) in model space - not offset or rotated.
"""
cache_index = 'xyedges'
if cache_index not in self._cache_dict or \
self._cache_dict[cache_index].out_of_date:
            xedge = np.concatenate(([0.], np.add.accumulate(self.__delr)))  # api: numpy.add.accumulate
#!/usr/bin/env python
__author__ = '<NAME>'
__license__ = "Apache 2.0"
__email__ = '<EMAIL>'
# script to build features for the Kaggle LANL earthquake prediction challenge
# extracts statistics and signal processing values from an acoustic signal
# please see the exploratory data analysis and the model Jupyter notebooks for more description;
# understanding of the problem is really helped by this
# derived from:
# <NAME>. (2019). Earthquakes FE. More features and samples. Kaggle.
# Retrieved from: https://www.kaggle.com/artgor/earthquakes-fe-more-features-and-samples
import os
import time
import warnings
import traceback
import numpy as np
import pandas as pd
from scipy import stats
import scipy.signal as sg
import multiprocessing as mp
from scipy.signal import hann
from scipy.signal import hilbert
from scipy.signal import convolve
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from tqdm import tqdm
import gc
gc.enable()
# some of the original functions from Lukayenko throw warnings, time has not permitted exploring a fix
warnings.filterwarnings("ignore")
# constants
DATA_DIR = "/workspace/Kaggle/LANL"
# DATA_DIR = r'd:\#earthquake\data' # set for local environment
SIG_LEN = 150000
NUM_SEG_PER_PROC = 6000
NUM_THREADS = 6
NY_FREQ_IDX = 75000
CUTOFF = 18000
MAX_FREQ_IDX = 20000
FREQ_STEP = 2500
def split_raw_data():
"""
divides the original training data into two sets that only overlap by one test signal length
TODO: this could probably be done without the expense of file splits by using Pandas read csv parameters
:return: None, outputs a csv file
"""
df = pd.read_csv(os.path.join(DATA_DIR, 'train.csv'))
max_start_index = len(df.index) - SIG_LEN
slice_len = int(max_start_index / 6)
for i in range(NUM_THREADS):
print('working', i)
df0 = df.iloc[slice_len * i: (slice_len * (i + 1)) + SIG_LEN]
df0.to_csv(os.path.join(DATA_DIR, 'raw_data_%d.csv' % i), index=False)
del df0
del df
def build_rnd_idxs():
"""
builds a set of random indices by which the 629m sample training set will be sliced into 150k sets,
the 150k sets match the test sample sizes. builds x set of indices where x is the number of threads or splits
of the original data that are planned
:return: None, outputs a csv file
"""
rnd_idxs = np.zeros(shape=(NUM_THREADS, NUM_SEG_PER_PROC), dtype=np.int32)
max_start_idx = 100000000 # len(df.index) - SIG_LEN
for i in range(NUM_THREADS):
np.random.seed(5591 + i)
start_indices = np.random.randint(0, max_start_idx, size=NUM_SEG_PER_PROC, dtype=np.int32)
rnd_idxs[i, :] = start_indices
for i in range(NUM_THREADS):
print(rnd_idxs[i, :8])
print(rnd_idxs[i, -8:])
print(min(rnd_idxs[i,:]), max(rnd_idxs[i,:]))
np.savetxt(fname='start_indices_4k.csv', X=np.transpose(rnd_idxs), fmt='%d', delimiter=',')
def add_trend_feature(arr, abs_values=False):
"""
adds a trend feature based on an input array
from: Lukayenko (2019)
:param arr: np array to create feature for
:param abs_values: whether to take absolute value of input
:return: slope of the trend
"""
idx = np.array(range(len(arr)))
if abs_values:
arr = np.abs(arr)
lr = LinearRegression()
lr.fit(idx.reshape(-1, 1), arr)
return lr.coef_[0]
def classic_sta_lta(x, length_sta, length_lta):
"""
computes metric for short term divided by long tern signal average
from: Lukayenko (2019)
:param x: np array, signal to process
:param length_sta: length of short term average
:param length_lta: length of long term average
:return: short term average divided by long term average
"""
sta = np.cumsum(x ** 2)
# Convert to float
    sta = np.require(sta, dtype=float)
# Copy for LTA
lta = sta.copy()
# Compute the STA and the LTA
sta[length_sta:] = sta[length_sta:] - sta[:-length_sta]
sta /= length_sta
lta[length_lta:] = lta[length_lta:] - lta[:-length_lta]
lta /= length_lta
# Pad zeros
sta[:length_lta - 1] = 0
# Avoid division by zero by setting zero values to tiny float
dtiny = np.finfo(0.0).tiny
idx = lta < dtiny
lta[idx] = dtiny
return sta / lta
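
# Illustrative usage (a synthetic signal, not from the competition data): the
# STA/LTA ratio stays near 1 on a stationary signal and rises sharply once the
# short-term window covers a high-amplitude onset, which is why it is a common
# event-onset detector.
#
#     >>> x = np.ones(1200)
#     >>> x[1000:] = 50.0                         # sudden high-amplitude event
#     >>> r = classic_sta_lta(x, length_sta=50, length_lta=500)
#     >>> r[600] < r[1100]
#     True
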
def des_bw_filter_lp(cutoff=CUTOFF):
"""
designs a 4 pole Butterworth IIR low pass filter, passes low frequencies, eliminates high frequencies
:param cutoff: low pass cutoff frequency as a frequency line number
:return: b, a: coefficients of filter
"""
b, a = sg.butter(4, Wn=cutoff/NY_FREQ_IDX)
return b, a
def des_bw_filter_hp(cutoff=CUTOFF):
"""
designs a 4 pole Butterworth IIR high pass filter, passes high frequencies, eliminates low frequencies
:param cutoff: high pass cutoff frequency as a frequency line number
:return: b, a: coefficients of filter
"""
b, a = sg.butter(4, Wn=cutoff/NY_FREQ_IDX, btype='highpass')
return b, a
def des_bw_filter_bp(low, high):
"""
designs a 4 pole Butterworth IIR band pass filter, passes a band frequencies, eliminates low and high frequencies
:param low: low frequency line number
:param high: high frequency line number
:return: b, a: coefficients of filter
"""
b, a = sg.butter(4, Wn=(low/NY_FREQ_IDX, high/NY_FREQ_IDX), btype='bandpass')
return b, a
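
# Note on the Wn argument in the three designers above: scipy.signal.butter
# expects critical frequencies normalised to the Nyquist frequency, so a cutoff
# expressed as FFT line number 18000 out of NY_FREQ_IDX = 75000 lines becomes
# Wn = 18000 / 75000 = 0.24 (illustrative arithmetic, not an extra parameter).
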
def create_features_pk_det(seg_id, seg, X, st, end):
"""
extracts peak values and indices using wavelets, extracts 12 biggest peak signal values
warning: this takes days to run, even if multiprocessing is employed, took 3 days for 24k segments with 6 processes
:param seg_id: segment id as a number
:param seg: segment, as a DataFrame
:param X: DataFrame that is the target into which features are created
:param st: segment start id, for debug
:param end: segment end id, for debug
:return: X: DataFrame that is the target into which features are created
"""
try:
X.loc[seg_id, 'seg_id'] = np.int32(seg_id)
X.loc[seg_id, 'seg_start'] = np.int32(st)
X.loc[seg_id, 'seg_end'] = np.int32(end)
except:
pass
sig = pd.Series(seg['acoustic_data'].values)
b, a = des_bw_filter_lp(cutoff=18000)
sig = sg.lfilter(b, a, sig)
peakind = []
noise_pct = .001
count = 0
while len(peakind) < 12 and count < 24:
peakind = sg.find_peaks_cwt(sig, np.arange(1, 16), noise_perc=noise_pct, min_snr=4.0)
noise_pct *= 2.0
count += 1
if len(peakind) < 12:
print('Warning: Failed to find 12 peaks for %d' % seg_id)
while len(peakind) < 12:
peakind.append(149999)
df_pk = pd.DataFrame(data={'pk': sig[peakind], 'idx': peakind}, columns=['pk', 'idx'])
df_pk.sort_values(by='pk', ascending=False, inplace=True)
for i in range(0, 12):
X.loc[seg_id, 'pk_idx_%d' % i] = df_pk['idx'].iloc[i]
X.loc[seg_id, 'pk_val_%d' % i] = df_pk['pk'].iloc[i]
return X
def create_features(seg_id, seg, X, st, end):
"""
creates the primary statistical features from signal slices, for training slices and test signals
heavily influenced by Lukayenko (2019), added frequency banding via digital filters, Fourier transform was
switched to magnitude and phase based upon the EDA
:param seg_id: segment id as a number
:param seg: segment, as a DataFrame
:param X: DataFrame that is the target into which features are created
:param st: segment start id, for debug
:param end: segment end id, for debug
:return: X: DataFrame that is the target into which features are created
"""
try:
X.loc[seg_id, 'seg_id'] = np.int32(seg_id)
X.loc[seg_id, 'seg_start'] = np.int32(st)
X.loc[seg_id, 'seg_end'] = np.int32(end)
except:
pass
xc = pd.Series(seg['acoustic_data'].values)
xcdm = xc - np.mean(xc)
b, a = des_bw_filter_lp(cutoff=18000)
xcz = sg.lfilter(b, a, xcdm)
zc = np.fft.fft(xcz)
zc = zc[:MAX_FREQ_IDX]
# FFT transform values
realFFT = np.real(zc)
imagFFT = np.imag(zc)
freq_bands = [x for x in range(0, MAX_FREQ_IDX, FREQ_STEP)]
magFFT = np.sqrt(realFFT ** 2 + imagFFT ** 2)
phzFFT = np.arctan(imagFFT / realFFT)
phzFFT[phzFFT == -np.inf] = -np.pi / 2.0
phzFFT[phzFFT == np.inf] = np.pi / 2.0
phzFFT = np.nan_to_num(phzFFT)
for freq in freq_bands:
X.loc[seg_id, 'FFT_Mag_01q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.01)
X.loc[seg_id, 'FFT_Mag_10q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.1)
X.loc[seg_id, 'FFT_Mag_90q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.9)
X.loc[seg_id, 'FFT_Mag_99q%d' % freq] = np.quantile(magFFT[freq: freq + FREQ_STEP], 0.99)
X.loc[seg_id, 'FFT_Mag_mean%d' % freq] = np.mean(magFFT[freq: freq + FREQ_STEP])
X.loc[seg_id, 'FFT_Mag_std%d' % freq] = np.std(magFFT[freq: freq + FREQ_STEP])
X.loc[seg_id, 'FFT_Mag_max%d' % freq] = np.max(magFFT[freq: freq + FREQ_STEP])
X.loc[seg_id, 'FFT_Phz_mean%d' % freq] = np.mean(phzFFT[freq: freq + FREQ_STEP])
X.loc[seg_id, 'FFT_Phz_std%d' % freq] = np.std(phzFFT[freq: freq + FREQ_STEP])
X.loc[seg_id, 'FFT_Rmean'] = realFFT.mean()
X.loc[seg_id, 'FFT_Rstd'] = realFFT.std()
X.loc[seg_id, 'FFT_Rmax'] = realFFT.max()
X.loc[seg_id, 'FFT_Rmin'] = realFFT.min()
X.loc[seg_id, 'FFT_Imean'] = imagFFT.mean()
X.loc[seg_id, 'FFT_Istd'] = imagFFT.std()
X.loc[seg_id, 'FFT_Imax'] = imagFFT.max()
X.loc[seg_id, 'FFT_Imin'] = imagFFT.min()
X.loc[seg_id, 'FFT_Rmean_first_6000'] = realFFT[:6000].mean()
X.loc[seg_id, 'FFT_Rstd__first_6000'] = realFFT[:6000].std()
X.loc[seg_id, 'FFT_Rmax_first_6000'] = realFFT[:6000].max()
X.loc[seg_id, 'FFT_Rmin_first_6000'] = realFFT[:6000].min()
X.loc[seg_id, 'FFT_Rmean_first_18000'] = realFFT[:18000].mean()
X.loc[seg_id, 'FFT_Rstd_first_18000'] = realFFT[:18000].std()
X.loc[seg_id, 'FFT_Rmax_first_18000'] = realFFT[:18000].max()
X.loc[seg_id, 'FFT_Rmin_first_18000'] = realFFT[:18000].min()
del xcz
del zc
b, a = des_bw_filter_lp(cutoff=2500)
xc0 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=2500, high=5000)
xc1 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=5000, high=7500)
xc2 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=7500, high=10000)
xc3 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=10000, high=12500)
xc4 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=12500, high=15000)
xc5 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=15000, high=17500)
xc6 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_bp(low=17500, high=20000)
xc7 = sg.lfilter(b, a, xcdm)
b, a = des_bw_filter_hp(cutoff=20000)
xc8 = sg.lfilter(b, a, xcdm)
sigs = [xc, pd.Series(xc0), pd.Series(xc1), pd.Series(xc2), pd.Series(xc3),
pd.Series(xc4), pd.Series(xc5), pd.Series(xc6), pd.Series(xc7), pd.Series(xc8)]
for i, sig in enumerate(sigs):
X.loc[seg_id, 'mean_%d' % i] = sig.mean()
X.loc[seg_id, 'std_%d' % i] = sig.std()
X.loc[seg_id, 'max_%d' % i] = sig.max()
X.loc[seg_id, 'min_%d' % i] = sig.min()
X.loc[seg_id, 'mean_change_abs_%d' % i] = np.mean(np.diff(sig))
X.loc[seg_id, 'mean_change_rate_%d' % i] = np.mean(np.nonzero((np.diff(sig) / sig[:-1]))[0])
X.loc[seg_id, 'abs_max_%d' % i] =
|
np.abs(sig)
|
numpy.abs
|
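# The des_bw_filter_lp / des_bw_filter_bp / des_bw_filter_hp helpers called above are not
# shown in this snippet. Below is a minimal sketch of what such helpers could look like,
# assuming 4th-order Butterworth designs from scipy.signal; the sample_rate default and
# the filter order are illustrative assumptions, not the original configuration.
import scipy.signal as sg

def des_bw_filter_lp_sketch(cutoff, sample_rate=4000000, order=4):
    # low-pass Butterworth; returns (b, a) coefficients for use with sg.lfilter
    nyq = 0.5 * sample_rate
    return sg.butter(order, cutoff / nyq, btype='lowpass')

def des_bw_filter_hp_sketch(cutoff, sample_rate=4000000, order=4):
    # high-pass Butterworth
    nyq = 0.5 * sample_rate
    return sg.butter(order, cutoff / nyq, btype='highpass')

def des_bw_filter_bp_sketch(low, high, sample_rate=4000000, order=4):
    # band-pass Butterworth between low and high (in Hz)
    nyq = 0.5 * sample_rate
    return sg.butter(order, [low / nyq, high / nyq], btype='bandpass')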
import numpy as np
import random
import collections
import platform
import sys
import ray
from ray.rllib.execution.segment_tree import SumSegmentTree, MinSegmentTree
from ray.rllib.policy.sample_batch import SampleBatch, DEFAULT_POLICY_ID, \
MultiAgentBatch
from ray.rllib.utils.annotations import DeveloperAPI
from ray.rllib.utils.compression import unpack_if_needed
from ray.util.iter import ParallelIteratorWorker
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.window_stat import WindowStat
@DeveloperAPI
class ReplayBuffer:
@DeveloperAPI
def __init__(self, size):
"""Create Prioritized Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
"""
self._storage = []
self._maxsize = size
self._next_idx = 0
self._hit_count = np.zeros(size)
self._eviction_started = False
self._num_added = 0
self._num_sampled = 0
self._evicted_hit_stats = WindowStat("evicted_hit", 1000)
self._est_size_bytes = 0
def __len__(self):
return len(self._storage)
@DeveloperAPI
def add(self, obs_t, action, reward, obs_tp1, done, weight):
data = (obs_t, action, reward, obs_tp1, done)
self._num_added += 1
if self._next_idx >= len(self._storage):
self._storage.append(data)
self._est_size_bytes += sum(sys.getsizeof(d) for d in data)
else:
self._storage[self._next_idx] = data
if self._next_idx + 1 >= self._maxsize:
self._eviction_started = True
self._next_idx = (self._next_idx + 1) % self._maxsize
if self._eviction_started:
self._evicted_hit_stats.push(self._hit_count[self._next_idx])
self._hit_count[self._next_idx] = 0
def _encode_sample(self, idxes):
obses_t, actions, rewards, obses_tp1, dones = [], [], [], [], []
for i in idxes:
data = self._storage[i]
obs_t, action, reward, obs_tp1, done = data
obses_t.append(np.array(unpack_if_needed(obs_t), copy=False))
actions.append(
|
np.array(action, copy=False)
|
numpy.array
|
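# Minimal usage sketch for the ReplayBuffer above (illustration only): once the buffer
# is full, the oldest transitions are overwritten ring-buffer style. The observation
# shape and values are arbitrary assumptions.
if __name__ == "__main__":
    buf = ReplayBuffer(size=3)
    for step in range(5):
        obs = np.zeros(4) + step
        buf.add(obs_t=obs, action=0, reward=1.0, obs_tp1=obs + 1.0, done=False, weight=None)
    print(len(buf))  # 3 -- the two oldest transitions were evicted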
import numpy as np
def affinity(x, w):
"""
Returns the affinity of each row of 'x' for each column of 'w'.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param w: List of lists representing the weights (one column for each class)..
:return: A numpy array of affinities.
"""
return np.dot(x, w)
def softmax_function(z):
"""
Return the softmax of z using a numerically stable approach.
:param z: A real number, list of numbers, or list of lists of numbers.
:return: The output of the softmax function for z with the same shape as the input.
"""
index = np.argmax(z)
result = np.empty((len(z), len(z[0])))
copy = np.copy(z)
copy = np.subtract(copy, copy.item(index))
for row in range(len(z)):
denom = 0
xp_1 = copy[row]
for col in range(len(z[0])):
xp_2 = copy[row][col]
denom += np.exp(xp_2)
buffer = (np.divide(np.exp(xp_1), denom))
result[row] = buffer
return result
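# A small worked example (illustration only; not part of the original assignment code):
# softmax([1, 2, 3]) is roughly [0.090, 0.245, 0.665], and every output row sums to 1.
def _demo_softmax():
    z = [[1.0, 2.0, 3.0],
         [0.0, 0.0, 0.0]]
    s = softmax_function(z)
    assert np.allclose(s.sum(axis=1), 1.0)
    return s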
def softmax_predict_proba(x, w):
"""
Return the estimated probability of each class for each row of 'x'.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param w: List of lists representing the weights (one column for each class).
:return: A numpy 2d array of probabilities.
"""
return softmax_function(affinity(x,w))
def softmax_predict(x, w):
"""
Return the estimated label for each row of 'x'.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param w: List of lists representing the weights (one column for each class)..
:return: A numpy array of class labels.
"""
predict = softmax_predict_proba(x, w)
result = np.empty(len(predict))
for row in range(len(predict)):
result[row] = np.argmax(predict[row])
return result
def softmax_cost(x, y, w):
"""
Return the cross entropy cost for softmax regression.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param y: List representing the y vector of labels.
:param w: List of lists representing W matrix, each column contains the weight vector for a class.
:return: The value of the cost function.
"""
prob = softmax_predict_proba(x, w)
Y_indexes = softmax_predict(x, w)
Y = np.zeros((len(prob), len(prob[0])))
for col in range(len(Y_indexes)):
index = y[col]
Y[col][index] = 1
log = np.log(prob)
product = np.multiply(Y, log)
sum = np.sum(product)
denom = np.divide(-1, len(x))
result = np.multiply(sum, denom)
return result
def softmax_gradient(x, y, w):
"""
Return the gradient of the cross entropy cost function.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param y: List of correct y-values [0... c-1].
:param w: List of lists of weights. Each column contains the weights for one class.
:return: The gradient of the cross entropy cost function as a list of lists or 2D numpy array.
"""
denom = np.divide(1, len(x))
P = softmax_predict_proba(x, w)
X_t = np.transpose(x)
Y = np.zeros((len(P), len(P[0])))
for col in range(len(x)):
index = y[col]
Y[col][index] = 1
subtraction = np.subtract(P, Y)
product = np.dot(X_t, subtraction)
result = np.multiply(denom, product)
return result
def softmax_gradient_descent(x, y, w_init, eta, n_iter):
"""
Uses gradient descent to estimate the weights 'w' that reduce the softmax regression cost function.
:param x: List of lists representing X matrix, each row is an x^(i) vector.
:param y: List of correct labels [0... c-1].
:param w_init: List of lists of initial weights.
:param eta: The learning rate.
:param n_iter: The number of parameter updates to perform.
:return: A 2D numpy array of the estimated weights.
"""
W = np.copy(w_init)
for index in range(n_iter):
print(softmax_cost(x, y, W))
gradient = softmax_gradient(x, y, W)
product = np.multiply(eta, gradient)
W =
|
np.subtract(W, product)
|
numpy.subtract
|
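# End-to-end usage sketch for the softmax regression functions above (illustration only).
# The toy data, learning rate and iteration count are arbitrary assumptions, and since
# the gradient-descent helper is truncated above, the update step is applied manually here.
def _demo_softmax_regression():
    x = [[1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]  # 4 samples, 2 features
    y = [0, 1, 1, 0]                                      # 2 classes
    w = np.zeros((2, 2))                                  # one weight column per class
    for _ in range(200):
        w = np.subtract(w, np.multiply(0.5, softmax_gradient(x, y, w)))
    return softmax_predict(x, w)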
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sorting gr3 output from SCHISM in a pythonic way. The script is intended to sort
a large number of max elevation results at each node based on the inundation
value.
This script was developed to tackle the problem of sorting the storm track
developed in Kerry Hydro simulations.
Based on the experience with this script's twin sister, which only uses one core,
sorting was very slow. So finally, this script was born to use the power of MPI
(message passing interface) to speed up the work.
MPI Implementation Note:
The original post_sorting_gr3 was implemented on the premise that the objects
are formed during the reading of the files. Since we are only allowed to
communicate basic data structures with MPI, it was not possible to pass complex
data structures, like Node and Element objects. So finally, they were streamlined
to use only the numeric values. The pixel objects are, essentially, formed
afterwards in the MPI processes.
Update:
This analysis has been translated into an xarray object, to take advantage of
dask processing. Use the dask version of the script for future analysis. This
script will eventually be phased out.
@license: GPL3
@author: khan
@email: <EMAIL>
"""
import numpy as np
from mpi4py import MPI
import os
import glob
import sys
# Gr3 Object to read and write the gr3 formatted files
class Gr3(object):
def __init__(self, grname=None, nelem=0, nnode=0, nodes=[], elems=[]):
self.grname = grname
self.nelem = nelem
self.nnode = nnode
self.nodes = nodes
self.elems = elems
def read(self, fname, path='./', readnodes=True, readelements=True):
__file = os.path.join(path, fname)
with open(__file) as f:
ds = f.readlines()
__line = 0
# Reading the gr3 name
self.grname = ds[__line].strip()
__line = __line + 1
# Reading the number of nodes and elements
__nelem, __nnode = np.fromstring(string=ds[__line].split('\n')[0], count=2, sep=' ')
self.nelem = int(__nelem)
self.nnode = int(__nnode)
__line = __line + 1
if readnodes:
# Reading the nodes
__nodes = np.genfromtxt(fname=ds[__line:__line+self.nnode])
__line = __line + self.nnode
self.nodes = np.array(__nodes)
if readelements:
# Reading the elements
__elems = np.genfromtxt(fname=ds[__line:__line+self.nelem], dtype=int)
__line = __line + self.nelem
self.elems = np.array(__elems, dtype=int)
# Class to hold pixel values and help sorting
class Pixel(object):
def __init__(self, id, x, y, z, exp):
self.id = id
self.x = x
self.y = y
self.z = z
self.exp = exp
def __lt__(self, other):
if isinstance(other, Pixel):
return(self.z < other.z)
else:
return(self.z < other)
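# Minimal serial usage sketch for the Gr3 reader above (illustration only); the file
# name below is a placeholder, not an actual input file.
def _example_read_single_file(fname='Track_0001.gr3', path='./Maxelev'):
    gr3 = Gr3()
    gr3.read(fname, path=path)
    # gr3.nodes has shape (nnode, 4): node id, x, y, value
    return gr3.grname, gr3.nnode, gr3.nodes.shape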
if __name__=='__main__':
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# Setting up
maxelev_folder = './Maxelev'
sorted_folder = './Sorted'
if not os.path.exists(sorted_folder):
os.mkdir(sorted_folder)
fnames = glob.glob(os.path.join(maxelev_folder, 'Track_*.gr3'))
# fnames = fnames[0:5] # For testing, comment out to consider all files
consider = 'all' # How many files to consider by all processes
if rank == 0:
# Check if the considered number of the files makes sense
print('Total files to be sorted = {:d}'.format(len(fnames)))
if consider != 'all':
if(len(fnames)) <= consider:
consider = 'all'
if rank == 0:
print('The output will consider all ({:d}) values'.format(len(fnames)))
else:
if rank == 0:
print('The output will consider maximum {:d} values'.format(consider))
else:
if rank == 0:
print('The output will consider all ({:d}) values'.format(len(fnames)))
# First loading the file and distribute to other process
if rank == 0:
print('{:04d} - Reading {:s}'.format(0, os.path.basename(fnames[0])))
exp = int(os.path.basename(fnames[0]).split('.gr3')[0].split('_')[1])
gr3 = Gr3()
gr3.read(fnames[0])
gr3data = gr3.nodes
gr3shape = gr3data.shape
else:
gr3shape = None
exp = None
gr3shape = comm.bcast(gr3shape, root=0)
exp = comm.bcast(exp, root=0)
if rank != 0:
gr3data = np.empty(gr3shape)
comm.Bcast(gr3data, root=0)
# Range of points used by each rank and initial Point array
# Similar to use a parmetis library
chunksize = int(np.ceil(float(gr3shape[0])/size))
gr3stack = np.array([Pixel(i[0], i[1], i[2], i[3], exp) for i in gr3data[rank*chunksize:(rank+1)*chunksize]])
gr3stack = np.reshape(gr3stack, newshape=(1, len(gr3stack)))
# Now running over the files
if consider == 'all':
# Load all the files
for i in np.arange(len(fnames))[1:len(fnames)]:
if rank == 0:
print('{:04d} - Reading {:s}'.format(i, os.path.basename(fnames[i])))
exp = int(os.path.basename(fnames[i]).split('.gr3')[0].split('_')[1])
gr3 = Gr3()
gr3.read(fnames[i])
gr3data = gr3.nodes
gr3shape = gr3data.shape
else:
gr3shape = None
exp = None
gr3shape = comm.bcast(gr3shape, root=0)
exp = comm.bcast(exp, root=0)
if rank != 0:
gr3data = np.empty(gr3shape)
comm.Bcast(gr3data, root=0)
gr3append = np.array([Pixel(i[0], i[1], i[2], i[3], exp) for i in gr3data[rank*chunksize:(rank+1)*chunksize]])
gr3stack = np.append(gr3stack, [gr3append], axis=0)
stackshape = gr3stack.shape
gr3stack = np.sort(gr3stack, axis=0)
elif consider != 'all' and consider > 1:
# Preparing matrix upto first sagment of the files
for i in np.arange(len(fnames))[1:consider]:
if rank == 0:
print('{:04d} - Reading {:s}'.format(i, os.path.basename(fnames[i])))
exp = int(os.path.basename(fnames[i]).split('.gr3')[0].split('_')[1])
gr3 = Gr3()
gr3.read(fnames[i])
gr3data = gr3.nodes
gr3shape = gr3data.shape
else:
gr3shape = None
exp = None
gr3shape = comm.bcast(gr3shape, root=0)
exp = comm.bcast(exp, root=0)
if rank != 0:
gr3data = np.empty(gr3shape)
comm.Bcast(gr3data, root=0)
gr3append = np.array([Pixel(i[0], i[1], i[2], i[3], exp) for i in gr3data[rank*chunksize:(rank+1)*chunksize]])
gr3stack = np.append(gr3stack, [gr3append], axis=0)
stackshape = gr3stack.shape
gr3stack = np.sort(gr3stack, axis=0)
# Continue sorting the rest of the files
for i in np.arange(len(fnames))[consider:len(fnames)]:
if rank == 0:
print('{:04d} - Reading {:s}'.format(i, os.path.basename(fnames[i])))
exp = int(os.path.basename(fnames[i]).split('.gr3')[0].split('_')[1])
gr3 = Gr3()
gr3.read(fnames[i])
gr3data = gr3.nodes
gr3shape = gr3data.shape
else:
gr3shape = None
exp = None
gr3shape = comm.bcast(gr3shape, root=0)
exp = comm.bcast(exp, root=0)
if rank != 0:
gr3data = np.empty(gr3shape)
comm.Bcast(gr3data, root=0)
gr3append = np.array([Pixel(i[0], i[1], i[2], i[3], exp) for i in gr3data[rank*chunksize:(rank+1)*chunksize]])
gr3stack = np.append(gr3stack, [gr3append], axis=0)
gr3stack =
|
np.sort(gr3stack, axis=0)
|
numpy.sort
|
import pytest
import numpy as np
from bbox import BBox2D, BBox2DList
from bbox.box_modes import XYXY, XYWH
class TestBBox2DList(object):
@classmethod
def setup_class(cls):
cls.n = 10
cls.l = [BBox2D(np.random.randint(0, 1024, size=4))
for _ in range(cls.n)]
cls.bbl = BBox2DList(cls.l)
def test_null(self):
bbl = BBox2DList([])
assert bbl.shape == (0, 4)
def test_len(self):
assert len(self.bbl) == self.n
def test_init(self):
bbl = BBox2DList(self.bbl)
assert np.array_equal(bbl.numpy(), self.bbl.numpy())
def test_init_invalid(self):
with pytest.raises(TypeError):
BBox2DList("1, 2, 3, 4")
def test_init_invalid_element_type(self):
with pytest.raises(TypeError):
BBox2DList(["1, 2, 3, 4", [1, 2, 3, 4]])
def test_init_empty_ndarray(self):
bbl = BBox2DList(np.empty((0, 4)))
assert bbl.bboxes.shape == (0, 4)
def test_init_vector(self):
bbl = BBox2DList(np.asarray([0, 1, 2, 4]))
assert bbl.bboxes.shape == (1, 4)
def test_init_invalid_dims(self):
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 3))
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 5))
with pytest.raises(ValueError):
BBox2DList(np.random.rand(10, 1, 4))
def test_box_shapes(self):
n = 10
l = [BBox2D(np.random.randint(0, 1024, size=4)) for _ in range(n)]
bbl = BBox2DList(l)
assert bbl.shape == (n, 4)
lx1 = np.array([b.x1 for b in l])
lx2 = np.array([b.x2 for b in l])
ly1 = np.array([b.y1 for b in l])
ly2 = np.array([b.y2 for b in l])
assert lx1.shape == bbl.x1.shape
assert ly1.shape == bbl.y1.shape
assert lx2.shape == bbl.x2.shape
assert ly2.shape == bbl.y2.shape
assert np.array_equal(lx1, bbl.x1)
assert np.array_equal(lx2, bbl.x2)
assert np.array_equal(ly1, bbl.y1)
assert np.array_equal(ly2, bbl.y2)
assert bbl.x1.shape == (n,)
def test_equality(self):
bblist = BBox2DList(self.l)
assert bblist == self.bbl
def test_inequality(self):
bbl = BBox2DList([BBox2D(np.random.randint(0, 1024, size=4))
for _ in range(self.n)])
assert bbl != self.bbl
def test_equality_invalid(self):
bblist = BBox2DList(self.l)
assert bblist != repr(self.bbl)
def test_getitem(self):
assert self.bbl[3] == self.l[3]
def test_getitem_invalid_key(self):
with pytest.raises(IndexError):
self.bbl['random']
with pytest.raises(IndexError):
self.bbl[30]
def test_setitem(self):
self.bbl[0] = [5, 6, 7, 8]
self.bbl[1] = BBox2D([1, 2, 3, 4])
assert np.array_equal(self.bbl[0], BBox2D([5, 6, 7, 8]))
assert np.array_equal(self.bbl[1], BBox2D([1, 2, 3, 4]))
def test_x1_getter(self):
assert np.array_equal(self.bbl.x1, self.bbl.bboxes[:, 0])
def test_x1_setter(self):
bbl = self.bbl.copy()
bbl.x1 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.x1, np.zeros(bbl.shape[0]))
def test_y1_getter(self):
assert np.array_equal(self.bbl.y1, self.bbl.bboxes[:, 1])
def test_y1_setter(self):
bbl = self.bbl.copy()
bbl.y1 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.y1, np.zeros(bbl.shape[0]))
def test_x2_getter(self):
assert np.array_equal(self.bbl.x2, self.bbl.bboxes[:, 2])
def test_x2_setter(self):
bbl = self.bbl.copy()
bbl.x2 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.x2, np.zeros(bbl.shape[0]))
def test_y2_getter(self):
assert np.array_equal(self.bbl.y2, self.bbl.bboxes[:, 3])
def test_y2_setter(self):
bbl = self.bbl.copy()
bbl.y2 = np.zeros(bbl.shape[0])
assert np.array_equal(bbl.y2, np.zeros(bbl.shape[0]))
def test_invalid_setter(self):
"""
One test is sufficient since all setters use the same verification function
"""
bbl = self.bbl.copy()
with pytest.raises(TypeError):
bbl.x1 = "0," * self.bbl.shape[0]
with pytest.raises(ValueError):
bbl.x1 = np.zeros((5, 4))
with pytest.raises(ValueError):
bbl.x1 = np.zeros(5)
def test_width_getter(self):
w = self.bbl.bboxes[:, 2] - self.bbl.bboxes[:, 0] + 1
assert np.array_equal(self.bbl.w, w)
assert np.array_equal(self.bbl.width, w)
def test_width_setter(self):
bbl = self.bbl.copy()
w = np.ones(bbl.shape[0])
bbl.w = w
assert np.array_equal(bbl.w, w)
assert np.array_equal(bbl.width, w)
def test_height_getter(self):
h = self.bbl.bboxes[:, 3] - self.bbl.bboxes[:, 1] + 1
assert
|
np.array_equal(self.bbl.h, h)
|
numpy.array_equal
|
from __future__ import print_function
import colorsys
import inspect
import json
import math
import os
import pickle
import platform
import signal
import numpy as np
import pybullet as p
import random
import sys
import time
import datetime
import shutil
from collections import defaultdict, deque, namedtuple
from itertools import product, combinations, count, cycle, islice
from multiprocessing import TimeoutError
from contextlib import contextmanager
from .transformations import quaternion_from_matrix, unit_vector, euler_from_quaternion, quaternion_slerp
directory = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(directory, '../motion'))
from motion_planners.rrt_connect import birrt, direct_path
#from ..motion.motion_planners.rrt_connect import birrt, direct_path
# from future_builtins import map, filter
# from builtins import input # TODO - use future
try:
user_input = raw_input
except NameError:
user_input = input
INF = np.inf
PI = np.pi
CIRCULAR_LIMITS = -PI, PI
UNBOUNDED_LIMITS = -INF, INF
DEFAULT_TIME_STEP = 1./240. # seconds
#####################################
DRAKE_PATH = 'models/drake/'
# Models
# Robots
MODEL_DIRECTORY = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'models/'))
ROOMBA_URDF = 'models/turtlebot/roomba.urdf'
TURTLEBOT_URDF = 'models/turtlebot/turtlebot_holonomic.urdf'
DRAKE_IIWA_URDF = 'models/drake/iiwa_description/urdf/iiwa14_polytope_collision.urdf'
WSG_50_URDF = 'models/drake/wsg_50_description/urdf/wsg_50_mesh_visual.urdf' # wsg_50 | wsg_50_mesh_visual | wsg_50_mesh_collision
#SCHUNK_URDF = 'models/drake/wsg_50_description/sdf/schunk_wsg_50.sdf'
PANDA_HAND_URDF = "models/franka_description/robots/hand.urdf"
PANDA_ARM_URDF = "models/franka_description/robots/panda_arm_hand.urdf"
# PyBullet Robots
#PYBULLET_DIRECTORY = add_data_path()
KUKA_IIWA_URDF = "kuka_iiwa/model.urdf"
KUKA_IIWA_GRIPPER_SDF = "kuka_iiwa/kuka_with_gripper.sdf"
R2D2_URDF = "r2d2.urdf"
MINITAUR_URDF = "quadruped/minitaur.urdf"
HUMANOID_MJCF = "mjcf/humanoid.xml"
HUSKY_URDF = "husky/husky.urdf"
RACECAR_URDF = 'racecar/racecar.urdf' # racecar_differential.urdf
PR2_GRIPPER = 'pr2_gripper.urdf'
PANDA_URDF = 'franka_panda/panda.urdf'
# PyBullet wsg50 robots
# wsg50_one_motor_gripper.sdf - no visual
# wsg50_one_motor_gripper_free_base.sdf - seg fault
# wsg50_one_motor_gripper_left_finger.urdf - no fingers
# wsg50_one_motor_gripper_new.sdf - no visual
# wsg50_one_motor_gripper_new_free_base.sdf - octopus
# wsg50_one_motor_gripper_no_finger.sdf - no visual
# wsg50_one_motor_gripper_right_finger.urdf - no fingers
WSG_GRIPPER = 'gripper/wsg50_one_motor_gripper_new.sdf'
# PyBullet Objects
KIVA_SHELF_SDF = "kiva_shelf/model.sdf"
FLOOR_URDF = 'plane.urdf'
TABLE_URDF = 'table/table.urdf'
# Objects
SMALL_BLOCK_URDF = "models/drake/objects/block_for_pick_and_place.urdf"
BLOCK_URDF = "models/drake/objects/block_for_pick_and_place_mid_size.urdf"
SINK_URDF = 'models/sink.urdf'
STOVE_URDF = 'models/stove.urdf'
#####################################
# I/O
SEPARATOR = '\n' + 50*'-' + '\n'
#def inf_generator():
# return iter(int, 1)
inf_generator = count
def print_separator(n=50):
print('\n' + n*'-' + '\n')
def is_remote():
return 'SSH_CONNECTION' in os.environ
def is_darwin(): # TODO: change loading accordingly
return platform.system() == 'Darwin' # platform.release()
#return sys.platform == 'darwin'
def read(filename):
with open(filename, 'r') as f:
return f.read()
def write(filename, string):
with open(filename, 'w') as f:
f.write(string)
def read_pickle(filename):
# Can sometimes read pickle3 from python2 by calling twice
# Can possibly read pickle2 from python3 by using encoding='latin1'
with open(filename, 'rb') as f:
return pickle.load(f)
def write_pickle(filename, data): # NOTE - cannot pickle lambda or nested functions
with open(filename, 'wb') as f:
pickle.dump(data, f)
def read_json(path):
return json.loads(read(path))
def write_json(path, data):
with open(path, 'w') as f:
json.dump(data, f, indent=2, sort_keys=True)
def safe_remove(path):
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def ensure_dir(f):
d = os.path.dirname(f)
if not os.path.exists(d):
os.makedirs(d)
##################################################
def safe_zip(sequence1, sequence2): # TODO: *args
sequence1, sequence2 = list(sequence1), list(sequence2)
assert len(sequence1) == len(sequence2)
return list(zip(sequence1, sequence2))
def get_pairs(sequence):
# TODO: lazy version
sequence = list(sequence)
return safe_zip(sequence[:-1], sequence[1:])
def get_wrapped_pairs(sequence):
# TODO: lazy version
sequence = list(sequence)
# zip(sequence, sequence[-1:] + sequence[:-1])
return safe_zip(sequence, sequence[1:] + sequence[:1])
def clip(value, min_value=-INF, max_value=+INF):
return min(max(min_value, value), max_value)
def randomize(iterable): # TODO: bisect
sequence = list(iterable)
random.shuffle(sequence)
return sequence
def get_random_seed():
return random.getstate()[1][0]
def get_numpy_seed():
return np.random.get_state()[1][0]
def set_random_seed(seed):
if seed is not None:
random.seed(seed)
def wrap_numpy_seed(seed):
return seed % (2**32)
def set_numpy_seed(seed):
# These generators are different and independent
if seed is not None:
np.random.seed(wrap_numpy_seed(seed))
#print('Seed:', seed)
DATE_FORMAT = '%y-%m-%d_%H-%M-%S'
def get_date():
return datetime.datetime.now().strftime(DATE_FORMAT)
def implies(p1, p2):
return not p1 or p2
def roundrobin(*iterables):
# https://docs.python.org/3.1/library/itertools.html#recipes
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to <NAME>
pending = len(iterables)
nexts = cycle(iter(it).__next__ for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = cycle(islice(nexts, pending))
def chunks(sequence, n=1):
for i in range(0, len(sequence), n):
yield sequence[i:i + n]
def get_function_name(depth=1):
return inspect.stack()[depth][3]
def load_yaml(path):
import yaml
# grep -r --include="*.py" "yaml\." *
# yaml.dump()
with open(path, 'r') as f:
try:
return yaml.safe_load(f)
except yaml.YAMLError as exc:
raise exc
def flatten(iterable_of_iterables):
return (item for iterables in iterable_of_iterables for item in iterables)
def find(test, sequence):
for item in sequence:
if test(item):
return item
return None
def merge_dicts(*args):
result = {}
for d in args:
result.update(d)
return result
# return dict(reduce(operator.add, [d.items() for d in args]))
##################################################
BYTES_PER_KILOBYTE = math.pow(2, 10)
BYTES_PER_GIGABYTE = math.pow(2, 30)
KILOBYTES_PER_GIGABYTE = BYTES_PER_GIGABYTE / BYTES_PER_KILOBYTE
def get_memory_in_kb():
# https://pypi.org/project/psutil/
# https://psutil.readthedocs.io/en/latest/
import psutil
#rss: aka "Resident Set Size", this is the non-swapped physical memory a process has used. (bytes)
#vms: aka "Virtual Memory Size", this is the total amount of virtual memory used by the process. (bytes)
#shared: (Linux) memory that could be potentially shared with other processes.
#text (Linux, BSD): aka TRS (text resident set) the amount of memory devoted to executable code.
#data (Linux, BSD): aka DRS (data resident set) the amount of physical memory devoted to other than executable code.
#lib (Linux): the memory used by shared libraries.
#dirty (Linux): the number of dirty pages.
#pfaults (macOS): number of page faults.
#pageins (macOS): number of actual pageins.
process = psutil.Process(os.getpid())
#process.pid()
#process.ppid()
pmem = process.memory_info() # this seems to actually get the current memory!
return pmem.vms / BYTES_PER_KILOBYTE
#print(process.memory_full_info())
#print(process.memory_percent())
# process.rlimit(psutil.RLIMIT_NOFILE) # set resource limits (Linux only)
#print(psutil.virtual_memory())
#print(psutil.swap_memory())
#print(psutil.pids())
def raise_timeout(signum, frame):
raise TimeoutError()
@contextmanager
def timeout(duration):
# TODO: function that wraps around
# https://www.jujens.eu/posts/en/2018/Jun/02/python-timeout-function/
# https://code-maven.com/python-timeout
# https://pypi.org/project/func-timeout/
# https://pypi.org/project/timeout-decorator/
# https://eli.thegreenplace.net/2011/08/22/how-not-to-set-a-timeout-on-a-computation-in-python
# https://docs.python.org/3/library/signal.html
# https://docs.python.org/3/library/contextlib.html
# https://stackoverflow.com/a/22348885
assert 0 < duration
if duration == INF:
yield
return
# Register a function to raise a TimeoutError on the signal
signal.signal(signal.SIGALRM, raise_timeout)
# Schedule the signal to be sent after ``duration``
signal.alarm(int(math.ceil(duration)))
try:
yield
except TimeoutError as e:
print('Timeout after {} sec'.format(duration))
#traceback.print_exc()
pass
finally:
# Unregister the signal so it won't be triggered
# if the timeout is not reached
signal.signal(signal.SIGALRM, signal.SIG_IGN)
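# Usage sketch for the timeout context manager above (illustration only): the wrapped
# body is interrupted with a TimeoutError once the duration elapses. Because it relies
# on SIGALRM it only applies on Unix-like systems and in the main thread; the wrapped
# call below is hypothetical.
# with timeout(2.0):
#     plan = expensive_motion_planning_call()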
#####################################
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python/14797594#14797594
# https://stackoverflow.com/questions/4178614/suppressing-output-of-module-calling-outside-library
# https://stackoverflow.com/questions/4675728/redirect-stdout-to-a-file-in-python/22434262#22434262
class HideOutput(object):
'''
A context manager that block stdout for its scope, usage:
with HideOutput():
os.system('ls -l')
'''
DEFAULT_ENABLE = True
def __init__(self, enable=None):
if enable is None:
enable = self.DEFAULT_ENABLE
self.enable = enable
if not self.enable:
return
sys.stdout.flush()
self._origstdout = sys.stdout
self._oldstdout_fno = os.dup(sys.stdout.fileno())
self._devnull = os.open(os.devnull, os.O_WRONLY)
def __enter__(self):
if not self.enable:
return
self._newstdout = os.dup(1)
os.dup2(self._devnull, 1)
os.close(self._devnull)
sys.stdout = os.fdopen(self._newstdout, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.enable:
return
sys.stdout.close()
sys.stdout = self._origstdout
sys.stdout.flush()
os.dup2(self._oldstdout_fno, 1)
os.close(self._oldstdout_fno) # Added
#####################################
# Colors
RED = (1, 0, 0, 1)
GREEN = (0, 1, 0, 1)
BLUE = (0, 0, 1, 1)
BLACK = (0, 0, 0, 1)
WHITE = (1, 1, 1, 1)
BROWN = (0.396, 0.263, 0.129, 1)
TAN = (0.824, 0.706, 0.549, 1)
GREY = (0.5, 0.5, 0.5, 1)
YELLOW = (1, 1, 0, 1)
TRANSPARENT = (0, 0, 0, 0)
ACHROMATIC_COLORS = {
'white': WHITE,
'grey': GREY,
'black': BLACK,
}
CHROMATIC_COLORS = {
'red': RED,
'green': GREEN,
'blue': BLUE,
}
COLOR_FROM_NAME = merge_dicts(ACHROMATIC_COLORS, CHROMATIC_COLORS)
def apply_alpha(color, alpha=1.0):
if color is None:
return None
return tuple(color[:3]) + (alpha,)
def spaced_colors(n, s=1, v=1):
return [colorsys.hsv_to_rgb(h, s, v) for h in np.linspace(0, 1, n, endpoint=False)]
#####################################
# Savers
# TODO: contextlib
class Saver(object):
def restore(self):
raise NotImplementedError()
def __enter__(self):
# TODO: move the saving to enter?
pass
def __exit__(self, type, value, traceback):
self.restore()
class ClientSaver(Saver):
def __init__(self, new_client=None):
self.client = CLIENT
if new_client is not None:
set_client(new_client)
def restore(self):
set_client(self.client)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.client)
class VideoSaver(Saver):
def __init__(self, path):
self.path = path
if path is None:
self.log_id = None
else:
name, ext = os.path.splitext(path)
assert ext == '.mp4'
# STATE_LOGGING_PROFILE_TIMINGS, STATE_LOGGING_ALL_COMMANDS
# p.submitProfileTiming("pythontest")
self.log_id = p.startStateLogging(p.STATE_LOGGING_VIDEO_MP4, fileName=path, physicsClientId=CLIENT)
def restore(self):
if self.log_id is not None:
p.stopStateLogging(self.log_id)
print('Saved', self.path)
#####################################
class PoseSaver(Saver):
def __init__(self, body):
self.body = body
self.pose = get_pose(self.body)
self.velocity = get_velocity(self.body)
def apply_mapping(self, mapping):
self.body = mapping.get(self.body, self.body)
def restore(self):
set_pose(self.body, self.pose)
set_velocity(self.body, *self.velocity)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.body)
class ConfSaver(Saver):
def __init__(self, body, joints=None):
self.body = body
if joints is None:
joints = get_movable_joints(self.body)
self.joints = joints
self.positions = get_joint_positions(self.body, self.joints)
self.velocities = get_joint_velocities(self.body, self.joints)
@property
def conf(self):
return self.positions
def apply_mapping(self, mapping):
self.body = mapping.get(self.body, self.body)
def restore(self):
#set_configuration(self.body, self.conf)
#set_joint_positions(self.body, self.joints, self.positions)
set_joint_states(self.body, self.joints, self.positions, self.velocities)
#set_joint_velocities(self.body, self.joints, self.velocities)
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.body)
class BodySaver(Saver):
def __init__(self, body, **kwargs): #, pose=None):
#if pose is None:
# pose = get_pose(body)
self.body = body
self.pose_saver = PoseSaver(body)
self.conf_saver = ConfSaver(body, **kwargs)
self.savers = [self.pose_saver, self.conf_saver]
def apply_mapping(self, mapping):
for saver in self.savers:
saver.apply_mapping(mapping)
def restore(self):
for saver in self.savers:
saver.restore()
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, self.body)
class WorldSaver(Saver):
def __init__(self, bodies=None):
if bodies is None:
bodies = get_bodies()
self.bodies = bodies
self.body_savers = [BodySaver(body) for body in self.bodies]
# TODO: add/remove new bodies
def restore(self):
for body_saver in self.body_savers:
body_saver.restore()
#####################################
# Simulation
CLIENTS = {} # TODO: rename to include locked
CLIENT = 0
def get_client(client=None):
if client is None:
return CLIENT
return client
def set_client(client):
global CLIENT
CLIENT = client
ModelInfo = namedtuple('URDFInfo', ['name', 'path', 'fixed_base', 'scale'])
INFO_FROM_BODY = {}
def get_model_info(body):
key = (CLIENT, body)
return INFO_FROM_BODY.get(key, None)
def get_urdf_flags(cache=False, cylinder=False):
# by default, Bullet disables self-collision
# URDF_INITIALIZE_SAT_FEATURES
# URDF_ENABLE_CACHED_GRAPHICS_SHAPES seems to help
# but URDF_INITIALIZE_SAT_FEATURES does not (might need to be provided a mesh)
# flags = p.URDF_INITIALIZE_SAT_FEATURES | p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES
flags = 0
if cache:
flags |= p.URDF_ENABLE_CACHED_GRAPHICS_SHAPES
if cylinder:
flags |= p.URDF_USE_IMPLICIT_CYLINDER
return flags
def load_pybullet(filename, fixed_base=False, scale=1., **kwargs):
# fixed_base=False implies infinite base mass
with LockRenderer():
if filename.endswith('.urdf'):
flags = get_urdf_flags(**kwargs)
body = p.loadURDF(filename, useFixedBase=fixed_base, flags=flags,
globalScaling=scale, physicsClientId=CLIENT)
elif filename.endswith('.sdf'):
body = p.loadSDF(filename, physicsClientId=CLIENT)
elif filename.endswith('.xml'):
body = p.loadMJCF(filename, physicsClientId=CLIENT)
elif filename.endswith('.bullet'):
body = p.loadBullet(filename, physicsClientId=CLIENT)
elif filename.endswith('.obj'):
# TODO: fixed_base => mass = 0?
body = create_obj(filename, scale=scale, **kwargs)
else:
raise ValueError(filename)
INFO_FROM_BODY[CLIENT, body] = ModelInfo(None, filename, fixed_base, scale)
return body
def set_caching(cache):
p.setPhysicsEngineParameter(enableFileCaching=int(cache), physicsClientId=CLIENT)
def load_model_info(info):
# TODO: disable file caching to reuse old filenames
# p.setPhysicsEngineParameter(enableFileCaching=0, physicsClientId=CLIENT)
if info.path.endswith('.urdf'):
return load_pybullet(info.path, fixed_base=info.fixed_base, scale=info.scale)
if info.path.endswith('.obj'):
mass = STATIC_MASS if info.fixed_base else 1.
return create_obj(info.path, mass=mass, scale=info.scale)
raise NotImplementedError(info.path)
URDF_FLAGS = [p.URDF_USE_INERTIA_FROM_FILE,
p.URDF_USE_SELF_COLLISION,
p.URDF_USE_SELF_COLLISION_EXCLUDE_PARENT,
p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS]
def get_model_path(rel_path): # TODO: add to search path
directory = os.path.dirname(os.path.abspath(__file__))
return os.path.join(directory, '..', rel_path)
def load_model(rel_path, pose=None, **kwargs):
# TODO: error with loadURDF when loading MESH visual and CYLINDER collision
abs_path = get_model_path(rel_path)
add_data_path()
#with LockRenderer():
body = load_pybullet(abs_path, **kwargs)
if pose is not None:
set_pose(body, pose)
return body
#TOOLS_VERSION = date.date()
def get_version(): # year-month-0-day format
s = str(p.getAPIVersion(physicsClientId=CLIENT))
return datetime.date(year=int(s[:4]), month=int(s[4:6]), day=int(s[7:9]))
#####################################
# class World(object):
# def __init__(self, client):
# self.client = client
# self.bodies = {}
# def activate(self):
# set_client(self.client)
# def load(self, path, name=None, fixed_base=False, scale=1.):
# body = p.loadURDF(path, useFixedBase=fixed_base, physicsClientId=self.client)
# self.bodies[body] = URDFInfo(name, path, fixed_base, scale)
# return body
# def remove(self, body):
# del self.bodies[body]
# return p.removeBody(body, physicsClientId=self.client)
# def reset(self):
# p.resetSimulation(physicsClientId=self.client)
# self.bodies = {}
# # TODO: with statement
# def copy(self):
# raise NotImplementedError()
# def __repr__(self):
# return '{}({})'.format(self.__class__.__name__, len(self.bodies))
#####################################
def elapsed_time(start_time):
return time.time() - start_time
MouseEvent = namedtuple('MouseEvent', ['eventType', 'mousePosX', 'mousePosY', 'buttonIndex', 'buttonState'])
def get_mouse_events():
return list(MouseEvent(*event) for event in p.getMouseEvents(physicsClientId=CLIENT))
def update_viewer():
# https://docs.python.org/2/library/select.html
# events = p.getKeyboardEvents() # TODO: only works when the viewer is in focus
get_mouse_events()
# for k, v in keys.items():
# #p.KEY_IS_DOWN, p.KEY_WAS_RELEASED, p.KEY_WAS_TRIGGERED
# if (k == p.B3G_RETURN) and (v & p.KEY_WAS_TRIGGERED):
# return
# time.sleep(1e-3) # Doesn't work
# disable_gravity()
def wait_for_duration(duration): #, dt=0):
t0 = time.time()
while elapsed_time(t0) <= duration:
update_viewer()
def simulate_for_duration(duration):
dt = get_time_step()
for i in range(int(duration / dt)):
step_simulation()
def get_time_step():
# {'gravityAccelerationX', 'useRealTimeSimulation', 'gravityAccelerationZ', 'numSolverIterations',
# 'gravityAccelerationY', 'numSubSteps', 'fixedTimeStep'}
return p.getPhysicsEngineParameters(physicsClientId=CLIENT)['fixedTimeStep']
def enable_separating_axis_test():
p.setPhysicsEngineParameter(enableSAT=1, physicsClientId=CLIENT)
#p.setCollisionFilterPair()
#p.setCollisionFilterGroupMask()
#p.setInternalSimFlags()
# enableFileCaching: Set to 0 to disable file caching, such as .obj wavefront file loading
#p.getAPIVersion() # TODO: check that API is up-to-date
#p.isNumpyEnabled()
def simulate_for_sim_duration(sim_duration, real_dt=0, frequency=INF):
t0 = time.time()
sim_dt = get_time_step()
sim_time = 0
last_print = 0
while sim_time < sim_duration:
if frequency < (sim_time - last_print):
print('Sim time: {:.3f} | Real time: {:.3f}'.format(sim_time, elapsed_time(t0)))
last_print = sim_time
step_simulation()
sim_time += sim_dt
time.sleep(real_dt)
def wait_for_user(message='Press enter to continue'):
if has_gui() and is_darwin():
# OS X doesn't multi-thread the OpenGL visualizer
#wait_for_interrupt()
return threaded_input(message)
return user_input(message)
def wait_if_gui(*args, **kwargs):
if has_gui():
wait_for_user(*args, **kwargs)
def is_unlocked():
return CLIENTS[CLIENT] is True
def wait_if_unlocked(*args, **kwargs):
if is_unlocked():
wait_for_user(*args, **kwargs)
def wait_for_interrupt(max_time=np.inf):
"""
Hold Ctrl to move the camera as well as zoom
"""
print('Press Ctrl-C to continue')
try:
wait_for_duration(max_time)
except KeyboardInterrupt:
pass
finally:
print()
def set_preview(enable):
p.configureDebugVisualizer(p.COV_ENABLE_GUI, enable, physicsClientId=CLIENT)
p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, enable, physicsClientId=CLIENT)
p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, enable, physicsClientId=CLIENT)
p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, enable, physicsClientId=CLIENT)
#p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, False, physicsClientId=CLIENT)
#p.configureDebugVisualizer(p.COV_ENABLE_SINGLE_STEP_RENDERING, True, physicsClientId=CLIENT)
#p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, False, physicsClientId=CLIENT)
#p.configureDebugVisualizer(p.COV_ENABLE_WIREFRAME, True, physicsClientId=CLIENT)
def enable_preview():
set_preview(enable=True)
def disable_preview():
set_preview(enable=False)
def set_renderer(enable):
client = CLIENT
if not has_gui(client):
return
CLIENTS[client] = enable
p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, int(enable), physicsClientId=client)
class LockRenderer(Saver):
# disabling rendering temporary makes adding objects faster
def __init__(self, lock=True):
self.client = CLIENT
self.state = CLIENTS[self.client]
# skip if the visualizer isn't active
if has_gui(self.client) and lock:
set_renderer(enable=False)
def restore(self):
if not has_gui(self.client):
return
assert self.state is not None
if self.state != CLIENTS[self.client]:
set_renderer(enable=self.state)
def connect(use_gui=True, shadows=True, color=None, width=None, height=None):
# Shared Memory: execute the physics simulation and rendering in a separate process
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/vrminitaur.py#L7
# make sure to compile pybullet with PYBULLET_USE_NUMPY enabled
if use_gui and not is_darwin() and ('DISPLAY' not in os.environ):
use_gui = False
print('No display detected!')
method = p.GUI if use_gui else p.DIRECT
with HideOutput():
# --window_backend=2 --render_device=0'
# options="--mp4=\"test.mp4\" --mp4fps=240"
options = ''
if color is not None:
options += '--background_color_red={} --background_color_green={} --background_color_blue={}'.format(*color)
if width is not None:
options += '--width={}'.format(width)
if height is not None:
options += '--height={}'.format(height)
sim_id = p.connect(method, options=options) # key=None,
#sim_id = p.connect(p.GUI, options="--opengl2") if use_gui else p.connect(p.DIRECT)
assert 0 <= sim_id
#sim_id2 = p.connect(p.SHARED_MEMORY)
#print(sim_id, sim_id2)
CLIENTS[sim_id] = True if use_gui else None
if use_gui:
# p.COV_ENABLE_PLANAR_REFLECTION
# p.COV_ENABLE_SINGLE_STEP_RENDERING
disable_preview()
p.configureDebugVisualizer(p.COV_ENABLE_TINY_RENDERER, False, physicsClientId=sim_id) # TODO: does this matter?
p.configureDebugVisualizer(p.COV_ENABLE_SHADOWS, shadows, physicsClientId=sim_id)
p.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, False, physicsClientId=sim_id) # mouse moves meshes
p.configureDebugVisualizer(p.COV_ENABLE_KEYBOARD_SHORTCUTS, False, physicsClientId=sim_id)
# you can also use GUI mode, for faster OpenGL rendering (instead of TinyRender CPU)
#visualizer_options = {
# p.COV_ENABLE_WIREFRAME: 1,
# p.COV_ENABLE_SHADOWS: 0,
# p.COV_ENABLE_RENDERING: 0,
# p.COV_ENABLE_TINY_RENDERER: 1,
# p.COV_ENABLE_RGB_BUFFER_PREVIEW: 0,
# p.COV_ENABLE_DEPTH_BUFFER_PREVIEW: 0,
# p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW: 0,
# p.COV_ENABLE_VR_RENDER_CONTROLLERS: 0,
# p.COV_ENABLE_VR_PICKING: 0,
# p.COV_ENABLE_VR_TELEPORTING: 0,
#}
#for pair in visualizer_options.items():
# p.configureDebugVisualizer(*pair)
return sim_id
def threaded_input(*args, **kwargs):
# OS X doesn't multi-thread the OpenGL visualizer
# http://openrave.org/docs/0.8.2/_modules/openravepy/misc/#SetViewerUserThread
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/userData.py
# https://github.com/bulletphysics/bullet3/tree/master/examples/ExampleBrowser
#from pybullet_utils import bullet_client
#from pybullet_utils.bullet_client import BulletClient
#server = bullet_client.BulletClient(connection_mode=p.SHARED_MEMORY_SERVER) # GUI_SERVER
#sim_id = p.connect(p.GUI)
#print(dir(server))
#client = bullet_client.BulletClient(connection_mode=p.SHARED_MEMORY)
#sim_id = p.connect(p.SHARED_MEMORY)
#threading = __import__('threading')
import threading
data = []
thread = threading.Thread(target=lambda: data.append(user_input(*args, **kwargs)), args=[])
thread.start()
#threading.enumerate()
#thread_id = 0
#for tid, tobj in threading._active.items():
# if tobj is thread:
# thread_id = tid
# break
try:
while thread.is_alive():
update_viewer()
finally:
thread.join()
return data[-1]
def disconnect():
# TODO: change CLIENT?
if CLIENT in CLIENTS:
del CLIENTS[CLIENT]
with HideOutput():
return p.disconnect(physicsClientId=CLIENT)
def is_connected():
return p.getConnectionInfo(physicsClientId=CLIENT)['isConnected']
def get_connection(client=None):
return p.getConnectionInfo(physicsClientId=get_client(client))['connectionMethod']
def has_gui(client=None):
return get_connection(get_client(client)) == p.GUI
def get_data_path():
import pybullet_data
return pybullet_data.getDataPath()
def add_data_path(data_path=None):
if data_path is None:
data_path = get_data_path()
p.setAdditionalSearchPath(data_path)
return data_path
GRAVITY = 9.8
def enable_gravity():
p.setGravity(0, 0, -GRAVITY, physicsClientId=CLIENT)
def disable_gravity():
p.setGravity(0, 0, 0, physicsClientId=CLIENT)
def step_simulation():
p.stepSimulation(physicsClientId=CLIENT)
def set_real_time(real_time):
p.setRealTimeSimulation(int(real_time), physicsClientId=CLIENT)
def enable_real_time():
set_real_time(True)
def disable_real_time():
set_real_time(False)
def update_state():
# TODO: this doesn't seem to automatically update still
disable_gravity()
#step_simulation()
#for body in get_bodies():
# for link in get_links(body):
# # if set to 1 (or True), the Cartesian world position/orientation
# # will be recomputed using forward kinematics.
# get_link_state(body, link)
#for body in get_bodies():
# get_pose(body)
# for joint in get_joints(body):
# get_joint_position(body, joint)
#p.getKeyboardEvents()
#p.getMouseEvents()
def reset_simulation():
p.resetSimulation(physicsClientId=CLIENT)
#####################################
Pixel = namedtuple('Pixel', ['row', 'column'])
def get_camera_matrix(width, height, fx, fy=None):
if fy is None:
fy = fx
#cx, cy = width / 2., height / 2.
cx, cy = (width - 1) / 2., (height - 1) / 2.
return np.array([[fx, 0, cx],
[0, fy, cy],
[0, 0, 1]])
def clip_pixel(pixel, width, height):
x, y = pixel # TODO: row, column instead?
return clip(x, 0, width-1), clip(y, 0, height-1)
def ray_from_pixel(camera_matrix, pixel):
return np.linalg.inv(camera_matrix).dot(np.append(pixel, 1))
def pixel_from_ray(camera_matrix, ray):
return camera_matrix.dot(np.array(ray) / ray[2])[:2]
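# Consistency sketch for the two helpers above (illustration only): mapping a pixel to a
# ray and back recovers the original pixel. The 640x480 resolution and focal length are
# arbitrary assumptions.
# >>> K = get_camera_matrix(width=640, height=480, fx=525.)
# >>> pixel_from_ray(K, ray_from_pixel(K, [100., 200.]))  # ~ array([100., 200.])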
def dimensions_from_camera_matrix(camera_matrix):
cx, cy = np.array(camera_matrix)[:2, 2]
width, height = (2*cx + 1), (2*cy + 1)
return width, height
def get_field_of_view(camera_matrix):
dimensions = np.array(dimensions_from_camera_matrix(camera_matrix))
focal_lengths = np.array([camera_matrix[i, i] for i in range(2)])
return 2*np.arctan(np.divide(dimensions, 2*focal_lengths))
def get_focal_lengths(dims, fovs):
return np.divide(dims, np.tan(fovs / 2)) / 2
def pixel_from_point(camera_matrix, point_camera):
px, py = pixel_from_ray(camera_matrix, point_camera)
width, height = dimensions_from_camera_matrix(camera_matrix)
if (0 <= px < width) and (0 <= py < height):
r, c = np.floor([py, px]).astype(int)
return Pixel(r, c)
return None
def get_image_aabb(camera_matrix):
upper = np.array(dimensions_from_camera_matrix(camera_matrix)) - 1
lower = np.zeros(upper.shape)
return AABB(lower, upper)
def get_visible_aabb(camera_matrix, rays):
image_aabb = get_image_aabb(camera_matrix)
rays_aabb = aabb_from_points([pixel_from_ray(camera_matrix, ray) for ray in rays])
intersection = aabb_intersection(image_aabb, rays_aabb)
if intersection is None:
return intersection
return AABB(*np.array(intersection).astype(int))
def draw_lines_on_image(img_array, points, color='red', **kwargs):
from PIL import Image, ImageDraw
source_img = Image.fromarray(img_array)
draw = ImageDraw.Draw(source_img)
draw.line(list(map(tuple, points)), fill=color, **kwargs)
return np.array(source_img)
def draw_box_on_image(img_array, aabb, color='red', **kwargs):
# https://github.com/caelan/ROS-Labeler/blob/master/main.py
# https://github.mit.edu/caelan/rl-plan/blob/master/planar_ml/rect_cnn.py
# https://pillow.readthedocs.io/en/stable/reference/ImageDraw.html
# TODO: annotate boxes with text
from PIL import Image, ImageDraw
source_img = Image.fromarray(img_array)
draw = ImageDraw.Draw(source_img)
#box = list(np.array(aabb).astype(int).flatten())
box = list(map(tuple, aabb))
draw.rectangle(box, fill=None, outline=color, **kwargs)
return np.array(source_img)
def extract_box_from_image(img_array, box):
(x1, y1), (x2, y2) = np.array(box).astype(int)
return img_array[y1:y2+1, x1:x2+1, ...]
#####################################
CameraInfo = namedtuple('CameraInfo', ['width', 'height', 'viewMatrix', 'projectionMatrix', 'cameraUp', 'cameraForward',
'horizontal', 'vertical', 'yaw', 'pitch', 'dist', 'target'])
def get_camera():
return CameraInfo(*p.getDebugVisualizerCamera(physicsClientId=CLIENT))
def set_camera(yaw, pitch, distance, target_position=np.zeros(3)):
p.resetDebugVisualizerCamera(distance, yaw, pitch, target_position, physicsClientId=CLIENT)
def get_pitch(point):
dx, dy, dz = point
return np.math.atan2(dz, np.sqrt(dx ** 2 + dy ** 2))
def get_yaw(point):
dx, dy = point[:2]
return np.math.atan2(dy, dx)
def set_camera_pose(camera_point, target_point=np.zeros(3)):
delta_point = np.array(target_point) - np.array(camera_point)
distance = np.linalg.norm(delta_point)
yaw = get_yaw(delta_point) - np.pi/2 # TODO: hack
pitch = get_pitch(delta_point)
p.resetDebugVisualizerCamera(distance, math.degrees(yaw), math.degrees(pitch),
target_point, physicsClientId=CLIENT)
def set_camera_pose2(world_from_camera, distance=2):
target_camera = np.array([0, 0, distance])
target_world = tform_point(world_from_camera, target_camera)
camera_world = point_from_pose(world_from_camera)
set_camera_pose(camera_world, target_world)
#roll, pitch, yaw = euler_from_quat(quat_from_pose(world_from_camera))
# TODO: assert that roll is about zero?
#p.resetDebugVisualizerCamera(cameraDistance=distance, cameraYaw=math.degrees(yaw), cameraPitch=math.degrees(-pitch),
# cameraTargetPosition=target_world, physicsClientId=CLIENT)
CameraImage = namedtuple('CameraImage', ['rgbPixels', 'depthPixels', 'segmentationMaskBuffer',
'camera_pose', 'camera_matrix'])
#CameraImage = namedtuple('CameraImage', ['rgb', 'depth', 'segmentation', 'camera_pose'])
def demask_pixel(pixel):
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/segmask_linkindex.py
# Not needed when p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX is not enabled
#if 0 <= pixel:
# return None
# Returns a large value when undefined
body = pixel & ((1 << 24) - 1)
link = (pixel >> 24) - 1
return body, link
def save_image(filename, rgba):
import imageio
imageio.imwrite(filename, rgba)
# import scipy.misc
# if filename.endswith('.jpg'):
# scipy.misc.imsave(filename, rgba[:, :, :3])
# elif filename.endswith('.png'):
# scipy.misc.imsave(filename, rgba) # (480, 640, 4)
# # scipy.misc.toimage(image_array, cmin=0.0, cmax=...).save('outfile.jpg')
# else:
# raise ValueError(filename)
print('Saved image at {}'.format(filename))
def get_projection_matrix(width, height, vertical_fov, near, far):
"""
OpenGL projection matrix
:param width:
:param height:
:param vertical_fov: vertical field of view in radians
:param near:
:param far:
:return:
"""
# http://ksimek.github.io/2013/08/13/intrinsic/
# http://www.songho.ca/opengl/gl_projectionmatrix.html
# http://www.songho.ca/opengl/gl_transform.html#matrix
# https://www.edmundoptics.fr/resources/application-notes/imaging/understanding-focal-length-and-field-of-view/
# gluPerspective() requires only 4 parameters; vertical field of view (FOV),
# the aspect ratio of width to height and the distances to near and far clipping planes.
aspect = float(width) / height
fov_degrees = math.degrees(vertical_fov)
projection_matrix = p.computeProjectionMatrixFOV(fov=fov_degrees, aspect=aspect,
nearVal=near, farVal=far, physicsClientId=CLIENT)
#projection_matrix = p.computeProjectionMatrix(left=0, right=width, top=height, bottom=0,
# near=near, far=far, physicsClientId=CLIENT)
return projection_matrix
#return np.reshape(projection_matrix, [4, 4])
def image_from_segmented(segmented, color_from_body=None):
if color_from_body is None:
bodies = get_bodies()
color_from_body = dict(zip(bodies, spaced_colors(len(bodies))))
image = np.zeros(segmented.shape[:2] + (3,))
for r in range(segmented.shape[0]):
for c in range(segmented.shape[1]):
body, link = segmented[r, c, :]
image[r, c, :] = color_from_body.get(body, (0, 0, 0))
return image
def get_image_flags(segment=False, segment_links=False):
if segment:
if segment_links:
return p.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX
return 0 # TODO: adjust output dimension when not segmenting links
return p.ER_NO_SEGMENTATION_MASK
def extract_segmented(seg_image):
segmented = np.zeros(seg_image.shape + (2,))
for r in range(segmented.shape[0]):
for c in range(segmented.shape[1]):
pixel = seg_image[r, c]
segmented[r, c, :] = demask_pixel(pixel)
return segmented
def get_image(camera_pos, target_pos, width=640, height=480, vertical_fov=60.0, near=0.02, far=5.0,
tiny=False, segment=False, **kwargs):
# computeViewMatrixFromYawPitchRoll
up_vector = [0, 0, 1] # up vector of the camera, in Cartesian world coordinates
view_matrix = p.computeViewMatrix(cameraEyePosition=camera_pos, cameraTargetPosition=target_pos,
cameraUpVector=up_vector, physicsClientId=CLIENT)
projection_matrix = get_projection_matrix(width, height, vertical_fov, near, far)
flags = get_image_flags(segment=segment, **kwargs)
# DIRECT mode has no OpenGL, so it requires ER_TINY_RENDERER
renderer = p.ER_TINY_RENDERER if tiny else p.ER_BULLET_HARDWARE_OPENGL
rgb, d, seg = p.getCameraImage(width, height,
viewMatrix=view_matrix,
projectionMatrix=projection_matrix,
shadow=False, # only applies to ER_TINY_RENDERER
flags=flags,
renderer=renderer,
physicsClientId=CLIENT)[2:]
depth = far * near / (far - (far - near) * d)
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/pointCloudFromCameraImage.py
# https://github.com/bulletphysics/bullet3/blob/master/examples/pybullet/examples/getCameraImageTest.py
segmented = None
if segment:
segmented = extract_segmented(seg)
camera_tform =
|
np.reshape(view_matrix, [4, 4])
|
numpy.reshape
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import paddle
import numpy as np
from PIL import Image, ImageOps, ImageFilter, ImageEnhance  # ImageOps/ImageFilter/ImageEnhance are used in the transforms below
from .base import BaseDataSet
__all__ = ['pascal_context_train', 'pascal_context_qucik_val', 'pascal_context_eval']
# globals
context_data_mean = np.array([0.485, 0.456, 0.406]).reshape(3, 1, 1)
context_data_std = np.array([0.229, 0.224, 0.225]).reshape(3, 1, 1)
class Pascal_Context(BaseDataSet):
"""prepare Pascal_Context path_pairs"""
NUM_CLASS = 59
def __init__(self, root='./dataset', split='train', **kwargs):
super(Pascal_Context, self).__init__(root, split, **kwargs)
if os.sep == '\\': # windows
root = root.replace('/', '\\')
assert os.path.exists(root), "please download the pascal_context dataset, put it in the dataset dir, or check root"
self.image_path, self.label_path = self._get_pascal_context_pairs(root, split)
assert len(self.image_path) == len(self.label_path), "please check image_length = label_length"
self.print_param()
def print_param(self):  # used to verify the current dataset's information
print('INFO: dataset_root: {}, split: {}, '
'base_size: {}, crop_size: {}, scale: {}, '
'image_length: {}, label_length: {}'.format(self.root, self.split, self.base_size,
self.crop_size, self.scale, len(self.image_path),
len(self.label_path)))
@staticmethod
def _get_pascal_context_pairs(root, split):
def get_pairs(root, file_txt, label_dir='GroundTruth_trainval_png', img_dir='JPEGImages'):
file_path = os.path.join(root, file_txt)
with open(file_path, 'r') as f:
file_list_item = f.readlines()
image_dir = os.path.join(root, img_dir)
image_path = [os.path.join(image_dir, x.strip() + '.jpg') for x in file_list_item]
gt_dir = os.path.join(root, label_dir)
label_path = [os.path.join(gt_dir, x.strip() + '.png') for x in file_list_item]
return image_path, label_path
if split == 'train':
image_path, label_path = get_pairs(root, 'ImageSets/train.txt')
elif split == 'val':
image_path, label_path = get_pairs(root, 'ImageSets/val.txt')
elif split == 'test':
image_path, label_path = get_pairs(root, 'ImageSets/test.txt')  # returns file paths; the test labels do not actually exist
else: # 'train_val'
image_path1, label_path1 = get_pairs(root, 'ImageSets/train.txt')
image_path2, label_path2 = get_pairs(root, 'ImageSets/val.txt')
image_path, label_path = image_path1 + image_path2, label_path1 + label_path2
return image_path, label_path
def sync_transform(self, image, label, aug=True):
crop_size = self.crop_size
if self.scale:
short_size = random.randint(int(self.base_size * 0.75), int(self.base_size * 2.0))
else:
short_size = self.base_size
# random horizontal flip
if random.random() > 0.5:
image = image.transpose(Image.FLIP_LEFT_RIGHT)
label = label.transpose(Image.FLIP_LEFT_RIGHT)
w, h = image.size
# resize, preserving the aspect ratio
if h > w:
out_w = short_size
out_h = int(1.0 * h / w * out_w)
else:
out_h = short_size
out_w = int(1.0 * w / h * out_h)
image = image.resize((out_w, out_h), Image.BILINEAR)
label = label.resize((out_w, out_h), Image.NEAREST)
# deg = random.uniform(-10, 10)
# image = image.rotate(deg, resample=Image.BILINEAR)
# label = label.rotate(deg, resample=Image.NEAREST)
# pad on all sides
if short_size < crop_size:
pad_h = crop_size - out_h if out_h < crop_size else 0
pad_w = crop_size - out_w if out_w < crop_size else 0
image = ImageOps.expand(image, border=(pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2),
fill=0)
label = ImageOps.expand(label, border=(pad_w // 2, pad_h // 2, pad_w - pad_w // 2, pad_h - pad_h // 2),
fill=0)
# random crop
w, h = image.size
x = random.randint(0, w - crop_size)
y = random.randint(0, h - crop_size)
image = image.crop((x, y, x + crop_size, y + crop_size))
label = label.crop((x, y, x + crop_size, y + crop_size))
if aug:
# Gaussian blur, optional
if random.random() > 0.7:
image = image.filter(ImageFilter.GaussianBlur(radius=random.random()))
# optional
if random.random() > 0.7:
# random brightness
factor = np.random.uniform(0.75, 1.25)
image = ImageEnhance.Brightness(image).enhance(factor)
# color jitter
factor = np.random.uniform(0.75, 1.25)
image = ImageEnhance.Color(image).enhance(factor)
# random contrast
factor = np.random.uniform(0.75, 1.25)
image = ImageEnhance.Contrast(image).enhance(factor)
# random sharpness
factor = np.random.uniform(0.75, 1.25)
image = ImageEnhance.Sharpness(image).enhance(factor)
return image, label
def sync_val_transform(self, image, label):
crop_size = self.crop_size
short_size = self.base_size
w, h = image.size
# # resize, preserving the aspect ratio
if h > w:
out_w = short_size
out_h = int(1.0 * h / w * out_w)
else:
out_h = short_size
out_w = int(1.0 * w / h * out_h)
image = image.resize((out_w, out_h), Image.BILINEAR)
label = label.resize((out_w, out_h), Image.NEAREST)
# # center crop
w, h = image.size
x1 = int(round((w - crop_size) / 2.))
y1 = int(round((h - crop_size) / 2.))
image = image.crop((x1, y1, x1 + crop_size, y1 + crop_size))
label = label.crop((x1, y1, x1 + crop_size, y1 + crop_size))
return image, label
def get_path_pairs(self):
return self.image_path, self.label_path
def context_mapper_train(sample):
image_path, label_path, context = sample
image = Image.open(image_path, mode='r').convert('RGB')
label = Image.open(label_path, mode='r')
image, label = context.sync_transform(image, label)
image_array =
|
np.array(image)
|
numpy.array
|
import os
import shutil
from collections import defaultdict
from statistics import mean, stdev
import numpy as np
import pytest
from jina import Document, DocumentArray, Executor, requests, DocumentArrayMemmap
from pympler import asizeof, tracker
from .pages import Pages
from .utils.timecontext import TimeContext
NUM_REPETITIONS = 5
NUM_REQUESTS = 100
TARGET_FILE = 'searchers_compare.json'
def _get_docs(number_of_documents, embedding_size):
return [
Document(embedding=
|
np.random.rand(embedding_size)
|
numpy.random.rand
|
# NN form Scratch in python
# print('hello NN')
import numpy as np
def sigmoid_derivatives(x):
return x * (1 - x)
def sigmoind(x):
return 1 / (1 + np.exp(-x))
traning_input = np.array([[0, 0, 1],
[1, 1, 1],
[1, 0, 1],
[0, 1, 1]])
traning_output = np.array([[0, 1, 1, 0]]).T
np.random.seed(1)
synaptic_weights = 2 *
|
np.random.random((3, 1))
|
numpy.random.random
|
import matplotlib.pyplot as plt
import numpy as np
import analysis.evol_lambda as evl
#import analysis.Major_Minor_accretion as mma
import analysis.misc as amsc
import tree.ctutils as ctu
import utils.match as mtc
def gaussian_fit(ax, data, dx, color='green'):
import matplotlib.mlab as mlab
mean = np.mean(data)
variance = np.var(data)
sigma = np.sqrt(variance)
x = np.linspace(min(data), max(data), 100)
scale = len(data)*dx
ax.plot(mlab.normpdf(x,mean,sigma)*scale, x, color=color)
ax.text(0.1, 0.9, "mean = {:.2f}\n sig = {:.2f}".format(mean,sigma),
horizontalalignment='left',
verticalalignment='top',
transform=ax.transAxes)
def fwhm(xx, curve):
from scipy.interpolate import UnivariateSpline
x = np.linspace(min(xx), max(xx), 100)
spline = UnivariateSpline(x, curve-np.max(curve)/2, s=0) # Find FWHM location
r1, r2 = spline.roots()
def plot_violin(mpgs,
mstar_cut_hard = 5e9,
massive_cut=1e10,
fname="",
base='./',
use_seaborn=True,
scale = "width",
pallette="muted",
linewidth = 0.8,
bw = 0.1,
gridsize=10):
dlt_all=[]
dlo_all=[]
dlM_all=[]
dlm_all=[]
mass_all=[]
for igal, gal in enumerate(mpgs):
if gal.data["mstar"][0] > mstar_cut_hard:
dlt_all.append(gal.dlt) # Total
dlo_all.append(gal.dlo) # Other
dlM_all.append(gal.dlM) # Major
dlm_all.append(gal.dlm) # minor
mass_all.append(gal.data["mstar"][0]) #
#print(gal.dlM, gal.dlm, gal.dlt, gal.data["mstar"][0])
# try:
# mass_all.append(gal.data["mstar"][0].repeat(len(gal.dlm)))
# except:
dlM_all = remove_nan(np.array(dlM_all))
dlm_all = remove_nan(np.array(dlm_all))
dlo_all = remove_nan(np.array(dlo_all))
mass_all = np.tile(remove_nan(np.array(mass_all)),3)
#print(dlM_all)
#print(dlm_all)
#print(dlt_all)
#print(mass_all)
#x = x[~numpy.isnan(x)]
if use_seaborn:
import pandas as pd
# Prepare data in Pandas DataFrame format.
val =
|
np.concatenate((dlM_all, dlm_all, dlt_all))
|
numpy.concatenate
|
import datetime, math
import os
import numpy as np
#from Scientific.IO import NetCDF
import netCDF4
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import spectra_mole.VIS_Colormaps as VIS_Colormaps
import spectra_mole.viridis as viridis
class pltRange():
def __init__(self, time=[0, -1], height=[0, -1]):
self.t_bg = time[0]
self.t_ed = time[-1]
self.h_bg = height[0]
self.h_ed = height[-1]
# In[4]:
filelist = os.listdir("../output")
#print(filelist)
filelist = [f for f in filelist if "mole_output" in f]
filelist = sorted(filelist)
filename = '../output/20150617_1459_mole_output.nc'
#filename = '../output/20150617_1700_mole_output.nc'
#filename = '../output/20150611_1830_mole_output.nc'
#filename = '../output/20150617_2014_mole_output.nc'
print(len(filelist))
print("filename ", filename)
def run_filename(filename):
savepath = '../plots/region'
if not os.path.isdir(savepath):
os.makedirs(savepath)
f = netCDF4.Dataset(filename, 'r')
time_list = f.variables["timestamp"][:]
range_list = f.variables["range"][:]
dt_list = [datetime.datetime.utcfromtimestamp(time) for time in time_list]
# this is the last valid index
jumps = np.where(np.diff(time_list)>15)[0]
for ind in jumps[::-1].tolist():
print(ind)
# and modify the dt_list
dt_list.insert(ind+1, dt_list[ind]+datetime.timedelta(seconds=10))
rect = pltRange(time=[100, 500], height=[10, 40])
rect = pltRange(time=[0, -1], height=[0, -1])
#rect = pltRange(time=[0, 676], height=[0, -1])
#rect = pltRange(time=[170, -169], height=[0, -1])
#rect = pltRange(time=[0, 1183], height=[0, -1])
# case 0611
# rect = pltRange(time=[0, 341], height=[0, -1])
# case 0625
#rect = pltRange(time=[300, 1190], height=[0, -1])
# second cloud 0130-0400
#rect = pltRange(time=[170, 680], height=[0, 65])
# second cloud 0530-0800
# rect = pltRange(time=[851, 1361], height=[0, 65])
# case 0801
#rect = pltRange(time=[2571, 3086], height=[0, -1])
# case 0612
#rect = pltRange(time=[0, 170], height=[0, 60])
#print(time_list[:-1] - time_list[1:])
quality_flag = f.variables["quality_flag"][:]
wipro_vel = f.variables["v"][:].copy()
wipro_vel_fit = f.variables['v_fit'][:].copy()
print(f.variables.keys())
wipro_ucorr_vel = f.variables["v_raw"][:]
tg_v_term = False
if 'mira_v_term' in f.variables.keys():
tg_v_term = True
print('v_term', tg_v_term)
v_term = f.variables["mira_v_term"][:]
for ind in jumps[::-1].tolist():
v_term = np.insert(v_term, ind+1, np.full(height_list.shape, -99.), axis=0)
v_term = np.ma.masked_less(v_term, -90., copy=True)
wpZ_Bragg = f.variables["Z"][:]
wpZ_raw = f.variables["Z_raw"][:]
mira_Z = f.variables["Z_cr"][:]
mira_Z = np.ma.masked_invalid(mira_Z)
cal_const = f.variables["est_cal_const"][:]
cal_corr = f.variables["cal_corr"][:]
sigma_b = f.variables["sigma_broadening"][:]
wipro_width = f.variables["width"][:]
width_raw = f.variables["width_raw"][:]
width_cr = f.variables["width_cr"][:]
error_diff = f.variables["error_diff"][:]
error_fit = f.variables["error_fit"][:]
for ind in jumps[::-1].tolist():
print(ind)
# add the fill array
quality_flag = np.insert(quality_flag, ind+1, np.full(range_list.shape, -1), axis=0)
wipro_vel = np.insert(wipro_vel, ind+1, np.full(range_list.shape, -99.), axis=0)
wipro_vel_fit = np.insert(wipro_vel_fit, ind+1, np.full(range_list.shape, -99.), axis=0)
wipro_ucorr_vel = np.insert(wipro_ucorr_vel, ind+1, np.full(range_list.shape, -99.), axis=0)
wpZ_Bragg = np.insert(wpZ_Bragg, ind+1, np.full(range_list.shape, -200), axis=0)
wpZ_raw = np.insert(wpZ_raw, ind+1, np.full(range_list.shape, -200), axis=0)
mira_Z = np.insert(mira_Z, ind+1, np.full(range_list.shape, -200), axis=0)
cal_const = np.insert(cal_const, ind+1, np.full(range_list.shape, 1e-200), axis=0)
sigma_b = np.insert(sigma_b, ind+1, np.full(range_list.shape, -1), axis=0)
wipro_width = np.insert(wipro_width, ind+1, np.full(range_list.shape, -99.), axis=0)
width_raw = np.insert(width_raw, ind+1, np.full(range_list.shape, -99.), axis=0)
width_cr = np.insert(width_cr, ind+1, np.full(range_list.shape, -99.), axis=0)
error_diff = np.insert(error_diff, ind+1, np.full(range_list.shape, -99.), axis=0)
error_fit = np.insert(error_fit, ind+1, np.full(range_list.shape, -99.), axis=0)
cal_const = np.ma.masked_less_equal(cal_const, 1e-150, copy=True)
quality_flag = np.ma.masked_less(quality_flag, 0., copy=True)
wipro_vel = np.ma.masked_less(wipro_vel, -90., copy=True)
wipro_ucorr_vel = np.ma.masked_less(wipro_ucorr_vel, -90., copy=True)
wpZ_Bragg = np.ma.masked_less_equal(wpZ_Bragg, -200, copy=True)
wpZ_raw = np.ma.masked_less_equal(wpZ_raw, -200, copy=True)
mira_Z = np.ma.masked_less_equal(mira_Z, -200, copy=True)
cal_const = np.ma.masked_less_equal(cal_const, 1e-200, copy=True)
sigma_b = np.ma.masked_less_equal(sigma_b, -1, copy=True)
wipro_width = np.ma.masked_less(wipro_width, -90., copy=True)
width_raw = np.ma.masked_less(width_raw, -90., copy=True)
width_cr = np.ma.masked_less(width_cr, -90., copy=True)
error_diff = np.ma.masked_less(error_diff, -90., copy=True)
error_fit = np.ma.masked_less(error_fit, -90., copy=True)
wipro_vel = np.ma.masked_where(quality_flag > 3.0, wipro_vel)
wipro_vel_fit = np.ma.masked_where(quality_flag > 3.0, wipro_vel_fit)
#quality_flag = np.ma.masked_where(quality_flag >= 2.0, quality_flag)
np.set_printoptions(threshold='nan')
wipro_ucorr_vel = np.ma.masked_invalid(wipro_ucorr_vel)
# In[5]:
#print(f.variables)
print(f.variables.keys())
#print(f.variables['v'][:])
print(f.variables['v'].units)
print(f.variables['quality_flag'].comment)
print('creation time', f.creation_time)
#print('settings ', f.settings)
# In[6]:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wipro_vel[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.carbonne_map, vmin=-1.5, vmax=1.5)
cbar = fig.colorbar(pcmesh)
#ax.set_xlim([dt_list[0], dt_list[-1]])
#ax.set_ylim([height_list[0], height_list[-1]])
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Velocity [m s$\mathregular{^{-1}}$]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
# ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
# ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(interval=5))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_vel_corr.png"
fig.savefig(savename, dpi=250)
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wipro_ucorr_vel[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.carbonne_map, vmin=-1.5, vmax=1.5)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Velocity [m s$\mathregular{^{-1}}$]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
# ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
# ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(interval=5))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_vel_wp.png"
fig.savefig(savename, dpi=250)
# In[7]:
quality_flag[quality_flag == 5] = 4
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(quality_flag[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.cloudnet_map,
vmin=-0.5, vmax=10.5)
cbar = fig.colorbar(pcmesh, ticks=[0, 1, 2, 3, 4, 5, 6])
cbar.ax.set_yticklabels(["not influenced", "correction reliable",
"plankton", "low SNR",
"noisy spectrum\nmelting layer",
"",
""])
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
#cbar.ax.set_ylabel("Flag", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=13,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_quality_flag.png"
plt.subplots_adjust(right=0.9)
#plt.tight_layout()
fig.savefig(savename, dpi=250)
# In[8]:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(np.log10(cal_const[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed])),
cmap='gist_rainbow', vmin=-16.5, vmax=-13.5)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("RWP Calibration Constant [log10]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_system_parameter.png"
fig.savefig(savename, dpi=250)
# In[9]:
zmax = 10
#zmax = 40
cmap = viridis.viridis
cmap = 'jet'
print('maximum wind profiler ', np.max(wpZ_raw[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]))
am = np.argmax(wpZ_raw[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed])
am = np.unravel_index(am, wpZ_raw[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed].shape)
print(dt_list[rect.t_bg:rect.t_ed][am[0]], range_list[rect.h_bg:rect.h_ed][am[1]])
print('cloud radar ', np.nanmax(10 * np.log10(mira_Z[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed])))
am = np.nanargmax(mira_Z[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed])
am = np.unravel_index(am, mira_Z[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed].shape)
print(dt_list[rect.t_bg:rect.t_ed][am[0]], range_list[rect.h_bg:rect.h_ed][am[1]])
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wpZ_raw[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=cmap, vmin=-35, vmax=zmax)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Reflectivity [dBZ]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_wp_total_reflectivity_jet.png"
fig.savefig(savename, dpi=250)
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wpZ_Bragg[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=cmap, vmin=-35, vmax=zmax)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Reflectivity [dBZ]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_wp_corr_reflectivity_jet.png"
fig.savefig(savename, dpi=250)
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(mira_Z[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=cmap, vmin=-35, vmax=zmax)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Reflectivity [dBZ]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_mira_reflectivity.png"
fig.savefig(savename, dpi=250)
# In[10]:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(sigma_b[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
# normally the range is 1.5 to 4
cmap='gist_rainbow', vmin=1.5, vmax=7)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("sigma_blure [px]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_sigma_blure.png"
fig.savefig(savename, dpi=250)
# In[11]:
if tg_v_term:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
height_list[rect.h_bg:rect.h_ed],
np.transpose(v_term[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.carbonne_map, vmin=-2, vmax=2)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([height_list[rect.h_bg], height_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Terminal velocity [m s$\mathregular{^{-1}}$]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(interval=2))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_terminal_vel.png"
fig.savefig(savename, dpi=250)
np.max(v_term)
# In[12]:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wipro_vel_fit[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.carbonne_map, vmin=-1.5, vmax=1.5)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Velocity [m s$\mathregular{^{-1}}$]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
# ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
# ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(interval=5))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_vel_wp_fit.png"
fig.savefig(savename, dpi=250)
diff_estimates = wipro_vel - wipro_vel_fit
diff_estimates = np.ma.masked_where(quality_flag == 0, diff_estimates)
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(diff_estimates[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=VIS_Colormaps.carbonne_map, vmin=-1., vmax=1.0)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Differenece between the estimates [m s$\mathregular{^{-1}}$]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(interval=2))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_vel_fit1.png"
#fig.savefig(savename, dpi=250)
# In[13]:
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
np.transpose(wipro_width[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed]),
cmap=cmap, vmin=0.01, vmax=1)
cbar = fig.colorbar(pcmesh)
ax.set_xlim([dt_list[rect.t_bg], dt_list[rect.t_ed-1]])
ax.set_ylim([range_list[rect.h_bg], range_list[rect.h_ed-1]])
ax.set_xlabel("Time UTC", fontweight='semibold', fontsize=15)
ax.set_ylabel("Height", fontweight='semibold', fontsize=15)
cbar.ax.set_ylabel("Spectral width [dBZ]", fontweight='semibold', fontsize=15)
ax.xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%H:%M'))
#ax.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
#ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=range(0,61,10)))
ax.xaxis.set_major_locator(matplotlib.dates.HourLocator(byhour=[0,3,6,9,12,15,18,21]))
ax.xaxis.set_minor_locator(matplotlib.dates.MinuteLocator(byminute=[0,30]))
ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(500))
ax.tick_params(axis='both', which='major', labelsize=14,
right=True, top=True, width=2, length=5)
ax.tick_params(axis='both', which='minor', width=1.5,
length=3.5, right=True, top=True)
cbar.ax.tick_params(axis='both', which='major', labelsize=14,
width=2, length=4)
savename = savepath + "/" + dt_list[0].strftime("%Y%m%d_%H%M") + "_wp_corr_width.png"
fig.savefig(savename, dpi=250)
fig, ax = plt.subplots(1, figsize=(10, 5.7))
pcmesh = ax.pcolormesh(matplotlib.dates.date2num(dt_list[rect.t_bg:rect.t_ed]),
range_list[rect.h_bg:rect.h_ed],
|
np.transpose(width_raw[rect.t_bg:rect.t_ed, rect.h_bg:rect.h_ed])
|
numpy.transpose
|
""" Utilities for working with 3D vectors in cartesian space.
note: some of these functions are redundant as they are provided by built-in numpy operations.
a vector is a one dimensional numpy array with 3 elements: x, y, z.
some functions accept a tuple or list of 3 elements as an alternative to a numpy array.
"""
version = '15th November 2021'
import logging
log = logging.getLogger(__name__)
import math as maths
import numpy as np
def radians_from_degrees(deg):
"""Converts angle from degrees to radians."""
return np.radians(deg)
def degrees_from_radians(rad):
"""Converts angle from radians to degrees."""
return np.degrees(rad)
def zero_vector():
"""Returns a zero vector [0.0, 0.0, 0.0]."""
return np.zeros(3)
def v_3d(v):
"""Returns a 3D vector for a 2D or 3D vector."""
assert 2 <= len(v) <= 3
if len(v) == 3:
return v
v3 = np.zeros(3)
v3[:2] = v
return v3
def add(a, b): # note: could just use numpy a + b facility
"""Returns vector sum a+b."""
a = np.array(a)
b = np.array(b)
assert a.size == b.size
return a + b
def subtract(a, b): # note: could just use numpy a - b facility
"""Returns vector difference a-b."""
a = np.array(a)
b = np.array(b)
assert a.size == b.size
return a - b
def elemental_multiply(a, b): # note: could just use numpy a * b facility
"""Returns vector with products of corresponding elements of a and b."""
a = np.array(a)
b = np.array(b)
assert a.size == b.size
return a * b
def amplify(v, scaling): # note: could just use numpy a * scalar facility
"""Returns vector with direction of v, amplified by scaling."""
v = np.array(v)
return scaling * v
def unit_vector(v):
"""Returns vector with same direction as v but with unit length."""
assert 2 <= len(v) <= 3
v = np.array(v, dtype = float)
if np.all(v == 0.0):
return v
return v / maths.sqrt(np.sum(v * v))
def unit_vectors(v):
"""Returns vectors with same direction as those in v but with unit length."""
scaling = np.sqrt(np.sum(v * v, axis = -1))
zero_mask = np.zeros(v.shape, dtype = bool)
zero_mask[
|
np.where(scaling == 0.0)
|
numpy.where
|
import numpy as np
import os as os
import scipy.integrate as scint
import matplotlib as mpl
from scipy.optimize import least_squares
mpl.use('Agg')
from matplotlib.pyplot import cm , step
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.animation as animation
import matplotlib.pyplot as plt
from scipy.signal import savgol_filter
from numpy.fft import fft, fftfreq, fftshift, rfft, rfftfreq
from scipy.integrate import cumtrapz
#searches for closest to value element in array
def find_nearest(array,value):
i = (np.abs(array-value)).argmin()
return int(i)
#This is a workaround until scipy fixes the issue
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
def plotData(sysVar):
print("Plotting datapoints to pdf",end='')
avgstyle = 'dashed'
avgsize = 0.6
expectstyle = 'solid'
expectsize = 1
loavgpercent = sysVar.plotLoAvgPerc #percentage of time evolution to start averaging
loavgind = int(loavgpercent*sysVar.dataPoints) #index to start at when calculating average and stddev
loavgtime = np.round(loavgpercent * (sysVar.deltaT * sysVar.steps * sysVar.plotTimeScale),2)
if sysVar.boolPlotAverages:
print(' with averaging from Jt=%.2f' % loavgtime,end='')
fwidth = sysVar.plotSavgolFrame
ford = sysVar.plotSavgolOrder
params={
'legend.fontsize': sysVar.plotLegendSize,
'font.size': sysVar.plotFontSize,
'mathtext.default' : 'rm' # see http://matplotlib.org/users/customizing.html
}
plt.rcParams['agg.path.chunksize']=0
plt.rcParams.update(params)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'sans-serif', 'sans-serif': ['Arial']})
pp = PdfPages('./plots/plots.pdf')
occfile = './data/occupation.txt'
occ_array = np.loadtxt(occfile)
#multiply step array with time scale
step_array = occ_array[:,0] * sysVar.plotTimeScale
normfile = './data/norm.txt'
norm_array = np.loadtxt(normfile)
#want deviation from 1
norm_array[:,1] = 1 - norm_array[:,1]
entfile = './data/entropy.txt'
ent_array = np.loadtxt(entfile)
if sysVar.boolPlotEngy:
engies = np.loadtxt('./data/hamiltonian_eigvals.txt')
if sysVar.boolPlotDecomp:
stfacts = np.loadtxt('./data/state.txt')
if sysVar.boolTotalEnt:
totentfile = './data/total_entropy.txt'
totent_array = np.loadtxt(totentfile)
if sysVar.boolTotalEnergy:
energyfile = './data/energy.txt'
en_array = np.loadtxt(energyfile)
en0 = en_array[0,1]
en_array[:,1] -= en0
#en_micind = find_nearest(engies[:,1], en0)
#print(' - |(E0 - Emicro)/E0|: %.0e - ' % (np.abs((en0 - engies[en_micind,1])/en0)), end='' )
if sysVar.boolPlotDiagExp:
microexpfile = './data/diagexpect.txt'
microexp = np.loadtxt(microexpfile)
if sysVar.boolPlotOffDiag:
offdiagfile = './data/offdiagonal.txt'
offdiag = np.loadtxt(offdiagfile)
if sysVar.boolPlotOffDiagDens:
offdiagdensfile = './data/offdiagonaldens.txt'
offdiagdens = np.loadtxt(offdiagdensfile)
if sysVar.boolPlotGreen:
greenfile = './data/green.txt'
greendat = np.loadtxt(greenfile)
def complete_system_enttropy():
return 0
#### Complete system Entropy
if(sysVar.boolTotalEnt):
plt.plot(totent_array[:,0]*sysVar.plotTimeScale,totent_array[:,1]*1e13, linewidth =0.6, color = 'r')
plt.grid()
plt.xlabel(r'$J\,t$')
plt.ylabel(r'Total system entropy $/ 10^{-13}$')
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def subsystem_entropy():
return 0
### Subsystem Entropy
plt.plot(step_array,ent_array[:,1], linewidth =0.8, color = 'r')
plt.grid()
if sysVar.boolPlotAverages:
tavg = savgol_filter(ent_array[:,1],fwidth,ford)
plt.plot(step_array,tavg, linewidth = avgsize, linestyle=avgstyle, color = 'black')
plt.xlabel(r'$J\,t$')
plt.ylabel('Subsystem entropy')
plt.tight_layout()
pp.savefig()
plt.clf()
print('.',end='',flush=True)
'''
###FFT
print('')
fourier = np.fft.rfft(ent_array[loavgind:,1])
print(fourier[0].real)
freq = np.fft.rfftfreq(np.shape(ent_array[loavgind:,1])[-1], d=step_array[1])
plt.plot(freq[1:],np.abs(fourier[1:]))
print('')
plt.ylabel(r'$A_{\omega}$')
plt.xlabel(r'$\omega$')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
'''
def single_level_occ():
return 0
### Single-level occupation numbers
for i in range(0,sysVar.m):
plt.plot(step_array,occ_array[:,i+1],label=r'$n_'+str(i)+'$', linewidth =0.5)
if sysVar.boolPlotAverages:
tavg = savgol_filter(occ_array[:,i+1],fwidth,ford)
plt.plot(step_array,tavg, linewidth =avgsize, linestyle=avgstyle, color = 'black')
if sysVar.boolPlotDiagExp:
plt.axhline(y=microexp[i,1], color='purple', linewidth = expectsize, linestyle = expectstyle)
plt.ylabel(r'Occupation number')
plt.xlabel(r'$J\,t$')
plt.legend(loc='upper right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
'''
###FFT
print('')
for i in range(0,sysVar.m):
plt.xlim(xmax=30)
#GK = -i(2n-1)
fourier = (rfft(occ_array[loavgind:,i+1],norm='ortho'))*2 -1
print(fourier[0].real)
freq = rfftfreq(np.shape(occ_array[loavgind:,i+1])[-1], d=step_array[1])
plt.plot(freq,fourier.real,linewidth = 0.05)
plt.plot(freq,fourier.imag,linewidth = 0.05)
plt.ylabel(r'$G^K_{\omega}$')
plt.xlabel(r'$\omega$')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
'''
def bath_occ():
return 0
### Traced out (bath) occupation numbers
for i in sysVar.kRed:
plt.plot(step_array,occ_array[:,i+1],label=r'$n_'+str(i)+'$', linewidth =0.6)
if sysVar.boolPlotDiagExp:
plt.axhline(y=microexp[i,1], color='purple', linewidth = expectsize, linestyle = expectstyle)
if sysVar.boolPlotAverages:
tavg = savgol_filter(occ_array[:,i+1],fwidth,ford)
plt.plot(step_array,tavg, linewidth = avgsize, linestyle=avgstyle, color = 'black')
plt.ylabel(r'Occupation number')
plt.xlabel(r'$J\,t$')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def system_occ():
return 0
### Leftover (system) occupation numbers
for i in np.arange(sysVar.m)[sysVar.mask]:
plt.plot(step_array,occ_array[:,i+1],label=r'$n_'+str(i)+'$', linewidth =0.6)
if sysVar.boolPlotDiagExp:
plt.axhline(y=microexp[i,1], color='purple', linewidth = expectsize, linestyle = expectstyle)
if sysVar.boolPlotAverages:
tavg = savgol_filter(occ_array[:,i+1],fwidth,ford)
plt.plot(step_array,tavg, linewidth = avgsize, linestyle=avgstyle, color = 'black')
plt.ylabel(r'Occupation number')
plt.xlabel(r'$J\,t$')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def subsystem_occupation():
return 0
### Subsystems occupation numbers
#store fluctuations in a data
fldat = open('./data/fluctuation.txt','w')
fldat.write('N_tot: %i\n' % (sysVar.N))
tmp = np.zeros(len(step_array))
for i in sysVar.kRed:
tmp += occ_array[:,i+1]
plt.plot(step_array,tmp,label="bath", linewidth =0.8, color = 'magenta')
if sysVar.boolPlotAverages:
tavg = savgol_filter(tmp,fwidth,ford)
plt.plot(step_array,tavg, linewidth = avgsize, linestyle=avgstyle, color = 'black')
if sysVar.boolPlotDiagExp:
mictmp = 0
for i in sysVar.kRed:
mictmp += microexp[i,1]
plt.axhline(y=mictmp, color='purple', linewidth = expectsize, linestyle = expectstyle)
avg = np.mean(tmp[loavgind:],dtype=np.float64)
stddev = np.std(tmp[loavgind:],dtype=np.float64)
fldat.write('bath_average: %.16e\n' % avg)
fldat.write('bath_stddev: %.16e\n' % stddev)
fldat.write('bath_rel._fluctuation: %.16e\n' % (stddev/avg))
tmp.fill(0)
for i in np.arange(sysVar.m)[sysVar.mask]:
tmp += occ_array[:,i+1]
plt.plot(step_array,tmp,label="system", linewidth =0.8, color = 'darkgreen')
if sysVar.boolPlotAverages:
tavg = savgol_filter(tmp,fwidth,ford)
plt.plot(step_array,tavg, linewidth = avgsize, linestyle=avgstyle, color = 'black')
if sysVar.boolPlotDiagExp:
mictmp = 0
for i in np.arange(sysVar.m)[sysVar.mask]:
mictmp += microexp[i,1]
plt.axhline(y=mictmp, color='purple', linewidth = expectsize, linestyle = expectstyle)
avg = np.mean(tmp[loavgind:],dtype=np.float64)
stddev = np.std(tmp[loavgind:],dtype=np.float64)
fldat.write('system_average: %.16e\n' % avg)
fldat.write('system_stddev: %.16e\n' % stddev)
fldat.write('system_rel._fluctuation: %.16e\n' % (stddev/avg))
for i in range(sysVar.m):
avg = np.mean(occ_array[loavgind:,i+1],dtype=np.float64)
stddev = np.std(occ_array[loavgind:,i+1],dtype=np.float64)
fldat.write('n%i_average: %.16e\n' % (i,avg))
fldat.write('n%i_stddev: %.16e\n' % (i,stddev))
fldat.write('n%i_rel._fluctuation: %.16e\n' % (i,(stddev/avg)))
avg = np.mean(ent_array[loavgind:,1],dtype=np.float64)
stddev = np.std(ent_array[loavgind:,1],dtype=np.float64)
fldat.write('ssentropy_average: %.16e\n' % avg)
fldat.write('ssentropy_stddev: %.16e\n' % stddev)
fldat.write('ssentropy_rel._fluctuation: %.16e\n' % (stddev/avg))
fldat.close()
plt.ylabel(r'Occupation number')
plt.xlabel(r'$J\,t$')
plt.legend(loc='center right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def occ_distribution():
return 0
#occupation number in levels against level index
occavg = np.loadtxt('./data/fluctuation.txt', usecols=(1,))
plt.xlim(-0.1,sysVar.m-0.9)
for l in range(0,sysVar.m):
plt.errorbar(l,occavg[int(7 + 3*l)]/sysVar.N,xerr=None,yerr=occavg[int(8 + 3*l)]/sysVar.N,marker='o',color=cm.Set1(0))
plt.ylabel(r'Relative level occupation')
plt.xlabel(r'Level index')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def sum_offdiagonals():
return 0
#sum of off diagonal elements in energy eigenbasis
if sysVar.boolPlotOffDiag:
for i in range(0,sysVar.m):
plt.plot(step_array,offdiag[:,i+1],label=r'$n_'+str(i)+'$', linewidth =0.5)
plt.ylabel(r'Sum of off diagonals')
plt.xlabel(r'$J\,t$')
plt.legend(loc='upper right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
dt = offdiag[1,0]-offdiag[0,0]
nrm = offdiag[:,0]/dt
nrm[1:] = 1/nrm[1:]
for i in range(0,sysVar.m):
###### only sum (subsystem-thermalization)
plt.ylabel('Sum of off diagonals in $n^{%i}$' % (i))
# start at 10% of the whole x-axis
lox = (offdiag[-1,0]-offdiag[0,0])/10 + offdiag[0,0]
hiy = offdiag[ int(len(offdiag[:,0])/10) ,0] * 1.1
plt.plot(offdiag[:,0],offdiag[:,i+1],linewidth = 0.5)
plt.xlim(xmin=lox)
plt.ylim(ymax=hiy)
plt.grid()
plt.tight_layout()
###inlay with the whole deal
a = plt.axes([0.62, 0.6, 0.28, 0.28])
a.plot(offdiag[:,0],offdiag[:,i+1],linewidth = 0.8)
a.set_xticks([])
a.set_yticks([])
###
pp.savefig()
plt.clf()
plt.ylabel('Sum of off diagonals in $n^{%i}$' % (i))
plt.semilogy(offdiag[:,0],np.abs(offdiag[:,i+1]),linewidth = 0.5)
plt.xlim(xmin=lox)
plt.ylim(ymin=1e-2)
plt.grid()
plt.tight_layout()
###inlay with the whole deal
a = plt.axes([0.62, 0.6, 0.28, 0.28])
a.semilogy(offdiag[:,0],offdiag[:,i+1],linewidth = 0.8)
a.set_ylim(ymin=1e-2)
a.set_xticks([])
a.set_yticks([])
###
pp.savefig()
plt.clf()
###### average (eigenstate-thermalization)
f, (ax1, ax2) = plt.subplots(2, sharex=False, sharey=False)
tmp = cumtrapz(offdiag[:,i+1],offdiag[:,0],initial=offdiag[0,i+1])
tmp = np.multiply(tmp,nrm)
f.text(0.03, 0.5, 'Average of summed off diagonals in $n^{%i}$' % (i), ha='center', va='center', rotation='vertical')
ax1.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
ax1.plot(offdiag[:,0],tmp,linewidth = 0.5)
ax1.grid()
ax2.loglog(offdiag[:,0],np.abs(tmp),linewidth = 0.5)
ax2.set_ylim(bottom=1e-4)
ax2.grid()
plt.tight_layout()
plt.subplots_adjust(left=0.12)
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def sum_offdiagonalsdens():
return 0
if sysVar.boolPlotOffDiagDens:
plt.plot(step_array,offdiagdens[:,1], linewidth =0.5)
plt.ylabel(r'Sum of off diagonals (red. dens. mat.)')
plt.xlabel(r'$J\,t$')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
def total_energy():
return 0
### Total system energy
if sysVar.boolTotalEnergy:
plt.title('$E_{tot}, \; E_0$ = %.2e' % en0)
plt.plot(en_array[:,0]*sysVar.plotTimeScale,en_array[:,1]*1e10, linewidth =0.6)
plt.ylabel(r'$E_{tot} - E_0 / 10^{-10}$')
plt.xlabel(r'$J\,t$')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def norm_deviation():
return 0
### Norm deviation
plt.plot(step_array,norm_array[:,1], "ro", ms=0.5)
plt.ylabel('norm deviation from 1')
plt.xlabel(r'$J\,t$')
plt.grid(False)
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
###
plt.title('State Norm multiplied (deviation from 1)')
plt.plot(step_array,norm_array[:,2]-1, linewidth =0.6, color = 'r')
plt.ylabel('correction factor - 1')
plt.xlabel(r'$J\,t$')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def eigenvalues():
return 0
### Hamiltonian eigenvalues (Eigenenergies)
if sysVar.boolPlotEngy:
linearize = False
if linearize:
tap = []
lal = -1
for e in engies[:,1]:
if lal == -1:
tap.append(e)
lal += 1
elif np.abs(e - tap[lal]) > 1:
lal += 1
tap.append(e)
plt.plot(tap,linestyle='none',marker='o',ms=0.5,color='blue')
else:
plt.plot(engies[:,0],engies[:,1],linestyle='none',marker='o',ms=0.5,color='blue')
plt.ylabel(r'E/J')
plt.xlabel(r'\#')
plt.grid(False)
plt.xlim(xmin=-(len(engies[:,0]) * (2.0/100) ))
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def density_of_states():
return 0
### DOS
if sysVar.boolPlotDOS:
dos = np.zeros(sysVar.dim)
window = 50
iw = window
for i in range(iw,sysVar.dim-iw):
dos[i] = (window)*2/(engies[i+iw,1] - engies[i-iw,1])
dos /= (sysVar.dim-iw)
print(scint.simps(dos[iw:], engies[iw:,1]))
plt.plot(engies[:,1],dos,lw=0.005)
plt.ylabel(r'DOS')
plt.xlabel(r'E')
plt.grid(False)
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
def greensfunction():
return 0
### Greensfunction
if sysVar.boolPlotGreen:
gd = greendat
'''
gd = np.zeros((np.shape(greendat)[0]*2,np.shape(greendat)[1]))
gd[int(np.shape(greendat)[0]/2):-int(np.shape(greendat)[0]/2 + np.shape(greendat)[0]%2)] = greendat[:,:].copy()
'''
spec = []
discpoints = len(gd[:,0])
print('')
for i in range(0,sysVar.m):
plt.title(r'two time Green function of level $%i$' % (i))
ind = 2*i + 1
plt.plot(greendat[:,0]*sysVar.plotTimeScale,greendat[:,ind],lw=0.1,color='red',label='real')
plt.plot(greendat[:,0]*sysVar.plotTimeScale,greendat[:,ind+1],lw=0.1,color='blue',label='imaginary')
#plt.xlim(xmax=10)
plt.ylabel(r'$G^R(\tau)$')
plt.xlabel(r'$J\,\tau$')
plt.legend(loc='lower right')
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
###
plt.title(r'Spectral function of level $%i$' % (i))
green_ret = gd[:,ind] + 1j * gd[:,ind+1]
green_ret_freq = fft(np.hanning(len(green_ret))*green_ret,norm='ortho')
spec_tmp = np.abs(-2*fftshift(green_ret_freq.imag))[::-1]
if i == 0:
samp_spacing = sysVar.deltaT * (sysVar.steps / sysVar.dataPoints) * sysVar.plotTimeScale
hlpfrq = fftshift(fftfreq(len(spec_tmp)))*(2*np.pi)/samp_spacing
### !!! normalize by hand! this might be strange but is necessary here
spec_tmp /= (np.trapz(spec_tmp,x=hlpfrq)/(2*np.pi))
if i == 0:
spec_total = spec_tmp[:]
# scale on x-axis is frequency
else:
spec_total += spec_tmp
spec.append(spec_tmp)
print(i,np.trapz(spec_tmp, x = hlpfrq)/(2*np.pi))
#exit()
plt.plot(hlpfrq,spec_tmp,color = 'red',lw=0.1)
plt.minorticks_on()
plt.ylabel(r'$A$')
plt.xlabel(r'$\omega / J$')
plt.grid()
plt.grid(which='minor', color='blue', linestyle='dotted', lw=0.2)
plt.tight_layout()
###
pp.savefig()
plt.clf()
plt.title(r'Spectral function')
plt.plot(hlpfrq,spec_total,color = 'red',lw=0.1)
plt.ylabel(r'$A$')
plt.xlabel(r'$\omega / J$')
#plt.xlim([-100,100])
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
plt.plot(hlpfrq,np.abs(spec_total),color = 'red',lw=0.1)
plt.ylabel(r'$|A|$')
plt.xlabel(r'$\omega / J$')
#plt.xlim([-100,100])
plt.grid()
plt.tight_layout()
###
pp.savefig()
plt.clf()
print('.',end='',flush=True)
print()
weights = np.zeros(len(spec))
for s in range(0,len(spec)):
print(np.average(hlpfrq,weights=spec[s]), np.average(hlpfrq,weights=np.abs(spec[s])))
weights[s] = np.abs(np.average(hlpfrq,weights=np.abs(spec[s])))
print('')
'''
# the integrated version
def occno(spec,freq,temp,mu):
rt = []
for i in range(0,len(freq)):
rt.append(spec[i]/(np.exp((freq[i]-mu)/temp)-1.0))
return np.trapz(np.array(rt), x=freq)
'''
# the averaged version
def occno(freq,temp,mu):
return (1/(np.exp((freq-mu)/temp)-1.0))
def bestatd(args):
temp = args[0]
mu = args[1]
ret =[]
for i in range(0,sysVar.m):
ret.append(occno(weights[i],temp,mu) - occavg[int(7 + 3*i)])
return np.array(ret)
def bestat(args):
temp = args[0]
mu = args[1]
ret =[]
for i in range(0,sysVar.m):
ret.append(occno(weights[i],temp,mu))
return np.array(ret)
strt = np.array([10,-0.1])
bnds = np.array([[0.0001,-500],[1000,weights[0]]])
rgs = least_squares(bestatd,x0=strt,bounds=bnds,loss='soft_l1')
print(rgs)
print(rgs.x)
print(bestat(rgs.x))
a = []
for i in range(0,sysVar.m):
a.append(occavg[int(7+3*i)])
print(a)
#occupation number in levels against renormalized energy
plt.title('Bose-Einstein distribution fit')
ws =
|
np.sort(weights)
|
numpy.sort
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 5 07:26:24 2018
@author: cjburke
Utilities for handling of eccentricity for transit model
fitting. Kepler equation solver plus
find the true anomaly that minimizes the separation between planets/stars
This is needed to get true anomaly equivalent to the mid transit time
"""
import numpy as np
import scipy.optimize as opt
import matplotlib.pyplot as plt
import timeit
import scipy.interpolate as intp
from ke import E
import theano
import theano.tensor as tt
import exoplanet as xo
#@profile
def ekepl2(emin, ein, iterations=2):
"""
Returns E that solves Kepler's equation em=E-esinE
Based upon Odell and Gooding 1986, Celestial Mechanics.
Two or three iterations are needed; also vectorized with numpy.
"""
if
|
np.isscalar(emin)
|
numpy.isscalar
|
"""
Unit test for babelscan
"""
import numpy as np
import matplotlib.pyplot as plt
import babelscan
print('####################################################')
print('############## babelscan unit tests ################')
print('####################################################')
print('\n')
print(babelscan.module_info())
file = r"C:\Users\dgpor\Dropbox\Python\ExamplePeaks\810002.nxs" # eta scan with pilatus
cv_file = r"C:\Users\dgpor\Dropbox\Python\ExamplePeaks\857991.nxs" # trajectory scan/ cvscan/ kthZebra
im_file = r'C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\\872996.nxs' # hkl scan with data
dat_file = r'C:\\Users\\dgpor\\OneDrive - Diamond Light Source Ltd\\I16\\Nexus_Format\\example_nexus\\872996.dat'
datadir = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\I16\Nexus_Format\example_nexus" # eta scan with pilatus
rsmap = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\I16\Nexus_Format\example_nexus\872996-pilatus3_100k-files\rsmap_872996_201215_101906.nxs"
i10_file = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\I16\Nexus_Format\I10_nexus\i10-578596.nxs"
i06_file = r"C:\Users\dgpor\OneDrive - Diamond Light Source Ltd\I16\Nexus_Format\I06_example\227980.dat"
print('\n\n############ File Type Tests ##############')
print('standard I16 eta scan:')
scan = babelscan.file_loader(file)
print(scan)
print('\nI16 CV scan:')
scan = babelscan.file_loader(cv_file)
print(scan)
print('\nI16 hkl scan:')
scan = babelscan.file_loader(im_file)
print(scan)
print('\nI16 .dat file:')
scan = babelscan.file_loader(dat_file)
print(scan)
print('\nI16 rsmap file:')
scan = babelscan.file_loader(rsmap)
print(scan)
print('\nI10 Nexus file:')
scan = babelscan.file_loader(i10_file)
print(scan)
print('\nI06 .dat file:')
scan = babelscan.file_loader(i06_file, scan_command_name='command')
print(scan)
print('\n\n############ Missing count_time Tests ##############')
scan = babelscan.file_loader(file, debug='all')
scan.add2namespace(['count_time', 'counttime', 'Time', 't'], None, 'count_time')
print(scan)
print('\n\n')
print(scan('count_time'))
print('\n\n')
print(scan('nroi[31,31]'))
print('\n\n############### FolderMonitor Tests ################')
exp = babelscan.FolderMonitor(datadir)
scan = exp.scan(0)
print(scan)
print('\n\n##################### Plot Tests ###################')
scan = exp.scan(794940)
x, y, dy, xlab, ylab = scan.get_plot_data('axes', 'nroi_peak[31,31]', '/count_time/Transmission', 'np.sqrt(x+0.1)')
plt.figure()
plt.errorbar(x, y, dy, fmt='-o')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(scan.title())
scan.plot.plot_image('sum', clim=[0, 100])
plt.show()
print('\n\n##################### Fit Tests ###################')
scan = exp(877619) # merlin
scan.fit('axes', 'nroi_peak[31, 31]')
scan.plot('axes', ['nroi_peak[31, 31]', 'fit'])
print(scan.string('amplitude'))
scan = exp.scan(794940) # multipeak
scan.fit.multi_peak_fit(npeaks=2)
scan.plot('axes', ['signal', 'fit', 'p1_fit', 'p2_fit', 'bkg_fit'])
plt.show()
print('\n\n################# MultiScan Tests ##################')
scan_range = range(794932, 794947, 1) # datadir, sperp, spara, eta scans
scans = exp.scans(scan_range, ['sperp', 'spara'])
print(scans)
print('\n\n################### Volume Tests ###################')
scan = babelscan.file_loader(im_file)
volume = scan.volume()
print('%r, %s' % (scan, scan.find_image()))
print(volume)
print(np.max(volume))
print(volume.peak_search())
scan1 = babelscan.file_loader(dat_file)
volume1 = scan1.volume()
print('\n%r' % scan1)
print(volume1)
print(
|
np.max(volume1)
|
numpy.max
|