| prompt | completion | api |
| --- | --- | --- |
| string (15–655k chars) | string (3–32.4k chars) | string (8–52 chars) |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
from matplotlib.text import Text
import matplotlib.transforms as mtransforms
from .frame import RectangularFrame
class AxisLabels(Text):
def __init__(self, frame, minpad=1, *args, **kwargs):
self._frame = frame
super(AxisLabels, self).__init__(*args, **kwargs)
self.set_clip_on(True)
self.set_visible_axes('all')
self.set_ha('center')
self.set_va('center')
self._minpad = minpad
def get_minpad(self, axis):
try:
return self._minpad[axis]
except TypeError:
return self._minpad
def set_visible_axes(self, visible_axes):
self._visible_axes = visible_axes
def get_visible_axes(self):
if self._visible_axes == 'all':
return self._frame.keys()
else:
return [x for x in self._visible_axes if x in self._frame]
def set_minpad(self, minpad):
self._minpad = minpad
def draw(self, renderer, bboxes, ticklabels_bbox_list, visible_ticks):
if not self.get_visible():
return
text_size = renderer.points_to_pixels(self.get_size())
for axis in self.get_visible_axes():
padding = text_size * self.get_minpad(axis)
# Find position of the axis label. For now we pick the mid-point
# along the path but in future we could allow this to be a
# parameter.
x_disp, y_disp = self._frame[axis].pixel[:, 0], self._frame[axis].pixel[:, 1]
d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))])
xcen = np.interp(d[-1] / 2., d, x_disp)
ycen = np.interp(d[-1] / 2., d, y_disp)
# Find segment along which the mid-point lies
imin = np.searchsorted(d, d[-1] / 2.) - 1
# Find normal of the axis label facing outwards on that segment
normal_angle = self._frame[axis].normal_angle[imin] + 180.
label_angle = (normal_angle - 90.) % 360.
if label_angle < 225 and label_angle > 135:
label_angle += 180
self.set_rotation(label_angle)
# Find label position by looking at the bounding box of ticks'
# labels and the image. It sets the default padding at 1 times the
# axis label font size which can also be changed by setting
# the minpad parameter.
if isinstance(self._frame, RectangularFrame):
if len(ticklabels_bbox_list) > 0:
ticklabels_bbox = mtransforms.Bbox.union(ticklabels_bbox_list)
else:
ticklabels_bbox = None
if axis == 'l':
if axis in visible_ticks and ticklabels_bbox is not None:
left = ticklabels_bbox.xmin
else:
left = xcen
xpos = left - padding
self.set_position((xpos, ycen))
elif axis == 'r':
if axis in visible_ticks and ticklabels_bbox is not None:
right = ticklabels_bbox.x1
else:
right = xcen
xpos = right + padding
self.set_position((xpos, ycen))
elif axis == 'b':
if axis in visible_ticks and ticklabels_bbox is not None:
bottom = ticklabels_bbox.ymin
else:
bottom = ycen
ypos = bottom - padding
self.set_position((xcen, ypos))
elif axis == 't':
if axis in visible_ticks and ticklabels_bbox is not None:
top = ticklabels_bbox.y1
else:
top = ycen
ypos = top + padding
self.set_position((xcen, ypos))
else: # arbitrary axis
dx = np.cos(np.radians(normal_angle))  # api: numpy.radians
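The completion above finishes the mid-point-along-path computation in `draw()`. As a standalone illustration (with a made-up example path), the cumulative-arc-length trick looks like this:

```python
import numpy as np

# Example polyline; the coordinates are made up for illustration.
x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.0, 1.0, 0.0, 1.0])

# Cumulative arc length along the polyline, starting at 0.
d = np.hstack([0.0, np.cumsum(np.sqrt(np.diff(x) ** 2 + np.diff(y) ** 2))])

# Coordinates of the point halfway along the path.
xcen = np.interp(d[-1] / 2.0, d, x)
ycen = np.interp(d[-1] / 2.0, d, y)

# Segment containing the midpoint (used above to pick the outward normal).
imin = np.searchsorted(d, d[-1] / 2.0) - 1
print(xcen, ycen, imin)
```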
import math
from collections import defaultdict
from itertools import chain
from pprint import pprint
import bpy
import numpy as np
from typing import List
from ....bpy_utilities.utils import get_material
from ..datatypes.material_sort import MaterialSort
from ..datatypes.mesh import Mesh, VertexType
from ..datatypes.model import RespawnModel
from ..datatypes.texture_data import TextureData
from ..datatypes.texture_info import TextureInfo
from ..entities.base_entity_handler import BaseEntityHandler
from ..entities.r1_entity_classes import entity_class_handle, worldspawn, func_window_hint, trigger_indoor_area, \
trigger_capture_point, trigger_out_of_bounds, trigger_soundscape, Base
class TitanfallEntityHandler(BaseEntityHandler):
entity_lookup_table = entity_class_handle
def _load_brush_model(self, model_id, model_name):
objs = []
model: RespawnModel = self._bsp.get_lump("LUMP_MODELS").models[model_id]
tex_data: List[TextureData] = self._bsp.get_lump("LUMP_TEXDATA").texture_data
indices: np.ndarray = self._bsp.get_lump("LUMP_INDICES").indices
bsp_vertices: np.ndarray = self._bsp.get_lump('LUMP_VERTICES').vertices
grouped_by_lightmap = defaultdict(list)
for mesh_id in range(model.first_mesh, model.first_mesh + model.mesh_count):
mesh: Mesh = self._bsp.get_lump("LUMP_MESHES").meshes[mesh_id]
material_sort: MaterialSort = self._bsp.get_lump('LUMP_MATERIALSORT').materials[mesh.material_sort]
grouped_by_lightmap[material_sort.lightmap_header_index].append(mesh_id)
for lightmap_id, meshes in grouped_by_lightmap.items():
merged_vertex_ids = np.array([], np.uint32)
merged_uv_data = np.array([], np.float32)
merged_lightmap_uv_data = np.array([], np.float32)
merged_materials_ids = np.array([], np.uint32)
material_indices = []
l_headers = self._bsp.get_lump('LUMP_LIGHTMAP_HEADERS').lightmap_headers
l_data = self._bsp.get_lump('LUMP_LIGHTMAP_DATA_SKY').lightmap_data
offset = 0
for n, header in enumerate(l_headers):
for c in range(header.count + 1):
pixel_count = header.width * header.height
if n == lightmap_id:
name = f'lightmap_{n}_{c}'
if name in bpy.data.images:
continue
pixel_data: np.ndarray = l_data[offset:offset + pixel_count]
pixel_data = pixel_data.astype(np.float32) / 255
image = bpy.data.images.get(name, None) or bpy.data.images.new(
name,
width=header.width,
height=header.height,
alpha=True,
)
image.filepath = name + '.tga'
image.alpha_mode = 'CHANNEL_PACKED'
image.file_format = 'TARGA'
if bpy.app.version > (2, 83, 0):
image.pixels.foreach_set(pixel_data.flatten().tolist())
else:
image.pixels[:] = pixel_data.flatten().tolist()
image.pack()
offset += pixel_count
for mesh_id in meshes:
print(f'Loading Mesh {mesh_id - model.first_mesh}/{model.mesh_count} from {model_name}')
mesh: Mesh = self._bsp.get_lump("LUMP_MESHES").meshes[mesh_id]
material_sort: MaterialSort = self._bsp.get_lump('LUMP_MATERIALSORT').materials[mesh.material_sort]
material_data = tex_data[material_sort.texdata_index]
if material_data.name in material_indices:
mat_id = material_indices.index(material_data.name)
else:
mat_id = len(material_indices)
material_indices.append(material_data.name)
if mesh.flags & 0x200 > 0:
vertex_info_lump = self._bsp.get_lump("LUMP_BUMPLITVERTEX").vertex_info
elif mesh.flags & 0x400 > 0:
vertex_info_lump = self._bsp.get_lump("LUMP_UNLITVERTEX").vertex_info
elif mesh.flags & 0x600 > 0:
vertex_info_lump = self._bsp.get_lump("LUMP_UNLITTSVERTEX").vertex_info
else:
raise NotImplementedError(f'Unknown mesh format {mesh.flags:016b}')
mesh_indices = indices[
mesh.triangle_start:mesh.triangle_start + mesh.triangle_count * 3].astype(
np.uint32) + material_sort.vertex_offset
used_vertices_info = vertex_info_lump[mesh_indices]
vertex_indices = used_vertices_info['vpi'].flatten()
uvs = used_vertices_info['uv']
uvs[:, 1] = 1 - uvs[:, 1]
merged_vertex_ids = np.append(merged_vertex_ids, vertex_indices, axis=0)
if merged_uv_data.shape[0] == 0:
merged_uv_data = uvs
else:
merged_uv_data = np.append(merged_uv_data, uvs, axis=0)
if mesh.flags & 0x200 > 0:
uvs_lm = used_vertices_info['uv_lm']
uvs_lm[:, 1] = 1 - uvs_lm[:, 1]
if merged_lightmap_uv_data.shape[0] == 0:
merged_lightmap_uv_data = uvs_lm
else:
merged_lightmap_uv_data = np.append(merged_lightmap_uv_data, uvs_lm, axis=0)
else:
if merged_lightmap_uv_data.shape[0] == 0:
merged_lightmap_uv_data = np.zeros_like(uvs)
else:
merged_lightmap_uv_data = np.append(merged_lightmap_uv_data, np.zeros_like(uvs), axis=0)  # api: numpy.zeros_like
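For reference, the index remapping in `_load_brush_model` (slice the global index buffer, shift by the material sort's vertex offset, then gather per-vertex info) can be sketched in plain numpy; all values below are made up:

```python
import numpy as np

indices = np.array([0, 1, 2, 1, 2, 3], np.uint16)   # global index buffer
triangle_start, triangle_count = 0, 2               # from the Mesh lump
vertex_offset = 10                                  # from MaterialSort

mesh_indices = indices[triangle_start:triangle_start + triangle_count * 3]
mesh_indices = mesh_indices.astype(np.uint32) + vertex_offset

# Per-vertex info lump as a structured array, like the LUMP_*VERTEX lumps.
vertex_info = np.zeros(16, dtype=[('vpi', np.uint32), ('uv', np.float32, 2)])
used = vertex_info[mesh_indices]
print(used['vpi'].shape, used['uv'].shape)          # (6,) (6, 2)
```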
import splinart as spl
import numpy as np
import pytest
from pytest import approx
def test_circle_1():
theta, path = spl.circle([0, 0], 1, npoints=2) # create a splined circle with center [0, 0], radius 1 and 2 points
print(theta)
assert theta == approx([0, 2*np.pi]) # the resulting vector of theta angles for each point
#assert path == approx(np.array([[1, 0], [1, 0]]))
print(path)
assert path == approx(np.array([[1, 0], [1, 0]])) # and the resulting path must be this
def test_circle_2():
theta, path = spl.circle([0, 0], 1, npoints=5)
assert theta == approx(np.linspace(0, 2*np.pi, 5))
assert path == approx(np.array([[1, 0], [0, 1], [-1, 0], [0, -1], [1, 0]]))
def test_circle_10():
"""
test with 10 points
"""
theta, path = spl.circle([0,0],1,npoints=10)
assert theta == approx(np.linspace(0, 2*np.pi, 10))
def test_line_1():
path = spl.line(0, 1, npoints=2)
assert path == approx(np.array([[0, 0.5], [1, 0.5]]))  # api: numpy.array
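The assertions above pin down the contract of `spl.circle`. A minimal implementation consistent with them (not necessarily splinart's own) would be:

```python
import numpy as np

def circle(center, radius, npoints=40):
    theta = np.linspace(0, 2 * np.pi, npoints)
    path = np.stack([center[0] + radius * np.cos(theta),
                     center[1] + radius * np.sin(theta)], axis=1)
    return theta, path
```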
import pyffs
import numpy as np
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from scipy.interpolate import RegularGridInterpolator
from typing import Optional, Union, Iterable, Callable
import periodispline.splines.green.univariate as perspline1d
#plt.style.use('source/custom_style.mplstyle')
class GreenFunctionND:
def __init__(self, orders: Union[Iterable, float, None] = None, periods: Union[Iterable, float, None] = None,
ndim: int = 2, rtol: float = 1e-3, nogibbs: Optional[bool] = None,
cutoffs: Union[Iterable, float, None] = None):
if ndim < 2:
raise ValueError(
'For univariate green functions, use base class periodispline.splines.green.univariate.GreenFunction')
else:
self.ndim = ndim
if periods is None:
periods = 2 * np.pi
orders, periods = np.asarray(orders).reshape(-1), np.asarray(periods).reshape(-1)
if orders.size == self.ndim:
self.orders = orders
elif orders.size == 1:
self.orders = orders * np.ones(shape=(ndim,))
else:
raise ValueError('Argument orders should be of size ndim or one.')
if periods.size == self.ndim:
self.periods = periods
elif periods.size == 1:
self.periods = periods * np.ones(shape=(ndim,))
else:
raise ValueError('Argument periods should be of size ndim or one.')
self.rtol = rtol
if nogibbs is None:
self.nogibbs = (np.min(self.orders) < self.ndim)
self.nogibbs_method = 'fejer' if (np.min(self.orders) < self.ndim) else None
else:
self.nogibbs = nogibbs
self.nogibbs_method = 'fejer' if nogibbs else None
if cutoffs is None:
self.cutoffs = self.get_cutoffs()
else:
self.cutoffs = np.asarray(cutoffs).reshape(-1)
self.bandwidths = 2 * self.cutoffs + 1
self.fourier_coefficients = self._compute_fs_coefficients()
self.interpolated_green_function = self.green_function()
self.colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
def get_cutoffs(self, min_cutoff: int = 16) -> np.ndarray:
cutoffs = np.clip(np.ceil((1 / self.rtol) ** (1 / self.orders)), a_min=min_cutoff, a_max=None)
composite_cutoffs = 2 ** np.ceil(np.log2(cutoffs)).astype(int)
return composite_cutoffs
def _compute_fs_coefficients(self):
pass
def green_function(self, resolution: int = 512) -> Callable:
if self.ndim > 2:
resolution = 128
if self.nogibbs:
ramps = []
for i in range(self.ndim):
ramps.append(1 - np.abs(np.arange(-self.cutoffs[i], self.cutoffs[i] + 1)) / (self.cutoffs[i] + 1))
meshgrids = np.meshgrid(*ramps)
windows = np.prod(np.stack(meshgrids, axis=-1), axis=-1)
fs_coefficients = self.fourier_coefficients * windows
else:
fs_coefficients = self.fourier_coefficients
zero_padding = np.clip(resolution - np.array(fs_coefficients.shape), a_min=1, a_max=None).astype(int)
padding_list = [(0, padder) for padder in zero_padding]
fs_coefficients = np.pad(fs_coefficients, padding_list)
sampled_green_function = fs_coefficients.transpose()
space_samples = []
for axis in range(self.ndim):
sampled_green_function = np.fft.ifftshift(pyffs.iffs(x_FS=sampled_green_function, T=self.periods[axis],
T_c=self.periods[axis] / 2,
N_FS=self.bandwidths[axis], axis=axis), axes=axis)
axis_space_samples = np.fft.ifftshift(pyffs.ffs_sample(T=self.periods[axis], N_FS=self.bandwidths[axis],
T_c=self.periods[axis] / 2,
N_s=sampled_green_function.shape[axis])[0])
space_samples.append(axis_space_samples)
self.space_samples_meshgrid = np.meshgrid(*space_samples)
self.sampled_green_function = sampled_green_function
interpolated_green_function = RegularGridInterpolator(points=space_samples, values=sampled_green_function,
method='linear', bounds_error=False, fill_value=None)
return interpolated_green_function
def __call__(self, x: np.ndarray) -> Union[float, np.ndarray]:
x = x % self.periods
return self.interpolated_green_function(x)
def plot(self, nb_of_periods: int = 2, resolution: int = 128, color: Union[int, str, None] = None,
cmap: Optional[str] = 'RdYlBu_r', plt_type: str = 'wireframe', c: float = 2, a: float = 1,
ratio: float = 0.6):
if self.ndim != 2:
raise NotImplementedError(
'Plotting not supported for multivariate green functions with dimension greater than two.')
x = np.linspace(-nb_of_periods * self.periods[0] / 2, nb_of_periods * self.periods[0] / 2, resolution)
y = np.linspace(-nb_of_periods * self.periods[1] / 2, nb_of_periods * self.periods[1] / 2, resolution)
X, Y = np.meshgrid(x, y)
points = np.stack((X, Y), axis=-1)
Z = np.real(self.__call__(points))
Z /= np.max(np.abs(Z[np.abs(Z) > 0]))
if color is None:
color = self.colors[0]
elif type(color) is int:
color = self.colors[color]
else:
pass
if plt_type == 'wireframe':
fig = plt.figure()
ax3d = fig.add_subplot(111, projection='3d')
ax3d.plot_wireframe(X=X, Y=Y, Z=Z, rcount=resolution, ccount=resolution, colors=color, antialiaseds=True,
linewidths=0.5)
elif plt_type == 'plot_surface':
fig = plt.figure()
ax3d = fig.add_subplot(111, projection='3d')
ax3d.plot_surface(X=X, Y=Y, Z=Z, cmap=cmap, rcount=resolution, ccount=resolution, antialiaseds=True)
elif plt_type == 'pcolormesh':
fig = plt.figure()
plt.pcolormesh(X, Y, Z, cmap=cmap, shading='gouraud', snap=True)
plt.colorbar()
elif plt_type == 'plotly_surface':
fig = go.Figure(data=[go.Surface(z=Z, x=x, y=y, colorscale='Plasma', showscale=False, opacity=0.9)])
# fig.update_traces(contours_z=dict(show=True, usecolormap=True, project_z=True, highlightcolor="limegreen"))
fig.update_layout(width=1280, height=720,
margin=dict(l=0, r=0, b=0, t=0))
# fig.show(renderer="browser")
# fig.write_image("exports/fig1.pdf")
elif plt_type == 'flat_torus':
x_cart = (c + a * np.cos(X)) * np.cos(Y)
y_cart = (c + a * np.cos(X)) * np.sin(Y)
z_cart = a * np.sin(X)
fig = go.Figure(
data=[go.Surface(z=z_cart, x=x_cart, y=y_cart, surfacecolor=Z, colorscale='Plasma', showscale=False)])
fig.update_layout(width=1280, height=720,
margin=dict(l=0, r=0, b=0, t=0))
# fig.show(renderer="browser")
# fig.write_image("exports/fig1.pdf")
elif plt_type == 'bump_torus':
a += ratio * Z / np.max(Z)
x_cart = (c * a + a * np.cos(X)) * np.cos(Y)
y_cart = (c * a + a * np.cos(X)) * np.sin(Y)
z_cart = a * np.sin(X)
fig = go.Figure(
data=[go.Surface(z=z_cart, x=x_cart, y=y_cart, surfacecolor=Z, colorscale='Plasma', showscale=False)])
fig.update_layout(width=1280, height=720,
margin=dict(l=0, r=0, b=0, t=0))
# fig.show(renderer="browser")
elif plt_type == 'surface_torus':
fig = make_subplots(rows=1, cols=2,
specs=[[{'type': 'surface'}, {'type': 'surface'}]])
a += ratio * Z
x_cart = (c * a + a * np.cos(X)) * np.cos(Y)
y_cart = (c * a + a * np.cos(X)) * np.sin(Y)
z_cart = a * np.sin(X)
fig.add_trace(go.Surface(z=Z, x=x, y=y, colorscale='Plasma', showscale=False), row=1, col=1)
fig.add_trace(
go.Surface(z=z_cart, x=x_cart, y=y_cart, surfacecolor=Z, colorscale='Plasma', showscale=False), row=1,
col=2)
fig.update_layout(width=1280, height=720,
margin=dict(l=0, r=0, b=0, t=0),
scene=dict(
xaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
yaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
zaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
xaxis_title=' ', yaxis_title=' ', zaxis_title=' '),
scene2=dict(
xaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
yaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
zaxis=dict(
backgroundcolor="white",
gridcolor="white",
showbackground=False,
zerolinecolor="white", nticks=0, tickfont=dict(color='white')),
xaxis_title=' ', yaxis_title=' ', zaxis_title=' ')
)
# fig.show(renderer="browser")
else:
fig = None
return fig
def volume_plot(self, nb_of_periods=2, resolution=32):
if self.ndim != 3:
raise NotImplementedError(
'Volume plots are for trivariate green functions only!')
x = np.linspace(-nb_of_periods * self.periods[0] / 2, nb_of_periods * self.periods[0] / 2, resolution)
y = np.linspace(-nb_of_periods * self.periods[1] / 2, nb_of_periods * self.periods[1] / 2, resolution)
z = np.linspace(-nb_of_periods * self.periods[2] / 2, nb_of_periods * self.periods[2] / 2, resolution)
X, Y, Z = np.meshgrid(x, y, z)
points = np.stack((X, Y, Z), axis=-1)
values = np.real(self.__call__(points))
values /= np.max(values)
fig = go.Figure(data=go.Volume(
x=X.flatten(),
y=Y.flatten(),
z=Z.flatten(),
value=values.flatten(),
isomin=np.min(values),  # api: numpy.min
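The `nogibbs` branch in `green_function()` builds a separable Fejér window from per-axis triangular ramps. A standalone sketch of that construction, with example cutoffs:

```python
import numpy as np

cutoffs = [4, 6]  # example per-axis spectral cutoffs
ramps = [1 - np.abs(np.arange(-c, c + 1)) / (c + 1) for c in cutoffs]
meshgrids = np.meshgrid(*ramps)
window = np.prod(np.stack(meshgrids, axis=-1), axis=-1)
print(window.shape)  # (13, 9) -- note meshgrid's default 'xy' ordering
```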
import numpy as np
import matplotlib.pyplot as plt
def draw(x1, x2):
ln = plt.plot(x1, x2)
def sigmoid(score):
return 1/(1 + np.exp(-score))
def calculate_error(line_parameters, points, y):
m = points.shape[0]
p = sigmoid(points*line_parameters)
cross_entropy = -(1/m)*(np.log(p).transpose() * y + np.log(1-p).transpose()*(1-y))
return cross_entropy
def gradient_descent(line_parameters, points, y, alpha):
m = points.shape[0]
for i in range(5000):
p = sigmoid(points*line_parameters)
gradient = (1/m)*(points.T * (p - y))
line_parameters = line_parameters - gradient
w1 = line_parameters.item(0)
w2 = line_parameters.item(1)
b = line_parameters.item(2)
x1 = np.array([points[:, 0].min(), points[:, 0].max()])
x2 = -b / w2 + x1 * (-w1 / w2)
draw(x1,x2)
n_pts = 100
np.random.seed(0)
bias = np.ones(n_pts)
top_region = np.array([np.random.normal(10, 2, n_pts), np.random.normal(12,2,n_pts), bias]).transpose()
bottom_region = np.array([np.random.normal(5, 2, n_pts), np.random.normal(6,2,n_pts), bias]).transpose()
all_points = np.vstack((top_region, bottom_region))
line_parameters = np.matrix([np.zeros(3)]).T  # api: numpy.zeros
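With the initial weights in place, a plausible end-to-end run of the helpers above looks like this; it uses plain ndarrays instead of the deprecated np.matrix, assumes the top region is labeled 1 and the bottom 0, and reuses all_points and n_pts from the snippet:

```python
import numpy as np

y = np.vstack([np.ones((n_pts, 1)), np.zeros((n_pts, 1))])
theta = np.zeros((3, 1))
for _ in range(5000):
    p = 1.0 / (1.0 + np.exp(-(all_points @ theta)))     # sigmoid of scores
    theta -= (1.0 / len(all_points)) * (all_points.T @ (p - y))
print(theta.ravel())  # learned [w1, w2, b]
```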
#! /usr/bin/env python
"""
The idea is based on <NAME> Keplerspline.pro
IDL module that iteratively fits 4th-order B-splines
to the data to remove trends.
Breakpoints for each campaign come from A.V.
the `knots` utility program defines inner knots
and is based on <NAME>'s bspline_bkpts.pro
modified and simplified for SciPy
"""
import numpy as np
from astropy.io import fits
from scipy.interpolate import LSQUnivariateSpline,UnivariateSpline,interp1d
from scipy.special import erf,erfc,betainc,binom
from scipy.signal import medfilt
# check if fitsio is installed
try:
import fitsio
nofitsio = False
except ImportError:
nofitsio = True
def rolling_window(a, window):
shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)
strides = a.strides + (a.strides[-1],)
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
#here some changes
def get_knots(x, dt = None, npts = None, k=4,verbose=False):
"""
determines the inner knots for a spline
that satisfy the Schoenberg-Whitney conditions
"""
# if there is an empty list, return it and fail
n = len(x)
if n<1:
return x, (True,)
# Get the range in x
x = np.array(x)
x.sort() # sort x from low to high
x_range = x[-1] - x[0]
##########################################################
## Get evenly spaced knots #
## knots must be internal to the #
## abscissa. We first generate #
## a list evenly spaced on [min(x) + dt/2,max(x) - dt/2) #
## OLD #t = np.arange(x[0]+ dt/2.,x[-1]-dt/2.,dt)
##########################################################
# if dt is given, use it
if dt is not None:
npts = int(x_range / dt) + 1
if npts < 2: npts = 2
tempdt = x_range/(npts - 1.)
t = np.arange(npts,dtype=float) * tempdt + x[0]
# if dt not given & npts is, divide
elif npts is not None:
npts = int(npts)
tempdt = x_range/(npts - 1.)
t = np.arange(npts,dtype=float) * tempdt + x[0]
else:
npts = 11
tempdt = x_range/(npts - 1.)
print('Defaulting to %i knots. dt = %0.2f' % (npts, tempdt))
t = np.arange(npts,dtype=float) * tempdt + x[0]
if np.nanmin(x) < np.min(t):
t[np.argmin(t)] = np.nanmin(x)
if np.nanmax(x) > np.max(t):
t[np.argmax(t)] = np.nanmax(x)
t = t[(t>np.min(x)) & (t<np.max(x))] # LSQUnivariateSpline internally adds boundary knots
# https://github.com/scipy/scipy/issues/5916#issuecomment-191346579
## Check Shoenberg-Whiteney conditions
## set fmode to True so that it actually starts
## checking. Keep checking until fmode[0] is False,
## indicating that knots satisfy SW conditions
fmode = True, None
## Check condition again after
## removing offending knots
while fmode[0]:
if verbose: print("Checking Schoenberg-Whitney")
#fmode contains bool for if failed, and list of where it fails
fmode = check_knots(x,t,k,verbose=verbose) # Schoenberg-Whitney conditions
if fmode[0]=='sw':
if verbose:
print('Deleting %s knots'%len(fmode[1]))
t = np.delete(t,fmode[1])
fmode=True,None # set to recheck SW conditions
elif fmode[1]=='f3':
t = np.unique(t) # sort and recheck SW conditions
elif fmode[1]=='f2':
return t,(True,None) # Let us know it failed
elif fmode[1]=='f1':
return None, (True, None) # Let us know if failed
return t,fmode
def check_knots(x,t,k,verbose=True):
'''
returns bool,fmode or 'sw', [indices where it fails]
bool: Did it fail SW conditions
fmode: f1 - fails a uniqueness conditions
f2 - too few points
f3 - not monotonic increasing
sw - fails SW conditions
'''
m = len(x)
#oldt = t.copy()
t = np.concatenate(([x[0]]*(k+1), t, [x[-1]]*(k+1)))
n = len(t)
# condition 1
if not np.all(t[k+1:n-k]-t[k:n-k-1] > 0):
if verbose: print('Failed condition 1 (t[k+1:n-k]-t[k:n-k-1]>0)')
return True,'f1'
if not (k+1 <= n-k-1 <= m):
# >2k+2 and < m-(k+1) point
if verbose: print('Failed condition 2 (too few points for order)')
return True,'f2'
if not np.all(t[1:]-t[:-1] >= 0):
# monotonically increasing
if verbose: print('Failed condition 3a (monotonic abscissa)')
return True,'f3'
if not np.all(t[n-k-1:-1] <= t[n-k:]):
# monotonically increasing
if verbose: print('Failed condition 3b (monotonic abscissa)')
return True,'f3'
# Schoenberg-Whitney Condition
# i.e., there must be data between
# every knot.
# implementation 1
arr = []
for j in range(n-k-1):
arr.append(np.any((t[j] <= x[j:]) & (x[j:] <= t[j+k+1])))
if not np.all(arr):
if verbose: print("Failed Schoenberg-Whitney")
return 'sw',np.where(~np.asarray(arr))[0]
return False, None
def find_data_gaps(time, delta = 1.5):
'''
return np.where(np.diff(time) > delta).tolist()
delta = 1.5 [default]
'''
if time is None:
return []
else:
return np.where(np.diff(time) > delta)[0].tolist()
def where(condition):
wh = np.where(condition)[0]
if len(wh) > 0:
return [wh[0]-1]
else:
return wh
def get_breaks(cadenceno, campaign = None, time=None, dt = 1.5):
"""
if no campaign number is given it checks
to see if cadenceno is a K2 LC fits file
For speed, preferably just pass cadenceno and campaign
"""
if campaign is None:
try:
campaign = cadenceno[0].header['CAMPAIGN']
cadenceno = cadenceno['BESTAPER'].data['CADENCENO']
except Exception:
pass
breakp = [0]
if campaign==3:
breakp.extend(where(cadenceno > 100174))
breakp.extend(where(cadenceno > 101801))
elif campaign==4:
breakp.extend(where(cadenceno > 104222))
breakp.extend(where(cadenceno > 105854))
elif campaign==5:
breakp.extend(where(cadenceno > 109374))
elif campaign==6:
breakp.extend(where(cadenceno > 111550))
breakp.extend(where(cadenceno > 113482))
elif campaign==7:
breakp.extend(where(cadenceno > 117870))
elif campaign==8:
breakp.extend(where(cadenceno > 120881))
breakp.extend(where(cadenceno > 121345))
breakp.extend(where(cadenceno > 121824))
#elif campaign==102:
# breakp = find_data_gaps(time)
elif campaign==111: # bad breakpoints for c11
breakp.extend(where(cadenceno > 134742))
elif campaign==12:
breakp.extend(where(cadenceno > 137332))
elif campaign==13:
breakp.extend(where(cadenceno > 141356))
breakp.extend(where(cadenceno > 143095))
elif campaign==14:
breakp.extend(where(cadenceno > 145715))
breakp.extend(where(cadenceno > 147539))
else:
breakp.extend([])
breakp.extend(find_data_gaps(time, delta = dt))
return np.unique(breakp).tolist()
def piecewise_spline(time, fcor, cadenceno, campaign = 0, mask = None, verbose=False,\
breakpoints = None, delta = None, return_knots = False, k = 4):
"""
returns the piecewise spline fit for every x (or time) value
breakpoints are the **indices** of the break in the data
time, fcor, and candenceno must have the same size/indexing
for breakpoints to work
if you must mask, pass the mask with the keyword, otherwise
you will get unexpected results and offsets
"""
if mask is None:
mask = np.full(cadenceno.shape, True, dtype=bool)
if breakpoints is None:
breakpoints = get_breaks(cadenceno, campaign, time=time, dt = delta)
condlist = []
spl = np.asarray([])
#breakpoints = np.append(0,breakpoints).astype(int) #first segment starts at 0
# create condlist defining interval start
for i,breakpoint in enumerate(breakpoints):
if i < len(breakpoints)-1:
condlist.append((cadenceno >= cadenceno[breakpoint]) & (cadenceno < cadenceno[breakpoints[i+1]]))
else:
condlist.append((cadenceno >= cadenceno[breakpoint]))
## isolate each interval with XOR
#for i,c in enumerate(condlist[:-1]):
# condlist[i] = c ^ condlist[i+1]
# Build up the spline array
for cond in condlist:
x = time[cond & mask]
y = fcor[cond & mask]
kn,fail_mode = get_knots(x, delta,verbose=verbose,k=k)
if fail_mode[0]:
if verbose: print('Couldn\'t find knots. Using LSQUnivariate Spline w/o mask')
x = time[cond]
y = fcor[cond]
kn,_ = get_knots(x, delta,verbose=verbose)
spl_part = LSQUnivariateSpline(time[cond & mask],fcor[cond & mask], t=kn, k=k)
else:
spl_part = LSQUnivariateSpline(x, y, t=kn, k=k ) #eval spline
spl = np.append(spl,spl_part(time[cond]))
return spl.ravel()
def single_spline(time, fcor, mask = None, \
delta = None, return_knots = False, k = 4):
"""
if you must mask, pass the mask with the keyword, otherwise
you will get unexpected results and offsets
"""
if mask is None:
mask = np.full(time.shape, True, dtype=bool)
x = time[mask]
y = fcor[mask]
srt = np.argsort(x)
x = x[srt]
y = y[srt]
kn,fmode = get_knots(x, delta)
#print x.min(),kn.min(),kn.max(),x.max()
spl= LSQUnivariateSpline(x, y, t=kn, k=k ) #eval spline
spl = spl(time)
if return_knots:
return spl.ravel(),kn
else:
return spl.ravel()
def get_k2_data(k2dataset):
if isinstance(k2dataset,str):
try:
if not nofitsio:
data = fitsio.FITS(k2dataset)
t = data[1]['T'].read()
f = data[1]['FCOR'].read()
firing = data[1]['MOVING'].read()
cadenceno = data[1]['CADENCENO'].read()
campaign = data[0].read_header()['CAMPAIGN']
mag = data[0].read_header()['KEPMAG']
data.close()
else:
data = fits.open(k2dataset)
t = data[1].data['T']
f = data[1].data['FCOR']
firing = data[1].data['MOVING']
campaign = data[0].header['CAMPAIGN']
cadenceno = data[1].data['CADENCENO']
mag = data[0].header['KEPMAG']
except Exception:
print('Problem')
data = fits.open(k2dataset)
t = data[1].data['T']
f = data[1].data['FCOR']
firing = data[1].data['MOVING']
campaign = data[0].header['CAMPAIGN']
cadenceno = data[1].data['CADENCENO']
mag = data[0].header['KEPMAG']
else:
t = k2dataset['BESTAPER'].data['T']
f = k2dataset['BESTAPER'].data['FCOR']
firing = k2dataset['BESTAPER'].data['FIRING']
cadenceno = k2dataset['BESTAPER'].data['CADENCENO']
campaign = k2dataset[0].header['CAMPAIGN']
mag = k2dataset[0].header['KEPMAG']
g = firing == 0
# import pdb;pdb.set_trace()
return t[g],f[g], cadenceno[g], campaign, mag
def pmpoints(m,n,ns):
'''
m: number of points found
n: number of trials
ns: n*sigma for prob of finding a point
'''
m = m // 1 # np.floor(m)
valid = np.ones_like(m)
#np.sqrt(2) = 1.4142135623730951
prob = erfc(ns/1.4142135623730951)
#np.log(2) = 0.69314718055994529
p = -(n*0.69314718055994529) + np.log(binom(n,m))
p += (-m + n)*np.log(2 - prob)
p += m*np.log(prob)
valid[(m < 0) | (m >n)] = 0
return np.exp(p) * valid
def cmpoints(m,n,ns):
m = m // 1 # np.floor(m)
valid = np.ones_like(m)
p = np.log(betainc(-m + n,1 + m,1 - erfc(ns/np.sqrt(2))/2.))
valid[(m<0)] = 0.
p[m>=n] = 1.
return np.exp(p) * valid
def pgtnsigma(n):
return 0.5 * erfc(n / np.sqrt(2.))
def mad(x,axis=None):
return 1.4826*np.nanmedian(np.abs(x-np.nanmedian(x,axis=axis)),axis=axis)
def std1(x):
'''
get full standard deviation
for top/bottom half of lc
'''
if np.mean(x-1) < 0:
return mad(np.hstack([x-1,np.abs(x-1)]))
else:
return mad(np.hstack([x-1,-np.abs(x-1)]))
def statmask(f,sl=1e-5,sigma=3,perpoint=False):
mxu=np.where(f>=1)[0]
gfluxu = f[mxu]
Mu = np.arange(0,len(gfluxu))[::-1]
srtu = np.argsort(np.argsort(gfluxu))
Mu = Mu[srtu]
snrsu = np.abs(gfluxu-1) / std1(gfluxu)
if perpoint:
pu = 1-cmpoints(Mu,len(gfluxu),snrsu)
else:
pu = 1-cmpoints(Mu,len(gfluxu),sigma)
mxd=np.where(f<1)[0]
gfluxd = f[mxd]
Md = np.arange(0,len(gfluxd))
srtd = np.argsort(np.argsort(gfluxd))
Md = Md[srtd]
snrsd = np.abs(gfluxd-1) / std1(gfluxd)
if perpoint:
pd = 1-cmpoints(Md,len(gfluxd),snrsd)
else:
pd = 1-cmpoints(Md,len(gfluxd),sigma)
mx = np.hstack([mxu,mxd])
M = np.hstack([Mu,Md])
gflux = np.hstack([gfluxu,gfluxd])
srt = np.hstack([srtu,srtd])
snrs = np.hstack([snrsu,snrsd])
p = np.hstack([pu,pd])
return (p<sl)[np.argsort(mx)]  # api: numpy.argsort
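As a quick check of the `rolling_window` stride trick defined near the top of this file: each row is a zero-copy sliding view, so a rolling median is just a median over the last axis:

```python
import numpy as np

a = np.arange(6, dtype=float)   # [0. 1. 2. 3. 4. 5.]
w = rolling_window(a, 3)        # shape (4, 3), no data copied
print(np.median(w, axis=-1))    # [1. 2. 3. 4.]
```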
import numpy as np
import xarray as xr
from itertools import combinations
import dask.array as dsa
import dask
from xcape.core import calc_cape
from xcape.core import calc_srh
from .fixtures import empty_dask_array, dataset_soundings, dataset_ERA5pressurelevel
import pytest
@pytest.fixture(scope='module')
def p_t_td_1d(nlevs=20):
p = np.random.rand(nlevs)  # api: numpy.random.rand
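The fixture is truncated above. Purely as an illustration (not xcape's actual fixture), a sounding-like profile might pair a descending pressure array with matching temperature and dewpoint arrays:

```python
import numpy as np

def p_t_td_profile(nlevs=20):
    p = np.sort(np.random.rand(nlevs))[::-1] * 1000.0  # hPa, descending
    t = 300.0 - 50.0 * np.linspace(0.0, 1.0, nlevs)    # K, cooling with height
    td = t - 5.0                                       # K, dewpoint below t
    return p, t, td
```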
# -*- coding: utf-8 -*-
"""this module defines some common used utilities"""
import os
import re
import subprocess as sp
from collections import OrderedDict
from collections.abc import Iterable
from shutil import rmtree
from sys import stdout
import CifFile
import numpy as np
from mykit.core.constants import PI
def get_dirpath(filePath):
"""get the name of directory with filePath
Args:
filePath (str): the string of the path of file
Returns:
str: the absolute path of parent directory, if filepath represents a file,
otherwise the path of the directory
"""
_path = os.path.abspath(filePath)
if os.path.isdir(_path):
_path = _path + "/"
return os.path.dirname(_path)
def get_file_ext(filePath):
"""Return the extension name of filePath
If filePath is an existing directory, None will be returned.
If the path has no characters after "." or has no ".",
an empty string will be returned.
Args:
filePath (str): the path of the file
"""
if os.path.isdir(filePath):
return None
base = os.path.basename(os.path.abspath(filePath))
return os.path.splitext(base)[1][1:]
def get_filename_wo_ext(filePath):
"""Get the filename without extension
Args:
filePath (str): the path of file
"""
fnExt = os.path.basename(os.path.abspath(filePath))
return os.path.splitext(fnExt)[0]
def get_cwd_name():
"""Get the name of current working directory
"""
return os.path.basename(os.getcwd())
def get_matched_files(dirPath=".", regex=None):
"""Get the abspath of the files whose name matches a regex
Only files will be returned, and directories are excluded.
Args:
dirPath (str): the directory to search
regex (regex): the regular expression to match the filename
Returns:
tuple of strings
"""
# check the existence of the path
fns = []
_absDir = os.path.abspath(dirPath)
if os.path.isdir(_absDir):
for i in os.listdir(_absDir):
if regex != None:
if not re.match(regex, i):
continue
_fpath = os.path.join(_absDir, i)
if os.path.isfile(_fpath):
fns.append(_fpath)
return tuple(fns)
# def common_io_checkdir(dirname=None, create=True):
# '''
# check if dirname exists, or create it
# return: the full path of target directory
# '''
# dirname = dirname.strip()
# if (dirname is None or dirname.strip() == ""):
# dirname = os.getcwd()
# elif (not os.path.exists(dirname)) and create:
# os.mkdir(dirname.strip())
# return dirname
def io_cleandir(dirname=None):
'''check if dirname exists and is empty, or create it
return: the full path of target directory
'''
if (dirname is None or dirname.strip() == ""):
dirname = os.getcwd()
elif (not os.path.exists(dirname)):
os.mkdir(dirname)
elif (os.path.exists(dirname)):
rmtree(dirname)
os.mkdir(dirname)
return dirname
def trim_after(string, regex, include_pattern=False):
"""Trim a string after the first match of regex.
If no pattern matches, the original string is returned.
The matched pattern is trimmed as well.
Args:
string (str): the string to trim
regex (regex): the regex to match
include_pattern (bool): if the matched pattern is included
in the return string
"""
_search = re.search(regex, string)
if _search != None:
if include_pattern:
return string[: _search.end()]
return string[: _search.start()]
return string
def trim_before(string, regex, include_pattern=False):
"""Trim a string from the beginning to the first match of regex.
If no pattern matches, the original string is returned.
Args:
string (str): the string to trim
regex (regex): the regex to match
include_pattern (bool): if the matched pattern is included
in the return string
"""
_search = re.search(regex, string)
if _search != None:
if include_pattern:
return string[_search.start() :]
return string[_search.end() :]
return string
def trim_both_sides(string, regex_left, regex_right, include_pattern=False):
"""Trim a string from both sides.
Basically it first tries to match regex_left, trim the characters on the left
of the matched pattern, then match regex_right and trim the characters after.
Args:
regex_left (regex):
regex_right (regex):
include_pattern (bool): if the matched pattern is included
in the return string
"""
_trimed = trim_before(string, regex_left, include_pattern=include_pattern)
_trimed = trim_after(_trimed, regex_right, include_pattern=include_pattern)
return _trimed
def check_duplicates_in_tag_tuple(tagtuple):
"""Check if there is duplicate in a tag tuple, case sensitive
Args:
tagTuple (tuple) : the tag tuple to check
"""
_dup = -1
for _i, _k in enumerate(tagtuple):
if _k in tagtuple[:_i]:
_dup = _i
break
return _dup
# def data_normalization(data, scale=1.0, normByPeak=True):
# '''Normalize the 1D data.
# Args:
# data (iterable): the container of 1D data
# normByPeak (bool) : when set True, the normalization factor will be
# the peak absolute value. Otherwise, the sum of absolute values
# will be used as normalization factor.
# TODO:
# Generalize the normalization
# Returns:
# numpy array, the normalized data
# '''
# import numpy as np
# assert len(np.shape(data)) == 1
# assert isinstance(normByPeak, bool)
# _a = []
# try:
# _a = np.array(data, dtype="float64")
# except ValueError:
# raise ValueError("the data cannot be converted.")
# _sum = np.sum(np.abs(_a)) / scale
# _max = np.max(np.abs(_a)) / scale
# if normByPeak:
# return _a / _max
# return _a / _sum
# def find_data_extreme(data):
# '''Find the point at which the data reaches extrema
# TODO:
# Generalize to 2D and 3D coordinate
# Returns:
# dict, with two keys, "min" and "max".
# Either key has a 2-member tuple with its first the min/max value
# and second the coordinate where it reaches the extreme
# '''
# pass
def find_vol_dirs(path=".", vdPat=None):
"""Find names of directories corresponding to calculation with lattice of different volumes
Args:
path (str): the path to search directories within. Default is CWD.
vdPat (regex): the pattern of the names of volume directories
If not specified, use "V_x.xx" where x is 0-9
Returns:
list of strings
"""
pat = vdPat
if pat is None:
pat = r"^V_\d.\d+"
_dirs = []
for _d in os.listdir(path):
if re.match(pat, _d):
_dirs.append(_d)
def __sort_vol(dirstr):
return float(dirstr.split("_")[1])
if vdPat is None:
_dirs = sorted(_dirs, key=__sort_vol)
return _dirs
def conv_string(string, conv2, *indices, sep=None, strips=None):
"""
Split the string and convert substrings to a specified type.
Args:
string (str): the string from which to convert value
conv2: the type to which the substring will be converted
support ``str``, ``int``, ``float``, ``bool``
indices (int): if specified, only the substrings at these indices in the split string list will be converted.
otherwise, all substrings will be converted.
sep (regex): the separators used to split the string.
strips (str): extra strings to strip for each substring before conversion
Returns:
``conv2``, or list of ``conv2`` type
"""
assert conv2 in [str, int, float, bool]
str_tmp = string.strip()
if sep is not None:
str_list = re.split(sep, str_tmp)
else:
str_list = str_tmp.split()
if strips is None:
str_list = [x.strip() for x in str_list]
else:
str_list = [x.strip(" " + strips) for x in str_list]
# need to convert to float first for converting to integer
if conv2 is int:
def convfunc(x):
return int(float(x))
elif conv2 is bool:
def convfunc(x):
return {
"TRUE": True,
"T": True,
".TRUE.": True,
".T.": True,
"FALSE": False,
"F": False,
".FALSE.": False,
".F.": False,
}.get(x.upper(), None)
else:
convfunc = conv2
if len(indices) == 0:
return list(map(convfunc, str_list))
elif len(indices) == 1:
return convfunc(str_list[indices[0]])
conv_strs = [str_list[i] for i in indices]
return list(map(convfunc, conv_strs))
def get_first_last_line(filePath, encoding=stdout.encoding):
"""Return the first and the last lines of file
The existence of filePath should be check beforehand.
Args:
filePath (str): the path of the file
encoding (str): the encoding of the file. Default stdout.encoding
Returns
two strings (unstripped)
"""
with open(filePath, "rb") as f:
first = f.readline() # Read the first line.
f.seek(-2, os.SEEK_END) # Jump to the second last byte.
while f.read(1) != b"\n": # Until EOL is found...
# ...jump back the read byte plus one more.
f.seek(-2, os.SEEK_CUR)
last = f.readline() # Read last line.
# encode string
return str(first, encoding), str(last, encoding)
def get_str_indices(container, string):
"""Return the indices of ``string`` in a list or tuple``container``
Args:
container (list or tuple): container of strings
string (str): the string to locate
Returns:
list
"""
assert isinstance(container, (list, tuple))
ind = []
for i, s in enumerate(container):
if string == s:
ind.append(i)
return ind
def get_str_indices_by_iden(container, iden=None):
"""Return the indices of identified strings in a list or tuple``container``.
The strings are identified by ``iden``, either a str, int, or a Iterable of these types.
If ``iden`` is int or corresponding Iterable, the value greater or equal to the
length of ``container`` will be ignored.
Args:
container (list or tuple): container of strings
iden (int, str, Iterable): the identifier for string to locate
Returns:
list, unique indices of identified strings
"""
ret = []
if iden is None:
return ret
l = len(container)
if isinstance(iden, int):
if iden < l:
ret.append(iden)
elif isinstance(iden, str):
ret.extend(get_str_indices(container, iden))
elif isinstance(iden, Iterable):
for i in iden:
if isinstance(i, int):
if i < l:
ret.append(i)
elif isinstance(i, str):
ret.extend(get_str_indices(container, i))
if ret != []:
return list(OrderedDict.fromkeys(ret).keys())
return ret
def run_cmd(cmd, fout=None, ferr=None):
"""Run the calculation command by threading a subprocess calling cmd
"""
shell = False
if isinstance(cmd, str):
shell = True
if fout is None:
ofile = sp.PIPE
else:
ofile = open(fout, 'w')
if ferr is None:
efile = sp.PIPE
else:
efile = open(ferr, 'w')
p = sp.Popen(cmd, stdout=ofile, stderr=efile, shell=shell)
p.wait()
if not fout is None:
ofile.close()
if not ferr is None:
efile.close()
def conv_estimate_number(s):
"""Convert a string representing a number with error to a float number.
Literally, a string like '3.87(6)' will be converted to 3.876.
For now, the estimated error in the parentheses is kept as trailing digits.
Args:
s (str): number string
Returns:
float
"""
return float(re.sub(r"[\(\)]", "", s))
def get_latt_vecs_from_latt_consts(a, b, c, alpha=90, beta=90, gamma=90):
"""Convert lattice constants to lattice vectors in right-hand system
Currently supports orthorhombic lattices only!
Args:
a, b, c (float): length of lattice vectors
alpha, beta, gamma (float): angles between lattice vectors in degree.
90 used as default.
"""
a = abs(a)
b = abs(b)
c = abs(c)
return [[a, 0, 0], [0, b, 0], [0, 0, c]]
def get_latt_consts_from_latt_vecs(latt):
"""Convert lattice vectors in right-hand system to lattice constants
Args:
latt (2d-array): lattice vectors, shape (3,3)
Returns:
6 floats, a, b, c, alpha, beta, gamma (in degree)
"""
try:
assert np.shape(latt)  # api: numpy.shape
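The truncated assertion above presumably checks the (3, 3) shape. A minimal sketch of the full inverse conversion, assuming standard crystallographic conventions (lengths from row norms, angles from pairwise dot products):

```python
import numpy as np

def latt_consts(latt):
    latt = np.asarray(latt, dtype=float)
    assert np.shape(latt) == (3, 3)
    a, b, c = np.linalg.norm(latt, axis=1)
    alpha = np.degrees(np.arccos(np.dot(latt[1], latt[2]) / (b * c)))
    beta = np.degrees(np.arccos(np.dot(latt[0], latt[2]) / (a * c)))
    gamma = np.degrees(np.arccos(np.dot(latt[0], latt[1]) / (a * b)))
    return a, b, c, alpha, beta, gamma
```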
import numpy as np
def measure_penalty_error(embeddings, centers, radii, edge_map, nodes, edges):
edge_count = embeddings.shape[0]
error = 0.0
for i in range(edge_count):
edge = edge_map[edges[i]]
n_u = edge[0]
n_v = edge[1]
n_u_ind = np.where(nodes == n_u)
n_v_ind = np.where(nodes == n_v)
X_uv = embeddings[i]
c_u = centers[n_u_ind]
c_v = centers[n_v_ind]
r_u = radii[n_u_ind][0][0]
r_v = radii[n_v_ind][0][0]
#print type(r_u)
if np.linalg.norm(X_uv - c_u)  # api: numpy.linalg.norm
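The condition is truncated above. One plausible reading, shown only as a sketch: penalize an edge embedding that falls outside the ball of either endpoint:

```python
import numpy as np

def outside_penalty(X_uv, c, r):
    # Zero inside the ball of radius r around center c, linear outside.
    return max(0.0, float(np.linalg.norm(X_uv - c)) - r)
```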
import logging
from typing import List, Tuple, Union, Optional
from pathlib import Path
import numpy as np # type: ignore
import pandas as pd # type: ignore
from numpy.random import RandomState
from blender.core import Galaxy, Blend, Stamp
from blender.segmap import normalize_segmap
from blender.segmap import mask_out_pixels
from blender.visualisation import asin_stretch_norm
PathType = Union[Path, str]
class BlendShiftError(Exception):
pass
class BlendMissingTestError(Exception):
pass
class Blender:
img_dtype = np.float32
seg_dtype = np.uint8
def __init__(self, imgpath: PathType, segpath: PathType, catpath: PathType,
train_test_ratio: float = 0.2,
magdiff: int = 2, raddiff: int = 4, seed: int = 42) -> None:
self.data = np.load(imgpath).astype(self.img_dtype, copy=False)
self.seg = np.load(segpath).astype(self.seg_dtype, copy=False)
self.cat = pd.read_csv(catpath)
self.tt_ratio = np.clip(train_test_ratio, 0, 1)
self.magdiff = magdiff
self.raddiff = raddiff
self.rng = RandomState(seed=seed)
self.img_size = self.data.shape[-1]
self.assign_train_test()
@property
def n_gal(self) -> int:
return len(self.data)
def assign_train_test(self) -> None:
randomized_indices = self.rng.permutation(self.n_gal)
split_idx = int(self.n_gal * self.tt_ratio)
test, train = np.split(randomized_indices, [split_idx])  # api: numpy.split
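np.split with a single index returns the two pieces around it, so the line above gives a test set of length split_idx and a train set with the remainder:

```python
import numpy as np

idx = np.random.RandomState(42).permutation(10)
test, train = np.split(idx, [3])
print(len(test), len(train))  # 3 7
```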
# -*- coding: utf-8 -*-
"""Genetic_algorithms_mk5.ipynb
# Genetic algorithms - example
## Finding maximum of a function:
$$
f(x,y)=\sin(\omega x)^2 \cos(\omega y)^2 e^{\frac{x+y}{\sigma}}
$$
## Visualization via contour lines
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation, rc
from IPython.display import HTML
# parameters of the SearchSpace
class SearchSpace:
dim = 2
# fitness function parameters
w = 1
# this controls how many peaks you want in the search space
num_peaks_sqrt = 4 #@param {type : 'integer'}
sigma = 10 + 2.5*(num_peaks_sqrt - 4)
num_peaks = num_peaks_sqrt**2
n_per_peak = 25
with_moat = False
# search space parameters
start = 0
stop = num_peaks_sqrt*np.pi
N = num_peaks_sqrt * n_per_peak
f_normal = lambda x,y : np.power(np.sin(SearchSpace.w*x) * np.sin(SearchSpace.w*y),2) * np.exp((x+y)/SearchSpace.sigma)
@classmethod
def f_moat(cls, xv,yv):
''' Alternate fitness function with a "moat". '''
if xv.shape != yv.shape:
print("Warning! Different shapes of x and y.")
s = cls.stop
nps = cls.num_peaks_sqrt
z = np.empty(xv.shape, dtype = float)
if (len(xv.shape) < 2):
nx = xv.shape[0]
for i in range(nx):
x,y = xv[i], yv[i]
z[i] = cls.f_normal(x,y) if ((x < s/nps) or (y < s/nps) or ((x > (1-1/nps)*s) and (y > (1-1/nps)*s))) else 0
else:
ny,nx = xv.shape
for i in range(nx):
for j in range(ny):
x,y = xv[i,j], yv[i,j]
z[i,j] = cls.f_normal(x,y) if ((x < s/nps) or (y < s/nps) or ((x > (1-1/nps)*s) and (y > (1-1/nps)*s))) else 0
return z
@classmethod
def plot2d(cls, ax):
''' Plot 2d search space. '''
# making the hill
x = np.linspace(SearchSpace.start, SearchSpace.stop, num=SearchSpace.N)
xv, yv = np.meshgrid(x, x)
f = cls.f_moat if cls.with_moat else cls.f_normal
z = f(xv,yv) # (np.sin(w*x)**2) * (np.sin(w*y)**2) * np.exp((x+y)/sigma)
_ = ax.contour(x.reshape(-1), x.reshape(-1), z, levels=12, linewidths=0.5, colors='k', extend='both')
_ = ax.contourf(x.reshape(-1), x.reshape(-1), z, levels=12, cmap='PuBu', extend='both')
pass
SearchSpace.with_moat = False #@param {type:"boolean"}
fig, ax = plt.subplots()
ax.set_aspect('equal')
SearchSpace.plot2d(ax)
fig.show()
"""## Chromosome"""
class Chromosome:
num_genes = SearchSpace.dim
genes_lower_bound = SearchSpace.start * np.ones(num_genes)
genes_upper_bound = SearchSpace.stop * np.ones(num_genes)
p_mutation = 0.1
p_crossover = 0.75
mutation_step = 3
hash_precision = 4
@classmethod
def plot(cls, c, ax):
''' Plot first 2 genes of a chromosome as xy coordinates. '''
ax.plot(c[0], c[1], 'ko', ms=3)
@classmethod
def prehash(cls, c):
''' Make a string from chromosome that can be used for hashing. '''
c = np.around(10**Chromosome.hash_precision*c, decimals=0)
s = ""
for g in c[:Chromosome.num_genes]:
s += str(g)[:-2]
return s
"""## Defining the population and genetic operators"""
class Population:
default_cap = 100
start = SearchSpace.start
stop = SearchSpace.stop/SearchSpace.num_peaks_sqrt
p_selection_in_tournament = 0
# new random population
def __init__(self, cap=None):
if cap is None:
cap = Population.default_cap
self.cap = cap
self.max_size = 2*cap**2
self.default_tournament_size = cap
self.hash = set()
self.gen = np.empty((self.max_size, Chromosome.num_genes+1), dtype=float)
self.last = 0
while self.last < self.cap:
self.add(Population.start+(Population.stop - Population.start)*np.random.rand(Chromosome.num_genes))
def size(self):
return self.last
def __len__(self):
return self.last
def add(self, c):
if self.last == self.max_size:
print("ERROR. Adding to a full generation matrix")
return
if (c[0] < Population.start) or (c[1] < Population.start):
print("SOMETHING IS VERY WRONG")
s = Chromosome.prehash(c)
if not s in self.hash:
self.gen[self.last, :Chromosome.num_genes] = c
self.last += 1
self.hash.add(s)
def __str__(self):
s = ""
for c in self.gen:
s += str(c) + '\n'
return s
def plot(self, ax):
for c in self.gen[:self.last, :2]:
Chromosome.plot(c, ax)
pass
# monogenic mutation
def mutate(self):
old_size = self.size()
for i in range(old_size):
if np.random.rand() < Chromosome.p_mutation:
c = np.copy(self.gen[i,:Chromosome.num_genes])
gene_idx = np.random.randint(0, Chromosome.num_genes)
# step = (2*np.random.rand()-1)*Chromosome.mutation_step
step = np.sign(np.random.rand() - 0.5)*np.random.rand()*Chromosome.mutation_step
c[gene_idx] += step
if c[gene_idx] < SearchSpace.start:
c[gene_idx] -= 2*step
if c[gene_idx] > SearchSpace.stop:
c[gene_idx] -= 2*step
self.add(c)
def crossover(self):
old_size = self.last
for i in range(old_size):
for j in range(i+1, old_size):
if (np.random.rand() < Chromosome.p_crossover):
c_idx = np.random.randint(1,Chromosome.num_genes)
c1 = np.copy(self.gen[i,:Chromosome.num_genes])
c1[c_idx:] = self.gen[j,c_idx:Chromosome.num_genes]
c2 = np.copy(self.gen[j,:Chromosome.num_genes])
c2[c_idx:] = self.gen[i,c_idx:Chromosome.num_genes]
self.add(c1)
self.add(c2)
def calc_fitness(self):
fitness = SearchSpace.f_moat if SearchSpace.with_moat else SearchSpace.f_normal
self.gen[:self.last, 2] = fitness(self.gen[:self.last, 0], self.gen[:self.last,1])
def sort(self):
# fitness based sort
idx = self.gen[:self.last, 2].argsort()[::-1]
self.gen[:self.last] = self.gen[idx]
def preselect(self):
self.calc_fitness()
self.sort()
def truncate(self, start=None):
''' Simple truncation '''
if start is None:
start = self.cap
if start != self.cap:
print("Warning! Number of selected chromosomes is different from popcap.")
return
# remove elements from hashset
for c in self.gen[start:self.last, :Chromosome.num_genes]:
s = Chromosome.prehash(c)
self.hash.remove(s)
# move buffer tail to popcap
self.last = self.cap
# TODO expand docstring
def rearange(self, mask):
''' Rearrange the chromosomes in the generation matrix according to mask such that:
the first part of the generation matrix contains chromosomes with value True in the mask,
and the second part contains the ones with value False.'''
N = np.count_nonzero(mask)
if N != self.cap:
print("Warning! Number of selected chromosomes is different from popcap.")
aux_mask = np.zeros(self.max_size, dtype=bool)
aux_mask[:self.last] = True
self.gen[:N], self.gen[N:self.last] = self.gen[mask], self.gen[mask!=aux_mask]
return N
def keep(self, mask):
''' Given a boolean mask that represents which chromosomes to keep,
first rearrange the generation matrix and then truncate the leftover chromosomes. '''
N = self.rearange(mask)
self.truncate(N)
def truncation_select(self):
''' Keeps the individuals with best fitness. '''
if self.size() > self.cap:
self.preselect()
self.truncate()
def fps_select(self):
''' Fitness Proportionate Selection, aka "Roulette wheel" selection. '''
if self.size() > self.cap:
self.preselect()
f = np.copy(self.gen[:self.last, Chromosome.num_genes]) # first column after chromosome values is fitness
f = f / np.sum(f)
for i in range(1,self.last):
f[i] += f[i-1]
mask = np.zeros(self.max_size, dtype=bool)
n_selected = 0
while n_selected != self.cap:
rnd = np.random.rand()
idx = np.searchsorted(f, rnd)  # api: numpy.searchsorted
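The selection loop is truncated above. An illustrative completion of the roulette wheel: np.searchsorted maps a uniform draw onto the cumulative fitness array, and the mask prevents picking the same individual twice:

```python
import numpy as np

f = np.cumsum([0.1, 0.2, 0.3, 0.4])     # cumulative (normalized) fitness
mask = np.zeros(f.size, dtype=bool)
n_selected, cap = 0, 2
while n_selected != cap:
    idx = np.searchsorted(f, np.random.rand())
    if not mask[idx]:
        mask[idx] = True
        n_selected += 1
print(np.nonzero(mask)[0])               # indices of the selected pair
```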
# -*- coding: utf-8 -*-
"""
@brief test log(time=3s)
"""
import unittest
import warnings
import io
import pickle
from logging import getLogger
import numpy
from scipy.special import expit # pylint: disable=E0611
from sklearn.base import ClassifierMixin, BaseEstimator
from sklearn.linear_model import LogisticRegression
from pyquickhelper.pycode import ExtTestCase, ignore_warnings
from skl2onnx import update_registered_converter
from skl2onnx.algebra.onnx_ops import ( # pylint: disable=E0611
OnnxIdentity, OnnxMatMul, OnnxAdd, OnnxSigmoid, OnnxArgMax)
from skl2onnx.common.data_types import guess_numpy_type, Int64TensorType
from mlprodict.onnx_conv import to_onnx
from mlprodict.onnxrt import OnnxInference
from mlprodict.npy import onnxsklearn_classifier, onnxsklearn_class
import mlprodict.npy.numpy_onnx_impl as nxnp
class CustomLinearClassifier(ClassifierMixin, BaseEstimator):
def __init__(self):
BaseEstimator.__init__(self)
ClassifierMixin.__init__(self)
def fit(self, X, y=None, sample_weights=None):
lr = LogisticRegression().fit(X, y, sample_weights)
self.classes_ = lr.classes_ # pylint: disable=W0201
self.coef_ = lr.coef_ # pylint: disable=W0201
self.intercept_ = lr.intercept_ # pylint: disable=W0201
if len(y.shape) == 1 or y.shape[1] == 1:
# binary class
self.coef_ = numpy.vstack( # pylint: disable=W0201
[-self.coef_, self.coef_]) # pylint: disable=E1130
self.intercept_ = numpy.vstack( # pylint: disable=W0201
[-self.intercept_, self.intercept_]).T # pylint: disable=E1130
return self
def predict_proba(self, X):
return expit(X @ self.coef_ + self.intercept_)
def predict(self, X):
prob = self.predict_proba(X)
return numpy.argmax(prob, axis=1)
def custom_linear_classifier_shape_calculator(operator):
op = operator.raw_operator
input_type = operator.inputs[0].type.__class__
input_dim = operator.inputs[0].type.shape[0]
lab_type = Int64TensorType([input_dim])
prob_type = input_type([input_dim, op.coef_.shape[-1]])
operator.outputs[0].type = lab_type
operator.outputs[1].type = prob_type
def custom_linear_classifier_converter(scope, operator, container):
op = operator.raw_operator
opv = container.target_opset
out = operator.outputs
X = operator.inputs[0]
dtype = guess_numpy_type(X.type)
raw = OnnxAdd(
OnnxMatMul(X, op.coef_.astype(dtype), op_version=opv),
op.intercept_.astype(dtype), op_version=opv)
prob = OnnxSigmoid(raw, op_version=opv)
label = OnnxArgMax(prob, axis=1, op_version=opv)
Yl = OnnxIdentity(label, op_version=opv, output_names=out[:1])
Yp = OnnxIdentity(prob, op_version=opv, output_names=out[1:])
Yl.add_to(scope, container)
Yp.add_to(scope, container)
class CustomLinearClassifier3(CustomLinearClassifier):
pass
@onnxsklearn_classifier(register_class=CustomLinearClassifier3)
def custom_linear_classifier_converter3(X, op_=None):
if X.dtype is None:
raise AssertionError("X.dtype cannot be None.")
if isinstance(X, numpy.ndarray):
raise TypeError("Unexpected type %r." % X)
if op_ is None:
raise AssertionError("op_ cannot be None.")
coef = op_.coef_.astype(X.dtype)
intercept = op_.intercept_.astype(X.dtype)
prob = nxnp.expit((X @ coef) + intercept)
label = nxnp.argmax(prob, axis=1)
return nxnp.xtuple(label, prob)
@onnxsklearn_class("onnx_predict")
class CustomLinearClassifierOnnx(ClassifierMixin, BaseEstimator):
def __init__(self):
BaseEstimator.__init__(self)
ClassifierMixin.__init__(self)
def fit(self, X, y=None, sample_weights=None):
lr = LogisticRegression().fit(X, y, sample_weights)
self.classes_ = lr.classes_ # pylint: disable=W0201
self.coef_ = lr.coef_ # pylint: disable=W0201
self.intercept_ = lr.intercept_ # pylint: disable=W0201
if len(y.shape) == 1 or y.shape[1] == 1:
# binary class
self.coef_ = numpy.vstack( # pylint: disable=W0201
[-self.coef_, self.coef_]) # pylint: disable=E1130
self.intercept_ = numpy.vstack( # pylint: disable=W0201
[-self.intercept_, self.intercept_]).T # pylint: disable=E1130
return self
def onnx_predict(self, X):
if X.dtype is None:
raise AssertionError("X.dtype cannot be None.")
if isinstance(X, numpy.ndarray):
raise TypeError("Unexpected type %r." % X)
coef = self.coef_.astype(X.dtype)
intercept = self.intercept_.astype(X.dtype)
prob = nxnp.expit((X @ coef) + intercept)
label = nxnp.argmax(prob, axis=1)
return nxnp.xtuple(label, prob)
class TestCustomClassifier(ExtTestCase):
def setUp(self):
logger = getLogger('skl2onnx')
logger.disabled = True
with warnings.catch_warnings():
warnings.simplefilter("ignore", ResourceWarning)
update_registered_converter(
CustomLinearClassifier, "SklearnCustomLinearClassifier",
custom_linear_classifier_shape_calculator,
custom_linear_classifier_converter,
options={'zipmap': [False, True, 'columns'],
'nocl': [True, False]})
@ignore_warnings((DeprecationWarning, RuntimeWarning))
def test_function_classifier(self):
X = numpy.random.randn(20, 2).astype(numpy.float32)
y = ((X.sum(axis=1) + numpy.random.randn(
X.shape[0]).astype(numpy.float32)) >= 0).astype(numpy.int64)
dec = CustomLinearClassifier()
dec.fit(X, y)
onx = to_onnx(dec, X.astype(numpy.float32),
options={id(dec): {'zipmap': False}})
oinf = OnnxInference(onx)
exp = dec.predict(X)
prob = dec.predict_proba(X)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['label'].ravel())
self.assertEqualArray(prob, got['probabilities'])
@ignore_warnings((DeprecationWarning, RuntimeWarning))
def test_function_classifier3_float32(self):
X = numpy.random.randn(20, 2).astype(numpy.float32)
y = ((X.sum(axis=1) + numpy.random.randn(
X.shape[0]).astype(numpy.float32)) >= 0).astype(numpy.int64)
dec = CustomLinearClassifier3()
dec.fit(X, y)
onx = to_onnx(dec, X.astype(numpy.float32),
options={id(dec): {'zipmap': False}})
oinf = OnnxInference(onx)
exp = dec.predict(X)
prob = dec.predict_proba(X) # pylint: disable=W0612
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['label'])
self.assertEqualArray(prob, got['probabilities'])
X2, P2 = custom_linear_classifier_converter3( # pylint: disable=E0633
X, op_=dec)
self.assertEqualArray(X2, got['label'])
self.assertEqualArray(P2, got['probabilities'])
@ignore_warnings((DeprecationWarning, RuntimeWarning))
def test_function_classifier3_float64(self):
X = numpy.random.randn(20, 2).astype(numpy.float64)
y = ((X.sum(axis=1) + numpy.random.randn(
X.shape[0]).astype(numpy.float32)) >= 0).astype(numpy.int64)
dec = CustomLinearClassifier3()
dec.fit(X, y)
onx = to_onnx(dec, X.astype(numpy.float64),
options={id(dec): {'zipmap': False}})
oinf = OnnxInference(onx)
exp = dec.predict(X)
prob = dec.predict_proba(X)
got = oinf.run({'X': X})
self.assertEqualArray(exp, got['label'])
self.assertEqualArray(prob, got['probabilities'])
X2, P2 = custom_linear_classifier_converter3( # pylint: disable=E0633
X, op_=dec)
self.assertEqualArray(X2, got['label'])
self.assertEqualArray(P2, got['probabilities'])
@ignore_warnings((DeprecationWarning, RuntimeWarning))
def test_function_classifier_onnx_float32(self):
X = numpy.random.randn(20, 2).astype(numpy.float32)
y = ((X.sum(axis=1) + numpy.random.randn(
X.shape[0]).astype(numpy.float32)) >= 0).astype(numpy.int64)
dec = CustomLinearClassifierOnnx()
dec.fit(X, y)
res = dec.onnx_predict_(X) # pylint: disable=E1101
self.assertNotEmpty(res)
exp1 = dec.predict(X) # pylint: disable=E1101
prob1 = dec.predict_proba(X) # pylint: disable=E1101
onx = to_onnx(dec, X.astype(numpy.float32),
options={id(dec): {'zipmap': False}})
oinf = OnnxInference(onx)
exp2 = dec.predict(X) # pylint: disable=E1101
prob2 = dec.predict_proba(X) # pylint: disable=E1101
got = oinf.run({'X': X})
self.assertEqualArray(prob1, res[1])
self.assertEqualArray(prob1, got['probabilities'])
self.assertEqualArray(prob2, got['probabilities'])
self.assertEqualArray(exp1, res[0])
self.assertEqualArray(exp1, got['label'])
self.assertEqualArray(exp2, got['label'])
@ignore_warnings((DeprecationWarning, RuntimeWarning))
def test_function_classifier_onnx_float64(self):
X =
|
numpy.random.randn(20, 2)
|
numpy.random.randn
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 27 17:53:29 2020
@author: <NAME>
See LICENSE for details.
Sample code to simulate a simple pendulum in Python
--------------
Explanation
--------------
1) Import libraries needed for the simulation.
2) Define custom functions.
3) Execute main script.
"""
from matplotlib import pyplot as plt
from matplotlib import animation as animation
import numpy as np
from scipy import integrate
from celluloid import Camera
def system_dynamics(t, x, params):
"""
Parameters
----------
x : State vector
t : Current time step
params : Simulation parameters
Returns
-------
dx : State vector dynamics for time step integration
"""
# Extract state variables and parameters
# Python starts counting with 0 like any sane programming language!
x1, x2 = x
# Params is a dictionary with key and value pairs
m = params['mass']
g = params['gravity']
l = params['length']
k = params['friction']
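# Model being integrated (x1 = angle, x2 = angular velocity):
#   x1' = x2,  x2' = -(g/l)*sin(x1) plus, presumably, a damping term built
#   from k (the prompt is truncated right before the completion below).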
# Solve system dynamics for the current time step
dot_x1 = x2
dot_x2 = - (g/l) *
|
np.sin(x1)
|
numpy.sin
|
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class BaseModel:
def get_weight(self):
weight = []
for param in self.parameters():
weight.append(param.data.numpy().flatten())
weight = np.concatenate(weight, 0)
return weight
def set_weight(self, solution):
offset = 0
for param in self.parameters():
param_shape = param.data.numpy().shape
param_size =
|
np.prod(param_shape)
|
numpy.prod
|
import numpy as np
def scale_and_transform_points(points):
x = points[0]
y = points[1]
center = points.mean(axis=1)
cx = x - center[0]
cy = y - center[1]
distance = np.sqrt(np.power(cx, 2) + np.power(cy, 2))
scale = np.sqrt(2) / distance.mean()
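# Hartley-style normalization: translate points to their centroid and
# scale so the mean distance from it becomes sqrt(2).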
norm3d = np.array([
[scale, 0, -scale*center[0]], #x
[0, scale, -scale*center[1]], #y
[0, 0, 1]]) #z
return np.dot(norm3d, points), norm3d
def correspondence_matrix(p1, p2):
p1x, p1y = p1[:2]
p2x, p2y = p2[:2]
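# Each row stacks coordinate products of one correspondence; the 9 entries
# of F solve A f = 0, as in the eight-point algorithm.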
return np.array([
p1x * p2x, p1x * p2y, p1x,
p1y * p2x, p1y * p2y, p1y,
p2x, p2y, np.ones(len(p1x))
]).T
return np.array([
p2x * p1x, p2x * p1y, p2x,
p2y * p1x, p2y * p1y, p2y,
p1x, p1y, np.ones(len(p1x))
]).T
def compute_img_to_img_matrix(x1, x2, compute_essential=False):
A = correspondence_matrix(x1, x2)
U, S, V = np.linalg.svd(A)
F = V[-1].reshape(3, 3)
U, S, V = np.linalg.svd(F)
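# F estimated from noisy correspondences is generally full rank; zero the
# smallest singular value to enforce the rank-2 constraint.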
S[-1] = 0
if compute_essential:
S = [1, 1, 0] # Force rank 2 and equal eigenvalues
F = np.dot(U, np.dot(np.diag(S), V))
return F
def compute_essential_normalized_matrix(p1, p2, compute_essential=False):
if p1.shape != p2.shape:
raise ValueError("Numbers of p1 and p2 don´t match !")
# preprocess img coords
p1n, T1 = scale_and_transform_points(p1)
p2n, T2 = scale_and_transform_points(p2)
# compute F
F = compute_img_to_img_matrix(p1n, p2n, compute_essential)
F = np.dot(T1.T, np.dot(F, T2))
F = F / F[2, 2]
return F
def compute_essential_normalized(p1, p2):
return compute_essential_normalized_matrix(p1, p2, compute_essential=True)
def compute_P_from_essential(E):
U, S, V =
|
np.linalg.svd(E)
|
numpy.linalg.svd
|
'''
Created on 17.03.2014
@author: afedynitch
'''
import numpy as np
from impy.common import MCRun, MCEvent, impy_config
from impy.util import standard_particles, info
class SibyllEvent(MCEvent):
"""Wrapper class around SIBYLL 2.1 & 2.3 particle stack."""
# Workaround for no data on vertex positions in SIBYLL
_no_vertex_data = None
def __init__(self, lib, event_kinematics, event_frame):
# HEPEVT (style) common block
evt = lib.hepevt
# Save selector for implementation of on-demand properties
px, py, pz, en, m = evt.phep
vx, vy, vz, vt = evt.vhep
MCEvent.__init__(self,
lib=lib,
event_kinematics=event_kinematics,
event_frame=event_frame,
nevent=evt.nevhep,
npart=evt.nhep,
p_ids=evt.idhep,
status=evt.isthep,
px=px,
py=py,
pz=pz,
en=en,
m=m,
vx=vx,
vy=vy,
vz=vz,
vt=vt,
pem_arr=evt.phep,
vt_arr=evt.vhep)
def filter_final_state(self):
self.selection = np.where(self.status == 1)
self._apply_slicing()
def filter_final_state_charged(self):
self.selection = np.where((self.status == 1) & (self.charge != 0))
self._apply_slicing()
@property
def charge(self):
return self.lib.schg.ichg[self.selection]
@property
def parents(self):
"""In SIBYLL parents are difficult to obtain. This function returns 0."""
MCEvent.parents(self)
return self.lib.hepevt.jmohep
@property
def children(self):
"""In SIBYLL daughters are difficult to obtain. This function returns 0."""
MCEvent.children(self)
return self.lib.hepevt.jdahep
# Nuclear collision parameters
@property
def impact_parameter(self):
"""Impact parameter for nuclear collisions."""
return self.lib.cnucms.b
@property
def n_wounded_A(self):
"""Number of wounded nucleons side A"""
return self.lib.cnucms.na
@property
def n_wounded_B(self):
"""Number of wounded nucleons side B"""
return self.lib.cnucms.nb
@property
def n_NN_interactions(self):
"""Number of inelastic nucleon-nucleon interactions"""
return self.lib.cnucms.ni
class SIBYLLRun(MCRun):
"""Implements all abstract attributes of MCRun for the
SIBYLL 2.1, 2.3 and 2.3c event generators."""
def sigma_inel(self, *args, **kwargs):
"""Inelastic cross section according to current
event setup (energy, projectile, target)"""
k = self._curr_event_kin
sigproj = None
if abs(k.p1pdg) in [2212, 2112, 3112]:
sigproj = 1
elif abs(k.p1pdg) == 211:
sigproj = 2
elif abs(k.p1pdg) == 321:
sigproj = 3
else:
info(0, "No cross section available for projectile", k.p1pdg)
raise Exception('Input error')
if k.p1_is_nucleus:
raise Exception('Nuclear projectiles not supported by SIBYLL.')
if k.p2_is_nucleus:
# Return production cross section for nuclear target
try:
return self.lib.sib_sigma_hnuc(sigproj, k.A2, self._ecm)[0]
except AttributeError:
return 'Nuclear cross section not supported for this SIBYLL version'
return self.lib.sib_sigma_hp(sigproj, self._ecm)[2]
def sigma_inel_air(self):
"""Inelastic cross section according to current
event setup (energy, projectile, target)"""
k = self._curr_event_kin
sigproj = None
if abs(k.p1pdg) in [2212, 2112, 3112]:
sigproj = 1
elif abs(k.p1pdg) == 211:
sigproj = 2
elif abs(k.p1pdg) == 321:
sigproj = 3
else:
info(0, "No cross section available for projectile", k.p1pdg)
raise Exception('Input error')
sigma = self.lib.sib_sigma_hair(sigproj, self._ecm)
if not isinstance(sigma, tuple):
return sigma
else:
return sigma[0]
def set_event_kinematics(self, event_kinematics):
"""Set new combination of energy, momentum, projectile
and target combination for next event."""
info(5, 'Setting event kinematics.')
info(10, event_kinematics)
k = event_kinematics
if k.p1_is_nucleus:
raise Exception('Projectile nuclei not natively supported in SIBYLL')
elif k.p2_is_nucleus and k.A2 > 20:
print(k.p2_is_nucleus, k.A2)
raise Exception('Target nuclei with A>20 not supported in SIBYLL')
self._sibproj = self.lib.isib_pdg2pid(k.p1pdg)
self._iatarg = k.A2
self._ecm = k.ecm
self._curr_event_kin = event_kinematics
def attach_log(self, fname=None):
"""Routes the output to a file or the stdout."""
fname = impy_config['output_log'] if fname is None else fname
if fname == 'stdout':
self.lib.s_debug.lun = 6
info(5, 'Output is routed to stdout.')
else:
lun = self._attach_fortran_logfile(fname)
self.lib.s_debug.lun = lun
info(5, 'Output is routed to', fname, 'via LUN', lun)
def init_generator(self, event_kinematics, seed='random', logfname=None):
from random import randint
self._abort_if_already_initialized()
if seed == 'random':
seed = randint(1000000, 10000000)
else:
seed = int(seed)
info(5, 'Using seed:', seed)
self.lib.s_debug.ndebug = impy_config['sibyll']['debug_level']
self.set_event_kinematics(event_kinematics)
self.attach_log(fname=logfname)
self.lib.sibini(int(seed))
self.lib.pdg_ini()
self.conv_hepevt = (self.lib.sibhep1
if '21' in self.lib.__name__ else self.lib.sibhep3)
self._define_default_fs_particles()
def set_stable(self, pdgid, stable=True):
sid = abs(self.lib.isib_pdg2pid(pdgid))
if abs(pdgid) == 311:
info(1, 'Ignores K0. Use K0L/S 130/310 in final state definition.')
return
idb = self.lib.s_csydec.idb
if sid == 0 or sid > idb.size - 1:
return
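# SIBYLL convention: a negative decay-table entry (idb) marks the particle
# as stable; a positive entry allows it to decay.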
if stable:
info(
5, 'defining as stable particle pdgid/sid = {0}/{1}'.format(
pdgid, sid))
idb[sid - 1] = -np.abs(idb[sid - 1])
else:
info(5, 'pdgid/sid = {0}/{1} allowed to decay'.format(pdgid, sid))
idb[sid - 1] =
|
np.abs(idb[sid - 1])
|
numpy.abs
|
#!/usr/bin/env python
import os
import sys
import csv
import numpy as np
from math import factorial
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import QMainWindow, QApplication, QGraphicsView, QGraphicsScene, QWidget, QHBoxLayout, QVBoxLayout, QToolBar, QPushButton, QCheckBox, QStatusBar, QLabel, QLineEdit, QPlainTextEdit, QTextEdit, QGridLayout, QFileDialog, QGraphicsLineItem, QGraphicsEllipseItem, QGraphicsPolygonItem, QGraphicsItem, QMessageBox, QInputDialog, QDockWidget, QSizePolicy, QDesktopWidget, QShortcut
from PyQt5.QtWebEngineWidgets import QWebEngineView
def comb(n, k):
return factorial(n) / factorial(k) / factorial(n - k)
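# Note: equivalent to math.comb(n, k) on Python 3.8+, except this version
# returns a float, e.g. comb(5, 2) == 10.0.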
#To-do list (descending priority)
# -combine UI into one window (done)
# -scale bar
# -show values as measurements being made
# -mouseover xy position
# -preferences change in options
# -tune bezier curve tension parameter (rational bezier) with scroll wheel
# -photo saved not working correctly?
# -angle lines different color?
# -arrows w/ heads for angle measurement
# -arc between angle lines
# -object outline: fusiform
# def main(args=None):
class Manual(QWidget):
def __init__(self, parent=None):
super(Manual, self).__init__()
self.manual = QWebEngineView()
webpage = QtCore.QUrl('https://wingtorres.github.io/morphometrix/')
self.manual.setUrl(webpage)
self.grid = QGridLayout()
self.grid.addWidget(self.manual,1,0)
self.setLayout(self.grid)
class Window(QWidget):
def __init__(self, parent=None):
#init method runs every time; use for core app stuff
super(Window, self).__init__()
#self.setWindowTitle("MorphoMetriX")
#self.setGeometry(50, 50, 100, 200) #x,y,width,height
#self.setStyleSheet("background-color: rgb(0,0,0)") #change color
#self.setStyleSheet("font-color: rgb(0,0,0)") #change color
self.label_id = QLabel("Image ID")
self.id = QLineEdit()
self.id.setText('0000')
#Define custom attributes for pixel -> SI conversion
self.label_foc = QLabel("Focal Length (mm):")
self.focal = QLineEdit()
self.focal.setText('50')
self.label_alt = QLabel("Altitude (m):")
self.altitude = QLineEdit()
self.altitude.setText('50')
self.label_pd = QLabel("Pixel Dimension (mm/pixel)")
self.pixeldim = QLineEdit()
self.pixeldim.setText('0.00391667')
self.label_widths = QLabel("# Width Segments:")
self.numwidths = QLineEdit()
self.numwidths.setText('10')
self.label_not = QLabel("Notes:")
self.notes = QPlainTextEdit()
# self.manual = QWebEngineView()
#fpath = os.path.abspath('/Users/WalterTorres/Dropbox/KC_WT/MorphoMetrix/morphometrix/README.html')
#webpage = QtCore.QUrl.fromLocalFile(fpath)
# webpage = QtCore.QUrl('https://wingtorres.github.io/morphometrix/')
# self.manual.setUrl(webpage)
self.exit = QPushButton("Exit", self)
self.exit.clicked.connect(self.close_application)
self.grid = QGridLayout()
self.grid.addWidget(self.label_id, 1, 0)
self.grid.addWidget(self.id, 1, 1)
self.grid.addWidget(self.label_foc, 2, 0)
self.grid.addWidget(self.focal, 2, 1)
self.grid.addWidget(self.label_alt, 3, 0)
self.grid.addWidget(self.altitude, 3, 1)
self.grid.addWidget(self.label_pd, 4, 0)
self.grid.addWidget(self.pixeldim, 4, 1)
self.grid.addWidget(self.label_widths, 5, 0)
self.grid.addWidget(self.numwidths, 5, 1)
self.grid.addWidget(self.label_not, 6, 0)
self.grid.addWidget(self.notes, 6, 1)
# self.grid.addWidget(self.manual, 8,0,1,4)
self.grid.addWidget(self.exit, 7, 3)
self.setLayout(self.grid)
def close_application(self):
choice = QMessageBox.question(self, 'exit', "Exit program?",
QMessageBox.Yes | QMessageBox.No)
if choice == QMessageBox.Yes:
self.parent().deleteLater()
self.parent().close()
else:
pass
#references:
#https://stackoverflow.com/questions/26901540/arc-in-qgraphicsscene/26903599#26903599
#https://stackoverflow.com/questions/27109629/how-can-i-resize-the-main-window-depending-on-screen-resolution-using-pyqt
class MainWindow(QMainWindow):
def __init__(self, parent = None):
#super(Window, self).__init__()
super(MainWindow, self).__init__()
D = QDesktopWidget()
# center = D.availableGeometry().center()
self.move(0,0)#center.x() + .25*D.width() , center.y() - .5*D.height() )
self.resize(.95*D.width(),.6*D.height())
self.setWindowState(self.windowState() & ~QtCore.Qt.WindowMinimized
| QtCore.Qt.WindowActive)
self.activateWindow()
self.subWin = Window()
self.iw = imwin()
self.Manual = Manual()
self.setCentralWidget(self.iw)
#Stacked dock widgets
docked1 = QDockWidget("", self)
docked2 = QDockWidget("", self)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, docked1)
self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, docked2)
docked1.setWidget(self.subWin)
docked2.setWidget(self.Manual)
docked1.setFeatures(QDockWidget.DockWidgetFloatable)
self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.RightDockWidgetArea)
self.resizeDocks( (docked1,docked2), (400,400), QtCore.Qt.Horizontal )
self.exportButton = QPushButton("Export Measurements", self)
self.exportButton.clicked.connect(self.export_measurements)
self.exportButton.setEnabled(False)
self.importImage = QPushButton("New Image", self)
self.importImage.clicked.connect(self.file_open)
self.lengthButton = QPushButton("Measure Length", self)
self.lengthButton.clicked.connect(self.measure_length)
self.lengthButton.setEnabled(False)
self.lengthButton.setCheckable(True)
self.lengthNames = []
self.widthsButton = QPushButton("Measure Widths", self)
self.widthsButton.clicked.connect(self.iw.measure_widths)
self.widthsButton.setEnabled(False)
self.widthsButton.setCheckable(True)
self.areaButton = QPushButton("Measure Area", self)
self.areaButton.clicked.connect(self.measure_area)
self.areaButton.setEnabled(False)
self.areaButton.setCheckable(True)
self.areaNames = []
self.angleButton = QPushButton("Measure Angle", self)
self.angleButton.clicked.connect(self.measure_angle)
self.angleButton.setEnabled(False)
self.angleButton.setCheckable(True)
self.angleNames = []
shortcut_polyClose = QShortcut(QtGui.QKeySequence(QtCore.Qt.Key_Tab), self)
shortcut_polyClose.activated.connect(self.iw.polyClose)
self.undoButton = QPushButton("Undo", self)
self.undoButton.clicked.connect(self.undo)
self.undoButton.setEnabled(False)
shortcut_undo = QShortcut(QtGui.QKeySequence('Ctrl+Z'), self)
shortcut_undo.activated.connect(self.undo)
self.bezier = QCheckBox("Bezier fit", self)
self.bezier.setEnabled(False)
self.bezier.setChecked(True)
self.statusbar = self.statusBar()
self.statusbar.showMessage('Select new image to begin')
self.tb = QToolBar('Toolbar')
#self.addToolBar(QtCore.Qt.RightToolBarArea,self.tb)
spacer = QWidget(self)
spacer.setSizePolicy(QSizePolicy.Expanding,QSizePolicy.Expanding)
self.tb.addWidget(spacer)
self.addToolBar(self.tb)
self.tb.addWidget(self.importImage)
self.tb.addWidget(self.exportButton)
self.tb.addWidget(self.lengthButton)
self.tb.addWidget(self.widthsButton)
self.tb.addWidget(self.areaButton)
self.tb.addWidget(self.angleButton)
self.tb.addWidget(self.undoButton)
self.tb.addWidget(self.bezier)
#self.tb.setOrientation(QtCore.Qt.Vertical)
def file_open(self):
self.iw.scene.clear()
self.image_name = QFileDialog.getOpenFileName(self, 'Open File')
self.iw.pixmap = QtGui.QPixmap(self.image_name[0])
self.iw.pixmap_fit = self.iw.pixmap.scaled(
self.iw.pixmap.width(),
self.iw.pixmap.height(),
QtCore.Qt.KeepAspectRatio,
transformMode=QtCore.Qt.SmoothTransformation)
self.iw.scene.addPixmap(self.iw.pixmap_fit) #add image
self.iw.setScene(self.iw.scene)
#Adjust window size automatically?
self.iw.fitInView(self.iw.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
self.iw.scene.update()
self.statusbar.showMessage('Select a measurement to make from the toolbar')
self.lengthButton.setEnabled(True)
self.areaButton.setEnabled(True)
self.angleButton.setEnabled(True)
self.exportButton.setEnabled(True)
self.undoButton.setEnabled(True)
self.bezier.setEnabled(True)
self.bezier.setChecked(True)
self.widthsButton.setEnabled(False)
self.angleNames = []
self.areaNames = []
self.lengthNames = []
#self.iw.measurements = [[]]
self.iw.widths = []
self.iw.lengths = [[]]
self.iw.L = posData(
np.empty(shape=(0, 0)), np.empty(shape=(0, 0))) #lengths
self.iw.A = posData(
np.empty(shape=(0, 0)), np.empty(shape=(0, 0))) #area
self.iw.W = posData(
np.empty(shape=(0, 0)), np.empty(shape=(0, 0))) #widths
self.iw.T = angleData(np.empty(shape=(0, 0))) #angles
self.iw.angleValues = np.empty((0,0))
self.iw.areaValues = np.empty((0,0))
self.iw._lastpos = None
self.iw._thispos = None
self.iw.measuring_length = False
self.iw.measuring_area = False
self.iw.measuring_widths = False
self.iw.measuring_angle = False
self.iw._zoom = 0
self.iw.factor = 1.0
self.iw.d = {} #dictionary for line items
self.iw.k = 0 #initialize counter so lines turn yellow
self.iw.m = None
self.iw.scene.realline = None
self.iw.scene.testline = None
self.iw.scene.ellipseItem = None
self.iw.scene.area_ellipseItem = None
self.iw.scene.polyItem = None
self.iw.image_name = None
def measure_length(self):
self.lel = QLineEdit(self)
self.lel.move(130, 22)
text, ok = QInputDialog.getText(self, 'Input Dialog', 'Length name')
if ok:
self.lel.setText(str(text))
self.lengthNames.append(self.lel.text())
QApplication.setOverrideCursor(QtCore.Qt.CrossCursor) #change cursor
self.widthsButton.setChecked(False)
self.widthsButton.setEnabled(False)
self.iw.line_count = 0
self.iw.measuring_length = True
self.iw.L = posData(
np.empty(shape=(0, 0)),
np.empty(shape=(0, 0))) #preallocate
self.iw._lastpos = None
self.iw._thispos = None
self.statusbar.showMessage('Click initial point for length measurement')
else:
self.lengthButton.setChecked(False)
def measure_angle(self):
self.lea = QLineEdit(self)
self.lea.move(130, 22)
text, ok = QInputDialog.getText(self, 'Input Dialog', 'Angle name')
if ok:
self.lea.setText(str(text))
self.angleNames.append(self.lea.text())
QApplication.setOverrideCursor(QtCore.Qt.CrossCursor) #change cursor
self.bezier.setEnabled(False)
self.iw.measuring_angle = True
self.iw._lastpos = None
self.iw._thispos = None
self.statusbar.showMessage('Click initial point for angle measurement')
else:
self.angleButton.setChecked(False)
def measure_area(self):
self.lea = QLineEdit(self)
self.lea.move(130, 22)
text, ok = QInputDialog.getText(self, 'Input Dialog', 'Area name')
if ok:
self.lea.setText(str(text))
self.areaNames.append(self.lea.text())
QApplication.setOverrideCursor(QtCore.Qt.CrossCursor) #change cursor
self.bezier.setEnabled(False)
self.iw.line_count = 0
self.iw.measuring_area = True
self.iw._lastpos = None
self.iw._thispos = None
self.iw.A = posData(
np.empty(shape=(0, 0)),
np.empty(shape=(0, 0))) #preallocate
self.statusbar.showMessage('Click initial point for area measurement')
else:
self.areaButton.setChecked(False)
def undo(self):
if self.iw.measuring_length:
self.iw._thispos = self.iw._lastpos
self.iw.L.downdate() #remove data
self.iw.line_count += -1
self.iw.scene.removeItem(self.iw.scene.realline) #remove graphic
self.iw.scene.realline = False
if self.iw.measuring_area:
self.iw._thispos = self.iw._lastpos
self.iw.A.downdate() #remove data
self.iw.line_count += -1
self.iw.scene.removeItem(self.iw.scene.realline) #remove graphic
self.iw.scene.realline = False
if self.iw.measuring_widths:
self.iw.W.downdate() #remove data
self.iw.scene.removeItem(self.iw.scene.ellipseItem) #remove graphic
self.iw.scene.ellipseItem = False
self.iw.d[str(self.iw.k)].setPen(
QtGui.QPen(QtGui.QColor('black'))) #un-highlight next spine
self.iw.k += -1 #reduce count
if self.iw.measuring_angle:
self.iw.T.downdate() #remove data
self.iw._thispos = self.iw._lastpos
self.iw.scene.removeItem(self.iw.scene.realline) #remove graphic
self.iw.scene.realline = False
def export_measurements(self):
fac = max(self.iw.pixmap.width(), self.iw.pixmap.height()) / max(
self.iw.pixmap_fit.width(),
self.iw.pixmap_fit.height()) #scale pixel -> m by scaled image
name = QFileDialog.getSaveFileName(
self, 'Save File', self.image_name[0].split('.', 1)[0])[0]
self.pixeldim = float(self.subWin.pixeldim.text())
self.altitude = float(self.subWin.altitude.text())
self.focal = float(self.subWin.focal.text())
#okay in mm https://www.imaging-resource.com/PRODS/sony-a5100/sony-a5100DAT.HTM
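#Pixel -> metre conversion: ground sample distance = pixeldim (mm/px) * altitude (m) / focal (mm);
#'fac' rescales measurements made on the fitted pixmap back to full image resolution.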
if name:
#Convert pixels to meters
#measurements = [ f * fac * self.pixeldim * self.altitude / self.focal for f in self.iw.measurements]
#lengths = [ f * fac * self.pixeldim * self.altitude / self.focal for f in self.iw.lengths]
#print(self.iw.widths)
areas = self.iw.areaValues * (
fac * self.pixeldim * self.altitude / self.focal)**2
values_optical = np.array([
self.subWin.id.text(), self.image_name[0], self.focal,
self.altitude, self.pixeldim
])
names_optical = [
'Image ID', 'Image Path', 'Focal Length', 'Altitude',
'Pixel Dimension'
]
names_widths = ['Object'] + ['Length (m)'] + ['Widths (%)'] # + self.iw.widthNames[0]
#names_widths.append([self.iw.widthNames[0]])
#Write .csv file
print(name)
with open(name + '.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
for (f, g) in zip(names_optical, values_optical):
writer.writerow([f, g])
writer.writerow(['Notes', self.subWin.notes.toPlainText()])
writer.writerow([''])
writer.writerow(names_widths)
for k,m in enumerate(self.lengthNames):
#format and convert pixel length measurement
l = "{0:.2f}".format( self.iw.lengths[k] * fac * self.pixeldim * self.altitude / self.focal )
if any(self.iw.widths[k]): #check if width measurement exists for length
n = self.iw.widthNames[k]
writer.writerow( [''] + [''] + n )
#format and convert pixel width measurement
vals = [ "{0:.2f}".format(g * fac * self.pixeldim * self.altitude / self.focal) for g in self.iw.widths[k]]
line = [m] + [l] + list(vals)
else:
#vals = l #f.copy()
line = [m] + [l]
writer.writerow(line)
writer.writerow([''])
writer.writerow(['Object'] + ['Angle'])
for k, f in enumerate(self.angleNames): #write angles
line = [[f] + ["{0:.3f}".format(self.iw.angleValues[k])]] #need to convert NaNs to empty
writer.writerows(line)
writer.writerow([''])
writer.writerow(['Object'] + ['Area (m\u00B2)'])
for k, f in enumerate(self.areaNames): #write areas
line = [[f] + ["{0:.3f}".format(areas[k])]] #need to convert NaNs to empty
writer.writerows(line)
#Export image
self.iw.fitInView(self.iw.scene.sceneRect(), QtCore.Qt.KeepAspectRatio)
pix = QtGui.QPixmap(self.iw.viewport().size())
self.iw.viewport().render(pix)
pix.save(name + '-measurements.png')
class imwin(QGraphicsView): #Subclass QLabel for interaction w/ QPixmap
def __init__(self, parent=None):
super(imwin, self).__init__(parent)
self.scene = QGraphicsScene()
self.view = QGraphicsView(self.scene)
self.pixmap = None
self._lastpos = None
self._thispos = None
self.delta = QtCore.QPointF(0, 0)
self.nm = None
self.measuring_length = False
self.measuring_widths = False
self.measuring_area = False
self.measuring_angle = False
self._zoom = 1
self.newPos = None
self.oldPos = None
self.factor = 1.0
self.numwidths = None
self.widthNames = [] #initialize as empty list
self.d = {} #dictionary for line items
#self.k = 0 #initialize counter so lines turn yellow
self.L = posData(np.empty(shape=(0, 0)), np.empty(shape=(0, 0)))
self.W = posData(
|
np.empty(shape=(0, 0))
|
numpy.empty
|
# -*- coding: utf-8 -*-
r"""
Created on 02 Apr 2021 22:07:05
@author: jiahuei
"""
import os
import time
import math
import random
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from tqdm import tqdm
sns.set_theme(style="darkgrid", rc={"legend.loc": "lower left", "legend.framealpha": 0.7})
def set_seed(seed: int):
assert isinstance(seed, int)
# set Random seed
random.seed(seed)
np.random.seed(seed)
print(f"RNG seed set to {seed}.")
def get_pe(height=6, width=6):
pe = np.zeros((height, width), dtype=np.float32)
position = np.expand_dims(np.arange(0, height, dtype=np.float32), 1)
div_term = np.exp(np.arange(0, width, 2, dtype=np.float32) * -(math.log(10) / width))
pe[:, 0::2] = np.sin(position * div_term)
pe[:, 1::2] = np.cos(position * div_term)
return pe
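# Sinusoidal positional encoding: even columns get sin, odd columns cos, at
# geometrically spaced frequencies. (The canonical Transformer uses
# log(10000) in div_term; this demo uses log(10).)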
def get_gauss(height=6, width=6):
x, y = np.meshgrid(np.linspace(0, 1.75, width), np.linspace(0, 1.75, height))
dst =
|
np.sqrt(x * x + y * y)
|
numpy.sqrt
|
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import itertools
import numpy as np
import pytest
from brainstorm.handlers import NumpyHandler
from brainstorm.optional import has_pycuda
non_default_handlers = []
handler_ids = []
if has_pycuda:
from brainstorm.handlers import PyCudaHandler
non_default_handlers.append(PyCudaHandler())
handler_ids.append("PyCudaHandler")
# np.random.seed(1234)
ref_dtype = np.float32
ref = NumpyHandler(ref_dtype)
some_2d_shapes = ((1, 1), (4, 1), (1, 4), (5, 5), (3, 4), (4, 3))
some_nd_shapes = ((1, 1, 4), (1, 1, 3, 3), (3, 4, 2, 1))
np.set_printoptions(linewidth=150)
def operation_check(handler, op_name, ref_args, ignored_args=(), atol=1e-8):
args = get_args_from_ref_args(handler, ref_args)
getattr(ref, op_name)(*ref_args)
getattr(handler, op_name)(*args)
check_list = []
for i, (ref_arg, arg) in enumerate(zip(ref_args, args)):
if i in ignored_args:
# print(i, "was ignored")
continue
if type(ref_arg) is ref.array_type:
arg_ref = handler.get_numpy_copy(arg)
check = np.allclose(ref_arg, arg_ref, atol=atol)
check_list.append(check)
if not check:
print("-" * 40)
print("\nCheck failed for argument number %d:" % i)
print("Reference (expected) array {}:\n{}".format(
ref_arg.shape, ref_arg))
print("\nObtained array {}:\n{}".format(arg_ref.shape,
arg_ref))
d = ref_arg.ravel() - arg_ref.ravel()
print("Frobenius Norm of differences: ", np.sum(d*d))
else:
check = (ref_arg == arg)
check_list.append(check)
if not check:
print("-" * 40)
print("Check failed for argument number %d:" % i)
print("\nReference (expected) value:\n", ref_arg)
print("\nObtained value:\n", arg)
# (no array difference to report for non-array arguments)
# print("Check was ", check)
if False in check_list:
return False
else:
return True
def get_args_from_ref_args(handler, ref_args):
args = []
for ref_arg in ref_args:
if type(ref_arg) is ref.array_type:
temp = handler.create_from_numpy(ref_arg)
args.append(temp)
else:
args.append(ref_arg)
return args
def get_random_arrays(shapes=some_2d_shapes, dtype=ref_dtype):
arrays = []
for shape in shapes:
arrays.append(np.random.randn(*shape).astype(dtype))
return arrays
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sum_t(handler):
list_a = get_random_arrays()
list_axis = [0, 1, None]
for a, axis in itertools.product(list_a, list_axis):
if axis == 0:
out = np.zeros((1, a.shape[1]), dtype=ref_dtype)
elif axis == 1:
out = np.zeros((a.shape[0]), dtype=ref_dtype)
else:
out = np.array([0.], dtype=ref_dtype).reshape(tuple())
ref_args = (a, axis, out)
assert operation_check(handler, 'sum_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros((a.shape[0], a.shape[0]), dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_dot_add_mm(handler):
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b.T.copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.random.randn(a.shape[0], a.shape[0]).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'dot_add_mm', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*a.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_mult_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.random.randn(*b.shape).astype(ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'mult_add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_st(handler):
list_a = [0, 0.5, -1]
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(b, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_st', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_tt(handler):
list_a = get_random_arrays(some_2d_shapes + some_nd_shapes)
list_b = get_random_arrays(some_2d_shapes + some_nd_shapes)
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_tt', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_subtract_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'subtract_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_add_mv(handler):
# Only checking with row vectors
list_a = get_random_arrays()
list_b = get_random_arrays()
list_b = [b[0, :].reshape((1, -1)).copy() for b in list_b]
for a, b in zip(list_a, list_b):
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, b, out)
assert operation_check(handler, 'add_mv', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_broadcast_t(handler):
args_to_check = [
([1], 0, [3]),
([1], 0, [1]),
([1, 2], 0, [3, 2]),
([3, 1], 1, [3, 2]),
([1, 2, 5], 0, [3, 2, 5]),
([3, 1, 5], 1, [3, 2, 5]),
([3, 2, 1], 2, [3, 2, 5])
]
a_shapes, axes, out_shapes = list(zip(*args_to_check))
list_a = get_random_arrays(a_shapes)
list_out = get_random_arrays(out_shapes)
for ref_args in zip(list_a, axes, list_out):
assert operation_check(handler, 'broadcast_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_clip_t(handler):
list_a = get_random_arrays(some_nd_shapes)
list_clip_min = [-0.4, 0, 0.2]
list_clip_max = [-0.1, 0, 0.3]
for a, clip_min, clip_max in itertools.product(list_a, list_clip_min,
list_clip_max):
if clip_max >= clip_min:
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, clip_min, clip_max, out)
assert operation_check(handler, 'clip_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_log_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
a += 10 # to remove negatives
out = np.zeros_like(a, dtype=ref_dtype)
ref_args = (a, out)
assert operation_check(handler, 'log_t', ref_args)
@pytest.mark.parametrize("handler", non_default_handlers, ids=handler_ids)
def test_sqrt_t(handler):
list_a = get_random_arrays(some_nd_shapes)
for a in list_a:
a += 10 # to remove negatives
out =
|
np.zeros_like(a, dtype=ref_dtype)
|
numpy.zeros_like
|
import numpy as np
from scipy import signal
class FilterConfiguration():
def __init__(self, order: int, cutoff: float, appendage: int):
self.order = order
self.cutoff = cutoff
self.appendage = appendage
self.sample_frequency = 0
def add_appendage(data: np.ndarray, config: FilterConfiguration):
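# Pad the signal by repeating its first/last samples 'appendage' times,
# a common trick to reduce filter transients at the edges.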
if data.ndim == 1:
front = np.repeat(np.take(data, 0), config.appendage)
end = np.repeat(np.take(data, -1), config.appendage)
data = np.concatenate((front, data, end))
else:
front = np.repeat(np.take(data, 0, axis=1)[:, None], \
config.appendage, axis=1)
end = np.repeat(
|
np.take(data, -1, axis=1)
|
numpy.take
|
import numpy as np
import tensorflow as tf
NUM_DIGITS = 16
NUM_LAYERS = 1
NUM_HIDDEN = 100
NUM_OUTPUT = 4
BATCH_SIZE = 128
NUM_EPOCH = 1000
# Each input number is a list of its binary digits
def binary_encode(i, num_digits):
return np.array([i >> d & 1 for d in range(num_digits)])
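# Least-significant bit first, e.g. binary_encode(6, 4) -> array([0, 1, 1, 0]).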
# One-hot encoding for [number, "fizz", "buzz", "fizzbuzz"]
def fizz_buzz_encode(i):
if i % 15 == 0:
return np.array([0, 0, 0, 1])
elif i % 5 == 0:
return np.array([0, 0, 1, 0])
elif i % 3 == 0:
return np.array([0, 1, 0, 0])
else:
return np.array([1, 0, 0, 0])
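# e.g. fizz_buzz_encode(10) -> array([0, 0, 1, 0]), i.e. "buzz".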
def fizz_buzz(i, prediction):
return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]
train_range = range(101, 2 ** NUM_DIGITS)
train_x = np.array([binary_encode(i, NUM_DIGITS) for i in train_range])
train_y = np.array([fizz_buzz_encode(i) for i in train_range])
numbers =
|
np.arange(1, 101)
|
numpy.arange
|
#!/usr/bin/env python
"""
coding=utf-8
Build model for a dataset by identifying type of column along with its
respective parameters.
"""
from __future__ import print_function
from __future__ import division
from future.utils import with_metaclass
import copy
import time
import abc
import warnings
import numpy as np
from . import histogram_utils
from .base_column_profilers import BaseColumnProfiler
from .profiler_options import NumericalOptions
class abstractstaticmethod(staticmethod):
__slots__ = ()
def __init__(self, function):
super(abstractstaticmethod, self).__init__(function)
function.__isabstractmethod__ = True
__isabstractmethod__ = True
class NumericStatsMixin(with_metaclass(abc.ABCMeta, object)):
"""
Abstract numerical column profile subclass of BaseColumnProfiler. Represents
a numerical column in the dataset. It has subclasses of its own.
"""
col_type = None
def __init__(self, options=None):
"""
Initialization of column base properties and itself.
:param options: Options for the numerical stats.
:type options: NumericalOptions
"""
self.options = None
if options and isinstance(options, NumericalOptions):
self.options = options
self.min = None
self.max = None
self.sum = 0
self.variance = 0
self.max_histogram_bin = 10000
self.histogram_bin_method_names = ['auto', 'fd', 'doane', 'scott',
'rice', 'sturges', 'sqrt']
self.histogram_methods = {}
for method in self.histogram_bin_method_names:
self.histogram_methods[method] = {
'total_loss': 0,
'current_loss': 0,
'histogram': {
'bin_counts': None,
'bin_edges': None
}
}
self.histogram_selection = None
self.quantiles = {
bin_num: None for bin_num in range(1000)
}
self.__calculations = {
"min": NumericStatsMixin._get_min,
"max": NumericStatsMixin._get_max,
"sum": NumericStatsMixin._get_sum,
"variance": NumericStatsMixin._get_variance,
"histogram_and_quantiles":
NumericStatsMixin._get_histogram_and_quantiles
}
self._filter_properties_w_options(self.__calculations, options)
def __getattribute__(self, name):
return super(NumericStatsMixin, self).__getattribute__(name)
def __getitem__(self, item):
return super(NumericStatsMixin, self).__getitem__(item)
@BaseColumnProfiler._timeit(name="histogram_and_quantiles")
def _add_helper_merge_profile_histograms(self, other1, other2):
"""
Adds histogram of two profiles together
:param other1: profile1 being added to self
:type other1: BaseColumnProfiler
:param other2: profile2 being added to self
:type other2: BaseColumnProfiler
:return: None
"""
# get available bin methods and set to current
bin_methods = list(set(other1.histogram_bin_method_names) &
set(other2.histogram_bin_method_names))
if not bin_methods:
raise ValueError('Profiles have no overlapping bin methods and '
'therefore cannot be added together.')
self.histogram_bin_method_names = bin_methods
for i, method in enumerate(self.histogram_bin_method_names):
combined_values = other1._histogram_to_array(
method) + other2._histogram_to_array(method)
bin_counts, bin_edges = self._get_histogram(
combined_values, method)
self.histogram_methods[method]['histogram']['bin_counts'] = \
bin_counts
self.histogram_methods[method]['histogram']['bin_edges'] = bin_edges
# Select histogram: always choose first profile selected method
# Either both profiles have the same selection or you at least use one
# of the profiles selected method
self.histogram_selection = other1.histogram_selection
self._get_quantiles()
def _add_helper(self, other1, other2):
"""
Helper function for merging profiles.
:param other1: profile1 being added to self
:param other2: profile2 being added to self
:return: None
"""
BaseColumnProfiler._merge_calculations(
self._NumericStatsMixin__calculations,
other1._NumericStatsMixin__calculations,
other2._NumericStatsMixin__calculations)
# Merge variance, histogram, min, max, and sum
if "variance" in self.__calculations.keys():
self.variance = self._merge_variance(
other1.match_count, other1.variance, other1.mean,
other2.match_count, other2.variance, other2.mean)
if "histogram_and_quantiles" in self.__calculations.keys():
if other1.histogram_selection is not None and \
other2.histogram_selection is not None:
self._add_helper_merge_profile_histograms(other1, other2)
elif other2.histogram_selection is None:
self.histogram_methods = other1.histogram_methods
self.quantiles = other1.quantiles
else:
self.histogram_methods = other2.histogram_methods
self.quantiles = other2.quantiles
if "min" in self.__calculations.keys():
if other1.min is not None and other2.min is not None:
self.min = min(other1.min, other2.min)
elif other2.min is None:
self.min = other1.min
else:
self.min = other2.min
if "max" in self.__calculations.keys():
if other1.max is not None and other2.max is not None:
self.max = max(other1.max, other2.max)
elif other2.max is None:
self.max = other1.max
else:
self.max = other2.max
if "sum" in self.__calculations.keys():
self.sum = other1.sum + other2.sum
@property
def mean(self):
if self.match_count == 0:
return 0
return float(self.sum) / self.match_count
@property
def stddev(self):
if self.match_count == 0:
return np.nan
return np.sqrt(self.variance)
def _update_variance(self, batch_mean, batch_var, batch_count):
"""
Calculate the combined variance of the current values and new dataset.
:param batch_mean: mean of new chunk
:param batch_var: variance of new chunk
:param batch_count: number of samples in new chunk
:return: combined variance
:rtype: float
"""
return self._merge_variance(self.match_count, self.variance, self.mean,
batch_count, batch_var, batch_mean)
@staticmethod
def _merge_variance(match_count1, variance1, mean1,
match_count2, variance2, mean2):
"""
Calculate the combined variance of the current values and new dataset.
:param match_count1: number of samples in new chunk 1
:param mean1: mean of chunk 1
:param variance1: variance of chunk 1
:param match_count2: number of samples in new chunk 2
:param mean2: mean of chunk 2
:param variance2: variance of chunk 2
:return: combined variance
:rtype: float
"""
if np.isnan(variance1):
variance1 = 0
if np.isnan(variance2):
variance2 = 0
if match_count1 < 1:
return variance2
elif match_count2 < 1:
return variance1
curr_count = match_count1
delta = mean2 - mean1
m_curr = variance1 * (curr_count - 1)
m_batch = variance2 * (match_count2 - 1)
M2 = m_curr + m_batch + delta ** 2 * curr_count * match_count2 / \
(curr_count + match_count2)
new_variance = M2 / (curr_count + match_count2 - 1)
return new_variance
def _estimate_stats_from_histogram(self, method):
# test estimated mean and var
bin_counts = self.histogram_methods[method]['histogram']['bin_counts']
bin_edges = self.histogram_methods[method]['histogram']['bin_edges']
mids = 0.5 * (bin_edges[1:] + bin_edges[:-1])
mean = np.average(mids, weights=bin_counts)
var = np.average((mids - mean) ** 2, weights=bin_counts)
std = np.sqrt(var)
return mean, var, std
def _total_histogram_bin_variance(self, input_array, method):
# calculate total variance over all bins of a histogram
bin_edges = self.histogram_methods[method]['histogram']['bin_edges']
inds = np.digitize(input_array, bin_edges)
sum_var = 0
for i in range(1, len(bin_edges)):
elements_in_bin = input_array[inds == i]
bin_var = elements_in_bin.var() if len(elements_in_bin) > 0 else 0
sum_var += bin_var
return sum_var
@staticmethod
def _histogram_loss(diff_var, avg_diffvar, total_var,
avg_totalvar, run_time, avg_runtime):
norm_diff_var, norm_total_var, norm_runtime = 0, 0, 0
if avg_diffvar > 0:
norm_diff_var = float(diff_var - avg_diffvar) / avg_diffvar
if avg_totalvar > 0:
norm_total_var = float(total_var - avg_totalvar) / avg_totalvar
penalized_time = 1 # currently set as 1s
if (run_time - avg_runtime) >= penalized_time:
norm_runtime = float(run_time - avg_runtime) / avg_runtime
return norm_diff_var + norm_total_var + norm_runtime
def _select_method_for_histogram(self, current_exact_var, current_est_var,
current_total_var, current_run_time):
current_diff_var = np.abs(current_exact_var - current_est_var)
current_avg_diff_var = current_diff_var.mean()
current_avg_total_var = current_total_var.mean()
current_avg_run_time = current_run_time.mean()
min_total_loss = np.inf
selected_method = ''
for method_id, method in enumerate(self.histogram_bin_method_names):
self.histogram_methods[method]['current_loss'] = \
self._histogram_loss(current_diff_var[method_id],
current_avg_diff_var,
current_total_var[method_id],
current_avg_total_var,
current_run_time[method_id],
current_avg_run_time)
self.histogram_methods[method]['total_loss'] += \
self.histogram_methods[method]['current_loss']
if min_total_loss > self.histogram_methods[method]['total_loss']:
min_total_loss = self.histogram_methods[method]['total_loss']
selected_method = method
return selected_method
def _histogram_to_array(self, bins):
# Extend histogram to array format
bin_counts = self.histogram_methods[bins]['histogram']['bin_counts']
bin_edges = self.histogram_methods[bins]['histogram']['bin_edges']
hist_to_array = [[bin_edge] * bin_count for bin_count, bin_edge in
zip(bin_counts[:-1], bin_edges[:-2])]
hist_to_array.append([bin_edges[-2]] * int(bin_counts[-1] / 2))
hist_to_array.append([bin_edges[-1]] *
(bin_counts[-1] - int(bin_counts[-1] / 2)))
array_flatten = [element for sublist in hist_to_array for
element in sublist]
return array_flatten
def _get_histogram(self, values, bin_method):
"""
Get histogram from values and bin method, using np.histogram
:param values: input values
:type values: np.array or pd.Series
:param bin_method: bin method, e.g., sqrt, rice, etc
:type bin_method: str
:return: bin edges and bin counts
"""
if len(np.unique(values)) == 1:
bin_counts = np.array([len(values)])
if isinstance(values, (np.ndarray, list)):
unique_value = values[0]
else:
unique_value = values.iloc[0]
bin_edges = np.array([unique_value, unique_value])
else:
values, weights = histogram_utils._ravel_and_check_weights(
values, None)
_, n_equal_bins = histogram_utils._get_bin_edges(
values, bin_method, None, None)
n_equal_bins = min(n_equal_bins, self.max_histogram_bin)
bin_counts, bin_edges = np.histogram(values, bins=n_equal_bins)
return bin_counts, bin_edges
def _merge_histogram(self, values, bins):
# values is the current array of values,
# that needs to be updated to the accumulated histogram
combined_values = values + self._histogram_to_array(bins)
bin_counts, bin_edges = self._get_histogram(combined_values, bins)
self.histogram_methods[bins]['histogram']['bin_counts'] = bin_counts
self.histogram_methods[bins]['histogram']['bin_edges'] = bin_edges
def _update_histogram(self, df_series):
"""
Update histogram for each method and the combined method. The algorithm
'Follow the best expert' is applied to select the combined method:
<NAME> and <NAME>, Prediction, learning, and games.
Cambridge University Press, 2006.
<NAME>, <NAME>, and <NAME>, "Regret bounds
for sleeping experts and bandits," in Proceedings of the 21st Annual
Conference on Learning Theory - COLT 2008, Helsinki, Finland, 2008,
pp. 425–436.
The idea is to select the current best method based on accumulated
losses up to the current time: all methods are compared using the
accumulated losses, and the best method with minimal loss is picked
:param df_series: a given column
:type df_series: pandas.core.series.Series
:return:
"""
df_series = df_series.replace([np.inf, -np.inf], np.nan).dropna()
if df_series.empty:
return
current_est_var = np.zeros(len(self.histogram_bin_method_names))
current_exact_var = np.zeros(len(self.histogram_bin_method_names))
current_total_var = np.zeros(len(self.histogram_bin_method_names))
current_run_time = np.zeros(len(self.histogram_bin_method_names))
for i, method in enumerate(self.histogram_bin_method_names):
# update histogram for the method
start_time = time.time()
bin_counts, bin_edges = self._get_histogram(df_series, method)
if self.histogram_methods[method]['histogram']['bin_counts'] is None:
self.histogram_methods[method]['histogram']['bin_counts'] = bin_counts
self.histogram_methods[method]['histogram']['bin_edges'] = bin_edges
else:
self._merge_histogram(df_series.tolist(), bins=method)
run_time = time.time() - start_time
# update loss for the method
current_est_var[i] = self._estimate_stats_from_histogram(method)[1]
current_exact_var = df_series.values.var()
current_total_var[i] = self._total_histogram_bin_variance(
df_series.values, method)
current_run_time[i] = run_time
# select the best method and update the total loss
selected_method = self._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.histogram_selection = selected_method
def _get_percentile(self, percentile):
"""
Get value for the number where the given percentage of values fall below
it.
:param percentile: Percentage of values to fall before the value
:type percentile: float
:return: Value for which the percentage of values in the distribution
fall before the percentage
"""
selected_method = self.histogram_selection
bin_counts = \
self.histogram_methods[selected_method]['histogram']['bin_counts']
bin_edges = \
self.histogram_methods[selected_method]['histogram']['bin_edges']
num_edges = len(bin_edges)
if percentile == 100:
return bin_edges[-1]
percentile = float(percentile) / 100
accumulated_count = 0
bin_counts = bin_counts.astype(float)
normalized_bin_counts = bin_counts /
|
np.sum(bin_counts)
|
numpy.sum
|
###########################################
"""
Project: Quantum Mechanics Non-linearities
Description: This code evolves states under the linear and non-linear Hamiltonians
Functions:
"""
###########################################
# Import dependencies
from qutip import *
import yaml
import matplotlib.pyplot as plt
import os
import datetime
import time
import numpy as np
from scipy.special import factorial
class Solver(object):
"""
Description: Class that includes the simulation run and all the functions.
Input: Config file name
Output: Data and plots
"""
def __init__(self, config):
"""
Description: Class function, reads parameters from config file
Input: Config file
Output: None
"""
# Read parameters from file. If it fails, print fail statement
self.args = {}
if type(config) == str:
with open(config) as cfile:
self.args.update(yaml.safe_load(cfile))
elif type(config) == dict:
self.args.update(config)
else:
print("Failed to load config arguments")
# Assign parameters to class variables
self.time = float(self.args['time']) # Final evolution time
self.N = int(self.args['N']) # Hilbert space size
self.M = int(self.args['M']) # Hilbert space size
self.omegaC = float(self.args['omegaC']) # optical frequency (rescaled)
self.C1bar = float(self.args['C1bar']) # single-photon coupling
self.muc = float(self.args['muc']) # optical coherent state parameter
self.mum = float(self.args['mum']) # Mechanical coherent state parameter
self.folder = str(self.args['folder']) # Folder where things are saved
# For time-dependent systems
self.epsilon = float(self.args['epsilon']) # amplitude of squeezing
self.Omega0 = float(self.args['Omega0']) # amplitude of squeezing
# Noisy dynamics
self.gammac = float(self.args['gammac']) # Optical noise
self.gammam = float(self.args['gammam']) # Mechanical noise
def coherent_coherent(self):
"""
Description: Generates an initial coherent state
Input: None (thus far)
Output: Separable initial state
"""
state = tensor(coherent(self.N, self.muc), coherent(self.N, self.mum))
return state
def coherent_thermal(self):
"""
Description: Generates a single thermal state
Input: None
Output: A thermal state
"""
states = []
for i in range(0,self.N):
states.append(np.tanh(self.rT)**(2*i)/np.cosh(self.rT)**2*fock_dm(self.N,i))
thermal = sum(states)
print(thermal.overlap(thermal))
return tensor(coherent(self.N, self.muc), thermal)  # muc: optical coherent-state parameter
def run_time_independent(self, state):
"""
Description: Evolves the state
Input: None
Output: Array of [times, linear states, non-linear states]
"""
# Define operators
a = tensor(destroy(self.N),qeye(self.N))
b = tensor(qeye(self.N),destroy(self.N))
d_ops = []
# Build the non-linear Hamiltonians
HNL = b.dag()*b - self.C1bar*a.dag()*a*(b + b.dag())
# Define collapse operators
Loptics = np.sqrt(self.gammac)*a
Lmechanics = np.sqrt(self.gammam)*b
# Define a list of all the expectation values we want
if (self.gammac == 0 and self.gammam == 0):
c_ops = []
else:
c_ops = [Loptics, Lmechanics]
# Define array of times to feed the solver
times = np.linspace(0.0, self.time, 100)
# Call Master Equation solver. Give it Hamiltonian, state, times and decoherence ops.
# Also give it the list of expectation values to compute
resultsNL = mesolve(HNL, state, times, c_ops = c_ops, e_ops = [], args = [], progress_bar = True)
# Extract expectation values
states = resultsNL.states
return [times, states]
def run_time_dependent(self, state):
"""
Description: Evolves the state
Input: None
Output: Array of [times, linear states, non-linear states]
"""
# Define operators
a = tensor(destroy(self.N),qeye(self.N))
b = tensor(qeye(self.N),destroy(self.N))
d_ops = []
# Define free Hamiltonian
H0 = b.dag()*b
# Define the time-dependent Hamiltonian
H1 = a.dag()*a*(b.dag() + b)
C1bar = self.C1bar
epsilon = self.epsilon
Omega0 = self.Omega0
# Define a function for the coefficient:
def H1_coeff(t, args):
return - self.C1bar*(1. + epsilon*np.sin(Omega0*t))
# Define the full Hamiltonian
H = [H0, [H1, H1_coeff]]
# Change the options so that we have a smaller stepsize
opts = Options()
opts.order = 20
opts.nsteps = 2500
# Define collapse operators
Loptics = np.sqrt(self.gammac)*a
Lmechanics = np.sqrt(self.gammam)*b
# Define a list of all the expectation values we want
if (self.gammac == 0 and self.gammam == 0):
c_ops = []
else:
c_ops = [Loptics, Lmechanics]
# Define array of times to feed the solver
times = np.linspace(0.0, self.time, 100)
# Call Master Equation solver. Give it Hamiltonian, state, times and decoherence ops.
# Also give it the list of expectation values to compute
results = mesolve(H, state, times, c_ops = c_ops, e_ops = [], args = [], progress_bar = True, options=opts)
# Extract expectation values
states = results.states
return [times, states]
def construct_sigmas(self, states):
"""
Description: Returns an array of CMs built from the second moments of the full states
Input: Array of evalues
Output: Array of CMs
"""
# Define operators
a = tensor(destroy(self.N),qeye(self.N))
b = tensor(qeye(self.N),destroy(self.N))
# Optics quadratic exp values
ada_exp = []
a2_exp = []
# Mechanics quadratic exp values
bdb_exp = []
b2_exp = []
# Mixed expectation values
abd_exp = []
ab_exp = []
# Single expectation values
a_exp = []
b_exp = []
# Calculate exp values for each state
for state in states:
# Optics
a_exp.append(expect(a, state))
ada_exp.append(expect(a.dag()*a, state))
# Mechanics
b_exp.append(expect(b, state))
bdb_exp.append(expect(b.dag()*b, state))
# Mixed operators
abd_exp.append(expect(a*b.dag(), state))
ab_exp.append(expect(a*b, state))
# Single operators
a2_exp.append(expect(a*a, state))
b2_exp.append(expect(b*b, state))
# Define array of CMs
sigmas = []
# Build the CMs
# Get an array of a CM at every different time
for i in range(0,len(ada_exp)):
# Optics
sigma11 = 1. + 2.*ada_exp[i] - 2.*a_exp[i]*np.conjugate(a_exp[i])
sigma33 = 1. + 2.*ada_exp[i] - 2.*a_exp[i]*np.conjugate(a_exp[i])
sigma31 = 2.*a2_exp[i] - 2.*a_exp[i]*a_exp[i]
sigma13 = 2.*np.conjugate(a2_exp[i]) - 2.*np.conjugate(a_exp[i]*a_exp[i])
# Mechanics
sigma22 = 1. + 2.*bdb_exp[i] - 2.*b_exp[i]*np.conjugate(b_exp[i])
sigma44 = 1. + 2.*bdb_exp[i] - 2.*b_exp[i]*np.conjugate(b_exp[i])
sigma42 = 2.*b2_exp[i] - 2.*b_exp[i]*b_exp[i]
sigma24 = 2.*np.conjugate(b2_exp[i]) - 2.*np.conjugate(b_exp[i]*b_exp[i])
# Mixed sector
sigma12 = 2.*np.conjugate(abd_exp[i]) -2.*np.conjugate(a_exp[i])*b_exp[i]
sigma21 = 2.*abd_exp[i] - 2.*a_exp[i]*np.conjugate(b_exp[i])
sigma41 = 2.*ab_exp[i] - 2.*a_exp[i]*b_exp[i]
sigma14 = 2.*np.conjugate(ab_exp[i]) - 2.*np.conjugate(a_exp[i]*b_exp[i])
sigma23 = 2.*np.conjugate(ab_exp[i]) - 2.*
|
np.conjugate(a_exp[i]*b_exp[i])
|
numpy.conjugate
|
import numpy as np
import cv2
def points_errors(reference, candidate):
common_points = set(reference.points.keys()).\
intersection(set(candidate.points.keys()))
return np.array([reference.points[p].coordinates -
candidate.points[p].coordinates
for p in common_points])
def completeness_errors(reference, candidate):
return float(len(candidate.shots))/float(len(reference.shots)),\
float(len(candidate.points))/float(len(reference.points))
def gps_errors(candidate):
errors = []
for shot in candidate.shots.values():
pose1 = shot.metadata.gps_position
pose2 = shot.pose.get_origin()
errors.append(pose1-pose2)
return np.array(errors)
def position_errors(reference, candidate):
common_shots = set(reference.shots.keys()).\
intersection(set(candidate.shots.keys()))
errors = []
for s in common_shots:
pose1 = reference.shots[s].pose.get_origin()
pose2 = candidate.shots[s].pose.get_origin()
errors.append(pose1-pose2)
return np.array(errors)
def rotation_errors(reference, candidate):
common_shots = set(reference.shots.keys()).\
intersection(set(candidate.shots.keys()))
errors = []
for s in common_shots:
pose1 = reference.shots[s].pose.get_rotation_matrix()
pose2 = candidate.shots[s].pose.get_rotation_matrix()
difference = np.transpose(pose1).dot(pose2)
rodrigues = cv2.Rodrigues(difference)[0].ravel()
angle =
|
np.linalg.norm(rodrigues)
|
numpy.linalg.norm
|
"""Defines the GradientDescent class."""
import warnings
import numpy as np
from .base import Optimizer
from ..utils import validate_bool
from ..utils import validate_float
from ..utils import validate_int
def validate_gd_params(rate, momentum, nesterov, anneal, iterations):
"""Validate tuning parameters for gradient descent.
Parameters
----------
rate: float, optional
Step size/learning rate. Must be positive.
momentum: float, optional
Momentum parameter. Must be positive.
nesterov: bool
If True, the update rule is Nesterov's accelerated gradient descent.
If False, the update rule is vanilla gradient descent with momentum.
anneal: float, optional
Factor determining the annealing schedule of the learning rate. Must
be positive. Smaller values lead to faster shrinking of the learning
rate over time.
iterations: int, optional
Number of iterations of the algorithm to perform. Must be positive."""
rate = validate_float(rate, "rate", positive=True)
momentum = validate_float(momentum, "momentum", minimum=0.0)
nesterov = validate_bool(nesterov, "nesterov")
anneal = validate_float(anneal, "anneal", positive=True)
iterations = validate_int(iterations, "iterations", minimum=1)
if nesterov and momentum == 0.0:
warnings.warn("momentum=0 not valid for Nesterov's accelerated gradient"
" descent. Reverting to vanilla gradient descent.")
nesterov = False
return rate, momentum, nesterov, anneal, iterations
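# Hedged usage sketch (my addition): the validator normalizes the tuning
# parameters before they are stored on the optimizer, e.g.
# rate, momentum, nesterov, anneal, iterations = validate_gd_params(
#     rate=0.1, momentum=0.9, nesterov=True, anneal=np.inf, iterations=1000)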
class GradientDescent(Optimizer):
"""Unconstrained batch gradient descent with momentum."""
def __init__(self, rate=0.1, momentum=0.0, nesterov=False, anneal=np.inf,
iterations=10000):
"""Initialize the parameters of a gradient descent object.
Parameters
----------
rate: float, optional
Step size/learning rate. Must be positive.
momentum: float, optional
Momentum parameter. Must be non-negative.
nesterov: bool
If True, the update rule is Nesterov's accelerated gradient descent.
If False, the update rule is vanilla gradient descent with momentum.
anneal: float, optional
Factor determining the annealing schedule of the learning rate. Must
be positive. Smaller values lead to faster shrinking of the learning
rate over time.
iterations: int, optional
Number of iterations of the algorithm to perform. Must be positive.
"""
self.rate, self.momentum, self.nesterov, self.anneal, self.iterations \
= validate_gd_params(rate, momentum, nesterov, anneal, iterations)
def optimize(self, x0, func, grad=None, args=None, kwargs=None,
callback=None):
"""Approximate a minimizer of the objective function.
Parameters
----------
x0: array-like
Initial guess for the minimizer.
func: callable
The objective function to minimize.
grad: callable, optional
Gradient/Jacobian (vector of first derivatives) of the objective
function. This must be a function returning a 1D array. If it is not
specified, then `func` needs to have a 'grad' attribute.
args: sequence, optional
Extra positional arguments to pass to the objective function and
gradient.
kwargs: dict, optional
Extra keyword arguments to pass to the objective function and
gradient.
callback: callable, optional
Function to call at every iteration of the algorithm. The function
is called on the current value of the parameter being minimized
along with the extra arguments specified by `args` and `kwargs`.
For example, `callback` could be a function that prints the value of
the objective function at each iteration.
Returns
-------
x : array-like
The approximate minimizer of the objective function.
"""
if not callable(func):
raise ValueError(f"Objective function {func} is not callable")
if grad is None:
if hasattr(func, "grad"):
grad = func.grad
else:
raise ValueError("Could not detect objective function gradient")
if not callable(grad):
raise ValueError(f"Gradient {grad} is not callable")
if args is None:
args = ()
if kwargs is None:
kwargs = {}
x =
|
np.asarray(x0)
|
numpy.asarray
|
"""
The Evolution Strategy can be summarized as the following term:
{mu/rho +, lambda}-ES
Here we use the following term to find a maximum point.
{n_pop/n_pop + n_kid}-ES
Visit my tutorial website for more: https://morvanzhou.github.io/tutorials/
"""
import numpy as np
import matplotlib.pyplot as plt
DNA_SIZE = 1 # DNA (real number)
DNA_BOUND = [0, 5] # solution upper and lower bounds
N_GENERATIONS = 200
POP_SIZE = 100 # population size
N_KID = 50 # n kids per generation
def F(x): return np.sin(10*x)*x + np.cos(2*x)*x # to find the maximum of this function
# find non-zero fitness for selection
def get_fitness(pred): return pred.flatten()
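# Illustrative sketch (my addition): one plausible population initialization
# matching the conventions above, with DNA drawn inside DNA_BOUND and one
# mutation strength per gene:
# pop = dict(DNA=5 * np.random.rand(POP_SIZE, DNA_SIZE),
#            mut_strength=np.random.rand(POP_SIZE, DNA_SIZE))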
def make_kid(pop, n_kid):
# generate empty kid holder
kids = {'DNA': np.empty((n_kid, DNA_SIZE))}
kids['mut_strength'] = np.empty_like(kids['DNA'])
for kv, ks in zip(kids['DNA'], kids['mut_strength']):
# crossover (roughly half p1 and half p2)
p1, p2 = np.random.choice(np.arange(POP_SIZE), size=2, replace=False)
cp = np.random.randint(0, 2, DNA_SIZE, dtype=bool) # crossover points (the np.bool alias was removed in modern NumPy)
kv[cp] = pop['DNA'][p1, cp]
kv[~cp] = pop['DNA'][p2, ~cp]
ks[cp] = pop['mut_strength'][p1, cp]
ks[~cp] = pop['mut_strength'][p2, ~cp]
# mutate (change DNA based on normal distribution)
ks[:] = np.maximum(ks + (
|
np.random.rand(*ks.shape)
|
numpy.random.rand
|
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
from hyperopt_synthetic import run_one_exp as hyperopt_synthetic_opt
from xbbo_synthetic import run_one_exp as xbbo_synthetic_opt
max_call = 50
if __name__ == "__main__":
rng = np.random.RandomState(42)
result_opts = defaultdict(list)
for i in range(3):
seed = rng.randint(1e5)
# result_opts['hyperopt-rand'].append(hyperopt_synthetic_opt('rand', max_call,seed))
result_opts['hyperopt-tpe'].append(hyperopt_synthetic_opt('tpe', max_call,seed))
# result_opts['hyperopt-atpe'].append(hyperopt_synthetic_opt('atpe', max_call,seed))
# result_opts['hyperopt-mix'].append(hyperopt_synthetic_opt('mix', max_call,seed))
result_opts['hyperopt-anneal'].append(hyperopt_synthetic_opt('anneal', max_call,seed))
result_opts['XBBO-tpe'].append(xbbo_synthetic_opt('tpe', max_call,seed))
result_opts['XBBO-anneal'].append(xbbo_synthetic_opt('anneal',max_call,seed))
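# Note (my addition): the truncated plot call below presumably accumulates the
# running minimum along the call axis (np.minimum.accumulate(..., axis=1)) to
# get each run's best-so-far curve, then averages over the seeds with np.mean.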
plt.figure()
for key in result_opts:
plt.plot(range(1,max_call+1), np.mean(np.minimum.accumulate(
|
np.asarray(result_opts[key])
|
numpy.asarray
|
from alcokit import SR, HOP_LENGTH
from alcokit.util import signal, f2s
import numpy as np
from librosa import resample, phase_vocoder, util, stft
from pyrubberband.pyrb import pitch_shift as rb_shift
def _shift_translate(S, k_bins):
n = S.shape[0]
k = k_bins
rng = np.arange(n)
i_rng = rng if k == 0 else (rng[:-k] if k > 0 else rng[-k:])
j_rng = rng if k == 0 else (rng[k:] if k > 0 else rng[:k])
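# Worked example (my addition): for n=4 and k=1, i_rng = [0, 1, 2] and
# j_rng = [1, 2, 3], pairing each source bin with the bin one step above it
# in the shift matrix D initialized below.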
D =
|
np.zeros((n, n))
|
numpy.zeros
|
from django.shortcuts import render, redirect
from django.contrib import messages
import os, csv, io, numpy as np
from math import sqrt
from .forms import StaffForm
from .models import uCalibrationUpdate, uRawDataModel
from staffs.models import Staff, StaffType
from range_calibration.models import RangeParameters
from datetime import date
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ObjectDoesNotExist
from datetime import datetime
from staffs.models import Staff
from django.db.models import Q
from django.conf import settings
#from accounts.models import CustomUser
# Create your views here.
def homeview(request):
return redirect('/')
def guideview(request):
return render(request, 'staff_calibration/staff_calibration_guide.html')
# Staff lists
@login_required(login_url="/accounts/login")
def user_staff_lists(request):
if request.user.is_staff:
staff_lists = uCalibrationUpdate.objects.all().order_by('-processed_date')[:10]
else:
staff_lists = uCalibrationUpdate.objects.filter(staff_number__staff_owner = request.user.authority).order_by('-processed_date')[:10]
context = {
'staff_lists': staff_lists}
return render(request, 'staff_calibration/user_staff_lists.html', context=context)
# delete staffs
def user_staff_delete(request, update_index):
try:
if uCalibrationUpdate.objects.filter(user= request.user).exists():
# Delete Calibration update
user_staff = uCalibrationUpdate.objects.get(user= request.user, update_index=update_index)
user_staff.delete()
messages.success(request, 'Calibration record deleted.')
# Delete raw data
user_staff_data = uRawDataModel.objects.filter(user= request.user, update_index=update_index)
user_staff_data.delete()
messages.success(request, 'Raw data record deleted.')
# return to the registry list
return redirect('staff_calibration:user-staff-lists')
else:
messages.warning(request, 'This staff belongs to another person. You cannot delete it.')
return redirect('staff_calibration:user-staff-lists')
except:
messages.error(request, 'This action cannot be performed. Contact Landgate.')
return redirect('staff_calibration:user-staff-lists')
# handle data file
def handle_uploaded_file(f):
root_dir = os.path.join(settings.UPLOAD_ROOT, 'client_data')
file_path = os.path.join(root_dir, f.name[:-4]+'-'+date.today().strftime('%Y%m%d')+'.csv')
# file_path = "data/client_data/"+f.name
with open(file_path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return file_path
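# Note (my addition): f.name[:-4] strips the '.csv' extension before today's
# date is appended, e.g. 'staff1.csv' becomes 'staff1-<YYYYMMDD>.csv' under
# the client_data upload folder.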
# Preprocess staff readings to calculate the height differences between pins
def preprocess_staff(data_set):
data_set = np.array(data_set, dtype=object)
observation_set = []
for i in range(len(data_set)-1):
pini, obsi, nmeasi, stdi = data_set[i]
pinj, obsj, nmeasj, stdj = data_set[i+1]
if float(stdi) == 0:
stdi = 10**-5
if float(stdj) == 0:
stdj = 10**-5
dMeasuredLength = float(obsj)- float(obsi)
dStdDeviation = sqrt(float(stdi)**2 + float(stdj)**2)
observation_set.append([str(pini)+'-'+str(pinj),
'{:.5f}'.format(float(obsi)), '{:.5f}'.format(float(obsj)),
'{:.5f}'.format(dMeasuredLength),
'{:.7f}'.format(dStdDeviation)])
return observation_set
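# Worked example (my addition): two pin readings produce one interval, e.g.
# preprocess_staff([[1, 0.100, 10, 0.0001], [2, 0.350, 10, 0.0001]])
# returns [['1-2', '0.10000', '0.35000', '0.25000', '0.0001414']].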
# generate correction factor from below
def generate_correction_factor(uncorrected_scale_factor, staff_meta):
list_scale_factors = []
start_temperature = 0.
end_temperature = 40.
interval = 2.
while start_temperature <= end_temperature:
scale_factor = (((start_temperature-staff_meta['dObsTemperature'])*staff_meta['dThermalCoefficient'])+1)*uncorrected_scale_factor
correction = (scale_factor-1)*1000.
list_scale_factors.append([str(int(start_temperature)), '{:.6f}'.format(scale_factor), '{:.2f}'.format(correction)])
start_temperature += interval
return list_scale_factors
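# Worked example (my addition, with hypothetical metadata): for
# staff_meta = {'dObsTemperature': 25., 'dThermalCoefficient': 8e-6} and
# uncorrected_scale_factor = 1.00002, the 0 degree row is
# ((0-25)*8e-6 + 1)*1.00002 = 0.999820, i.e. a correction of -0.18 mm.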
# Calculate the correction factor
def process_correction_factor(data_set, reference_set, meta):
data_set = np.array(data_set, dtype=object)
reference_set = np.array(reference_set, dtype=object)
# output tables
adjusted_corrections = []
#allocate arrays
W = np.zeros([len(data_set)])
A = np.ones([len(data_set)])
sum_sq_diff = np.zeros([len(data_set)])
variance = np.zeros([len(data_set)])
j = 0
for i in range(len(W)):
j+=1
pin, frm, to, diff, std = data_set[i]
if pin in reference_set[:,0]:
known_length = reference_set[reference_set[:,0]==pin][0][1]
measured_length = float(diff) #* (((meta['dObsTemperature']-meta['dStdTemperature'])*meta['dThermalCoefficient'])+1)
corrected_length = float(diff) * (((meta['dObsTemperature']-meta['dStdTemperature'])*meta['dThermalCoefficient'])+1)
correction = float(known_length) - float(measured_length)
# squared differences
sum_sq_diff[j-1,] = (float(known_length) - measured_length)**2
# Variance
variance[j-1,] = float(std)
# Scale factor
W[j-1] = float(known_length) / float(measured_length)
# Table 1
adjusted_corrections.append([pin, frm, to, known_length, '{:.5f}'.format(measured_length),'{:.5f}'.format(correction)])
# Now do the least squares adjustment
P =
|
np.diag(1/variance**2)
|
numpy.diag
|
# -*- coding: iso-8859-1 -*-
"""
Purpose of this code is to plot the figures from the Discussion section of our paper.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
import cookbook
from matplotlib.pyplot import cm
import cPickle as pickle
########################
###Define useful constants, all in CGS (via http://www.astro.wisc.edu/~dolan/constants.html)
########################
#Unit conversions
km2m=1.e3 #1 km in m
km2cm=1.e5 #1 km in cm
cm2km=1.e-5 #1 cm in km
cm2inch=1./2.54 #1 cm in inches
amu2g=1.66054e-24 #1 amu in g
bar2atm=0.9869 #1 bar in atm
Pascal2bar=1.e-5 #1 Pascal in bar
Pa2bar=1.e-5 #1 Pascal in bar
bar2Pa=1.e5 #1 bar in Pascal
deg2rad=np.pi/180.
bar2barye=1.e6 #1 Bar in Barye (the cgs unit of pressure)
barye2bar=1.e-6 #1 Barye in Bar
micron2m=1.e-6 #1 micron in m
micron2cm=1.e-4 #1 micron in cm
metricton2kg=1000. #1 metric ton in kg
#Fundamental constants
c=2.997924e10 #speed of light, cm/s
h=6.6260755e-27 #planck constant, erg/s
k=1.380658e-16 #boltzmann constant, erg/K
sigma=5.67051e-5 #Stefan-Boltzmann constant, erg/(cm^2 K^4 s)
R_earth=6371.*km2m#radius of earth in m
R_sun=69.63e9 #radius of sun in cm
AU=1.496e13#1AU in cm
#Mean molecular masses
m_co2=44.01*amu2g #co2, in g
m_h2o=18.02*amu2g #h2o, in g
#Mars parameters
g=371. #surface gravity of Mars, cm/s**2, from: http://nssdc.gsfc.nasa.gov/planetary/factsheet/marsfact.html
deg2rad=np.pi/180. #1 degree in radian
########################
###Which plots to generate?
########################
plot_doses_pco2=True #plot the dependence of dose rate on pCO2 in a CO2-H2O atmosphere (fixed temperature). NOTE: may want to do this for low albedo, which is both more physically plausible and avoids the weird uptick effect
plot_doses_clouds=True #plot the dependence of dose rate on CO2 cloud optical depth in a CO2-H2O atmosphere (fixed temperature)
plot_doses_dust_pco2=True #plot the dependence of dose rate as a function of dust level for different pCO2
plot_doses_dust_clouds=True #plot the dependence of dose rate as a function of dust level for different cloud levels.
plot_doses_pso2_pco2=True #plot the dependence of dose rate as a function of pSO2 for different pCO2
plot_doses_pso2_clouds=True #plot the dependence of dose rate as a function of pSO2 for different cloud levels.
plot_doses_ph2s_pco2=True #plot the dependence of dose rate as a function of pH2S for different pCO2
plot_doses_ph2s_clouds=True #plot the dependence of dose rate as a function of pH2S for different cloud levels.
plot_reldoses_pso2=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of PSO2. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
plot_reldoses_ph2s=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of PH2S. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
plot_reldoses_dust=True #plot the ratio between the "bad" dose rate and the "good" dose rate as function of dust level. Plot for 1) pCO2=0.02, cloud=1000 and 2) pCO2=2 bar, cloud=0.
########################
###
########################
if plot_reldoses_dust:
"""
The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of atmospheric dust loading. We evaluate this for dust optical depths of 0.1--10 (500 nm). We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
od_list=np.array(['dustod=0.1', 'dustod=1', 'dustod=10']) #list of dust optical depths (500 nm)
od_axis=np.array([1.e-1, 1., 1.e1])
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_od=len(od_list)
dose_100_165=np.zeros([num_od,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_od,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_od,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_od,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_od,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_od):
od=od_list[ind]
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_colddrymars_2bar_250K_z=0_A=desert_noTD_DS_'+od+'.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_colddrymars_0.02bar_250K_z=0_A=desert_noTD_DS_'+od+'_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cases, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cases):
ax[ind2].set_title(titles_list[ind2])
ax[ind2].plot(od_axis, dose_ump_193[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label=r'UMP-193/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_230[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP-230/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_254[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP-254/CuCN3-254')
ax[ind2].plot(od_axis, dose_ump_193[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP-193/CuCN3-300')
ax[ind2].plot(od_axis, dose_ump_230[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'UMP-230/CuCN3-300')
ax[ind2].plot(od_axis, dose_ump_254[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'UMP-254/CuCN3-300')
#ax.set_ylim([1.e-2, 1.e4])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'$\bar{D}_{UMP-X}/\bar{D}_{CuCN3-Y}$')
#ax.set_xlim([100, 500])
ax[num_cases-1].set_xscale('log')
ax[num_cases-1].set_xlabel(r'$\tau_{d}$ (unscaled)', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_reldoses_dust.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_reldoses_ph2s:
"""
The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of pH2S. We evaluate this for pH2S=2e-9 -- 2e-4 bar. We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
nocloud_list=np.array(['2bar_250K_0so2_1ppbh2s', '2bar_250K_0so2_10ppbh2s','2bar_250K_0so2_100ppbh2s','2bar_250K_0so2_1ppmh2s', '2bar_250K_0so2_10ppmh2s', '2bar_250K_0so2_100ppmh2s']) #list of pH2S for pCO2=2
cloud_list=np.array(['0.02bar_250K_0so2_100ppbh2s','0.02bar_250K_0so2_1ppmh2s','0.02bar_250K_0so2_10ppmh2s', '0.02bar_250K_0so2_100ppmh2s', '0.02bar_250K_0so2_1000ppmh2s', '0.02bar_250K_0so2_10000ppmh2s']) #list of pH2S for pCO2=0.02
ph2s_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5, 2.e-4]) #pH2S in bar
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_h2s=len(nocloud_list)
dose_100_165=np.zeros([num_h2s,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_h2s,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_h2s,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_h2s,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_h2s,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_h2s):
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+nocloud_list[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+cloud_list[ind]+'_z=0_A=desert_noTD_DS_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cases, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cases):
ax[ind2].set_title(titles_list[ind2])
ax[ind2].plot(ph2s_axis, dose_ump_193[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label=r'UMP-193/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_230[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP-230/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_254[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP-254/CuCN3-254')
ax[ind2].plot(ph2s_axis, dose_ump_193[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP-193/CuCN3-300')
ax[ind2].plot(ph2s_axis, dose_ump_230[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'UMP-230/CuCN3-300')
ax[ind2].plot(ph2s_axis, dose_ump_254[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'UMP-254/CuCN3-300')
#ax.set_ylim([1.e-2, 1.e4])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'$\bar{D}_{UMP-X}/\bar{D}_{CuCN3-Y}$')
#ax.set_xlim([100, 500])
ax[num_cases-1].set_xscale('log')
ax[num_cases-1].set_xlabel(r'pH$_2$S', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_reldoses_ph2s.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_reldoses_pso2:
"""
The purpose of this script is to plot the relative dose rates of the "stressor" photoprocess compared to the "eustressor" photoprocess as a function of pSO2. We evaluate this for pSO2=2e-9 -- 2e-5 bar. We do this for two cases: 1) pCO2=2 bar, and 2) pCO2=0.02 bar and a tau=1000 cloud deck emplaced at 20.5 km. This is so we can separate out absorption amplification due to cloud decks (relatively flat) and due to Rayleigh scattering (not flat).
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
nocloud_list=np.array(['2bar_250K_1ppbso2_0h2s','2bar_250K_10ppbso2_0h2s','2bar_250K_100ppbso2_0h2s', '2bar_250K_1ppmso2_0h2s', '2bar_250K_10ppmso2_0h2s']) #list of pSO2 for pCO2=2
cloud_list=np.array(['0.02bar_250K_100ppbso2_0h2s','0.02bar_250K_1ppmso2_0h2s','0.02bar_250K_10ppmso2_0h2s', '0.02bar_250K_100ppmso2_0h2s', '0.02bar_250K_1000ppmso2_0h2s']) #list of pSO2 for pCO2=0.02
pso2_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5]) #pSO2 in bar
titles_list=np.array([r'pCO$_2$=2 bar, $\tau_{cloud}=0$',r'pCO$_2$=0.02 bar, $\tau_{cloud}=1000$ (unscaled)'])
num_cases=len(titles_list)
num_so2=len(nocloud_list)
dose_100_165=np.zeros([num_so2,num_cases]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_so2,num_cases]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_so2,num_cases]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_so2,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_so2,num_cases]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_so2):
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+nocloud_list[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+cloud_list[ind]+'_z=0_A=desert_noTD_DS_co2cloudod=1000_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cases, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cases):
ax[ind2].set_title(titles_list[ind2])
ax[ind2].plot(pso2_axis, dose_ump_193[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label=r'UMP-193/CuCN3-254')
ax[ind2].plot(pso2_axis, dose_ump_230[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP-230/CuCN3-254')
ax[ind2].plot(pso2_axis, dose_ump_254[:,ind2]/dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP-254/CuCN3-254')
ax[ind2].plot(pso2_axis, dose_ump_193[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP-193/CuCN3-300')
ax[ind2].plot(pso2_axis, dose_ump_230[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'UMP-230/CuCN3-300')
ax[ind2].plot(pso2_axis, dose_ump_254[:,ind2]/dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'UMP-254/CuCN3-300')
#ax.set_ylim([1.e-2, 1.e4])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'$\bar{D}_{UMP-X}/\bar{D}_{CuCN3-Y}$')
#ax.set_xlim([100, 500])
ax[num_cases-1].set_xscale('log')
ax[num_cases-1].set_xlabel(r'pSO$_2$', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_reldoses_pso2.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_doses_ph2s_clouds:
"""
The purpose of this script is to plot the doses for a CO2-H2O-H2S Martian atmosphere with varying pH2S, with CO2 cloud decks of varying thickness emplaced at 20.5 km (20-21 km).
#pCO2=0.02 bar (optically thin in gas scattering)
#pH2S=2e-9 -- 2e-4 bar
#CO2 cloud OD varies from 1-1000.
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
pso2_list=np.array(['0.02bar_250K_0so2_100ppbh2s','0.02bar_250K_0so2_1ppmh2s','0.02bar_250K_0so2_10ppmh2s', '0.02bar_250K_0so2_100ppmh2s', '0.02bar_250K_0so2_1000ppmh2s', '0.02bar_250K_0so2_10000ppmh2s']) #list of pH2S for pCO2=0.02
pso2_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5, 2.e-4]) #pH2S in bar
cloud_list=np.array(['co2cloudod=1', 'co2cloudod=10', 'co2cloudod=100','co2cloudod=1000'])
cloud_labels=np.array([r'$\tau_{cloud}=1$ (unscaled)',r'$\tau_{cloud}=10$ (unscaled)',r'$\tau_{cloud}=100$ (unscaled)',r'$\tau_{cloud}=1000$ (unscaled)'])
num_so2=len(pso2_list)
num_cloud=len(cloud_list)
dose_100_165=np.zeros([num_so2,num_cloud]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_so2,num_cloud]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_so2,num_cloud]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_so2,num_cloud]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_so2,num_cloud]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_so2,num_cloud]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_so2,num_cloud]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_so2):
for ind2 in range(0, num_cloud):
pso2=pso2_list[ind]
cloudod=cloud_list[ind2]
dose_100_165[ind,ind2],dose_200_300[ind,ind2],dose_ump_193[ind,ind2],dose_ump_230[ind,ind2],dose_ump_254[ind,ind2],dose_cucn3_254[ind,ind2],dose_cucn3_300[ind,ind2]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+pso2+'_z=0_A=desert_noTD_DS_'+cloudod+'_z=20.5_reff=10.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_cloud, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_cloud):
ax[ind2].set_title(cloud_labels[ind2])
#ax[ind2].plot(pso2_axis, dose_200_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label='Radiance 200-300 nm')
ax[ind2].plot(pso2_axis, dose_ump_193[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP Bond Cleavage ($\lambda_0=193$)')
ax[ind2].plot(pso2_axis, dose_ump_230[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP Bond Cleavage ($\lambda_0=230$)')
ax[ind2].plot(pso2_axis, dose_ump_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP Bond Cleavage ($\lambda_0=254$)')
ax[ind2].plot(pso2_axis, dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'CuCN$_3$ Photoionization ($\lambda_0=254$)')
ax[ind2].plot(pso2_axis, dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'CuCN$_3$ Photoionization ($\lambda_0=300$)')
#ax[ind2].set_ylim([1.e-10, 1.e1])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'Relative Dose Rate $\bar{D}_i$')
#ax.set_xlim([100, 500])
ax[num_cloud-1].set_xscale('log')
ax[num_cloud-1].set_xlabel(r'pH$_2$S (unscaled)', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_ph2s_clouds.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_doses_ph2s_pco2:
"""
The purpose of this script is to plot the doses for a CO2-H2O-H2S Martian atmosphere with varying pH2S for varying background pCO2
#pCO2=0.02-2 bar
#pH2S=2e-9 -- 2e-4 bar
#Conditions: T_0=250, A=desert, z=0, no TD XCs, no DeltaScaling, no clouds
"""
########################
###Read in doses
########################
###Read in computed doses
ph2s_list0=np.array(['0.02bar_250K_0so2_100ppbh2s','0.02bar_250K_0so2_1ppmh2s','0.02bar_250K_0so2_10ppmh2s', '0.02bar_250K_0so2_100ppmh2s', '0.02bar_250K_0so2_1000ppmh2s', '0.02bar_250K_0so2_10000ppmh2s']) #list of pH2S for pCO2=0.02
ph2s_list1=np.array(['0.2bar_250K_0so2_10ppbh2s','0.2bar_250K_0so2_100ppbh2s','0.2bar_250K_0so2_1ppmh2s', '0.2bar_250K_0so2_10ppmh2s', '0.2bar_250K_0so2_100ppmh2s','0.2bar_250K_0so2_1000ppmh2s']) #list of pH2S for pCO2=0.2
ph2s_list2=np.array(['2bar_250K_0so2_1ppbh2s', '2bar_250K_0so2_10ppbh2s','2bar_250K_0so2_100ppbh2s','2bar_250K_0so2_1ppmh2s', '2bar_250K_0so2_10ppmh2s', '2bar_250K_0so2_100ppmh2s']) #list of pH2S for pCO2=2
ph2s_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5, 2e-4]) #pH2S in bar
pco2_list=np.array(['0.02bar', '0.2bar', '2bar'])
pco2_labels=np.array(['0.02 bar', '0.2 bar', '2 bar'])
num_ph2s=len(ph2s_axis)
num_pco2=len(pco2_list)
dose_100_165=np.zeros([num_ph2s,num_pco2]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_ph2s,num_pco2]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_ph2s,num_pco2]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=np.zeros([num_ph2s,num_pco2]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=230
dose_ump_254=np.zeros([num_ph2s,num_pco2]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=254
dose_cucn3_254=np.zeros([num_ph2s,num_pco2]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=254
dose_cucn3_300=np.zeros([num_ph2s,num_pco2]) #dose rate for solvated electron production from tricyanocuprate, assuming lambda0=300
for ind in range(0, num_ph2s):
dose_100_165[ind,0],dose_200_300[ind,0],dose_ump_193[ind,0],dose_ump_230[ind,0],dose_ump_254[ind,0],dose_cucn3_254[ind,0],dose_cucn3_300[ind,0]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+ph2s_list0[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,1],dose_200_300[ind,1],dose_ump_193[ind,1],dose_ump_230[ind,1],dose_ump_254[ind,1],dose_cucn3_254[ind,1],dose_cucn3_300[ind,1]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+ph2s_list1[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
dose_100_165[ind,2],dose_200_300[ind,2],dose_ump_193[ind,2],dose_ump_230[ind,2],dose_ump_254[ind,2],dose_cucn3_254[ind,2],dose_cucn3_300[ind,2]=np.genfromtxt('./DoseRates/dose_rates_volcanicmars_'+ph2s_list2[ind]+'_z=0_A=desert_noTD_noDS_noparticles.dat', skip_header=1, skip_footer=0,usecols=(0,1,2,3,4,5,6), unpack=True)
########################
###Plot results
########################
fig, ax=plt.subplots(num_pco2, figsize=(16.5*cm2inch,10.), sharex=True, sharey=False)
markersizeval=3.
colors=cm.rainbow(np.linspace(0,1,6))
for ind2 in range(0, num_pco2):
ax[ind2].set_title('pCO$_2$='+pco2_labels[ind2])
#ax[ind2].plot(ph2s_axis, dose_200_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[0], label='Radiance 200-300 nm')
ax[ind2].plot(ph2s_axis, dose_ump_193[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[1], label=r'UMP Bond Cleavage ($\lambda_0=193$)')
ax[ind2].plot(ph2s_axis, dose_ump_230[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[2], label=r'UMP Bond Cleavage ($\lambda_0=230$)')
ax[ind2].plot(ph2s_axis, dose_ump_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[3], label=r'UMP Bond Cleavage ($\lambda_0=254$)')
ax[ind2].plot(ph2s_axis, dose_cucn3_254[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[4], label=r'CuCN$_3$ Photoionization ($\lambda_0=254$)')
ax[ind2].plot(ph2s_axis, dose_cucn3_300[:,ind2], marker='s', markeredgewidth=0., markersize=markersizeval, linewidth=1, color=colors[5], label=r'CuCN$_3$ Photoionization ($\lambda_0=300$)')
#ax[ind2].set_ylim([1.e-, 1.e1])
ax[ind2].set_yscale('log')
ax[ind2].set_ylabel(r'Relative Dose Rate $\bar{D}_i$')
#ax.set_xlim([100, 500])
ax[num_pco2-1].set_xscale('log')
ax[num_pco2-1].set_xlabel(r'pH$_2$S (bar)', fontsize=12)
plt.tight_layout(rect=(0,0,1., 0.9))
ax[0].legend(bbox_to_anchor=[0, 1.2, 1., 0.7], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
plt.savefig('./Plots/discussion_doses_ph2s_pco2.eps', orientation='portrait',papertype='letter', format='eps')
plt.show()
########################
###
########################
if plot_doses_pso2_clouds:
"""
The purpose of this script is to plot the doses for a CO2-H2O-SO2 Martian atmosphere with varying pSO2, with CO2 cloud decks of varying thickness emplaced at 20.5 km (20-21 km).
#pCO2=0.02 bar (optically thin in gas scattering)
#pSO2=2e-9-2e-5 bar
#CO2 cloud OD varies from 1-1000.
#Conditions: T_0=250, A=desert, z=0, no TD XCs, DeltaScaling
"""
########################
###Read in doses
########################
###Read in computed doses
pso2_list=np.array(['0.02bar_250K_100ppbso2_0h2s','0.02bar_250K_1ppmso2_0h2s','0.02bar_250K_10ppmso2_0h2s', '0.02bar_250K_100ppmso2_0h2s', '0.02bar_250K_1000ppmso2_0h2s']) #list of pSO2 for pCO2=0.02
pso2_axis=np.array([2.e-9, 2.e-8, 2.e-7, 2.e-6, 2.e-5]) #pSO2 in bar
cloud_list=np.array(['co2cloudod=1', 'co2cloudod=10', 'co2cloudod=100','co2cloudod=1000'])
cloud_labels=np.array([r'$\tau_{cloud}=1$ (unscaled)',r'$\tau_{cloud}=10$ (unscaled)',r'$\tau_{cloud}=100$ (unscaled)',r'$\tau_{cloud}=1000$ (unscaled)'])
num_so2=len(pso2_list)
num_cloud=len(cloud_list)
dose_100_165=np.zeros([num_so2,num_cloud]) #surface radiance integrated 100-165 nm
dose_200_300=np.zeros([num_so2,num_cloud]) #surface radiance integrated 200-300 nm
dose_ump_193=np.zeros([num_so2,num_cloud]) #dose rate for UMP glycosidic bond cleavage, assuming lambda0=193
dose_ump_230=
|
np.zeros([num_so2,num_cloud])
|
numpy.zeros
|
import unittest
from functools import partial
import numpy as np
import sympy as sp
from scipy import stats
import pickle
import os
from pyapprox.sparse_grid import update_1d_samples_weights_economical, \
get_1d_samples_weights, get_hierarchical_sample_indices, \
get_subspace_polynomial_indices, get_sparse_grid_samples_and_weights, \
get_subspace_samples, evaluate_sparse_grid, get_smolyak_coefficients, \
get_num_model_evaluations_from_samples, get_equivalent_cost, \
get_num_sparse_grid_samples, integrate_sparse_grid, \
convert_univariate_lagrange_basis_to_orthonormal_polynomials, \
convert_multivariate_lagrange_polys_to_orthonormal_polys
from pyapprox.adaptive_sparse_grid import CombinationSparseGrid, \
max_level_admissibility_function, mypriorityqueue, \
get_sparse_grid_univariate_leja_quadrature_rules_economical, \
variance_refinement_indicator, isotropic_refinement_indicator, \
update_smolyak_coefficients, surplus_refinement_indicator, \
insitu_update_sparse_grid_quadrature_rule, \
convert_sparse_grid_to_polynomial_chaos_expansion, \
get_active_subspace_indices, extract_items_from_priority_queue, \
compute_hierarchical_surpluses_direct, \
extract_sparse_grid_quadrature_rule, compute_surpluses
from pyapprox.barycentric_interpolation import \
compute_barycentric_weights_1d, \
multivariate_barycentric_lagrange_interpolation
from pyapprox.monomial import evaluate_monomial, \
monomial_mean_uniform_variables, monomial_variance_uniform_variables
from pyapprox.univariate_polynomials.orthonormal_recursions import \
jacobi_recurrence, krawtchouk_recurrence
from pyapprox.univariate_polynomials.orthonormal_polynomials import \
evaluate_orthonormal_polynomial_1d
from pyapprox.indexing import set_difference, sort_indices_lexiographically, \
compute_hyperbolic_indices
from pyapprox.univariate_polynomials.quadrature import leja_growth_rule, \
clenshaw_curtis_in_polynomial_order, clenshaw_curtis_rule_growth, \
clenshaw_curtis_pts_wts_1D, gauss_quadrature
from pyapprox.univariate_polynomials.leja_quadrature import \
get_univariate_leja_quadrature_rule, \
candidate_based_christoffel_leja_rule_1d
from pyapprox.utilities import beta_pdf_on_ab, cartesian_product, hash_array, \
lists_of_arrays_equal, outer_product, allclose_unsorted_matrix_rows, \
gaussian_pdf
from pyapprox.variable_transformations import \
define_iid_random_variable_transformation
from pyapprox.manipulate_polynomials import get_indices_double_set
from pyapprox.variable_transformations import \
AffineBoundedVariableTransformation, AffineRandomVariableTransformation
from pyapprox.variables import IndependentMultivariateRandomVariable
from pyapprox.polynomial_chaos.multivariate_polynomials import \
PolynomialChaosExpansion, define_poly_options_from_variable_transformation
from pyapprox.models.wrappers import WorkTrackingModel
from pyapprox.probability_measure_sampling import \
generate_independent_random_samples
class MultilevelPolynomialModel():
def __init__(self, num_levels, return_work=False):
self.num_levels = num_levels
self.return_work = return_work
self.ab = jacobi_recurrence(
2*num_levels+1, alpha=0, beta=0, probability=True)
self.coeff = 1./(10**np.arange(0, 2*num_levels+1))
def __call__(self, samples):
vals = []
for ii in range(samples.shape[1]):
level = samples[-1, ii]
assert level.is_integer()
level = int(level)
assert level >= 0
random_sample = samples[:-1, ii]
basis_matrix = evaluate_orthonormal_polynomial_1d(
np.asarray([random_sample.sum()]), level+1, self.ab)
pp = np.dot(basis_matrix, self.coeff[:level+2])
vals.append(pp)
vals = np.asarray(vals)
if self.return_work:
vals = np.hstack(
(vals, self.cost_function(samples[-1:, :])[:, np.newaxis]))
return vals
def cost_function(self, x):
return x[0, :]+1.
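# Hedged usage sketch (my addition): the last row of `samples` encodes the
# integer model level; the remaining rows are the random variables, e.g.
# model = MultilevelPolynomialModel(num_levels=2)
# samples = np.vstack([np.random.uniform(-1, 1, (2, 5)), np.zeros((1, 5))])
# values = model(samples)  # one value per column of samples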
class TestSparseGrid(unittest.TestCase):
def test_update_1d_samples_weights_economical(self):
num_vars = 3
level = 2
alpha_stat, beta_stat = 5, 2
variable = stats.beta(alpha_stat, beta_stat)
beta_quad_rule = get_univariate_leja_quadrature_rule(
variable, leja_growth_rule)
quad_rules_econ = [
clenshaw_curtis_in_polynomial_order, beta_quad_rule]
growth_rules_econ = [clenshaw_curtis_rule_growth, leja_growth_rule]
unique_rule_indices = [[1], [0, 2]]
levels = [level]*num_vars
samples_1d_econ, weights_1d_econ = get_1d_samples_weights(
quad_rules_econ, growth_rules_econ, levels, None,
unique_rule_indices)
quad_rules = [beta_quad_rule, clenshaw_curtis_in_polynomial_order,
beta_quad_rule]
growth_rules = [leja_growth_rule, clenshaw_curtis_rule_growth,
leja_growth_rule]
samples_1d, weights_1d = get_1d_samples_weights(
quad_rules, growth_rules, levels)
assert len(samples_1d_econ) == num_vars
for ii in range(num_vars):
assert len(samples_1d_econ[ii]) == len(samples_1d[ii])
for jj in range(len(samples_1d[ii])):
assert np.allclose(samples_1d[ii][jj], samples_1d_econ[ii][jj])
assert np.allclose(weights_1d[ii][jj], weights_1d_econ[ii][jj])
levels = [level+2]*num_vars
samples_1d_econ, weights_1d_econ = \
update_1d_samples_weights_economical(
quad_rules_econ, growth_rules_econ,
levels, samples_1d, weights_1d, None, unique_rule_indices)
samples_1d, weights_1d = get_1d_samples_weights(
quad_rules, growth_rules, levels, None)
assert len(samples_1d_econ) == num_vars
for ii in range(num_vars):
assert len(samples_1d_econ[ii]) == len(samples_1d[ii])
for jj in range(len(samples_1d[ii])):
assert np.allclose(samples_1d[ii][jj], samples_1d_econ[ii][jj])
assert np.allclose(weights_1d[ii][jj], weights_1d_econ[ii][jj])
levels = [3, 5, 2]
samples_1d_econ, weights_1d_econ = get_1d_samples_weights(
quad_rules_econ, growth_rules_econ, levels, None,
unique_rule_indices)
quad_rules = [beta_quad_rule, clenshaw_curtis_in_polynomial_order,
beta_quad_rule]
growth_rules = [leja_growth_rule, clenshaw_curtis_rule_growth,
leja_growth_rule]
samples_1d, weights_1d = get_1d_samples_weights(
quad_rules, growth_rules, levels)
levels = np.asarray(levels)
assert len(samples_1d_econ) == num_vars
for dd in range(len(unique_rule_indices)):
unique_rule_indices[dd] = np.asarray(
unique_rule_indices[dd], dtype=int)
max_level_dd = levels[unique_rule_indices[dd]].max()
for ii in unique_rule_indices[dd]:
assert len(samples_1d_econ[ii]) == max_level_dd+1
for ii in range(num_vars):
for jj in range(len(samples_1d[ii])):
assert np.allclose(samples_1d[ii][jj], samples_1d_econ[ii][jj])
assert np.allclose(weights_1d[ii][jj], weights_1d_econ[ii][jj])
def test_get_hierarchical_sample_indices(self):
num_vars = 4
level = 2
quad_rules = [clenshaw_curtis_in_polynomial_order]*num_vars
growth_rules = [clenshaw_curtis_rule_growth]*num_vars
samples_1d, __ = get_1d_samples_weights(quad_rules, growth_rules,
[level]*num_vars)
subspace_index = np.array([1, 0, 2, 0])
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules)
config_variables_idx = None
hier_indices = get_hierarchical_sample_indices(
subspace_index, subspace_poly_indices,
samples_1d, config_variables_idx)
num_indices = 4
indices = np.zeros((num_vars, num_indices), dtype=int)
indices[0, 0] = 1
indices[2, 0] = 3
indices[0, 1] = 2
indices[2, 1] = 3
indices[0, 2] = 1
indices[2, 2] = 4
indices[0, 3] = 2
indices[2, 3] = 4
assert np.allclose(subspace_poly_indices[:, hier_indices], indices)
def test_get_hierarchical_sample_indices_with_config_variables(self):
num_config_vars = 1
num_random_vars = 1
num_vars = num_random_vars+num_config_vars
level = 2
quad_rules = [clenshaw_curtis_in_polynomial_order]*num_random_vars
growth_rules = [clenshaw_curtis_rule_growth]*num_random_vars
samples_1d, __ = get_1d_samples_weights(
quad_rules, growth_rules, [level]*num_random_vars)
subspace_index = np.array([0, 2])
config_variables_idx = num_vars-num_config_vars
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules, config_variables_idx)
hier_indices = get_hierarchical_sample_indices(
subspace_index, subspace_poly_indices,
samples_1d, config_variables_idx)
indices = np.array([0])
assert np.allclose(hier_indices, indices)
subspace_index = np.array([1, 1])
config_variables_idx = num_vars-num_config_vars
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules, config_variables_idx)
hier_indices = get_hierarchical_sample_indices(
subspace_index, subspace_poly_indices,
samples_1d, config_variables_idx)
indices = np.array([1, 2])
assert np.allclose(hier_indices, indices)
num_config_vars = 2
num_random_vars = 2
num_vars = num_random_vars+num_config_vars
level = 2
quad_rules = [clenshaw_curtis_in_polynomial_order]*num_random_vars
growth_rules = [clenshaw_curtis_rule_growth]*num_random_vars
samples_1d, __ = get_1d_samples_weights(
quad_rules, growth_rules, [level]*num_random_vars)
subspace_index = np.array([0, 0, 0, 2])
config_variables_idx = num_vars-num_config_vars
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules, config_variables_idx)
hier_indices = get_hierarchical_sample_indices(
subspace_index, subspace_poly_indices,
samples_1d, config_variables_idx)
indices = np.zeros(1)
# np.allclose against an empty array is vacuously True (the broadcast result has zero elements), so check the length explicitly
assert hier_indices.shape[0] == 1
assert np.allclose(indices, hier_indices)
subspace_index = np.array([1, 0, 0, 2])
config_variables_idx = num_vars-num_config_vars
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules, config_variables_idx)
hier_indices = get_hierarchical_sample_indices(
subspace_index, subspace_poly_indices,
samples_1d, config_variables_idx)
indices = np.arange(1, 3)
assert np.allclose(indices, hier_indices)
def test_get_subspace_samples(self):
num_vars = 4
level = 2
quad_rules = [clenshaw_curtis_in_polynomial_order]*num_vars
growth_rules = [clenshaw_curtis_rule_growth]*num_vars
samples_1d, __ = get_1d_samples_weights(
quad_rules, growth_rules, [level]*num_vars)
subspace_index = np.array([1, 0, 2, 0])
subspace_poly_indices = get_subspace_polynomial_indices(
subspace_index, growth_rules)
subspace_samples = get_subspace_samples(
subspace_index, subspace_poly_indices, samples_1d)
abscissa_1d = []
for dd in range(num_vars):
abscissa_1d.append(samples_1d[dd][subspace_index[dd]])
samples = cartesian_product(abscissa_1d)
assert np.allclose(subspace_samples, samples)
subspace_index = np.array([1, 0, 2, 0])
subspace_samples = get_subspace_samples(
subspace_index, subspace_poly_indices, samples_1d,
unique_samples_only=True)
# there are two unique samples in each of the active variables
# so num_samples=4
num_samples = 4
samples = np.zeros((num_vars, num_samples))
samples[0, 0] = samples_1d[0][1][1]
samples[2, 0] = samples_1d[2][2][3]
samples[0, 1] = samples_1d[0][1][2]
samples[2, 1] = samples_1d[2][2][3]
samples[0, 2] = samples_1d[0][1][1]
samples[2, 2] = samples_1d[2][2][4]
samples[0, 3] = samples_1d[0][1][2]
samples[2, 3] = samples_1d[2][2][4]
assert np.allclose(subspace_samples, samples)
def test_sparse_grid_integration_clenshaw_curtis(self):
num_vars = 4
level = 3
samples, weights, data_structures = \
get_sparse_grid_samples_and_weights(
num_vars, level, clenshaw_curtis_in_polynomial_order,
clenshaw_curtis_rule_growth)
poly_indices = data_structures[1]
# plot_sparse_grid(samples,weights,poly_indices)
# plt.show()
J = np.arange(poly_indices.shape[1])
coeffs = np.random.normal(0.0, 1.0, (J.shape[0], 1))
values = evaluate_monomial(poly_indices[:, J], coeffs, samples)
assert np.allclose(np.dot(values[:, 0], weights),
monomial_mean_uniform_variables(
poly_indices[:, J], coeffs))
def test_sparse_grid_integration_mixed_quadrature_rule(self):
num_vars = 2
level = 3
alpha_stat, beta_stat = 5, 2
variable = stats.beta(alpha_stat, beta_stat)
beta_quad_rule = get_univariate_leja_quadrature_rule(
variable, leja_growth_rule)
quad_rules = [clenshaw_curtis_in_polynomial_order, beta_quad_rule]
growth_rules = [clenshaw_curtis_rule_growth, leja_growth_rule]
samples, weights, data_structures = \
get_sparse_grid_samples_and_weights(
num_vars, level, quad_rules, growth_rules)
poly_indices = data_structures[1]
# plot_sparse_grid(samples,weights,poly_indices)
# plt.show()
J = np.arange(poly_indices.shape[1])
coeffs = np.random.normal(0.0, 1.0, (J.shape[0]))
x, y = sp.Symbol('x'), sp.Symbol('y')
monomial_expansion = 0
for ii in range(poly_indices.shape[1]):
monomial_expansion +=\
coeffs[ii]*x**poly_indices[0, ii]*y**poly_indices[1, ii]
weight_function_x = 0.5
weight_function_y = beta_pdf_on_ab(alpha_stat, beta_stat, -1, 1, y)
weight_function = weight_function_x*weight_function_y
ranges = [-1, 1, -1, 1]
exact_mean = float(sp.integrate(
monomial_expansion*weight_function,
(x, ranges[0], ranges[1]), (y, ranges[2], ranges[3])))
values = evaluate_monomial(poly_indices[:, J], coeffs, samples)
sparse_grid_mean = np.dot(values[:, 0], weights)
assert
|
np.allclose(sparse_grid_mean, exact_mean)
|
numpy.allclose
|
import numpy as np
from nose.tools import (assert_less, assert_greater, assert_equal,
assert_raises_regexp)
from sklearn.utils.testing import assert_warns
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_true
from bolero.optimizer import CMAESOptimizer, fmin
def test_cmaes_no_initial_params():
opt = CMAESOptimizer()
opt.init(10)
params = np.empty(10)
opt.get_next_parameters(params)
def test_cmaes_dimensions_mismatch():
opt = CMAESOptimizer(initial_params=np.zeros(5))
assert_raises_regexp(ValueError, "Number of dimensions", opt.init, 10)
def test_cmaes_diagonal_cov():
opt = CMAESOptimizer(covariance=np.zeros(10))
opt.init(10)
params = np.empty(10)
opt.get_next_parameters(params)
def test_unknown_cmaes():
assert_raises_regexp(
ValueError, "Unknown cma_type", fmin, lambda x:
|
np.linalg.norm(x)
|
numpy.linalg.norm
|
import numpy as np
import xgboost as xgb
import networkx as nx
from features import features
from loading_utils import metis_format_to_nx, write_nx_in_metis_format, search_for_graphs, get_graphs_and_labels, get_dmatrix_from_graphs
from datetime import date
import os
import subprocess
# mtxe first graph no convergence of eigenvectors ... strange
graph_paths = search_for_graphs([], graph_folder="/home/graph_collection/independentset_instances/mtxe")[2:4]
label_paths = ["/home/jholten/mis/kamis_results/" + os.path.basename(path)[:-6] + ".uniform.mis" for path in graph_paths][2:4]
OUTPUT_FOLDER = "/home/jholten/kernels/ml_reduce_kernels/"
graphs = get_graphs_and_labels(graph_paths, label_paths)
data = get_dmatrix_from_graphs(graphs)
bst = xgb.Booster({'nthread': 16})
bst.load_model("first-10_2021-04-11.model")
num_stages = 5
q = 0.98 # confidence level
for graph in graphs:
graph.graph['removals'] = []
graph.graph['old_number_of_nodes'] = graph.number_of_nodes()
print("reducing graphs:")
for stage in range(1, num_stages+1):
print(f"stage {stage}")
label_pred = bst.predict(data)
# np.savetxt(f"{date.today()}prediction_stage{stage}.pred", label_pred)
for graph in graphs:
print(f"{graph.graph['kw']} ...")
removal =
|
np.array(graph.nodes)
|
numpy.array
|
import os
# import pcl
import torch
import numpy as np
import cv2
import math
from bBox_2D import bBox_2D
import json
import random
import shutil
cloudata_train = np.load('./testset/cloudata_train_new.npy')
cloudata_test = np.load('./testset/cloudata_test_new.npy')
anndata_train = np.load('./testset/anndata_train_new.npy')
anndata_test = np.load('./testset/anndata_test_new.npy')
# time_stamps = joblib.load('./testset/time_stamps_tmp')
# ==============================
resolution = 1000 # res*res; use 224 for ResNet, 299 for Inception, 1000 for visualization ONLY
# ==============================
# ------------> x annotation box clock wise
# |
# |
# |
# y
# ==============================
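# Worked example (my addition): a point at x=20 m, y=5 m maps to canvas index
# (20*900/30 + 100, 5*30 + 500) = (700, 650) on the 1000x1000 image below,
# before the final resize to `resolution`.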
def data2image(cloudata, anndata, isTrain):
img = []
# Cloud data to images
_pixel_enhance = np.array([-1, 0, 1])
pixel_enhance = np.array([[x, y] for x in _pixel_enhance for y in _pixel_enhance]) # enhance pixel by extra 8
for i, scan in enumerate(cloudata):
emptyImage = np.zeros([1000, 1000, 3], np.uint8)
for dot in scan:
if dot[0] < 30 and 100 / 6 > dot[1] > -100 / 6: # in range
x, y = int(dot[0] * 900 / 30 + 100), int(dot[1] * 30 + 500)
enhanced = [[x, y] + e for e in pixel_enhance]
for e in enhanced:
if 0 <= e[0] < 1000 and 0 <= e[1] < 1000: # keep enhanced pixels inside the canvas
emptyImage[e[0], e[1]] = (
int(255 - math.hypot(dot[0], dot[1]) * 255 / 60),
int(255 - (dot[0] * 235 / 30 + 20)),
int(dot[1] * 75 / 15 + 80)
# 255, 255, 255
)
outImage = cv2.resize(emptyImage, (resolution, resolution), interpolation=cv2.INTER_CUBIC)
for j, label in enumerate(anndata[i]):
if label[4] == -90 or label[4] == 90:
box = bBox_2D(label[1], label[0], label[3], label[2],
-label[4]) # fix annotations!!! x-y from data is reversed
else:
box = bBox_2D(label[0], label[1], label[3], label[2], -label[4]) # clock wise
# print(box.xc,box.yc)
if box.xc == 0 and box.yc == 0 and box.length == 0 and box.width == 0:
anndata[i][j] = [0, 0, 0, 0, 0] # mark with 0
continue
# print(' xc ', box.xc, ' yc ', box.yc, ' l ', box.length, ' w ', box.width)
box.scale(900 / 30, 500, 100)
box.scale(resolution / 1000, 0, 0)
anndata[i][j] = [box.length, box.width, box.xc, box.yc, box.alpha]
# rad = box.alpha * math.pi / 180
# box.bBoxCalcVertxex()
# cv2.line(outImage, box.vertex1, box.vertex2, (155, 255, 255), 1, cv2.LINE_AA)
# cv2.line(outImage, box.vertex2, box.vertex4, (155, 255, 255), 1, cv2.LINE_AA)
# cv2.line(outImage, box.vertex3, box.vertex1, (155, 255, 255), 1, cv2.LINE_AA)
# cv2.line(outImage, box.vertex4, box.vertex3, (155, 255, 255), 1, cv2.LINE_AA)
# point = int(box.xc - box.length * 0.8 * np.sin(rad)), int(box.yc + box.length * 0.8 * np.cos(rad))
# cv2.line(outImage, (int(box.xc), int(box.yc)),
# point,
# (155, 255, 255), 1, cv2.LINE_AA)
# print(box.xc, box.yc,box.alpha)
# cv2.imshow('scan', outImage)
print(i)
# k=cv2.waitKey()
# if k == 27: # Esc for exiting
# cv2.destroyAllWindows()
# os._exit(1)
img.append(outImage)
# Flipping
if isTrain:
augmentimg = []
for i, im in enumerate(img):
imflipped = cv2.flip(im, 1)
augmentimg.append(imflipped)
img = img + augmentimg
del augmentimg
augmentann = np.zeros(anndata.shape, dtype=float)
for i, scan in enumerate(anndata):
for j, label in enumerate(scan):
if label[0] == 0:
continue
box = bBox_2D(label[0], label[1], label[2], label[3], label[4])
box.flipx(axis=int(resolution / 2))
augmentann[i][j] = [box.length, box.width, box.xc, box.yc, box.alpha]
anndata = np.concatenate((anndata, augmentann))
del augmentann
#
# Adding noise : rotate, translate(x,y), resize
# print('Adding Noise...')
# augmentann = np.zeros(anndata.shape, dtype=np.float)
# for i, scan in enumerate(anndata):
# for j, label in enumerate(scan):
# if label[0]==0:
# continue
# noiseratio = ((torch.randn(2)).div_(20)).exp_()
# noiseoffset = (torch.randn(2))
# box = bBox_2D(label[0], label[1], label[2], label[3], label[4])
# box.rotate(noiseratio[0])
# box.resize(noiseratio[1])
# box.translate(noiseoffset[0], noiseoffset[1])
# augmentann[i][j] = [box.length, box.width, box.xc, box.yc, box.alpha]
# anndata = np.concatenate((anndata, augmentann))
# del augmentann
# img = img + img
# # #
ll = len(img)
print(anndata.shape, '\t', ll)
return img, anndata
# to COCO json dataset and shuffle and split
# ann_json = {}
# images = []
# annotations = []
# categories = []
# iminfo = {}
# anninfo = {}
# catinfo = {}
# trainsplit, valsplit, testsplit = int(ll * 0.70), int(ll * (0.70 + 0.15)), ll
# overfittest = 60
# print(trainsplit, valsplit - trainsplit, testsplit - valsplit)
# mwidth, mlength, mrotation, marea = 0, 0, 0, 0
#
# shutil.rmtree('./maskrcnn-benchmark/datasets/coco/val2014')
# os.mkdir('./maskrcnn-benchmark/datasets/coco/val2014')
# shutil.rmtree('./maskrcnn-benchmark/datasets/coco/train2014')
# os.mkdir('./maskrcnn-benchmark/datasets/coco/train2014')
# shutil.rmtree('./maskrcnn-benchmark/datasets/coco/test2014')
# os.mkdir('./maskrcnn-benchmark/datasets/coco/test2014')
# shutil.rmtree('./maskrcnn-benchmark/datasets/coco/overfit2014')
# os.mkdir('./maskrcnn-benchmark/datasets/coco/overfit2014') # renew data space
def create_coco_train(img, anndata):
images = []
annotations = []
categories = []
mwidth, mlength, mrotation, marea = 0, 0, 0, 0
shutil.rmtree('./maskrcnn-benchmark/datasets/coco/train2014')
os.mkdir('./maskrcnn-benchmark/datasets/coco/train2014')
pixel_mean = np.array([0., 0., 0.])
pixel_std = np.array([0., 0., 0.])
for i, im in enumerate(img):
cv2.imwrite('./maskrcnn-benchmark/datasets/coco/train2014/im%d.jpg' % i, im)
pixel_mean += np.array([np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])])
pixel_std += np.array([np.std(im[:, :, 0]), np.std(im[:, :, 1]), np.std(im[:, :, 2])])
iminfo = {
"file_name": "im%d.jpg" % i,
"height": im.shape[0],
"width": im.shape[1],
"id": i
}
images.append(iminfo)
ll = len(img)
print(pixel_mean / ll, '==pixel_mean==', pixel_std / ll, '==pixel_std==')
idcount = 0
for j, ann in enumerate(anndata):
# np.save('./testset/dataset/ann/ann%d' % j, ann)
for i, label in enumerate(ann):
# remove empty
if label[0] == 0:
continue
# filter bbox too small too large or too thin!! (unit in PIXELs)
if label[0] < 12 or label[1] < 12 or label[0] > 144 or label[1] > 144 \
        or label[0] * label[1] < 360 or label[0] * label[1] > 13000:
continue
box = bBox_2D(label[0], label[1], label[2], label[3], label[4])
box.xcyc2topleft()
anninfo = {
'segmentation': [],
'area': box.length * box.width,
'image_id': j,
'bbox': [box.xtl, box.ytl, box.width, box.length],
'rotation': box.alpha,
'category_id': 0,
'id': idcount,
'iscrowd': 0
}
annotations.append(anninfo)
idcount += 1
mwidth += box.width
mlength += box.length
marea += box.length * box.width
mrotation += box.alpha
catinfo = {
"supercategory": "none",
"id": 0,
"name": "car"}
categories.append(catinfo)
imagetrain = images[:]
imids = set(im['id'] for im in imagetrain)
annids = set(ann['id'] if ann['image_id'] in imids else None for ann in
annotations) # get binary inds and ids of ann according to im
# annids.remove(None)
anntrain = []
for ann in annotations:
if ann['image_id'] in imids: # two different ids !!!!!!!
anntrain.append(ann)
trainann_json = {'info': {}, 'images': imagetrain, 'annotations': anntrain, 'categories': categories}
with open("./maskrcnn-benchmark/datasets/coco/annotations/trainann.json", 'w', encoding='utf-8') as json_file:
json.dump(trainann_json, json_file, ensure_ascii=False)
print('train summary ', mwidth / idcount, mlength / idcount, marea / idcount, mrotation / idcount)
def create_coco_test(img, anndata):
images = []
annotations = []
categories = []
mwidth, mlength, mrotation, marea = 0, 0, 0, 0
shutil.rmtree('./maskrcnn-benchmark/datasets/coco/test2014')
os.mkdir('./maskrcnn-benchmark/datasets/coco/test2014')
pixel_mean = np.array([0., 0., 0.])
pixel_std = np.array([0., 0., 0.])
for i, im in enumerate(img):
cv2.imwrite('./maskrcnn-benchmark/datasets/coco/test2014/im%d.jpg' % i, im)
pixel_mean += np.array([np.mean(im[:, :, 0]), np.mean(im[:, :, 1]), np.mean(im[:, :, 2])])
pixel_std += np.array([np.std(im[:, :, 0]), np.std(im[:, :, 1]),
|
np.std(im[:, :, 2])
|
numpy.std
|
"""
HSDA sub-module of pyhail
Contains the dual pol hail size discrimination algorithm
requires temperature, reflectivity, differential reflectivity and cross correlation
<NAME> - 15 June 2018
"""
from pyhail import common, hsda_mf
import numpy as np
import netCDF4
import pyart
def main(radar, snd_input, hca_hail_idx, dzdr, ref_name, zdr_name, rhv_name, phi_name, snr_name, cbb_name, hca_name):
"""
Wrapper function for HSDA processing
Parameters:
===========
radar: struct
Py-ART radar object.
snd_input: string
sounding full filename (inc path)
hca_hail_idx: list
index of hail related fields in classification to apply HSDA
dzdr:
offset for differential reflectivity
####_name: string
field name from radar object
Returns:
========
hsda: ndarray
HSDA class array (1 = small < 25 mm, 2 = large 25-50 mm, 3 = giant > 50 mm)
"""
#build sounding data
snd_data = netCDF4.Dataset(snd_input)
snd_temp = snd_data.variables["temp"][:]
snd_geop = snd_data.variables["height"][:]
snd_rh = snd_data.variables["rh"][:]
snd_data.close()
#calc wbt
snd_wbt = common.wbt(snd_temp,snd_rh)
#run interpolation
wbt_minus25C = common.sounding_interp(snd_wbt,snd_geop,-25)/1000
wbt_0C = common.sounding_interp(snd_wbt,snd_geop,0)/1000
#building consts
const = {'wbt_minus25C' : wbt_minus25C, 'wbt_0C' : wbt_0C, 'dzdr' : dzdr, 'hca_hail_idx':hca_hail_idx}
#load data
zh_cf = radar.fields[ref_name]['data']
zdr_cf = radar.fields[zdr_name]['data']
rhv_cf = radar.fields[rhv_name]['data']
phi_cf = radar.fields[phi_name]['data']
snr_cf = radar.fields[snr_name]['data']
cbb_cf = radar.fields[cbb_name]['data']
hca = radar.fields[hca_name]['data']
#smooth radar data
zh_cf_smooth = common.smooth_ppi_rays(zh_cf,5)
zdr_cf_smooth = common.smooth_ppi_rays(zdr_cf,5)
rhv_cf_smooth = common.smooth_ppi_rays(rhv_cf,5)
#build membership functions
w, mf = hsda_mf.build_mf()
#generate quality vector
q = hsda_q(zh_cf_smooth, phi_cf, rhv_cf_smooth, phi_cf, cbb_cf, cbb_threshold=0.5)
#calc pixel alt
rg, azg = np.meshgrid(radar.range['data'], radar.azimuth['data'])
rg, eleg = np.meshgrid(radar.range['data'], radar.elevation['data'])
_, _, alt = pyart.core.antenna_to_cartesian(rg / 1000.0, azg, eleg)
#find all pixels in hca which match the hail classes
#for each pixel, apply transform
hail_mask = np.isin(hca, const['hca_hail_idx'])
hail_idx = np.where(hail_mask)
#loop through every pixel
hsda =
|
np.zeros(hca.shape)
|
numpy.zeros
|
import numpy as np
from scipy.optimize import least_squares
import pandas as pd
from lib.logger import Logger
import json, math
logger = Logger('./calibration', clear_log=True)
# y,x,z
#pixel ray directions [center-center][left-center][right-center][center-bottom][center-top]
# width, height
# 539 959, 0 959, 1079 959, 539 0, 539 1919
indices = {
'center': [(539, 959), (539, 960), (540, 959), (540, 960)],
'left': [(0, 959), (0, 960)],
'left_half': [(269, 959), (269, 960)],
'right': [(1079, 959), (1079, 960)],
'right_half': [(810, 959), (810, 960)],
'top': [(539, 1919), (540, 1919)],
'top_half': [(539, 1440), (540, 1440)],
'bottom': [(539, 0), (540, 0)],
'bottom_half': [(539, 479), (540, 479)],
'top_left_half': [(269, 1440)],
'top_right_half': [(810, 1440)],
'bottom_left_half': [(269, 479)],
'bottom_right_half': [(810, 479)],
}
idx = [(539, 959), ]#,(0, 959), (1079, 959), (539, 0), (539, 1919)]
cams = []
for i in [1,2,3,4,5]:
rays =pd.read_csv('data/scalarFlow/calib20190813/{}_rays.txt'.format(i), sep=' ', skiprows=1, header=None, names=['pY','pX','dY','dX','dZ'], index_col=(0,1))
cam = {}
for key, idx in indices.items():
tmp = []
try:
for id in idx:
ray = rays.loc[id]
tmp.append({'start': np.asarray([ray['pX'],ray['pY'],0.0]),
'dir': np.asarray([ray['dX'],ray['dY'],ray['dZ']]),
})
cam[key] = tmp
except KeyError:
print('[W]: could not access index {} for cam {}, key {}'.format(id,i, key))
cams.append(cam)
#https://math.stackexchange.com/questions/2598811/calculate-the-point-closest-to-multiple-rays
def to_ray(pos, start, dir):
t = np.dot(dir, pos-start)/np.dot(dir,dir)
return pos - (start + t*dir)
def dist_to_ray(pos, start, dir):
return np.linalg.norm(to_ray(pos, start, dir))  # fixed: 'no.' typo and the missing return
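# Usage sketch (illustrative values): the point (0, 1, 0) lies at
# distance 1 from a ray through the origin along +x.
#
#     >>> float(dist_to_ray(np.array([0., 1., 0.]),
#     ...                   np.array([0., 0., 0.]),
#     ...                   np.array([1., 0., 0.])))
#     1.0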
def f(x, rays):
e = []
for ray in rays:
e += list(to_ray(x, ray['start'], ray['dir']))
return e
def angle_between(a, b):
return np.arccos(np.dot(a, b)/(np.linalg.norm(a)*np.linalg.norm(b)))
#https://en.wikipedia.org/wiki/Slerp
def slerp(v1, v2, t):
O = angle_between(v1, v2)
sO = np.sin(O)
return np.sin((1-t)*O)/sO * v1 + np.sin(t*O)/sO * v2
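# Usage sketch: slerp halfway between the +x and +y unit vectors yields
# the unit vector at 45 degrees.
#
#     >>> slerp(np.array([1., 0., 0.]), np.array([0., 1., 0.]), 0.5)
#     array([0.70710678, 0.70710678, 0.        ])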
deg_to_rad = np.pi/180
rad_to_deg = 180/np.pi
cam_order = [0,1,2,3,4] #[0, 4, 3, 2, 1] #mapping of ray calibration files to recorded sequences?
#i=0
cam_rays = []
cam_params = []
cam_json = {str(_):{} for _ in cam_order}
for i in range(len(cams)):
cam = cams[cam_order[i]]
params = {'rotation':None, 'position':None, 'position_error':None,
'forward':None, 'right':None, 'up':None,
'fov_horizontal':None, 'fov_vertical':None}
print('cam', i+1)
if 'center' in cam:
c_rays = [ray['dir'] for ray in cam['center']]
c = slerp(slerp(c_rays[0],c_rays[1], 0.5), slerp(c_rays[2],c_rays[3], 0.5), 0.5)
c /= np.linalg.norm(c)
fwd = c
t_y = np.arctan(c[0]/c[2])
t_x = np.arctan(c[1]/np.linalg.norm([c[0], c[2]]))
print('\trot:', t_x*rad_to_deg,t_y*rad_to_deg,0.0)
print('\tfwd: {} (center ray)'.format(c))
params['rotation']=[t_x*rad_to_deg,t_y*rad_to_deg,0.0]
params['forward']=list(fwd)
if 'left' in cam and 'right' in cam:
l = slerp(cam['left'][0]['dir'],cam['left'][1]['dir'], 0.5)
r = slerp(cam['right'][0]['dir'],cam['right'][1]['dir'], 0.5)
up = np.cross(l, r)
up /= np.linalg.norm(up)
print('\tup: {}'.format(up))
print('\t\tfov x: {}'.format(angle_between(l,r)*rad_to_deg))
params['up']=list(up)
params['fov_horizontal']=angle_between(l,r)*rad_to_deg
if 'left_half' in cam and 'right_half' in cam:
l = slerp(cam['left_half'][0]['dir'],cam['left_half'][1]['dir'], 0.5)
r = slerp(cam['right_half'][0]['dir'],cam['right_half'][1]['dir'], 0.5)
up = np.cross(l, r)
up /= np.linalg.norm(up)
print('\t[up_half: {}]'.format(up))
if params['up'] is None:
params['up']=list(up)
if 'top' in cam and 'bottom' in cam:
t = slerp(cam['top'][0]['dir'],cam['top'][1]['dir'], 0.5)
b = slerp(cam['bottom'][0]['dir'],cam['bottom'][1]['dir'], 0.5)
right = np.cross(t, b)
right /= np.linalg.norm(right)
print('\tright: {}'.format(right))
print('\t\tfov y: {}'.format(angle_between(t,b)*rad_to_deg))
params['right']=list(right)
params['fov_vertical']=angle_between(t,b)*rad_to_deg
if 'top_half' in cam and 'bottom_half' in cam:
t = slerp(cam['top_half'][0]['dir'],cam['top_half'][1]['dir'], 0.5)
b = slerp(cam['bottom_half'][0]['dir'],cam['bottom_half'][1]['dir'], 0.5)
right = np.cross(t, b)
right /=
|
np.linalg.norm(right)
|
numpy.linalg.norm
|
from functools import partial
from typing import List, Union, Dict, Callable
import numpy as np
from utils.utils_mytorch import FancyDict
from utils.utils import KNOWN_DATASETS
from .load import load_jf17k_statements, load_jf17k_quints, load_jf17k_triples, \
load_wikipeople_statements, load_wikipeople_quints, load_wikipeople_triples, \
load_wd50k_statements, load_wd50k_quints, load_wd50k_triples, \
load_wd50k_100_statements, load_wd50k_100_quints, load_wd50k_100_triples, \
load_wd50k_66_statements, load_wd50k_66_quints, load_wd50k_66_triples, \
load_wd50k_33_statements, load_wd50k_33_quints, load_wd50k_33_triples
from .clean_datasets import load_clean_wikipeople_statements, load_clean_jf17k_statements, load_clean_wd50k
class DataManager(object):
""" Give me your args I'll give you a path to load the dataset with my superawesome AI """
@staticmethod
def load(config: Union[dict, FancyDict]) -> Callable:
""" Depends upon 'STATEMENT_LEN' and 'DATASET' """
# Get the necessary dataset's things.
assert config['DATASET'] in KNOWN_DATASETS, f"Dataset {config['DATASET']} is unknown."
if config['DATASET'] == 'wd50k':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k", subtype="quints")
else:
return load_wd50k_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k", subtype="triples")
else:
return load_wd50k_triples
else:
if config['CLEANED_DATASET']:
# return partial(load_clean_wd50k, name="wd50k", subtype="statements", maxlen=config['MAX_QPAIRS'])
return partial(load_clean_wd50k, name="wd50k", subtype=config['SUBTYPE'], maxlen=config['MAX_QPAIRS'])
else:
return partial(load_wd50k_statements, maxlen=config['MAX_QPAIRS'])
elif config['DATASET'] == 'wikipeople':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_wikipeople_statements, subtype="quints")
else:
return load_wikipeople_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_wikipeople_statements, subtype="triples")
else:
return load_wikipeople_triples
else:
if config['CLEANED_DATASET']:
return partial(load_clean_wikipeople_statements, subtype="statements", maxlen=config['MAX_QPAIRS'])
else:
return partial(load_wikipeople_statements, maxlen=config['MAX_QPAIRS'])
elif config['DATASET'] == 'wd50k_100':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_100", subtype="quints")
else:
return load_wd50k_100_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_100", subtype="triples")
else:
return load_wd50k_100_triples
else:
if config['CLEANED_DATASET']:
# return partial(load_clean_wd50k, name="wd50k_100", subtype="statements", maxlen=config['MAX_QPAIRS'])
return partial(load_clean_wd50k, name="wd50k_100", subtype=config['SUBTYPE'], maxlen=config['MAX_QPAIRS'])
else:
return partial(load_wd50k_100_statements, maxlen=config['MAX_QPAIRS'])
elif config['DATASET'] == 'wd50k_33':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_33", subtype="quints")
else:
return load_wd50k_33_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_33", subtype="triples")
else:
return load_wd50k_33_triples
else:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_33", subtype=config['SUBTYPE'], maxlen=config['MAX_QPAIRS'])
else:
return partial(load_wd50k_33_statements, maxlen=config['MAX_QPAIRS'])
elif config['DATASET'] == 'wd50k_66':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_66", subtype="quints")
else:
return load_wd50k_66_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_66", subtype="triples")
else:
return load_wd50k_66_triples
else:
if config['CLEANED_DATASET']:
return partial(load_clean_wd50k, name="wd50k_66", subtype=config['SUBTYPE'], maxlen=config['MAX_QPAIRS'])
else:
return partial(load_wd50k_66_statements, maxlen=config['MAX_QPAIRS'])
elif config['DATASET'] == 'jf17k':
if config['STATEMENT_LEN'] == 5:
if config['CLEANED_DATASET']:
return partial(load_clean_jf17k_statements, subtype="quints")
else:
return load_jf17k_quints
elif config['STATEMENT_LEN'] == 3:
if config['CLEANED_DATASET']:
return partial(load_clean_jf17k_statements, subtype="triples")
else:
return load_jf17k_triples
elif config['STATEMENT_LEN'] == -1:
if config['CLEANED_DATASET']:
return partial(load_clean_jf17k_statements, subtype="statements", maxlen=config['MAX_QPAIRS'])
else:
return partial(load_jf17k_statements, maxlen=config['MAX_QPAIRS'])
@staticmethod
def gather_missing_entities(data: List[list], n_ents: int, positions: List[int]) -> np.ndarray:
"""
Find the entities which aren't available from range(n_ents).
Think inverse of gather_entities
:param data: A list of triples/quints whatever
:param n_ents: Int signifying total number of entities
:param positions: the positions over which we intend to count these things.
:return: np array of entities NOT appearing in range(n_ents)
"""
appeared = np.zeros(n_ents, dtype=int)  # np.int was removed in NumPy 1.24
for datum in data:
for pos in positions:
appeared[datum[pos]] = 1
# Return this removed from range(n_ents)
return np.arange(n_ents)[appeared == 0]
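# A minimal sanity check for gather_missing_entities (hypothetical data):
# with triples over 6 entities where only ids 0, 2 and 3 occur in the
# subject/object positions, ids 1, 4 and 5 are reported as missing.
#
#     >>> DataManager.gather_missing_entities([[0, 9, 2], [3, 9, 0]],
#     ...                                     n_ents=6, positions=[0, 2])
#     array([1, 4, 5])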
@staticmethod
def get_graph_repr(raw: Union[List[List[int]], np.ndarray], config: dict) \
-> Dict[str, np.ndarray]:
"""
Decisions:
We are NOT making inverse of qualifier relations. Those are just repeated.
The normal triple relations are inverted.
Pseudocode:
for each of train, test, valid split
for each triple,
s, o -> edge_index
r -> edge_type
r_q1,... -> list of column vectors (np.arrs)
e_q1,... -> list of column vectors (np.arrs)
endfor
endfor
create reverse relations in the existing stuff.
TODO: Check if the data has repeats (should not).
:param raw: [[s, p, o, qr1, qe1, qr2, qe3...], ..., [...]]
(already have a max qualifier length padded data)
:param config: the config dict
"""
has_qualifiers: bool = config['STATEMENT_LEN'] != 3
try:
nr = config['NUM_RELATIONS']
except KeyError:
raise AssertionError("Function called too soon. Num relations not found.")
edge_index, edge_type = np.zeros((2, len(raw) * 2), dtype='int32'), np.zeros((len(raw) * 2), dtype='int32')
qual_rel = np.zeros(((len(raw[0]) - 3) // 2, len(raw) * 2), dtype='int32')
qual_ent = np.zeros(((len(raw[0]) - 3) // 2, len(raw) * 2), dtype='int32')
# Add actual data
for i, data in enumerate(raw):
edge_index[:, i] = [data[0], data[2]]
edge_type[i] = data[1]
# @TODO: add qualifiers
if has_qualifiers:
qual_rel[:, i] = data[3::2]
qual_ent[:, i] = data[4::2]
# Add inverses
edge_index[1, len(raw):] = edge_index[0, :len(raw)]
edge_index[0, len(raw):] = edge_index[1, :len(raw)]
edge_type[len(raw):] = edge_type[:len(raw)] + nr
if has_qualifiers:
qual_rel[:, len(raw):] = qual_rel[:, :len(raw)]
qual_ent[:, len(raw):] = qual_ent[:, :len(raw)]
return {'edge_index': edge_index,
'edge_type': edge_type,
'qual_rel': qual_rel,
'qual_ent': qual_ent}
else:
return {'edge_index': edge_index,
'edge_type': edge_type}
@staticmethod
def get_alternative_graph_repr(raw: Union[List[List[int]], np.ndarray], config: dict) \
-> Dict[str, np.ndarray]:
"""
Decisions:
Quals are represented differently here, i.e., more as a coo matrix
s1 p1 o1 qr1 qe1 qr2 qe2 [edge index column 0]
s2 p2 o2 qr3 qe3 [edge index column 1]
edge index:
[ [s1, s2],
[o1, o2] ]
edge type:
[ p1, p2 ]
quals will looks like
[ [qr1, qr2, qr3],
[qe1, qe2, qe3],
[0 , 0 , 1 ] ]  <- obtained from the edge index columns
:param raw: [[s, p, o, qr1, qe1, qr2, qe3...], ..., [...]]
(already have a max qualifier length padded data)
:param config: the config dict
:return: output dict
"""
has_qualifiers: bool = config['STATEMENT_LEN'] != 3
try:
nr = config['NUM_RELATIONS']
except KeyError:
raise AssertionError("Function called too soon. Num relations not found.")
edge_index, edge_type = np.zeros((2, len(raw) * 2), dtype='int32'), np.zeros((len(raw) * 2), dtype='int32')
# qual_rel = np.zeros(((len(raw[0]) - 3) // 2, len(raw) * 2), dtype='int32')
# qual_ent = np.zeros(((len(raw[0]) - 3) // 2, len(raw) * 2), dtype='int32')
qualifier_rel = []
qualifier_ent = []
qualifier_edge = []
# Add actual data
for i, data in enumerate(raw):
edge_index[:, i] = [data[0], data[2]]
edge_type[i] = data[1]
# @TODO: add qualifiers
if has_qualifiers:
qual_rel = np.array(data[3::2])
qual_ent =
|
np.array(data[4::2])
|
numpy.array
|
import numpy as np
import torch
class Metric:
def __init__(self):
pass
def __call__(self, outputs, target, loss):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def value(self):
raise NotImplementedError
def name(self):
raise NotImplementedError
class AccumulatedAccuracyMetric(Metric):
"""
Works with classification model
"""
def __init__(self):
self.correct = 0
self.total = 0
def __call__(self, outputs, target, loss):
pred = outputs[0].data.max(1, keepdim=True)[1]
self.correct += pred.eq(target[0].data.view_as(pred)).cpu().sum()
self.total += target[0].size(0)
return self.value()
def reset(self):
self.correct = 0
self.total = 0
def value(self):
return 100 * float(self.correct) / self.total
def name(self):
return 'Accuracy'
class AverageNonzeroTripletsMetric(Metric):
'''
Counts average number of nonzero triplets found in minibatches
'''
def __init__(self):
self.values = []
def __call__(self, outputs, target, loss):
self.values.append(loss[1])
return self.value()
def reset(self):
self.values = []
def value(self):
return np.mean(self.values)
def name(self):
return 'Average nonzero triplets'
class LossMetric(Metric):
'''
Tracks the average loss over minibatches
'''
def __init__(self):
self.values = []
def __call__(self, outputs, target, loss):
self.values.append(loss)
return self.value()
def reset(self):
self.values = []
def value(self):
return np.mean(self.values)
def name(self):
return 'Loss'
class AccuracyTripletsMetric(Metric):
'''
Computes triplet accuracy from anchor-positive / anchor-negative embedding distances (0.8 threshold)
'''
def __init__(self):
self.values = []
self.tp = 0
self.fp = 0
self.total_p = 0
self.total_n = 0
def __call__(self, outputs, target, loss):
"""print(len(outputs))
print(outputs[0].shape)
print(outputs[0][0,:])"""
with torch.no_grad():
TA = np.linalg.norm(outputs[0] - outputs[1], axis=1)
TAc = TA[TA < 0.8]
self.tp = len(TAc)
self.total_p = len(TA)
#print("TA:", self.tp)
FA = np.linalg.norm(outputs[0] - outputs[2], axis=1)
FAc = FA[FA < 0.8]
self.fp = len(FAc)
self.total_n = len(FA)
self.fn = self.total_p - self.tp
self.tn = self.total_n - self.fp
#print("FA:", self.fp)
accuracy = (self.tp + self.tn) / (self.total_p + self.total_n)
#print("Acc: ", accuracy)
self.values.append(accuracy)
return self.value()
def reset(self):
self.values = []
self.fp = 0
self.tp = 0
self.total_p = 0
self.total_n = 0
def value(self):
return np.mean(self.values)
def name(self):
return 'Triplet accuracy'
class RecallTripletsMetric(Metric):
'''
Computes triplet recall from anchor-positive / anchor-negative embedding distances (0.8 threshold)
'''
def __init__(self):
self.values = []
self.tp = 0
self.fp = 0
self.total_p = 0
self.total_n = 0
def __call__(self, outputs, target, loss):
"""print(len(outputs))
print(outputs[0].shape)
print(outputs[0][0,:])"""
with torch.no_grad():
TA = np.linalg.norm(outputs[0] - outputs[1], axis=1)
TAc = TA[TA < 0.8]
self.tp = len(TAc)
self.total_p = len(TA)
#print("TA:", self.tp)
FA = np.linalg.norm(outputs[0] - outputs[2], axis=1)
FAc = FA[FA < 0.8]
self.fp = len(FAc)
self.total_n = len(FA)
self.fn = self.total_p - self.tp
self.tn = self.total_n - self.fp
#print("FA:", self.fp)
recall = self.tp / (self.tp + self.fn)
#print("Acc: ", accuracy)
self.values.append(recall)
return self.value()
def reset(self):
self.values = []
self.fp = 0
self.tp = 0
self.total_p = 0
self.total_n = 0
def value(self):
return np.mean(self.values)
def name(self):
return 'Triplet recall'
class FPRTripletsMetric(Metric):
'''
Computes the triplet false-positive rate from anchor-positive / anchor-negative embedding distances (0.8 threshold)
'''
def __init__(self):
self.values = []
self.tp = 0
self.fp = 0
self.total_p = 0
self.total_n = 0
def __call__(self, outputs, target, loss):
"""print(len(outputs))
print(outputs[0].shape)
print(outputs[0][0,:])"""
with torch.no_grad():
TA =
|
np.linalg.norm(outputs[0] - outputs[1], axis=1)
|
numpy.linalg.norm
|
"""
Author: <NAME> 2017
This module provides:
:func:`render_reconstruct_world` as a helper function to reconstruct and render a clip of video
:func:`reconstruct_world` to create a 3D reconstruction of a clip of video
:func:`reconstruct_frame_pair` to triangulate and reconstruct two frames
:func:`generate_world_cloud` to generate a dense point cloud from a video, utilising a moving average
:func:`gen_binned_points` to bin a dense point cloud and average over reliable bins
:func:`get_outlier_mask` to determine outliers in a dense point cloud
:func:`generate_frame_pair_cloud` to create an instance of :class:`pointcloud.Pointcloud` from two frames
:func:`triangulate_frames` to generate a point cloud from two frames
:func:`estimate_projections` to estimate projection matrices (P1, P2, R, T) from pixel
correspondences and camera matrix
:func:`create_pixel_correspondences` to create pixel correspondences from relative motion velocities
:func:`get_projections_from_rt` to get projection matrices from R and T
And legacy functions:
:func:`get_fundamental` to get the Fundamental matrix from corresponding pixel positions
:func:`get_rt` to get rotation R and translation T matrices from the essential matrix E
"""
import logging
import multiprocessing
import cv2
import numpy as np
from field_reconstruction import dtcwt_registration, pointcloud, video
from field_reconstruction.numpy_caching import np_cache
def get_fundamental(u1, v1, u2, v2):
"""Legacy function to get the Fundamental matrix from corresponding pixel positions"""
u1 = u1.reshape((-1, 1))
v1 = v1.reshape((-1, 1))
u2 = u2.reshape((-1, 1))
v2 = v2.reshape((-1, 1))
lhs = np.hstack([u2*u1, u2*v1, u2, v2*u1, v2*v1, v2, u1, v1, np.ones_like(v1)])
U, s, VT = np.linalg.svd(lhs)
NS = VT[-1:, :].transpose()
return NS.reshape((3, 3)) / NS[-1]
def get_rt(E):
"""Legacy function to get rotation R and translation T matrices from the essential matrix E"""
U, s, VT = np.linalg.svd(E)
Tx = U.dot(
np.array([
[0, 1, 0],
[-1, 0, 0],
[0, 0, 0]
])
).dot(U.transpose())
R = U.dot(
np.array([
[0, -1, 0],
[1, 0, 0],
[0, 0, 1]
])
).dot(VT)
t = np.array([
[Tx[2][1]],
[Tx[0][2]],
[Tx[1][0]],
])
return R, t
def get_projections_from_rt(K, R, t):
"""
Get projection matrices from R and T
:param K: [3,3] Camera calibration matrix
:param R: [3,3] Rotation matrix
:param t: [3,1] Translation matrix
:return: P1, P2, projection matrices, both [3,4]
"""
P1 = K.dot(
np.hstack([np.eye(3), np.zeros((3, 1))])
)
P2 = K.dot(
np.hstack([R, t])
)
return P1, P2
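# Usage sketch (illustrative intrinsics, identity rotation, unit
# translation along x; both projections come out 3x4):
#
#     >>> K = np.array([[800., 0., 320.],
#     ...               [0., 800., 240.],
#     ...               [0., 0., 1.]])
#     >>> P1, P2 = get_projections_from_rt(K, np.eye(3), np.array([[1.], [0.], [0.]]))
#     >>> P1.shape, P2.shape
#     ((3, 4), (3, 4))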
def create_pixel_correspondences(vel):
"""
Create pixel correspondences from relative motion velocities
:param vel: Motion velocities from :func:`dtcwt_registration.load_velocity_fields`, a [2,Y,X] array
:return: tuple of two pixel correspondences, each a [Y,X] array corresponding to one frame
"""
velx, vely = vel
imshape = velx.shape
shapey, shapex = imshape
X, Y = np.meshgrid(np.arange(shapex), np.arange(shapey))
# velocity vectors map pixels in f2 to their locations in f1
u1shaped = X + velx
v1shaped = Y + vely
u2shaped, v2shaped = X, Y
return np.dstack((u1shaped, v1shaped)), np.dstack((u2shaped, v2shaped))
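# Usage sketch: with an all-zero velocity field both correspondence maps
# reduce to the plain pixel grid.
#
#     >>> corr1, corr2 = create_pixel_correspondences(np.zeros((2, 4, 5)))
#     >>> corr1.shape, bool(np.allclose(corr1, corr2))
#     ((4, 5, 2), True)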
def estimate_projections(correspondences, K):
"""
Estimate the projection matrices given point correspondences and the camera calibration matrix K
:param correspondences: Tuple of two frame correspondences (each [X,Y] matrices). Should be
pre-cropped to ensure good data points (otherwise E becomes unstable)
:param K: [3,3] Camera calibration matrix
:return:P1, P2, R, t Camera projection matrices:
P1: Projection to frame 1
P2: Projection to frame 2
R: Rotation from frame 1 to frame 2
t: Translation from frame 1 to frame 2
"""
corr1, corr2 = correspondences
w1 = np.vstack((corr1[:, :, 0].flat, corr1[:, :, 1].flat))
w2 = np.vstack((corr2[:, :, 0].flat, corr2[:, :, 1].flat))
E, mask = cv2.findEssentialMat(w1.transpose(), w2.transpose(), K)
# TODO: refine sampling method
retval, R, t, mask = cv2.recoverPose(E, w1[:, ::100].transpose(), w2[:, ::100].transpose(), K, mask=None)
P1, P2 = get_projections_from_rt(K, R, t)
return P1, P2, R, t
@np_cache(True, hash_method='readable')
def triangulate_frames(vid, frame_pair, K):
"""
Perform point triangulation from two frames of a video
:param vid: :class:video.Video object from which to take the frames
:param frame_pair: Tuple of two frame numbers (frame1, frame2)
:param K: [3,3] Camera calibration matrix
:returns: points, velocities, P1, P2, R, t
WHERE
- points are a [3, N] numpy array point cloud
- velocities are the velocities returned by the dtcwt transform
as a [2, Y, X] numpy array (see :func:`dtcwt_registration.load_velocity_fields`)
- P1, P2, R, t are the projection matrix parameters
returned by :func:`estimate_projections`)
"""
vel = dtcwt_registration.load_velocity_fields(vid, *frame_pair)[:, 50:-50, 50:-50]
corr1, corr2 = create_pixel_correspondences(vel)
P1, P2, R, t = estimate_projections((corr1, corr2), K)
w1 = np.vstack((corr1[:, :, 0].flat, corr1[:, :, 1].flat))
w2 = np.vstack((corr2[:, :, 0].flat, corr2[:, :, 1].flat))
points = cv2.triangulatePoints(P1.astype(float), P2.astype(float), w1.astype(float), w2.astype(float))
points = points[:-1, :] / points[-1, :]
return points, vel, P1, P2, R, t
def generate_frame_pair_cloud(vid, frame_pair, K):
"""
Generates a instance of :class:`pointcloud.Pointcloud` from a pair of frames of a :class:`video.Video`.
:param vid: :class:video.Video object from which to take the frames
:param frame_pair: Tuple of two frame numbers (frame1, frame2)
:param K: [3,3] Camera calibration matrix
:return: pointcloud, velocities
WHERE
- pointcloud is an instance of :class:`pointcloud.Pointcloud`
- velocities are the velocities returned by the dtcwt transform
as a [2, Y, X] numpy array (see :func:`dtcwt_registration.load_velocity_fields`)
"""
points, vel, P1, P2, R, t = triangulate_frames(vid, frame_pair, K)
imshape = vel.shape[1:]
return pointcloud.PointCloud(points, imshape, P1, P2, R, t), vel
def get_outlier_mask(points, percentile_discard):
"""
Generate a mask identifying outliers in a point cloud
:param points: A [3, N] numpy array of points
:param percentile_discard: The percentile to discard symmetrically (i.e. a percentile_discard of 5
discards points which fall into the first or last 5% of the data in the x, y, or z dimensions).
:return: outlier_mask, a [N,] boolean numpy array, where True values correspond to an outlying point
"""
# print(np.median(points, axis=1))
# print(np.percentile(points, [0., 10., 25., 50., 75., 90., 100.], axis=1))
limits = np.percentile(points, [float(percentile_discard), 100.0 - percentile_discard], axis=1)
lower_limit = limits[0, :]
upper_limit = limits[-1, :]
outlier_mask = (
(points[0, :] >= lower_limit[0]) & (points[0, :] <= upper_limit[0]) &
(points[1, :] >= lower_limit[1]) & (points[1, :] <= upper_limit[1]) &
(points[2, :] >= lower_limit[2]) & (points[2, :] <= upper_limit[2])
)
return outlier_mask
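# Usage sketch (synthetic points): a single extreme point falls outside
# the kept percentile band and is flagged as an outlier (mask False).
#
#     >>> pts = np.hstack([np.random.randn(3, 100), np.full((3, 1), 1e6)])
#     >>> mask = get_outlier_mask(pts, percentile_discard=10)
#     >>> bool(mask[-1])
#     False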
def gen_binned_points(points, detail=50, minpointcount=4):
"""
Bin points from a point cloud, ignoring outliers
:param points: A [3, N] numpy array of points
:param detail: The bins per point cloud unit
:param minpointcount: Minimum number of points in a bin considered to generate a reliable mean
:return: binned_points, a [3,N] numpy array of the binned points
"""
logging.info("binning points shape: {}".format(points.shape))
orig_points_shape = points.shape
points = points[:, get_outlier_mask(points, 1)]
# points = points[:, get_outlier_mask(pointcloud.align_points_with_xy(points), 10)]
print("Removed outliers, kept {0:.0%}".format(points.shape[1] / orig_points_shape[1]))
xmin, ymin, zmin = np.floor(np.min(points, axis=1)).astype(int)
xmax, ymax, zmax = np.ceil(np.max(points, axis=1)).astype(int)
logging.info("Data shape: {}".format(points.shape))
logging.info("Data min: {}".format(
|
np.min(points, axis=1)
|
numpy.min
|
#! /usr/bin/env python
import os
import sys
from functools import reduce
import numpy as np
import numpy.testing as npt
import ap.mesh.parsers as parsers
import ap.mesh.meshtools as meshtools
import ap.mesh.meshes as meshes
class TestMeshParser(object):
def __init__(self, nodes, elements, edges, mesh_files):
"""
Test case for a parsed mesh. Tries to test an Argyris mesh.
"""
for mesh_file in mesh_files:
self.nodes = nodes
self.elements = elements
self.edges = edges
parsed_mesh = parsers.parser_factory(*mesh_file)
npt.assert_almost_equal(self.nodes, parsed_mesh.nodes)
npt.assert_equal(self.elements, parsed_mesh.elements)
if parsed_mesh.edges:
assert np.all(self.edges == parsed_mesh.edges)
npt.assert_almost_equal(meshtools.project_nodes(lambda x : x[0:2],
self.elements, self.nodes),
self.nodes[:, 0:2], decimal=10)
if parsed_mesh.edges:
assert set(map(lambda x : x[0:-1], self.edges)) == \
set(map(lambda x : x[0:-1],
meshtools.extract_boundary_edges(self.elements)))
# Test Argyris stuff.
if self.elements.shape[1] == 6:
TestArgyrisCase(mesh_file, parsed_mesh)
class TestLagrangeMesh(object):
def __init__(self, nodes, elements, edges, mesh_files):
"""
Test case for a Lagrange mesh. Tries to test an Argyris mesh.
"""
for mesh_file in mesh_files:
self.nodes = nodes
# The mesh classes try to flatten nodes if possible. Do it here too.
if np.all(self.nodes[:, -1] == self.nodes[0, -1]):
self.nodes = self.nodes[:,0:-1]
self.elements = elements
self.edges = edges
mesh = meshes.mesh_factory(*mesh_file)
parsed_mesh = parsers.parser_factory(*mesh_file)
npt.assert_equal(self.nodes, mesh.nodes)
npt.assert_equal(self.elements, mesh.elements)
if parsed_mesh.edges:
assert set(self.edges) == reduce(lambda a, b : a + b,
mesh.edge_collections.values())
npt.assert_almost_equal(meshtools.project_nodes(lambda x : x[0:2],
self.elements, self.nodes),
self.nodes[:,0:2], decimal=10)
if parsed_mesh.edges:
assert set(map(lambda x : x[0:-1], self.edges)) == \
set(map(lambda x : x[0:-1],
meshtools.extract_boundary_edges(self.elements)))
# Test Argyris stuff.
if self.elements.shape[1] == 6:
TestArgyrisCase(mesh_file, parsed_mesh)
class TestArgyrisCase(object):
"""
Test case for an Argyris mesh.
"""
def __init__(self, mesh_file, parsed_mesh):
argyris_mesh = meshes.mesh_factory(*mesh_file, argyris=True)
lagrange_mesh = meshes.mesh_factory(*mesh_file)
assert argyris_mesh.elements.shape == (parsed_mesh.elements.shape[0],21)
stacked_nodes = dict()
edges_by_midpoint = dict()
for element in argyris_mesh.elements:
for local_number, global_number in enumerate(element[0:3]):
corner_nodes = (element[3 + 2*local_number],
element[3 + 2*local_number + 1],
element[9 + 3*local_number],
element[9 + 3*local_number + 1],
element[9 + 3*local_number + 2])
if global_number in stacked_nodes:  # dict.has_key was removed in Python 3
assert corner_nodes == stacked_nodes[global_number]
else:
stacked_nodes[global_number] = corner_nodes
for node in corner_nodes:
npt.assert_almost_equal(
argyris_mesh.nodes[global_number - 1, :],
argyris_mesh.nodes[node - 1, :])
for midpoint_number, local_edge in enumerate([[0,1], [0,2], [1,2]]):
midpoint = element[18 + midpoint_number]
if midpoint in edges_by_midpoint:
npt.assert_equal(edges_by_midpoint[midpoint],
element[local_edge])
else:
edges_by_midpoint[midpoint] = element[local_edge]
# Ensure that the Argyris mesh and the Lagrange mesh come up with the
# same edges.
for name, collection in lagrange_mesh.edge_collections.items():
# Python 3: filter() returns an iterator, so materialise it before indexing
matching = [c for c in argyris_mesh.node_collections if c.name == name][0]
argyris_collection = [e.edge for e in matching.edges]
for edge in collection:
argyris_edge = (min(edge[0:2]), max(edge[0:2]), edge[2])
assert argyris_edge in argyris_collection
# The tests rely on parsing several files. Change the directory and then change
# back.
original_directory = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(sys.argv[0])))
try:
TestMeshParser(np.array([[0,0,0],[1,0,0],[1,1,0],[0,1,0],[0.5,0.5,0]]),
np.array([[1,2,5],[2,3,5],[3,4,5],[4,1,5]]),
[(1,2,1),(2,3,2),(3,4,3),(4,1,4)],
[["linears1.mesh"], ["linears1_nodes.txt",
"linears1_elements.txt"],
["linears1_elements.txt", "linears1_nodes.txt"]])
# case for extra nodes
TestLagrangeMesh(
|
np.array([[0,0,0],[1,0,0],[1,1,0],[0,1,0],[0.5,0.5,0]])
|
numpy.array
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
class TestGraphSampleNeighbors(unittest.TestCase):
def setUp(self):
num_nodes = 20
edges = np.random.randint(num_nodes, size=(100, 2))
edges = np.unique(edges, axis=0)
self.edges_id = np.arange(0, len(edges)).astype("int64")
sorted_edges = edges[np.argsort(edges[:, 1])]
# Calculate dst index cumsum counts, also means colptr
dst_count =
|
np.zeros(num_nodes)
|
numpy.zeros
|
'''
@ref: Incorporating Query Reformulating Behavior into Web Search Evaluation
@author: <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
@desc: Implementation of each model
'''
# encoding: utf-8
from __future__ import division
import os
import json
import math
import random
import numpy as np
from scipy.stats import pearsonr, spearmanr, ttest_ind, ttest_rel
def soft_max(z):
t = np.exp(z)
a = t / np.sum(t, axis=0)  # reuse the exponentials computed above
return a
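# Quick check (illustrative): equal logits give a uniform distribution,
# and the normalisation runs over axis 0.
#
#     >>> soft_max(np.zeros(4))
#     array([0.25, 0.25, 0.25, 0.25])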
class RAM:
def __init__(self, args):
self.args = args
self.k_num = self.args.k_num
self.max_dnum = self.args.max_dnum
self.max_usefulness = self.args.max_usefulness
self.id = self.args.id
self.metric_type = self.args.metric_type
self.data = self.args.data
self.click_model = self.args.click_model
self.use_knowledge = self.args.use_knowledge
self.iter_num = self.args.iter_num
self.alpha = self.args.alpha
self.lamda = self.args.lamda
self.alpha_decay = self.args.alpha_decay
self.patience = self.args.patience
if self.k_num == 6:
self.train_type = "fineinit"
else:
self.train_type = "wo_reform"
# param initialization
self.omegas = ["F", "A", "D", "K", "T", "O"]
if self.use_knowledge:
self.pi_omega_k = {"F": [100, 0, 0, 0, 0, 0],
"A": [0, 89, 1, 7, 3, 0],
"D": [0, 8, 76, 12, 2, 1],
"K": [0, 10, 1, 86, 2, 1],
"T": [0, 32, 7, 15, 43, 4],
"O": [0, 8, 4, 4, 34, 49]}
else:
self.pi_omega_k = {"F": [1, 0, 0, 0, 0, 0],
"A": [0, 1, 0, 0, 0, 0],
"D": [0, 0, 1, 0, 0, 0],
"K": [0, 0, 0, 1, 0, 0],
"T": [0, 0, 0, 0, 1, 0],
"O": [0, 0, 0, 0, 0, 1]}
self.psi_k = [3] * self.k_num
self.beta_k = [0.5] * self.k_num
# add some disturbance
for reform in self.omegas:
self.pi_omega_k[reform] = self.pi_omega_k[reform][:self.k_num]
for k in range(self.k_num):
self.pi_omega_k[reform][k] += random.random() * 0.1
self.pi_omega_k[reform][k] = math.log(self.pi_omega_k[reform][k])
self.train_data = self.load_data(mode='train')
self.test_data = self.load_data(mode='test')
def load_data(self, mode='train'):
dir = "./data/bootstrap_%s/%s/%s_file.txt" % (self.data, self.id, mode)
f = open(dir, "r")
lines = f.read().strip().split('\n')
train_data = []
for line in lines:
es = line.strip().split("\t")
train_data.append([es[0], json.loads(es[1]), json.loads(es[2]), es[3]])
return train_data
def train_model(self):
"""
train the RAM with different click models
"""
pass
def eval(self):
"""
evaluate the various models
"""
pass
class uDBN(RAM):
def __init__(self, args):
RAM.__init__(self, args)
if self.click_model == 'DBN':
self.gamma_k = [0.5] * self.k_num
else:  # SDBN
self.gamma_k = [1.0] * self.k_num
self.sigma_u_k = {i: [0.5] * self.k_num for i in range(self.max_usefulness + 1)}
# best params
self.best_pi_omega_k, self.best_i_omega_k = {}, {}
if self.click_model == 'DBN':
self.best_gamma_k = [0.5] * self.k_num
else:  # SDBN
self.best_gamma_k = [1.0] * self.k_num
self.best_sigma_u_k = {i: [0.5] * self.k_num for i in range(self.max_usefulness + 1)}
self.best_psi_k = [3] * self.k_num
self.best_beta_k = [0.5] * self.k_num
# randomize
for k in range(self.k_num):
self.psi_k[k] += random.random() * 0.05
self.beta_k[k] += random.random() * 0.05
for usef in range(self.max_usefulness + 1):
self.sigma_u_k[usef][k] += random.random() * 0.05
def train_model(self):
num_q = len(self.train_data)
last_loss1, last_loss2, best_loss = 1e30, 1e30, 1e30
for i in range(int(self.iter_num)):
loss, loss1, loss2 = 0., 0., 0.
# initialize the partials
partial_pi_omega_k = {"F": np.zeros(self.k_num, dtype=float),
"A": np.zeros(self.k_num, dtype=float),
"D": np.zeros(self.k_num, dtype=float),
"K": np.zeros(self.k_num, dtype=float),
"T": np.zeros(self.k_num, dtype=float),
"O": np.zeros(self.k_num, dtype=float)}
partial_gamma_k = np.zeros(self.k_num, dtype=float)
partial_sigma_u_k = np.zeros((self.max_usefulness + 1, self.k_num), dtype=float)
partial_psi_k = np.zeros(self.k_num, dtype=float)
partial_beta_k = np.zeros(self.k_num, dtype=float)
# update i_omega_k according to pi
i_omega_k = {}
for key in self.pi_omega_k:
intent_dist = np.array(self.pi_omega_k[key], dtype=float)
softmax_dist = soft_max(intent_dist).tolist()
if key not in i_omega_k:
i_omega_k[key] = softmax_dist
# training
iter_loss1 = 0
for train_s in self.train_data:
if self.k_num == 1: # w/o reform inform
reform = 'F'
else:
reform = train_s[0]
clicks = train_s[1]
usefs = train_s[2]
sat = float(train_s[3])
alphas = (np.exp2(usefs) - 1) / np.exp2(self.max_usefulness)
alphas = alphas.tolist()
# loss calculation
epsilons, epsilons_1 =
|
np.zeros((self.k_num, self.max_dnum), dtype=float)
|
numpy.zeros
|
import numpy as np
import time, yaml
import itertools, glob
import traceback
from hera_cal import redcal
from collections import OrderedDict as odict
from pyuvdata import UVData, utils as uvutils
from datetime import datetime
import copy
from scipy.interpolate import interp1d
import uvtools as uvt
import argparse
from .conversions import Cosmo_Conversions
def cov(d1, w1, d2=None, w2=None, conj_1=False, conj_2=True):
"""
Computes an empirical covariance matrix from data vectors. If d1 is of size
(M,N), then the output is M x M. In other words, the second axis is the
axis that is averaged over in forming the covariance (e.g. a time axis).
If d2 is provided and d1 != d2, then this computes the cross-variance,
i.e. <d1 d2^dagger> - <d1> <d2>^dagger
The fact that the second copy is complex conjugated is the default behaviour,
which can be altered by the conj_1 and the conj_2 kwargs. If conj_1 = False
and conj_2 = False, then <d1 d2^t> is computed, whereas if conj_1 = True
and conj_2 = True, then <d1^* d2^t*> is computed. (Minus the mean terms).
Parameters
----------
d1 : array_like
Data vector of size (M,N), where N is the length of the "averaging axis"
w1 : integer
Weights for averaging d1
d2 : array_like, optional
Data vector of size (M,N), where N is the length of the "averaging axis"
Default: None
w2 : integer, optional
Weights for averaging d1. Default: None
conj_1 : boolean, optional
Whether to conjugate d1 or not. Default: False
conj_2 : boolean, optional
Whether to conjugate d2 or not. Default: True
Returns
-------
cov : array_like
Covariance (or cross-variance) matrix of size (M,M)
"""
if d2 is None: d2,w2 = d1,w1
if not np.isreal(w1).all(): raise TypeError("Weight matrices must be real")
if not np.isreal(w2).all(): raise TypeError("Weight matrices must be real")
if np.less(w1, 0.).any() or np.less(w2, 0.).any():
raise ValueError("Weight matrices must be positive")
d1sum,d1wgt = (w1*d1).sum(axis=1), w1.sum(axis=1)
d2sum,d2wgt = (w2*d2).sum(axis=1), w2.sum(axis=1)
x1 = d1sum / np.where(d1wgt > 0, d1wgt, 1)
x2 = d2sum / np.where(d2wgt > 0, d2wgt, 1)
x1.shape = (-1,1); x2.shape = (-1,1)
z1 = w1*d1
z2 = w2*d2
if conj_1:
z1 = z1.conj()
x1 = x1.conj()
if conj_2:
z2 = z2.conj()
x2 = x2.conj()
C = np.dot(z1, z2.T)
W = np.dot(w1, w2.T)
C /= np.where(W > 0, W, 1)
C -= np.outer(x1, x2)
return C
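# Usage sketch (uniform weights): for (M, N) input the result is (M, M),
# here an empirical 3x3 covariance of random data.
#
#     >>> d = np.random.randn(3, 1000)
#     >>> cov(d, np.ones_like(d)).shape
#     (3, 3)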
def variance_from_auto_correlations(uvd, bl, spw_range, time_index):
"""
Predict noise variance on a baseline from autocorrelation amplitudes on antennas.
Pick a baseline $b=(alpha,beta)$ where $alpha$ and $beta$ are antennas,
The way to estimate the covariance matrix $C$ from auto-visibility is:
$C_{ii}(b, \mathrm{LST}) = | V(b_\alpha, \mathrm{LST}, \nu_i) V(b_\beta, \mathrm{LST}, \nu_i) | / (B \Delta_t)$,
where $b_\alpha = (\alpha, \alpha)$ and $b_\beta = (\beta, \beta)$.
With LST binned over days, we have $C_{ii}(b, \mathrm{LST}) = |V(b_\alpha, \nu_i, t) V(b_\beta, \nu_i, t)| / (N_{\mathrm{samples}} B \Delta_t)$.
Parameters
----------
uvd : UVData
bl : tuple
baseline (pol) key, in the format of (ant1, ant2, pol)
spw_range : tuple
Length-2 tuple of the spectral window
time_index : int
Returns
-------
var : ndarray, (spw_Nfreqs,)
"""
assert isinstance(bl, tuple) and len(bl)==3, "bl must be fed as Length-3 tuple"
assert isinstance(spw_range, tuple) and len(spw_range)==2, "spw_range must be fed as Length-2 tuple"
dt = np.median(uvd.integration_time)
# Delta_t
df = uvd.channel_width
# B
bl1 = (bl[0],bl[0], bl[2])
# baseline b_alpha
bl2 = (bl[1], bl[1], bl[2])
# baseline b_beta
spw = slice(spw_range[0], spw_range[1])
x_bl1 = uvd.get_data(bl1)[time_index, spw]
x_bl2 = uvd.get_data(bl2)[time_index, spw]
nsample_bl = uvd.get_nsamples(bl)[time_index, spw]
nsample_bl = np.where(nsample_bl>0, nsample_bl, np.median(uvd.nsample_array[:,:,spw,:]))
# some inpainted data have zero nsample while not being flagged; these are assigned the median nsample within the spectral window.
var = np.abs(x_bl1*x_bl2.conj()) / dt / df / nsample_bl
return var
def construct_blpairs(bls, exclude_auto_bls=False, exclude_cross_bls=False,
exclude_permutations=False, group=False, Nblps_per_group=1):
"""
Construct a list of baseline-pairs from a baseline-group. This function
can be used to easily convert a single list of baselines into the input
needed by PSpecData.pspec(bls1, bls2, ...).
Parameters
----------
bls : list of tuple
List of baseline tuples, Ex. [(1, 2), (2, 3), (3, 4)]. Baseline
integers are not supported, and must first be converted to tuples
using UVData.baseline_to_antnums().
exclude_auto_bls: bool, optional
If True, exclude all baselines crossed with themselves from the final
blpairs list. Default: False.
exclude_cross_bls : bool, optional
If True, exclude all bls crossed with a different baseline. Note if
this and exclude_auto_bls are True then no blpairs will exist.
exclude_permutations : bool, optional
If True, exclude permutations and only form combinations of the bls
list.
For example, if bls = [1, 2, 3] (note this isn't the proper form of
bls, but makes the example clearer) and exclude_permutations = False,
then blpairs = [11, 12, 13, 21, 22, 23, 31, 32, 33]. If however
exclude_permutations = True, then blpairs = [11, 12, 13, 22, 23, 33].
Furthermore, if exclude_auto_bls = True then 11, 22, and 33 would
also be excluded.
Default: False.
group : bool, optional
If True, group each consecutive Nblps_per_group blpairs into sub-lists.
Default: False.
Nblps_per_group : int, optional
Number of baseline-pairs to put into each sub-group if group = True.
Default: 1.
Returns (bls1, bls2, blpairs)
-------
bls1, bls2 : list of tuples
List of baseline tuples from the zeroth/first index of the blpair.
blpairs : list of tuple
List of blpair tuples.
"""
# assert form
assert isinstance(bls, (list, np.ndarray)) and isinstance(bls[0], tuple), \
"bls must be fed as list or ndarray of baseline antnum tuples. Use " \
"UVData.baseline_to_antnums() to convert baseline integers to tuples."
assert (not exclude_auto_bls) or (not exclude_cross_bls), "Can't exclude both auto and cross blpairs"
# form blpairs w/o explicitly forming auto blpairs
# however, if there are repeated bl in bls, there will be auto bls in blpairs
if exclude_permutations:
blpairs = list(itertools.combinations(bls, 2))
else:
blpairs = list(itertools.permutations(bls, 2))
# explicitly add in auto baseline pairs
blpairs.extend(list(zip(bls, bls)))
# iterate through and eliminate all autos if desired
if exclude_auto_bls:
new_blpairs = []
for blp in blpairs:
if blp[0] != blp[1]:
new_blpairs.append(blp)
blpairs = new_blpairs
# same for cross
if exclude_cross_bls:
new_blpairs = []
for blp in blpairs:
if blp[0] == blp[1]:
new_blpairs.append(blp)
blpairs = new_blpairs
# create bls1 and bls2 list
bls1 = [blp[0] for blp in blpairs]
bls2 = [blp[1] for blp in blpairs]
# group baseline pairs if desired
if group:
Nblps = len(blpairs)
Ngrps = int(np.ceil(float(Nblps) / Nblps_per_group))
new_blps = []
new_bls1 = []
new_bls2 = []
for i in range(Ngrps):
new_blps.append(blpairs[i*Nblps_per_group:(i+1)*Nblps_per_group])
new_bls1.append(bls1[i*Nblps_per_group:(i+1)*Nblps_per_group])
new_bls2.append(bls2[i*Nblps_per_group:(i+1)*Nblps_per_group])
bls1 = new_bls1
bls2 = new_bls2
blpairs = new_blps
return bls1, bls2, blpairs
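# Usage sketch (two toy baselines, combinations only; note the function
# appends the auto pairs explicitly after forming combinations):
#
#     >>> bls1, bls2, blpairs = construct_blpairs([(1, 2), (2, 3)],
#     ...                                         exclude_permutations=True)
#     >>> blpairs
#     [((1, 2), (2, 3)), ((1, 2), (1, 2)), ((2, 3), (2, 3))]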
def calc_blpair_reds(uvd1, uvd2, bl_tol=1.0, filter_blpairs=True,
xant_flag_thresh=0.95, exclude_auto_bls=False,
exclude_cross_bls=False,
exclude_permutations=True, Nblps_per_group=None,
bl_len_range=(0, 1e10), bl_deg_range=(0, 180),
xants=None, include_autocorrs=False,
include_crosscorrs=True, extra_info=False):
"""
Use hera_cal.redcal to get matching, redundant baseline-pair groups from
uvd1 and uvd2 within the specified baseline tolerance, not including
flagged ants.
Parameters
----------
uvd1, uvd2 : UVData
UVData instances with visibility data for the first/second visibilities
in the cross-spectra that will be formed.
bl_tol : float, optional
Baseline-vector redundancy tolerance in meters
filter_blpairs : bool, optional
if True, calculate xants (based on data flags) and filter-out baseline pairs
based on actual baselines in the data.
xant_flag_thresh : float, optional
Fraction of 2D visibility (per-waterfall) needed to be flagged to
consider the entire visibility flagged.
xants : list, optional
Additional list of xants to hand-flag, regardless of flags in the data.
exclude_auto_bls: boolean, optional
If True, exclude all bls crossed with itself from the blpairs list
exclude_cross_bls : boolean, optional
If True, exclude all bls crossed with a different baseline. Note if
this and exclude_auto_bls are True then no blpairs will exist.
exclude_permutations : boolean, optional
If True, exclude permutations and only form combinations of the bls list.
For example, if bls = [1, 2, 3] (note this isn't the proper form of bls,
but makes this example clearer) and exclude_permutations = False,
then blpairs = [11, 12, 13, 21, 22, 23, 31, 32, 33]. If however
exclude_permutations = True, then blpairs = [11, 12, 13, 22, 23, 33].
Furthermore, if exclude_auto_bls = True then 11, 22, and 33 are excluded.
Nblps_per_group : integer, optional
Number of baseline-pairs to put into each sub-group. No grouping if None.
Default: None
bl_len_range : tuple, optional
len-2 tuple containing minimum baseline length and maximum baseline
length [meters] to keep in baseline type selection
bl_deg_range : tuple, optional
len-2 tuple containing (minimum, maximum) baseline angle in degrees
to keep in baseline selection
include_autocorrs : bool, optional
If True, include autocorrelation visibilities in their own redundant group.
If False, dont return any autocorrelation visibilities.
default is False.
include_crosscorrs : bool, optional
If True, include crosscorrelation visibilities. Set to False only if you
want to compute power spectra for autocorrelation visibilities only!
default is True.
extra_info : bool, optional
If True, return three extra arrays containing
redundant baseline group indices, lengths and angles
Returns
-------
baselines1, baselines2 : lists of baseline tuples
Lists of baseline tuples that should be fed as first/second argument
to PSpecData.pspec(), corresponding to uvd1/uvd2
blpairs : list of baseline-pair tuples
Contains the baseline-pair tuples. i.e. zip(baselines1, baselines2)
xants1, xants2 : lists
List of bad antenna integers for uvd1 and uvd2
red_groups : list of integers, returned as extra_info
Lists index of redundant groups, indexing red_lens and red_angs
red_lens : list, returned as extra_info
List of baseline lengths [meters] with len of unique redundant groups
red_angs : list, returned as extra_info
List of baseline angles [degrees] (North of East in ENU)
"""
# get antenna positions
antpos1, ants1 = uvd1.get_ENU_antpos(pick_data_ants=False)
antpos1 = dict(list(zip(ants1, antpos1)))
antpos2, ants2 = uvd2.get_ENU_antpos(pick_data_ants=False)
antpos2 = dict(list(zip(ants2, antpos2)))
antpos = dict(list(antpos1.items()) + list(antpos2.items()))
# assert antenna positions match
for a in set(antpos1).union(set(antpos2)):
if a in antpos1 and a in antpos2:
msg = "antenna positions from uvd1 and uvd2 do not agree to within " \
"tolerance of {} m".format(bl_tol)
assert np.linalg.norm(antpos1[a] - antpos2[a]) < bl_tol, msg
# calculate xants via flags if asked
xants1, xants2 = [], []
if filter_blpairs and uvd1.flag_array is not None and uvd2.flag_array is not None:
xants1, xants2 = set(ants1), set(ants2)
baselines = sorted(set(uvd1.baseline_array).union(set(uvd2.baseline_array)))
for bl in baselines:
# get antenna numbers
antnums = uvd1.baseline_to_antnums(bl)
# continue if autocorr and we dont want to include them
if not include_autocorrs:
if antnums[0] == antnums[1]: continue
if not include_crosscorrs:
if antnums[0] != antnums[1]: continue
# work on xants1
if bl in uvd1.baseline_array:
# get flags
f1 = uvd1.get_flags(bl)
# remove from bad list if unflagged data exists
if np.sum(f1) < np.prod(f1.shape) * xant_flag_thresh:
if antnums[0] in xants1:
xants1.remove(antnums[0])
if antnums[1] != antnums[0] and antnums[1] in xants1:
xants1.remove(antnums[1])
# work on xants2
if bl in uvd2.baseline_array:
# get flags
f2 = uvd2.get_flags(bl)
# remove from bad list if unflagged data exists
if np.sum(f2) < np.prod(f2.shape) * xant_flag_thresh:
if antnums[0] in xants2:
xants2.remove(antnums[0])
if antnums[1] != antnums[0] and antnums[1] in xants2:
xants2.remove(antnums[1])
xants1 = sorted(xants1)
xants2 = sorted(xants2)
# add hand-flagged xants if fed
if xants is not None:
xants1 += xants
xants2 += xants
# construct redundant groups
reds, lens, angs = get_reds(antpos, bl_error_tol=bl_tol, xants=xants1+xants2,
add_autos=include_autocorrs, autos_only=not(include_crosscorrs),
bl_deg_range=bl_deg_range, bl_len_range=bl_len_range)
# construct baseline pairs
baselines1, baselines2, blpairs, red_groups = [], [], [], []
for j, r in enumerate(reds):
(bls1, bls2,
blps) = construct_blpairs(r, exclude_auto_bls=exclude_auto_bls,
exclude_cross_bls=exclude_cross_bls, group=False,
exclude_permutations=exclude_permutations)
if len(bls1) < 1:
continue
# filter based on real baselines in data
if filter_blpairs:
uvd1_bls = uvd1.get_antpairs()
uvd2_bls = uvd2.get_antpairs()
_bls1, _bls2 = [], []
for blp in blps:
bl1 = blp[0]
bl2 = blp[1]
if ((bl1 in uvd1_bls) or (bl1[::-1] in uvd1_bls)) \
and ((bl2 in uvd2_bls) or (bl2[::-1] in uvd2_bls)):
_bls1.append(bl1)
_bls2.append(bl2)
bls1, bls2 = _bls1, _bls2
blps = list(zip(bls1, bls2))
# populate redundant group indices
rinds = [j] * len(blps)
# group if desired
if Nblps_per_group is not None:
Ngrps = int(np.ceil(float(len(blps)) / Nblps_per_group))
bls1 = [bls1[Nblps_per_group*i:Nblps_per_group*(i+1)]
for i in range(Ngrps)]
bls2 = [bls2[Nblps_per_group*i:Nblps_per_group*(i+1)]
for i in range(Ngrps)]
blps = [blps[Nblps_per_group*i:Nblps_per_group*(i+1)]
for i in range(Ngrps)]
rinds = [rinds[Nblps_per_group*i:Nblps_per_group*(i+1)]
for i in range(Ngrps)]
baselines1.extend(bls1)
baselines2.extend(bls2)
blpairs.extend(blps)
red_groups.extend(rinds)
if extra_info:
return baselines1, baselines2, blpairs, xants1, xants2, red_groups, lens, angs
else:
return baselines1, baselines2, blpairs, xants1, xants2
def get_delays(freqs, n_dlys=None):
"""
Return an array of delays, tau, corresponding to the bins of the delay
power spectrum given by frequency array.
Parameters
----------
freqs : ndarray of frequencies in Hz
n_dlys : number of delay bins, optional
Default: None, which then assumes that the number of bins is
equal to the number of frequency channels.
Returns
-------
delays : array_like
Delays, tau. Units: seconds.
"""
Delta_nu = np.median(np.diff(freqs))
n_freqs = freqs.size
if n_dlys is None:  # assume that n_dlys = n_freqs if not specified
n_dlys = n_freqs
# Calculate the delays
delay = np.fft.fftshift(np.fft.fftfreq(n_dlys, d=Delta_nu))
return delay
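# Usage sketch: 100 channels at 100 kHz spacing give 100 delay bins
# spaced by 1 / (N * Delta_nu) = 100 ns, spanning about +/- 5 microseconds.
#
#     >>> freqs = np.linspace(100e6, 110e6, 100, endpoint=False)
#     >>> tau = get_delays(freqs)
#     >>> tau.size, bool(np.isclose(tau[1] - tau[0], 1e-7))
#     (100, True)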
def spw_range_from_freqs(data, freq_range, bounds_error=True):
"""
Return a tuple defining the spectral window that corresponds to the
frequency range specified in freq_range.
(Spectral windows are specified as tuples containing the first and last
index of a frequency range in data.freq_array.)
Parameters
----------
data : UVData or UVPSpec object
Object containing data with a frequency dimension.
freq_range : tuple or list of tuples
Tuples containing the lower and upper frequency bounds for each
spectral window. The range is inclusive of the lower frequency bound,
i.e. it includes all channels in freq_range[0] <= freq < freq_range[1].
Frequencies are in Hz.
bounds_error : bool, optional
Whether to raise an error if a specified lower/upper frequency is
outside the frequency range available in 'data'. Default: True.
Returns
-------
spw_range : tuple or list of tuples
Indices of the channels at the lower and upper bounds of the specified
spectral window(s).
Note: If the requested spectral window is outside the available
frequency range, and bounds_error is False, '(None, None)' is returned.
"""
    # Get frequency array from input object
    try:
        freqs = data.freq_array
    except AttributeError:
        raise AttributeError("Object 'data' does not have a freq_array attribute.")
    # N.B. the shape checks live outside the try block so that their ValueError
    # is not swallowed and re-raised as an AttributeError
    if len(freqs.shape) == 2 and freqs.shape[0] == 1:
        freqs = freqs.flatten()  # Support UVData 2D freq_array
    elif len(freqs.shape) > 2:
        raise ValueError("data.freq_array has unsupported shape: %s"
                         % str(freqs.shape))
# Check for a single tuple input
is_tuple = False
if isinstance(freq_range, tuple):
is_tuple = True
freq_range = [freq_range,]
# Make sure freq_range is now a list (of tuples)
if not isinstance(freq_range, list):
raise TypeError("freq_range must be a tuple or list of tuples.")
# Loop over tuples and find spectral window indices
spw_range = []
for frange in freq_range:
fmin, fmax = frange
if fmin > fmax:
raise ValueError("Upper bound of spectral window is less than "
"the lower bound.")
# Check that this doesn't go beyond the available range of freqs
if fmin < np.min(freqs) and bounds_error:
raise ValueError("Lower bound of spectral window is below the "
"available frequency range. (Note: freqs should "
"be in Hz)")
if fmax > np.max(freqs) and bounds_error:
raise ValueError("Upper bound of spectral window is above the "
"available frequency range. (Note: freqs should "
"be in Hz)")
# Get indices within this range
idxs = np.where(np.logical_and(freqs >= fmin, freqs < fmax))[0]
spw = (idxs[0], idxs[-1]) if idxs.size > 0 else (None, None)
spw_range.append(spw)
# Unpack from list if only a single tuple was specified originally
if is_tuple: return spw_range[0]
return spw_range
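# Hedged usage sketch (editor-added): for a UVData object `uvd` whose freq_array
# is np.linspace(100e6, 200e6, 1024), a single window yields one index tuple and
# a list of windows yields a list of tuples:
# >>> spw_range_from_freqs(uvd, (120e6, 130e6))
# (205, 306)
# >>> spw_range_from_freqs(uvd, [(120e6, 130e6), (150e6, 160e6)])
# [(205, 306), (512, 613)]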
def spw_range_from_redshifts(data, z_range, bounds_error=True):
"""
Return a tuple defining the spectral window that corresponds to the
redshift range specified in z_range.
(Spectral windows are specified as tuples containing the first and last
index of a frequency range in data.freq_array.)
Parameters
----------
data : UVData or UVPSpec object
Object containing data with a frequency dimension.
z_range : tuple or list of tuples
        Tuples containing the lower and upper redshift bounds for each
        spectral window. The range is inclusive of the upper redshift bound,
        i.e. it includes all channels in z_range[0] < z <= z_range[1].
bounds_error : bool, optional
Whether to raise an error if a specified lower/upper redshift is
outside the frequency range available in 'data'. Default: True.
Returns
-------
spw_range : tuple or list of tuples
Indices of the channels at the lower and upper bounds of the specified
spectral window(s).
Note: If the requested spectral window is outside the available
frequency range, and bounds_error is False, '(None, None)' is returned.
"""
# Check for a single tuple input
is_tuple = False
if isinstance(z_range, tuple):
is_tuple = True
z_range = [z_range,]
# Convert redshifts to frequencies (in Hz)
freq_range = []
for zrange in z_range:
zmin, zmax = zrange
freq_range.append( (Cosmo_Conversions.z2f(zmax),
Cosmo_Conversions.z2f(zmin)) )
# Use freq. function to get spectral window
spw_range = spw_range_from_freqs(data=data, freq_range=freq_range,
bounds_error=bounds_error)
# Unpack from list if only a single tuple was specified originally
if is_tuple: return spw_range[0]
return spw_range
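# Hedged usage sketch (editor-added): assuming Cosmo_Conversions.z2f maps a
# redshift to the 21 cm line frequency f21 / (1 + z), the redshift window
# (7.0, 8.0) corresponds to roughly 157.8-177.6 MHz, so this call is equivalent
# to spw_range_from_freqs(uvd, (157.8e6, 177.6e6)):
# >>> spw_range_from_redshifts(uvd, (7.0, 8.0))   # doctest: +SKIP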
def log(msg, f=None, lvl=0, tb=None, verbose=True):
"""
Add a message to the log.
Parameters
----------
msg : str
Message string to print.
f : file descriptor
file descriptor to write message to.
lvl : int, optional
Indent level of the message. Each level adds two extra spaces.
Default: 0.
tb : traceback tuple, optional
Output of sys.exc_info()
verbose : bool, optional
        If True, print msg to stdout. Even if False, the message is still
        written to f if a file descriptor is provided.
"""
# catch for traceback if provided
if tb is not None:
msg += "\n{}".format('\n'.join(traceback.format_exception(*tb)))
# print
output = "%s%s" % (" "*lvl, msg)
if verbose:
print(output)
# write
if f is not None:
f.write(output)
f.flush()
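# Hedged usage sketch (editor-added): each indent level prepends two spaces,
# and the same message can be mirrored to a log file:
# >>> log("loading data", lvl=1)
#   loading data
# >>> with open("run.log", "a") as lf:
# ...     log("loading data", f=lf, verbose=False)   # writes to file, prints nothing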
def load_config(config_file):
"""
Load configuration details from a YAML file.
All entries of 'None' --> None and all lists
of lists become lists of tuples.
"""
# define recursive replace function
def replace(d):
if isinstance(d, (dict, odict)):
for k in d.keys():
                # 'None' turns into None
if d[k] == 'None': d[k] = None
# list of lists turn into lists of tuples
if isinstance(d[k], list) \
and np.all([isinstance(i, list) for i in d[k]]):
d[k] = [tuple(i) for i in d[k]]
elif isinstance(d[k], (dict, odict)): replace(d[k])
# Open and read config file
with open(config_file, 'r') as cfile:
try:
cfg = yaml.load(cfile, Loader=yaml.FullLoader)
except yaml.YAMLError as exc:
            raise
# Replace entries
replace(cfg)
return cfg
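# Hedged usage sketch (editor-added): given a hypothetical config.yaml containing
#     beam_file: None
#     pol_pairs: [[xx, xx], [yy, yy]]
# the loader converts 'None' strings and nested lists as documented:
# >>> cfg = load_config("config.yaml")
# >>> cfg["beam_file"] is None, cfg["pol_pairs"]
# (True, [('xx', 'xx'), ('yy', 'yy')])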
def flatten(nested_list):
"""
Flatten a list of nested lists
"""
return [item for sublist in nested_list for item in sublist]
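# Editor-added doctest: flattening is shallow, i.e. one level deep only.
# >>> flatten([[1, 2], [3], [4, 5]])
# [1, 2, 3, 4, 5]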
def config_pspec_blpairs(uv_templates, pol_pairs, group_pairs, exclude_auto_bls=False,
exclude_permutations=True, bl_len_range=(0, 1e10),
bl_deg_range=(0, 180), xants=None, exclude_patterns=None,
include_autocorrs=False,
file_type='miriad', verbose=True):
"""
Given a list of glob-parseable file templates and selections for
polarization and group labels, construct a master list of
group-pol pairs, and also a list of blpairs for each
group-pol pair given selections on baseline angles and lengths.
A group is a fieldname in the visibility files that denotes the
"type" of dataset. For example, the group field in the following files
zen.even.LST.1.01.xx.HH.uv
zen.odd.LST.1.01.xx.HH.uv
are the "even" and "odd" fields, which specifies the two time-binning groups.
To form cross spectra between these two files, one would feed a group_pair
of: group_pairs = [('even', 'odd')] and pol_pairs = [('xx', 'xx')].
Parameters
----------
uv_templates : list
List of glob-parseable string templates, each of which must have
a {pol} and {group} field.
pol_pairs : list
List of len-2 polarization tuples to use in forming cross spectra.
Ex: [('xx', 'xx'), ('yy', 'yy'), ...]
group_pairs : list
List of len-2 group tuples to use in forming cross spectra.
See top of doc-string for an explanation of a "group" in this context.
Ex: [('grp1', 'grp1'), ('grp2', 'grp2'), ...]
exclude_auto_bls : bool
If True, exclude all baselines paired with itself.
exclude_permutations : bool
If True, exclude baseline2_cross_baseline1 if
baseline1_cross_baseline2 exists.
bl_len_range : len-2 tuple
A len-2 integer tuple specifying the range of baseline lengths
(meters in ENU frame) to consider.
bl_deg_range : len-2 tuple
A len-2 integer tuple specifying the range of baseline angles
(degrees in ENU frame) to consider.
xants : list, optional
A list of integer antenna numbers to exclude. Default: None.
exclude_patterns : list, optional
A list of patterns to exclude if found in the final list of input
files (after the templates have been filled-in). This currently
just takes a list of strings, and does not recognize wildcards.
Default: None.
include_autocorrs : bool, optional
If True, include autocorrelation visibilities
in the set of blpair groups calculated and returned.
file_type : str, optional
File type of the input files. Default: 'miriad'.
verbose : bool, optional
If True, print feedback to stdout. Default: True.
Returns
-------
groupings : dict
A dictionary holding pol and group pair (tuple) as keys
and a list of baseline-pairs as values.
Notes
-----
A group-pol-pair is formed by self-matching unique files in the
glob-parsed master list, and then string-formatting-in appropriate
pol and group selections given pol_pairs and group_pairs.
"""
# type check
    if isinstance(uv_templates, str):
        uv_templates = [uv_templates]
assert len(pol_pairs) == len(group_pairs), "len(pol_pairs) must equal "\
"len(group_pairs)"
# get unique pols and groups
pols = sorted(set([item for sublist in pol_pairs for item in sublist]))
groups = sorted(set([item for sublist in group_pairs for item in sublist]))
# parse wildcards in uv_templates to get wildcard-unique filenames
unique_files = []
pol_grps = []
for template in uv_templates:
for pol in pols:
for group in groups:
# parse wildcards with pol / group selection
files = glob.glob(template.format(pol=pol, group=group))
# if any files were parsed, add to pol_grps
if len(files) > 0:
pol_grps.append((pol, group))
# insert into unique_files with {pol} and {group} re-inserted
for _file in files:
_unique_file = _file.replace(".{pol}.".format(pol=pol),
".{pol}.").replace(".{group}.".format(group=group), ".{group}.")
if _unique_file not in unique_files:
unique_files.append(_unique_file)
unique_files = sorted(unique_files)
# Exclude user-specified patterns
if exclude_patterns is not None:
to_exclude = []
# Loop over files and patterns
for f in unique_files:
for pattern in exclude_patterns:
# Add to list of files to be excluded
if pattern in f:
if verbose:
print("File matches pattern '%s' and will be excluded: %s" \
% (pattern, f))
to_exclude.append(f)
                    break  # stop at the first matching pattern; avoids duplicate entries
# Exclude files that matched a pattern
for f in to_exclude:
            try:
                unique_files.remove(f)
            except ValueError:
                pass  # already removed
# Test for empty list and fail if found
if len(unique_files) == 0:
if verbose:
print("config_pspec_blpairs: All files were filtered out!")
return []
# use a single file from unique_files and a single pol-group combination to get antenna positions
_file = unique_files[0].format(pol=pol_grps[0][0], group=pol_grps[0][1])
uvd = UVData()
uvd.read(_file, read_data=False, file_type=file_type)
# get baseline pairs
(_bls1, _bls2, _, _,
_) = calc_blpair_reds(uvd, uvd, filter_blpairs=False, exclude_auto_bls=exclude_auto_bls,
exclude_permutations=exclude_permutations, bl_len_range=bl_len_range,
include_autocorrs=include_autocorrs, bl_deg_range=bl_deg_range)
# take out xants if fed
if xants is not None:
bls1, bls2 = [], []
for bl1, bl2 in zip(_bls1, _bls2):
if bl1[0] not in xants \
and bl1[1] not in xants \
and bl2[0] not in xants \
and bl2[1] not in xants:
bls1.append(bl1)
bls2.append(bl2)
else:
bls1, bls2 = _bls1, _bls2
blps = list(zip(bls1, bls2))
# iterate over pol-group pairs that exist
groupings = odict()
for pp, gp in zip(pol_pairs, group_pairs):
if (pp[0], gp[0]) not in pol_grps or (pp[1], gp[1]) not in pol_grps:
if verbose:
print("pol_pair {} and group_pair {} not found in data files".format(pp, gp))
continue
groupings[(tuple(gp), tuple(pp))] = blps
return groupings
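# Hedged usage sketch (editor-added): with files like zen.even.LST.1.01.xx.HH.uv
# on disk, the template carries {group} and {pol} placeholders and the result is
# keyed by (group_pair, pol_pair):
# >>> groupings = config_pspec_blpairs(["zen.{group}.LST.*.{pol}.HH.uv"],
# ...                                  pol_pairs=[('xx', 'xx')],
# ...                                  group_pairs=[('even', 'odd')])
# >>> list(groupings.keys())
# [(('even', 'odd'), ('xx', 'xx'))]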
def get_blvec_reds(blvecs, bl_error_tol=1.0, match_bl_lens=False):
"""
Given a blvecs dictionary, form groups of baseline-pairs based on
redundancy in ENU coordinates. Note: this only uses the East-North components
of the baseline vectors to calculate redundancy.
    Parameters
    ----------
blvecs : dictionary (or UVPSpec object)
A dictionary with len-2 or 3 ndarray baseline vectors as values.
Alternatively, this can be a UVPSpec object.
bl_error_tol : float, optional
Redundancy tolerance of baseline vector in meters. Default: 1.0
match_bl_lens : bool, optional
Combine baseline groups of identical baseline length but
differing angle (using bl_error_tol). Default: False
    Returns
    -------
red_bl_grp : list
A list of baseline groups, ordered by ascending baseline length.
red_bl_len : list
A list of baseline lengths in meters for each bl group
red_bl_ang : list
A list of baseline angles in degrees for each bl group
red_bl_tag : list
A list of baseline string tags denoting bl length and angle
"""
from hera_pspec import UVPSpec
# type check
    assert isinstance(blvecs, (dict, odict, UVPSpec)), \
        "blvecs must be fed as a dict or UVPSpec"
if isinstance(blvecs, UVPSpec):
# get baseline vectors
uvp = blvecs
bls = uvp.bl_array
bl_vecs = uvp.get_ENU_bl_vecs()[:, :2]
blvecs = dict(list(zip( [uvp.bl_to_antnums(_bls) for _bls in bls],
bl_vecs )))
# get baseline-pairs
blpairs = uvp.get_blpairs()
# form dictionary
_blvecs = odict()
for blp in blpairs:
bl1 = blp[0]
bl2 = blp[1]
_blvecs[blp] = (blvecs[bl1] + blvecs[bl2]) / 2.
blvecs = _blvecs
# create empty lists
red_bl_grp = []
red_bl_vec = []
red_bl_len = []
red_bl_ang = []
red_bl_tag = []
# iterate over each baseline in blvecs
for bl in blvecs.keys():
# get bl vector and properties
bl_vec = blvecs[bl][:2]
bl_len = np.linalg.norm(bl_vec)
bl_ang = np.arctan2(*bl_vec[::-1]) * 180 / np.pi
if bl_ang < 0: bl_ang = (bl_ang + 180) % 360
bl_tag = "{:03.0f}_{:03.0f}".format(bl_len, bl_ang)
# append to list if unique within tolerance
if match_bl_lens:
# match only on bl length
match = [np.all(np.isclose(bll, bl_len, rtol=0.0, atol=bl_error_tol)) for bll in red_bl_len]
else:
# match on full bl vector
match = [np.all(np.isclose(blv, bl_vec, rtol=0.0, atol=bl_error_tol)) for blv in red_bl_vec]
if np.any(match):
match_id = np.where(match)[0][0]
red_bl_grp[match_id].append(bl)
# else create new list
else:
red_bl_grp.append([bl])
red_bl_vec.append(bl_vec)
red_bl_len.append(bl_len)
red_bl_ang.append(bl_ang)
red_bl_tag.append(bl_tag)
# order based on tag
order = np.argsort(red_bl_tag)
red_bl_grp = [red_bl_grp[i] for i in order]
red_bl_len = [red_bl_len[i] for i in order]
red_bl_ang = [red_bl_ang[i] for i in order]
red_bl_tag = [red_bl_tag[i] for i in order]
return red_bl_grp, red_bl_len, red_bl_ang, red_bl_tag
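# Hedged usage sketch (editor-added): two ~14.6 m East-West baselines agree to
# within the 1 m tolerance and share a group; the 29.2 m baseline does not:
# >>> blvecs = {(0, 1): np.array([14.6, 0.0]),
# ...           (1, 2): np.array([14.6, 0.3]),
# ...           (0, 2): np.array([29.2, 0.3])}
# >>> grps, lens, angs, tags = get_blvec_reds(blvecs, bl_error_tol=1.0)
# >>> grps
# [[(0, 1), (1, 2)], [(0, 2)]]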
def job_monitor(run_func, iterator, action_name, M=map, lf=None, maxiter=1,
verbose=True):
"""
Job monitoring function, used to send elements of iterator through calls of
run_func. Can be parallelized if the input M function is from the
multiprocess module.
Parameters
----------
run_func : function
A worker function to run on each element in iterator. Should return
0 if job is successful, otherwise a failure is any non-zero integer.
iterator : iterable
An iterable whose elements define an individual job launch, and are
passed through to run_func for each individual job.
action_name : str
A descriptive name for the operation being performed by run_func.
M : map function
A map function used to send elements of iterator through calls to
run_func. Default is built-in map function.
lf : file descriptor
Log-file descriptor to print message to.
maxiter : int
Maximum number of job re-tries for failed jobs.
verbose : bool
If True, print feedback to stdout and logfile.
Returns
-------
failures : list
A list of failed job indices from iterator. Failures are any output of
run_func that aren't 0.
"""
# Start timing
t_start = time.time()
# run function over jobs
exit_codes = np.array(list(M(run_func, iterator)))
tnow = datetime.utcnow()
# check for len-0
if len(exit_codes) == 0:
raise ValueError("No output generated from run_func over iterator {}".format(iterator))
# inspect for failures
if np.all(exit_codes != 0):
# everything failed, raise error
log("\n{}\nAll {} jobs failed w/ exit codes\n {}: {}\n".format("-"*60,
action_name, exit_codes, tnow),
f=lf, verbose=verbose)
raise ValueError("All {} jobs failed".format(action_name))
# if not all failed, try re-run
failures = np.where(exit_codes != 0)[0]
counter = 1
while True:
if not np.all(exit_codes == 0):
if counter >= maxiter:
# break after certain number of tries
break
# re-run function over jobs that failed
exit_codes = np.array(list(M(run_func, failures)))
# update counter
counter += 1
# update failures
failures = failures[exit_codes != 0]
else:
# all passed
break
# print failures if they exist
if len(failures) > 0:
log("\nSome {} jobs failed after {} tries:\n{}".format(action_name,
maxiter,
failures),
f=lf, verbose=verbose)
else:
t_run = time.time() - t_start
log("\nAll {} jobs ran through ({:1.1f} sec)".format(action_name, t_run),
f=lf, verbose=verbose)
return failures
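# Hedged usage sketch (editor-added): a worker returns 0 on success; with
# maxiter=1 the one failing job index is reported back to the caller:
# >>> def run(i):
# ...     return 0 if i != 2 else 1
# >>> job_monitor(run, range(4), "demo", maxiter=1, verbose=False)
# array([2])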
def get_bl_lens_angs(blvecs, bl_error_tol=1.0):
"""
Given a list of baseline vectors in ENU (TOPO) coords, get the
baseline length [meter] and angle [deg] given a baseline error
tolerance [meters]
Parameters
----------
blvecs : list
A list of ndarray of 2D or 3D baseline vectors.
bl_error_tol : float, optional
A baseline vector error tolerance.
Returns
-------
lens : ndarray
Array of baseline lengths [meters]
angs : ndarray
Array of baseline angles [degrees]
"""
# type check
blvecs = np.asarray(blvecs)
assert blvecs.shape[1] in [2, 3], "blvecs must have shape (N, 2) or (N, 3)"
# get lengths and angles
lens = np.array([np.linalg.norm(v) for v in blvecs])
angs = np.array([np.arctan2(*v[:2][::-1]) * 180 / np.pi for v in blvecs])
angs = np.array([(a + 180) % 360 if a < 0 else a for a in angs])
# Find baseline groups with ang ~ 180 deg that have y-vec within bl_error and set to ang = 0 deg.
flip = (blvecs[:, 1] > -bl_error_tol) & (blvecs[:, 1] < 0) & (blvecs[:, 0] > 0)
angs[flip] = 0
return lens, angs
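# Hedged usage sketch (editor-added): a pure East-West baseline maps to 0 deg
# and a pure North-South baseline to 90 deg:
# >>> lens, angs = get_bl_lens_angs([np.array([14.6, 0.0, 0.0]),
# ...                                np.array([0.0, 14.6, 0.0])])
# >>> lens, angs
# (array([14.6, 14.6]), array([ 0., 90.]))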
def get_reds(uvd, bl_error_tol=1.0, pick_data_ants=False, bl_len_range=(0, 1e4),
bl_deg_range=(0, 180), xants=None, add_autos=False,
autos_only=False, min_EW_cut=0,
file_type='miriad'):
"""
Given a UVData object, a Miriad filepath or antenna position dictionary,
calculate redundant baseline groups using hera_cal.redcal and optionally
filter groups based on baseline cuts and xants.
Parameters
----------
uvd : UVData object or str or dictionary
UVData object or filepath string or antenna position dictionary.
An antpos dict is formed via dict(zip(ants, ant_vecs)).
N.B. If uvd is a filepath, use the `file_type` kwarg to specify the
file type.
bl_error_tol : float
Redundancy tolerance in meters
pick_data_ants : boolean
If True, use only antennas in the UVData to construct reds, else use all
antennas present.
bl_len_range : float tuple
A len-2 float tuple specifying baseline length cut in meters
bl_deg_range : float tuple
A len-2 float tuple specifying baseline angle cut in degrees in ENU frame
xants : list
List of bad antenna numbers to exclude
add_autos : bool
        If True, add the autocorrelation group to the redundant group list.
autos_only : bool, optional
If True, only include autocorrelations.
Default is False.
min_EW_cut : float
Baselines with a projected East-West absolute baseline length in meters
less than this are not included in the output.
file_type : str, optional
File type of the input files. Default: 'miriad'.
    Returns
    -------
reds : list
List of redundant baseline (antenna-pair) groups
lens : list
List of baseline lengths [meters] of each group in reds
angs : list
List of baseline angles [degrees ENU coords] of each group in reds
"""
# handle string and UVData object
    if isinstance(uvd, (str, UVData)):
        # load filepath
        if isinstance(uvd, str):
_uvd = UVData()
_uvd.read(uvd, read_data=False, file_type=file_type)
uvd = _uvd
# get antenna position dictionary
antpos, ants = uvd.get_ENU_antpos(pick_data_ants=pick_data_ants)
antpos_dict = dict(list(zip(ants, antpos)))
elif isinstance(uvd, (dict, odict)):
# use antenna position dictionary
antpos_dict = uvd
else:
raise TypeError("uvd must be a UVData object, filename string, or dict "
"of antenna positions.")
# get redundant baselines
reds = redcal.get_pos_reds(antpos_dict, bl_error_tol=bl_error_tol)
# get vectors, len and ang for each baseline group
vecs = np.array([antpos_dict[r[0][0]] - antpos_dict[r[0][1]] for r in reds])
lens, angs = get_bl_lens_angs(vecs, bl_error_tol=bl_error_tol)
# restrict baselines
_reds, _lens, _angs = [], [], []
for i, (l, a) in enumerate(zip(lens, angs)):
if l < bl_len_range[0] or l > bl_len_range[1]: continue
if a < bl_deg_range[0] or a > bl_deg_range[1]: continue
if np.abs(l *
|
np.cos(a * np.pi / 180)
|
numpy.cos
|
"""
Local classifier per node approach.
Numeric and string output labels are both handled.
"""
from copy import deepcopy
import networkx as nx
import numpy as np
import ray
from sklearn.base import BaseEstimator
from sklearn.utils.validation import check_array, check_is_fitted
from hiclass import BinaryPolicy
from hiclass.ConstantClassifier import ConstantClassifier
from hiclass.HierarchicalClassifier import HierarchicalClassifier
@ray.remote
def _parallel_fit(lcpn, node):
classifier = lcpn.hierarchy_.nodes[node]["classifier"]
X, y = lcpn.binary_policy_.get_binary_examples(node)
unique_y = np.unique(y)
if len(unique_y) == 1 and lcpn.replace_classifiers:
classifier = ConstantClassifier()
classifier.fit(X, y)
return classifier
class LocalClassifierPerNode(BaseEstimator, HierarchicalClassifier):
"""
Assign local classifiers to each node of the graph, except the root node.
A local classifier per node is a local hierarchical classifier that fits one local binary classifier
for each node of the class hierarchy, except for the root node.
"""
def __init__(
self,
local_classifier: BaseEstimator = None,
binary_policy: str = "siblings",
verbose: int = 0,
edge_list: str = None,
replace_classifiers: bool = True,
n_jobs: int = 1,
):
"""
Initialize a local classifier per node.
Parameters
----------
local_classifier : BaseEstimator, default=LogisticRegression
The local_classifier used to create the collection of local classifiers. Needs to have fit, predict and
clone methods.
binary_policy : str, default="siblings"
Rules for defining positive and negative training examples.
verbose : int, default=0
Controls the verbosity when fitting and predicting.
See https://verboselogs.readthedocs.io/en/latest/readme.html#overview-of-logging-levels
for more information.
edge_list : str, default=None
Path to write the hierarchy built.
replace_classifiers : bool, default=True
Turns on (True) the replacement of a local classifier with a constant classifier when trained on only
a single unique class.
n_jobs : int, default=1
The number of jobs to run in parallel. Only :code:`fit` is parallelized.
"""
super().__init__(
local_classifier=local_classifier,
verbose=verbose,
edge_list=edge_list,
replace_classifiers=replace_classifiers,
n_jobs=n_jobs,
classifier_abbreviation="LCPN",
)
self.binary_policy = binary_policy
def fit(self, X, y):
"""
Fit a local classifier per node.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The training input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csc_matrix``.
y : array-like of shape (n_samples, n_levels)
The target values, i.e., hierarchical class labels for classification.
Returns
-------
self : object
Fitted estimator.
"""
# Execute common methods necessary before fitting
super()._pre_fit(X, y)
# Initialize policy
self._initialize_binary_policy()
# Fit local classifiers in DAG
super().fit(X, y)
# TODO: Store the classes seen during fit
# TODO: Add function to allow user to change local classifier
# TODO: Add parameter to receive hierarchy as parameter in constructor
# TODO: Add support to empty labels in some levels
# Return the classifier
return self
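    # Hedged usage sketch (editor-added; data and estimator are illustrative):
    # >>> from sklearn.linear_model import LogisticRegression
    # >>> lcpn = LocalClassifierPerNode(local_classifier=LogisticRegression())
    # >>> lcpn.fit(X, [["Animal", "Dog"], ["Animal", "Cat"]])   # doctest: +SKIP
    # >>> lcpn.predict(X)                                       # doctest: +SKIP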
def predict(self, X):
"""
Predict classes for the given data.
Hierarchical labels are returned.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
The input samples. Internally, its dtype will be converted
to ``dtype=np.float32``. If a sparse matrix is provided, it will be
converted into a sparse ``csr_matrix``.
Returns
-------
y : ndarray of shape (n_samples,) or (n_samples, n_outputs)
The predicted classes.
"""
# Check if fit has been called
check_is_fitted(self)
# Input validation
X = check_array(X, accept_sparse="csr")
y = np.empty((X.shape[0], self.max_levels_), dtype=self.dtype_)
# TODO: Add threshold to stop prediction halfway if need be
bfs = nx.bfs_successors(self.hierarchy_, source=self.root_)
self.logger_.info("Predicting")
for predecessor, successors in bfs:
if predecessor == self.root_:
mask = [True] * X.shape[0]
subset_x = X[mask]
else:
mask = np.isin(y, predecessor).any(axis=1)
subset_x = X[mask]
if subset_x.shape[0] > 0:
probabilities = np.zeros((subset_x.shape[0], len(successors)))
for i, successor in enumerate(successors):
successor_name = str(successor).split(self.separator_)[-1]
self.logger_.info(f"Predicting for node '{successor_name}'")
classifier = self.hierarchy_.nodes[successor]["classifier"]
positive_index = np.where(classifier.classes_ == 1)[0]
probabilities[:, i] = classifier.predict_proba(subset_x)[
:, positive_index
][:, 0]
highest_probability =
|
np.argmax(probabilities, axis=1)
|
numpy.argmax
|
import os
import unittest
import tempfile
import json
import numpy as np
import pandas as pd
import shutil
from supervised import AutoML
from numpy.testing import assert_almost_equal
from sklearn import datasets
from supervised.exceptions import AutoMLException
from supervised.algorithms.xgboost import additional
additional["max_rounds"] = 1
class AutoMLTargetsTest(unittest.TestCase):
automl_dir = "automl_tests"
rows = 50
def tearDown(self):
shutil.rmtree(self.automl_dir, ignore_errors=True)
def test_bin_class_01(self):
X = np.random.rand(self.rows, 3)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(3)])
y = np.random.randint(0, 2, self.rows)
automl = AutoML(
results_path=self.automl_dir,
total_time_limit=1,
algorithms=["Xgboost"],
train_ensemble=False,
explain_level=0,
start_random_models=1,
)
automl.fit(X, y)
pred = automl.predict(X)
u = np.unique(pred)
self.assertTrue(0 in u or 1 in u)
self.assertTrue(len(u) <= 2)
def test_bin_class_11(self):
X = np.random.rand(self.rows, 3)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(3)])
y = np.random.randint(0, 2, self.rows) * 2 - 1
automl = AutoML(
results_path=self.automl_dir,
total_time_limit=1,
algorithms=["Xgboost"],
train_ensemble=False,
explain_level=0,
start_random_models=1,
)
automl.fit(X, y)
p = automl.predict(X)
pred = automl.predict(X)
u = np.unique(pred)
self.assertTrue(-1 in u or 1 in u)
self.assertTrue(len(u) <= 2)
def test_bin_class_AB(self):
X = np.random.rand(self.rows, 3)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(3)])
y = np.random.permutation(["a", "B"] * int(self.rows / 2))
automl = AutoML(
results_path=self.automl_dir,
total_time_limit=1,
algorithms=["Xgboost"],
train_ensemble=False,
explain_level=0,
start_random_models=1,
)
automl.fit(X, y)
p = automl.predict(X)
pred = automl.predict(X)
u = np.unique(pred)
self.assertTrue("a" in u or "B" in u)
self.assertTrue(len(u) <= 2)
def test_bin_class_AB_missing_targets(self):
X = np.random.rand(self.rows, 3)
X = pd.DataFrame(X, columns=[f"f{i}" for i in range(3)])
y = pd.Series(
np.random.permutation(["a", "B"] * int(self.rows / 2)), name="target"
)
y.iloc[1] = None
y.iloc[3] = np.NaN
y.iloc[13] = np.nan
automl = AutoML(
results_path=self.automl_dir,
total_time_limit=1,
algorithms=["Xgboost"],
train_ensemble=False,
explain_level=0,
start_random_models=1,
)
automl.fit(X, y)
p = automl.predict(X)
pred = automl.predict(X)
u = np.unique(pred)
self.assertTrue("a" in u or "B" in u)
self.assertTrue(len(u) <= 2)
def test_multi_class_0123(self):
X =
|
np.random.rand(self.rows * 4, 3)
|
numpy.random.rand
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hdn.core.config import cfg
from hdn.utils.bbox import corner2center
from hdn.utils.point import Point
import math
import cv2
import matplotlib.pyplot as plt
import logging
class PointTarget:
    def __init__(self):
        self.points = Point(cfg.POINT.STRIDE, cfg.TRAIN.OUTPUT_SIZE, cfg.TRAIN.SEARCH_SIZE // 2)  # e.g. stride=8, output_size=25, search_size=255
def __call__(self, img, target, size, angle=0.0, neg=False, init_wh=[]):
# -1 ignore 0 negative 1 positive
        cls = np.zeros((size, size), dtype=np.float32)  # cls: neg=-2, ignore=-1, pos=0~1
img_mask = np.ones((cfg.TRAIN.SEARCH_SIZE, cfg.TRAIN.SEARCH_SIZE), dtype=np.int32)
img_mask = np.argwhere(img_mask).reshape(cfg.TRAIN.SEARCH_SIZE, cfg.TRAIN.SEARCH_SIZE,2).transpose(2,0,1)
self.img = img
delta = np.zeros((4, size, size), dtype=np.float32)
def select(position, keep_num=16):
num = position[0].shape[0]
if num <= keep_num:
return position, num
slt =
|
np.arange(num)
|
numpy.arange
|
import numpy as np
|
np.random.seed(1)
|
numpy.random.seed
|
#!/usr/bin/env python
# Analyze lesions
#
# Copyright (c) 2014 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Charley
# Modified: 2017-08-19
#
# About the license: see the file LICENSE.TXT
from __future__ import print_function, absolute_import, division
import os, math, sys, pickle, shutil
import numpy as np
import pandas as pd
from skimage.measure import label
import spinalcordtoolbox.image as msct_image
from spinalcordtoolbox.image import Image
from msct_parser import Parser
from msct_types import Centerline
import sct_utils as sct
from sct_utils import extract_fname, printv, tmp_create
from spinalcordtoolbox.centerline.core import get_centerline
def get_parser():
# Initialize the parser
parser = Parser(__file__)
parser.usage.set_description('Compute statistics on lesions of the input binary file (1 for lesion, 0 for background). The function assigns an ID value to each lesion (1, 2, 3, etc.) and outputs morphometric measures for each lesion:'
'\n- volume [mm^3]'
'\n- length [mm]: length along the Superior-Inferior axis'
'\n- max_equivalent_diameter [mm]: maximum diameter of the lesion, when approximating the lesion as a circle in the axial cross-sectional plane orthogonal to the spinal cord'
'\n\nIf an image (e.g. T2w or T1w image, texture image) is provided, it computes the mean and standard deviation values of this image within each lesion.'
'\n\nIf a registered template is provided, it computes:'
'\n- the distribution of each lesion depending on each vertebral level and on each region of the template (eg GM, WM, WM tracts).'
'\n- the proportion of ROI (eg vertebral level, GM, WM) occupied by lesion.'
'\nN.B. If the proportion of lesion in each region (e.g., WM and GM) does not sum up to 100%, it means that the registered template does not fully cover the lesion, in that case you might want to check the registration results.')
parser.add_option(name="-m",
type_value="file",
description="Lesion mask to analyze",
mandatory=True,
example='t2_lesion.nii.gz')
parser.add_option(name="-s",
type_value="file",
description="Spinal cord centerline or segmentation file, which will be used to correct morphometric measures with cord angle with respect to slice.",
mandatory=False,
example='t2_seg.nii.gz')
parser.add_option(name="-i",
type_value="file",
description="Image from which to extract average values within lesions (e.g. T2w or T1w image, texture image).",
mandatory=False,
example='t2.nii.gz')
parser.add_option(name="-f",
type_value="str",
description="Path to folder containing the atlas/template registered to the anatomical image.",
mandatory=False,
example="./label")
parser.add_option(name="-ofolder",
type_value="folder_creation",
description="Output folder",
mandatory=False,
example='./')
parser.add_option(name="-r",
type_value="multiple_choice",
description="Remove temporary files.",
mandatory=False,
default_value='1',
example=['0', '1'])
parser.add_option(name="-v",
type_value='multiple_choice',
description="Verbose: 0 = nothing, 1 = classic, 2 = expended",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
class AnalyzeLeion:
def __init__(self, fname_mask, fname_sc, fname_ref, path_template, path_ofolder, verbose):
self.fname_mask = fname_mask
self.fname_sc = fname_sc
self.fname_ref = fname_ref
self.path_template = path_template
self.path_ofolder = path_ofolder
self.verbose = verbose
self.wrk_dir = os.getcwd()
if not set(np.unique(Image(fname_mask).data)) == set([0.0, 1.0]):
if set(np.unique(Image(fname_mask).data)) == set([0.0]):
printv('WARNING: Empty masked image', self.verbose, 'warning')
else:
printv("ERROR input file %s is not binary file with 0 and 1 values" % fname_mask, 1, 'error')
# create tmp directory
self.tmp_dir = tmp_create(verbose=verbose) # path to tmp directory
# lesion file where each lesion has a different value
self.fname_label = extract_fname(self.fname_mask)[1] + '_label' + extract_fname(self.fname_mask)[2]
# initialization of measure sheet
measure_lst = ['label', 'volume [mm3]', 'length [mm]', 'max_equivalent_diameter [mm]']
if self.fname_ref is not None:
for measure in ['mean', 'std']:
measure_lst.append(measure + '_' + extract_fname(self.fname_ref)[1])
measure_dct = {}
for column in measure_lst:
measure_dct[column] = None
self.measure_pd = pd.DataFrame(data=measure_dct, index=range(0), columns=measure_lst)
# orientation of the input image
self.orientation = None
# volume object
self.volumes = None
        # initialization of proportion measures, related to the registered atlas
if self.path_template is not None:
self.path_atlas = os.path.join(self.path_template, "atlas")
self.path_levels = os.path.join(self.path_template, "template", "PAM50_levels.nii.gz")
else:
self.path_atlas, self.path_levels = None, None
self.vert_lst = None
self.atlas_roi_lst = None
self.distrib_matrix_dct = {}
# output names
self.pickle_name = extract_fname(self.fname_mask)[1] + '_analyzis.pkl'
self.excel_name = extract_fname(self.fname_mask)[1] + '_analyzis.xls'
def analyze(self):
self.ifolder2tmp()
# Orient input image(s) to RPI
self.orient2rpi()
# Label connected regions of the masked image
self.label_lesion()
# Compute angle for CSA correction
self.angle_correction()
# Compute lesion volume, equivalent diameter, (S-I) length, max axial nominal diameter
# if registered template provided: across vertebral level, GM, WM, within WM/GM tracts...
# if ref image is provided: Compute mean and std value in each labeled lesion
self.measure()
# reorient data if needed
self.reorient()
# print averaged results
self.show_total_results()
# save results in excel and pickle files
self.pack_measures()
# save results to ofolder
self.tmp2ofolder()
def tmp2ofolder(self):
os.chdir(self.wrk_dir) # go back to working directory
printv('\nSave results files...', self.verbose, 'normal')
printv('\n... measures saved in the files:', self.verbose, 'normal')
for file_ in [self.fname_label, self.excel_name, self.pickle_name]:
printv('\n - ' + os.path.join(self.path_ofolder, file_), self.verbose, 'normal')
sct.copy(os.path.join(self.tmp_dir, file_), os.path.join(self.path_ofolder, file_))
def pack_measures(self):
writer = pd.ExcelWriter(self.excel_name, engine='xlwt')
self.measure_pd.to_excel(writer, sheet_name='measures', index=False, engine='xlwt')
# Add the total column and row
if self.path_template is not None:
for sheet_name in self.distrib_matrix_dct:
if '#' in sheet_name:
df = self.distrib_matrix_dct[sheet_name].copy()
df = df.append(df.sum(numeric_only=True, axis=0), ignore_index=True)
df['total'] = df.sum(numeric_only=True, axis=1)
df.iloc[-1, df.columns.get_loc('vert')] = 'total'
df.to_excel(writer, sheet_name=sheet_name, index=False, engine='xlwt')
else:
self.distrib_matrix_dct[sheet_name].to_excel(writer, sheet_name=sheet_name, index=False, engine='xlwt')
# Save pickle
self.distrib_matrix_dct['measures'] = self.measure_pd
with open(self.pickle_name, 'wb') as handle:
pickle.dump(self.distrib_matrix_dct, handle)
# Save Excel
writer.save()
def show_total_results(self):
printv('\n\nAveraged measures...', self.verbose, 'normal')
for stg, key in zip([' Volume [mm^3] = ', ' (S-I) Length [mm] = ', ' Equivalent Diameter [mm] = '], ['volume [mm3]', 'length [mm]', 'max_equivalent_diameter [mm]']):
printv(stg + str(np.round(np.mean(self.measure_pd[key]), 2)) + '+/-' + str(np.round(np.std(self.measure_pd[key]), 2)), self.verbose, type='info')
printv('\nTotal volume = ' + str(np.round(np.sum(self.measure_pd['volume [mm3]']), 2)) + ' mm^3', self.verbose, 'info')
printv('Lesion count = ' + str(len(self.measure_pd['volume [mm3]'].values)), self.verbose, 'info')
def reorient(self):
if not self.orientation == 'RPI':
printv('\nOrient output image to initial orientation...', self.verbose, 'normal')
self._orient(self.fname_label, self.orientation)
def _measure_within_im(self, im_lesion, im_ref, label_lst):
printv('\nCompute reference image features...', self.verbose, 'normal')
for lesion_label in label_lst:
im_label_data_cur = im_lesion == lesion_label
im_label_data_cur[np.where(im_ref == 0)] = 0 # if the ref object is eroded compared to the labeled object
mean_cur, std_cur = np.mean(im_ref[np.where(im_label_data_cur)]), np.std(im_ref[np.where(im_label_data_cur)])
label_idx = self.measure_pd[self.measure_pd.label == lesion_label].index
self.measure_pd.loc[label_idx, 'mean_' + extract_fname(self.fname_ref)[1]] = mean_cur
self.measure_pd.loc[label_idx, 'std_' + extract_fname(self.fname_ref)[1]] = std_cur
printv('Mean+/-std of lesion #' + str(lesion_label) + ' in ' + extract_fname(self.fname_ref)[1] + ' file: ' + str(np.round(mean_cur, 2)) + '+/-' + str(np.round(std_cur, 2)), self.verbose, type='info')
def _measure_volume(self, im_data, p_lst, idx):
for zz in range(im_data.shape[2]):
self.volumes[zz, idx - 1] = np.sum(im_data[:, :, zz]) * p_lst[0] * p_lst[1] * p_lst[2]
vol_tot_cur = np.sum(self.volumes[:, idx - 1])
self.measure_pd.loc[idx, 'volume [mm3]'] = vol_tot_cur
printv(' Volume : ' + str(
|
np.round(vol_tot_cur, 2)
|
numpy.round
|
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
assert float(tf.__version__[:3]) >= 2.3
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score, accuracy_score
from tensorflow.keras import datasets, layers, models, losses
gpus = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(gpus[0], True)
dataset_path = '/home/alexandr/datasets/santas_2'
###################################
tf.random.set_seed(8)
np.random.seed(8)
IMAGE_SIZE = 448
IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
BATCH_SIZE = 32
FILTERS = 64
DROPOUT = 0.1
K_PARTS = 3
VALIDATION_SPLIT = 0.0
FREEZE_EPOCHS = 2
UNFREEZE_CONFIG = [(100, 1e-5),
(2, 1e-8)]
args = [IMAGE_SIZE, K_PARTS, FREEZE_EPOCHS,
'|'.join([str(i[0]) for i in UNFREEZE_CONFIG]),
FILTERS, DROPOUT]
OUTPUT_FILE_NAME = '{}'.format('_'.join([str(i) for i in args]))
LOAD_MODEL = False
MODEL_NAME = '0.8654310907491829__448_3_0_10|10_64_0.0'
EVAL_ONLY = False
OUTPUT_FILE = '{}.tflite'.format(OUTPUT_FILE_NAME)
OUTPUT_FILE_Q = '{}_q.tflite'.format(OUTPUT_FILE_NAME)
###################################
classes_paths = os.listdir(dataset_path)
CLASSES_NUM = len(classes_paths)
# Read data
data = dict()
for i in range(len(classes_paths)):
class_name = classes_paths[i]
data[class_name] = []
for img in os.listdir('{}/{}'.format(dataset_path, classes_paths[i])):
data[class_name].append('{}/{}/{}'.format(dataset_path, classes_paths[i], img))
# Split data
data_parts = [dict() for i in range(K_PARTS)]
for key in data.keys():
tmp =
|
np.array_split(data[key], K_PARTS)
|
numpy.array_split
|
import itertools
import numpy as np
from tqdm import tqdm
from typing import List
from .utils import (
to_array,
calc_score,
array_score,
sequence_score,
lazy_calc_score,
sequence_max_score,
sequence_lost_score,
)
from .models import Photo, Orientation
ALL_TAGS = []
def arrange_photos(data: List[Photo]):
global ALL_TAGS
    ALL_TAGS = sorted({tag for photo in data for tag in photo.tags})
photos = [x for x in data if x.orientation != Orientation.Vertical]
vertical_photos = [x for x in data if x.orientation == Orientation.Vertical]
print("Arranging photos...")
np.random.seed(12)
arranged_photos = []
for size in sorted({len(x) // 2 * 2 for x in photos}):
sizes = (size, size + 1)
slide_score = size // 2
sequence = [x for x in photos if len(x) in sizes]
if not sequence:
continue
sequences = _create_sub_sequences(sequence, th=slide_score)
nb_attempts = 0
previous_total_score = 0
bar = tqdm(total=len(sequences) - 1, desc=f"Processing {sizes}")
while True:
# subsequence post processing
# trying to reduce number of subsequences, all subsequences must remain perfect
nb_sequence = len(sequences)
sequences = _stitch(sequences, th=slide_score)
sequences = _insert(sequences, th=slide_score)
sequences = _shuffle(sequences, th=slide_score, p=0.1)
sequences = _partial_reverse(sequences, th=slide_score, p=0.1)
sequences, vertical_photos = _stitch_by_vertical_photos(
sequences,
vertical_photos,
th=slide_score,
nb_proposals=20000,
p_build=0.02,
)
bar.update(nb_sequence - len(sequences))
total_score = sum(sequence_score(x) for x in sequences)
if total_score <= previous_total_score:
nb_attempts += 1
else:
nb_attempts = 0
previous_total_score = total_score
if len(sequences) == 1 or nb_attempts >= 50:
break
bar.close()
assert all(sequence_lost_score(s) == 0 for s in sequences)
arranged_photos += sum(sequences, [])
print("Done.")
print(f"Number of photos: {len(arranged_photos)}")
score = sequence_score(arranged_photos)
max_score = sequence_max_score(arranged_photos)
print(f"Score = {score} / {max_score}")
return arranged_photos, vertical_photos
def _stitch(sequences, th=1):
""" trying to connect two different sequences """
if len(sequences) <= 1:
return sequences
if th == 0:
return [sum(sequences, [])]
for i, j in itertools.combinations(range(len(sequences)), r=2):
s1, s2 = sequences[i], sequences[j]
if not s1 or not s2:
continue
if lazy_calc_score(s1[-1], s2[0]) >= th:
sequences[i], sequences[j] = [], s1 + s2
continue
if lazy_calc_score(s1[-1], s2[-1]) >= th:
sequences[i], sequences[j] = [], s1 + s2[::-1]
continue
if lazy_calc_score(s1[0], s2[0]) >= th:
sequences[i], sequences[j] = [], s1[::-1] + s2
continue
if lazy_calc_score(s1[0], s2[-1]) >= th:
sequences[i], sequences[j] = [], s1[::-1] + s2[::-1]
continue
return [s for s in sequences if s]
def _do_insert(s1, s2, th):
""" trying to insert sequence 1 into sequence 2 """
if not s1 or len(s2) <= 1:
return False, None
for i, p2 in enumerate(s2[1:], start=1):
p1 = s2[i - 1]
if lazy_calc_score(p1, s1[0]) >= th and lazy_calc_score(s1[-1], p2) >= th:
return True, s2[:i] + s1 + s2[i:]
if lazy_calc_score(p1, s1[-1]) >= th and lazy_calc_score(s1[0], p2) >= th:
return True, s2[:i] + s1[::-1] + s2[i:]
return False, None
def _insert(sequences, th):
if len(sequences) <= 1:
return sequences
for i, j in itertools.product(range(len(sequences)), repeat=2):
if i != j:
status, combined_sequence = _do_insert(sequences[i], sequences[j], th=th)
if status:
sequences[i], sequences[j] = [], combined_sequence
return [s for s in sequences if s]
def _do_partial_reverse(sequence, th=1, p=0.1):
""" trying to reverse part of the sequence """
if len(sequence) <= 2 or p == 0:
return sequence
first_photo = sequence[0]
for i, photo in enumerate(sequence[2:], start=2):
if calc_score(first_photo, photo) >= th:
if np.random.random_sample() < p:
return sequence[:i][::-1] + sequence[i:]
return sequence
def _partial_reverse(sequences, th=1, p=0.1):
if len(sequences) <= 1:
return sequences
for i in range(len(sequences)):
sequences[i] = _do_partial_reverse(sequences[i], th=th, p=p)
sequences[i] = _do_partial_reverse(sequences[i][::-1], th=th, p=p)
return sequences
def _do_shuffle(s1, s2, th=1, p=0.1):
""" trying to swap some subsequence from sequence 1 and sequence 2 """
if not s1 or len(s2) <= 1 or p == 0:
return s1, s2
for i, p2 in enumerate(s2[1:], start=1):
p1 = s2[i - 1]
if lazy_calc_score(p1, s1[0]) >= th:
if np.random.random_sample() < p:
return s2[:i] + s1, s2[i:]
if lazy_calc_score(p1, s1[-1]) >= th:
if np.random.random_sample() < p:
return s2[:i] + s1[::-1], s2[i:]
return s1, s2
def _shuffle(sequences, th=1, p=0.1):
if len(sequences) <= 1 or p == 0:
return sequences
for i, j in itertools.product(range(len(sequences)), repeat=2):
if i != j:
sequences[i], sequences[j] = _do_shuffle(
sequences[i], sequences[j], th=th, p=p
)
return [s for s in sequences if s]
def _create_sub_sequences(sequence, th=1):
""" create list of perfect subsequence """
out = []
if not sequence:
return out
sub_sequence = [sequence[0]]
sequence = sequence[1:]
while sequence:
p1 = sub_sequence[-1]
_next = None
for i, p2 in enumerate(sequence):
if p2 & p1 == th:
_next = i
break
if _next is not None:
p2 = sequence[_next]
sub_sequence.append(p2)
sequence.pop(_next)
else:
out.append(sub_sequence)
sub_sequence = [sequence[0]]
sequence = sequence[1:]
out.append(sub_sequence)
assert all(sequence_lost_score(s) == 0 for s in out)
return out
def _do_stitch_by_vertical_photos(sequences, proposals, th=1, p_build=0.05):
if len(sequences) <= 1 or len(proposals) < 1:
return
proposals = np.array(proposals)
ar = to_array(proposals, ALL_TAGS)
used_pairs = set()
def update(_i, _j, _pair, _new_sequence):
sequences[_i], sequences[_j] = [], _new_sequence
used_pairs.update([x for x in _pair.id])
def build(_i, _pair, _new_sequence):
sequences[_i] = _new_sequence
used_pairs.update([x for x in _pair.id])
pair = None
for i, j in itertools.combinations(range(len(sequences)), r=2):
s1, s2 = sequences[i], sequences[j]
if not s1 or not s2:
continue
if pair is not None:
cond = [
p.id[0] not in pair.id and p.id[1] not in pair.id for p in proposals
]
proposals = proposals[cond]
ar = ar[cond]
s11 = array_score(to_array(s1[0], ALL_TAGS), ar)
s12 = s11 if len(s1) == 1 else array_score(to_array(s1[-1], ALL_TAGS), ar)
s21 = array_score(to_array(s2[0], ALL_TAGS), ar)
s22 = s21 if len(s2) == 1 else array_score(to_array(s2[-1], ALL_TAGS), ar)
cond = s12 + s21 >= th * 2
if np.any(cond):
i_pair = np.random.choice(np.where(cond)[0])
pair = proposals[i_pair]
update(i, j, pair, s1 + [pair] + s2)
continue
cond = s12 + s22 >= th * 2
if np.any(cond):
i_pair = np.random.choice(np.where(cond)[0])
pair = proposals[i_pair]
update(i, j, pair, s1 + [pair] + s2[::-1])
continue
cond = s11 + s21 >= th * 2
if np.any(cond):
i_pair = np.random.choice(np.where(cond)[0])
pair = proposals[i_pair]
update(i, j, pair, s1[::-1] + [pair] + s2)
continue
cond = s11 + s22 >= th * 2
if np.any(cond):
i_pair = np.random.choice(np.where(cond)[0])
pair = proposals[i_pair]
update(i, j, pair, s1[::-1] + [pair] + s2[::-1])
continue
cond = s11 >= th
if np.any(cond) and np.random.random_sample() <= p_build:
i_pair = np.random.choice(np.where(cond)[0])
pair = proposals[i_pair]
build(i, pair, [pair] + s1)
continue
cond = s12 >= th
if np.any(cond) and np.random.random_sample() <= p_build:
i_pair = np.random.choice(
|
np.where(cond)
|
numpy.where
|
import os
import argparse
import numpy as np
from net import AlexNetPlusLatent
from timeit import time
import torch
import torch.nn as nn
from torchvision import datasets, models, transforms
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim.lr_scheduler
parser = argparse.ArgumentParser(description='Deep Hashing evaluate mAP')
parser.add_argument('--pretrained', type=str, default=0, metavar='pretrained_model',
help='loading pretrained model(default = None)')
parser.add_argument('--bits', type=int, default=48, metavar='bts',
help='binary bits')
parser.add_argument('--path', type=str, default='model', metavar='P',
help='path directory')
args = parser.parse_args()
def load_data():
transform_train = transforms.Compose(
[transforms.Resize(227),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose(
[transforms.Resize(227),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
trainset = datasets.CIFAR10(root='./data', train=True, download=True,
transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=100,
shuffle=False, num_workers=2)
testset = datasets.CIFAR10(root='./data', train=False, download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100,
shuffle=False, num_workers=2)
return trainloader, testloader
def binary_output(dataloader):
net = AlexNetPlusLatent(args.bits)
net.load_state_dict(torch.load('./{}/{}'.format(args.path, args.pretrained)))
use_cuda = torch.cuda.is_available()
if use_cuda:
net.cuda()
full_batch_output = torch.cuda.FloatTensor()
full_batch_label = torch.cuda.LongTensor()
net.eval()
for batch_idx, (inputs, targets) in enumerate(dataloader):
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
inputs, targets = Variable(inputs), Variable(targets)
outputs, _ = net(inputs)
full_batch_output = torch.cat((full_batch_output, outputs.data), 0)
full_batch_label = torch.cat((full_batch_label, targets.data), 0)
return torch.round(full_batch_output), full_batch_label
def precision(trn_binary, trn_label, tst_binary, tst_label):
trn_binary = trn_binary.cpu().numpy()
trn_binary = np.asarray(trn_binary, np.int32)
trn_label = trn_label.cpu().numpy()
tst_binary = tst_binary.cpu().numpy()
tst_binary = np.asarray(tst_binary, np.int32)
tst_label = tst_label.cpu().numpy()
classes = np.max(tst_label) + 1
for i in range(classes):
if i == 0:
tst_sample_binary = tst_binary[np.random.RandomState(seed=i).permutation(np.where(tst_label==i)[0])[:100]]
tst_sample_label = np.array([i]).repeat(100)
continue
else:
tst_sample_binary = np.concatenate([tst_sample_binary, tst_binary[np.random.RandomState(seed=i).permutation(np.where(tst_label==i)[0])[:100]]])
tst_sample_label = np.concatenate([tst_sample_label, np.array([i]).repeat(100)])
query_times = tst_sample_binary.shape[0]
trainset_len = trn_binary.shape[0]
AP =
|
np.zeros(query_times)
|
numpy.zeros
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# File for training denoisers with at most one classifier attached to
from torch.optim import lr_scheduler
from architectures import DENOISERS_ARCHITECTURES, get_architecture, IMAGENET_CLASSIFIERS
from datasets import get_dataset, DATASETS
from test_denoiser_aug import test_with_classifier
from torch.nn import MSELoss, CrossEntropyLoss, KLDivLoss, L1Loss
from torch.optim import SGD, Optimizer, Adam
from torch.optim.lr_scheduler import StepLR, MultiStepLR
from torch.utils.data import DataLoader
from torchvision.transforms import ToPILImage
from train_utils import AverageMeter, accuracy, init_logfile, log, copy_code, requires_grad_
import torch.utils.data as data
import torch.nn as nn
from torchvision import models
import argparse
import datetime
import numpy as np
import os
import time
import torch
import torchvision
import dataset_txt_generate as tt
import RED_Dataset as RD
import random
from torchvision import transforms
import torchvision.transforms.functional as TF
from torch.utils.tensorboard import SummaryWriter
from collections import OrderedDict
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--dataset', type=str, choices=DATASETS)
parser.add_argument('--arch', type=str, choices=DENOISERS_ARCHITECTURES)
parser.add_argument('--outdir', default='logs', type=str, help='folder to save denoiser and training log')
parser.add_argument('--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--batch', default=256, type=int, metavar='N',
help='batchsize (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=1e-6, type=float,
help='initial learning rate', dest='lr')
parser.add_argument('--lr_step_size', type=int, default=30,
help='How often to decrease learning by gamma.')
parser.add_argument('--gamma', type=float, default=0.1,
help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--gpu', default=None, type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--print-freq', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--noise_sd', default=0.0, type=float,
help="standard deviation of noise distribution for data augmentation")
parser.add_argument('--objective', default='denoising', type=str,
help="the objective that is used to train the denoiser",
choices=['denoising', 'classification', 'stability'])
# parser.add_argument('--classifier', default='', type=str,
# help='path to the classifier used with the `classificaiton`'
# 'or `stability` objectives of the denoiser.')
parser.add_argument('--surrogate_model', default='res18',type=str,help='the name of the surrogate model for regularization term calculation')
parser.add_argument('--robust_res50_path', default='', type=str, help='The path of the checkpoint for the victim model robust res50')
parser.add_argument('--pretrained-denoiser', default='', type=str,
help='path to a pretrained denoiser')
parser.add_argument('--advdata_dir', default='', type=str,
help='path to the training dataset')
parser.add_argument('--root', default='', type=str,
help='path to the root of the training code')
parser.add_argument('--optimizer', default='Adam', type=str,
help='SGD, Adam, or Adam then SGD', choices=['SGD', 'Adam', 'AdamThenSGD'])
parser.add_argument('--start-sgd-epoch', default=50, type=int,
                    help='[Relevant only to AdamThenSGD.] Epoch at which Adam switches to SGD')
parser.add_argument('--start-sgd-lr', default=1e-5, type=float,
                    help='[Relevant only to AdamThenSGD.] LR at which SGD starts after Adam')
parser.add_argument('--resume', action='store_true',
help='if true, tries to resume training from an existing checkpoint')
parser.add_argument('--azure_datastore_path', type=str, default='',
help='Path to imagenet on azure')
parser.add_argument('--philly_imagenet_path', type=str, default='',
help='Path to imagenet on philly')
parser.add_argument('--gamma1',type=float, default=0.01, help='the coefficient of reconstruction accuracy')
parser.add_argument('--lambda1',type=float, default=0.01, help='the coefficient of adv reconstruction accuracy')
parser.add_argument('--finetune_stability', action='store_true', help='if true, tries to finetune with stability objective')
parser.add_argument('--l1orNOl1', default = 'NOl1', type = str, help='if l1, l1 regularizer for objective function')
parser.add_argument('--MAEorMSE', default = 'MAE', type = str, help='Choose MSE or MAE or Stability (neither of the former two) for loss function')
parser.add_argument('--mu1', default = 0.1, type = float, help='l1 sparsity for reconstructed perturbation advs-denoised')
parser.add_argument('--eta', default = 0, type = float, help='coefficient for the weight of the cosine angle')
parser.add_argument('--attack_method', default = 'PGD,FGSM,CW', type = str, help = 'The attack attacks, PGD, FGSM, CW')
parser.add_argument('--victim_model', default = 'res18', type = str, help ='The victim model, res18, res50, vgg16, vgg19, incptv3')
parser.add_argument('--data_portion', default = 0.1, type = float, help ='subset portion')
parser.add_argument('--aug', default='', type = str, help = 'augmentation types')
parser.add_argument('--aug2', default='', type = str, help = 'augmentation type 2s')
parser.add_argument('--reg_data', default = 'all', type=str, help='The type of data for fine tuning, all, ori, robust')
parser.add_argument("--multigpu", default=None, type=lambda x: [int(a) for a in x.split(",")], help="Which GPUs to use for multigpu training")
parser.add_argument('--seed', type=int, default=42, help="random seed for initialization")
args = parser.parse_args()
if args.azure_datastore_path:
os.environ['IMAGENET_DIR_AZURE'] = os.path.join(args.azure_datastore_path, 'datasets/imagenet_zipped')
if args.philly_imagenet_path:
os.environ['IMAGENET_DIR_PHILLY'] = os.path.join(args.philly_imagenet_path, './')
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)
random.seed(0)
toPilImage = ToPILImage()
def set_gpu(args, model):
assert torch.cuda.is_available(), "CPU-only experiments currently unsupported"
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif args.multigpu is None:
device = torch.device("cpu")
else:
# DataParallel will divide and allocate batch_size to all available GPUs
print(f"=> Parallelizing on {args.multigpu} gpus")
torch.cuda.set_device(args.multigpu[0])
args.gpu = args.multigpu[0]
model = torch.nn.DataParallel(model, device_ids=args.multigpu).cuda()
if args.seed is None:
cudnn.benchmark = True
return model
def assign_learning_rate(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def cosine_lr(optimizer, args, **kwargs):
def _lr_adjuster(epoch, iteration):
# if epoch < args.warmup_length:
# lr = warmup_lr(args.lr, args.warmup_length, epoch)
# else:
e = epoch
es = args.epochs
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * args.lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
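# Editor-added sketch of the schedule this returns: lr(e) = 0.5 * (1 + cos(pi * e / epochs)) * args.lr.
# With args.lr = 1e-6 and args.epochs = 200 (the defaults above):
# >>> adjust = cosine_lr(optimizer, args)
# >>> adjust(0, 0)     # ~1e-06 (full lr at epoch 0)
# >>> adjust(100, 0)   # ~5e-07 (half-way through the decay)
# >>> adjust(200, 0)   # ~0.0   (fully decayed; also written into optimizer.param_groups)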
def main():
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
# Copy code to output directory
copy_code(args.outdir)
root = args.root
best_loss = 10000.0
# -------------------------------------------- step 1/5 : Data Loading -------------------------------------------
print('-------------------------------------\nLoading Data...\n-------------------------------------\n')
attack_method = list(map(lambda x: str(x), args.attack_method.split(",")))
victim_model = list(map(lambda x: str(x), args.victim_model.split(",")))
attack_method_victim_model_list = list()
for model_name in victim_model:
attack_method_victim_model_list.extend(list(map(lambda x: x+model_name, attack_method)))
train_data_list = list()
for item in attack_method_victim_model_list:
txt_train_list = os.path.join(root, 'train_list.txt')
advdata_dir = args.advdata_dir
img_train_clean = advdata_dir + item + '/trainclean'
img_train_adv = advdata_dir + item + '/train'
tt.gen_txt(txt_train_list, img_train_clean, img_train_adv)
train_data_list.append(RD.FaceDataset(txt_train_list))
multiple_dataset = data.ConcatDataset(train_data_list)
example_num = len(multiple_dataset)
idx_input = random.sample(range(0,example_num),int(example_num * args.data_portion))
sub_dataset = data.Subset(multiple_dataset, idx_input)
train_loader = DataLoader(dataset=sub_dataset, batch_size=args.batch, shuffle=True, num_workers=args.workers, pin_memory=False)
class Normalize(nn.Module):
def __init__(self, mean, std):
super(Normalize, self).__init__()
self.register_buffer('mean', torch.Tensor(mean))
self.register_buffer('std', torch.Tensor(std))
def forward(self, input):
# Broadcasting
mean = self.mean.reshape(1, 3, 1, 1)
std = self.std.reshape(1, 3, 1, 1)
return (input - mean) / std
norm_layer = Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
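    # Normalize folds the ImageNet mean/std into the classifier so that the
    # denoiser itself operates on raw [0, 1] pixels. Illustrative check:
    #   x = torch.rand(2, 3, 224, 224)
    #   y = norm_layer(x)  # (x - mean) / std, broadcast over batch and spatial dims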
# ------------------------------------ step 2/5 : Network Definition ------------------------------------
if args.pretrained_denoiser:
checkpoint = torch.load(args.pretrained_denoiser)
assert checkpoint['arch'] == args.arch
denoiser = get_architecture(checkpoint['arch'], args.dataset)
denoiser = set_gpu(args, denoiser)
denoiser.cuda()
denoiser.load_state_dict(checkpoint['state_dict'])
elif args.finetune_stability:
checkpoint = torch.load('logs/gamma0lambda0checkpoint.pth.tar')
assert checkpoint['arch'] == args.arch
denoiser = get_architecture(checkpoint['arch'], args.dataset)
denoiser.load_state_dict(checkpoint['state_dict'])
else: # training a new denoiser
denoiser = get_architecture(args.arch, args.dataset)
# denoiser = set_gpu(args, denoiser)
# denoiser.cuda()
if args.optimizer == 'Adam':
optimizer = Adam(denoiser.parameters(), lr=args.lr, weight_decay=args.weight_decay)
elif args.optimizer == 'SGD':
optimizer = SGD(denoiser.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
elif args.optimizer == 'AdamThenSGD':
optimizer = Adam(denoiser.parameters(), lr=args.lr, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, step_size=args.lr_step_size, gamma=args.gamma)
starting_epoch = 0
logfilename = os.path.join(args.outdir, 'log.txt')
## Resume from checkpoint if exists and if resume flag is True
denoiser_path = os.path.join(args.outdir, 'checkpoint.pth.tar')
if args.resume and os.path.isfile(denoiser_path):
print("=> loading checkpoint '{}'".format(denoiser_path))
checkpoint = torch.load(denoiser_path,
map_location=lambda storage, loc: storage)
assert checkpoint['arch'] == args.arch
starting_epoch = checkpoint['epoch']
denoiser.load_state_dict(checkpoint['state_dict'])
        if starting_epoch >= args.start_sgd_epoch and args.optimizer == 'AdamThenSGD':  # Do Adam for a few steps then continue with SGD
print("-->[Switching from Adam to SGD.]")
args.lr = args.start_sgd_lr
optimizer = SGD(denoiser.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, step_size=args.lr_step_size, gamma=args.gamma)
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(denoiser_path, checkpoint['epoch']))
else:
if args.resume: print("=> no checkpoint found at '{}'".format(args.outdir))
        init_logfile(logfilename, "epoch\ttime\tproctime\tlr\ttrainloss\tvalloss\tvalacc\tvalperturbacc")
if args.objective == 'error':
criterion = MSELoss(size_average=None, reduce=None, reduction='mean').cuda()
best_loss = 1e6
elif args.objective in ['classification', 'stability', 'denoising']:
if args.surrogate_model == "incptv3":
print('--load pretrained inceptv3--')
clf = nn.Sequential(norm_layer,models.inception_v3(pretrained=True)).cuda().eval()
elif args.surrogate_model == "vgg16":
print('--load pretrained vgg16--')
clf = nn.Sequential(norm_layer, models.vgg16(pretrained=True)).cuda().eval()
elif args.surrogate_model == "vgg19":
print('--load pretrained vgg19--')
clf = nn.Sequential(norm_layer, models.vgg19(pretrained=True)).cuda().eval()
elif args.surrogate_model == "res18":
print('--load pretrained res18--')
clf = nn.Sequential(norm_layer,models.resnet18(pretrained=True)).cuda().eval()
elif args.surrogate_model == "res50":
print('--load pretrained res50--')
clf = nn.Sequential(norm_layer,models.resnet50(pretrained=True)).cuda().eval()
elif args.surrogate_model == "robust_res50":
print('--load robust trained res50--')
surrogate_checkpoint = torch.load(args.robust_res50_path)
clf = models.__dict__['resnet50']()
state_dict = surrogate_checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
k = k.replace('module.', '')
new_state_dict[k] = v
clf.load_state_dict(new_state_dict)
clf = clf.cuda()
requires_grad_(clf, False)
criterion = CrossEntropyLoss(size_average=None, reduce=None, reduction='mean').cuda()
best_acc = 0
clf = set_gpu(args, clf)
clf.cuda()
# ------------------------------------ step 3/5 : Training --------------------------------------------------
writer = SummaryWriter(args.outdir)
test_data_method_model = dict()
# lr_policy = cosine_lr(optimizer, args)
for epoch in range(starting_epoch, args.epochs):
# lr_policy(epoch, iteration=None)
before = time.time()
before1 = time.process_time()
if args.objective == 'denoising':
if args.surrogate_model == 'incptv3':
train_loss = train(train_loader, denoiser, criterion, optimizer, epoch, clf, True)
else:
train_loss = train(train_loader, denoiser, criterion, optimizer, epoch, clf, False)
# test_loss = test(test_loader, denoiser, criterion, 0.0, args.print_freq, args.outdir)
writer.add_scalar('train_loss', train_loss, epoch)
test_loss = 1000.0
test_acc = 'NA'
scheduler.step()
args.lr = scheduler.get_last_lr()[0]
# Switch from Adam to SGD
        if epoch == args.start_sgd_epoch and args.optimizer == 'AdamThenSGD':  # Do Adam for a few steps then continue with SGD
print("-->[Switching from Adam to SGD.]")
args.lr = args.start_sgd_lr
optimizer = SGD(denoiser.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
scheduler = StepLR(optimizer, step_size=args.lr_step_size, gamma=args.gamma)
torch.save({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': denoiser.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.outdir, 'Aug'+str(args.MAEorMSE)+str(args.l1orNOl1)+'mu'+str(args.mu1)+'gamma'+str(args.gamma1)+'lambda'+str(args.lambda1) + 'surrogate' + str(args.surrogate_model) + 'attack_method' + str(args.attack_method) + 'victim_model'+ str(args.victim_model) +'checkpoint.pth.tar'))
        print('save model at {}'.format(os.path.join(args.outdir, 'Aug'+str(args.MAEorMSE)+str(args.l1orNOl1)+'mu'+str(args.mu1)+'gamma'+str(args.gamma1)+'lambda'+str(args.lambda1) + 'surrogate' + str(args.surrogate_model) + 'attack_method' + str(args.attack_method) + 'victim_model'+ str(args.victim_model) +'checkpoint.pth.tar')))
# ---------------------------begin validation-----------------------------
attack_method = list(map(lambda x: str(x), args.attack_method.split(",")))
victim_model = list(map(lambda x: str(x), args.victim_model.split(",")))
clf_loss = np.zeros((len(attack_method), len(victim_model)))
clf_acc = np.zeros((len(attack_method), len(victim_model)))
clf_acc_perturb = np.zeros((len(attack_method), len(victim_model)))
model_num = 0
for i, model_name in enumerate(victim_model): #res18,res50,vgg16,vgg19,incptv3
attack_num = 0
for j, attack_name in enumerate(attack_method): #PGD,FGSM,CW
if epoch == 0:
# print(model_name,attack_name)
text_test_list_method_model = os.path.join(root, 'test_list_{}{}.txt'.format(attack_name,model_name))
img_test_clean = advdata_dir + attack_name + model_name + '/testclean'
img_test_adv = advdata_dir + attack_name + model_name + '/test'
tt.gen_txt(text_test_list_method_model, img_test_clean, img_test_adv)
test_data_method_model[model_name+attack_name] = RD.FaceDataset(text_test_list_method_model)
test_data = test_data_method_model[model_name+attack_name]
if model_name == "incptv3":
clf_gt = nn.Sequential(norm_layer,models.inception_v3(pretrained=True)).cuda().eval()
elif model_name == "vgg16":
clf_gt = nn.Sequential(norm_layer, models.vgg16(pretrained=True)).cuda().eval()
elif model_name == "vgg19":
clf_gt = nn.Sequential(norm_layer, models.vgg19(pretrained=True)).cuda().eval()
elif model_name == "res18":
clf_gt = nn.Sequential(norm_layer,models.resnet18(pretrained=True)).cuda().eval()
elif model_name == "res50":
clf_gt = nn.Sequential(norm_layer,models.resnet50(pretrained=True)).cuda().eval()
clf_gt = set_gpu(args, clf_gt)
clf_gt.cuda()
classification_criterion = CrossEntropyLoss(size_average=None, reduce=None, reduction = 'mean').cuda()
test_loader = DataLoader(dataset=test_data, batch_size=64, num_workers=args.workers, pin_memory=False)
if model_name == "incptv3":
# input_batch = input_tensor.unsqueeze(0)
clf_loss[attack_num,model_num], clf_acc[attack_num,model_num], clf_acc_perturb[attack_num,model_num] = test_with_classifier(test_loader, denoiser, classification_criterion, args.noise_sd, args.print_freq, clf_gt,clf_gt,True, args.outdir)
else:
clf_loss[attack_num,model_num], clf_acc[attack_num,model_num], clf_acc_perturb[attack_num,model_num] = test_with_classifier(test_loader, denoiser, classification_criterion, args.noise_sd, args.print_freq, clf_gt,clf_gt,False, args.outdir)
print('The average MSE between reconstructed images and clean images for attack method {} and victim model {} is {}'.format(attack_name,model_name,clf_loss[attack_num,model_num]))
print('Reconstructed Image Accuracy for attack method {} and victim model {} is {}'.format(attack_name,model_name,clf_acc[attack_num,model_num]))
print('Reconstructed Adversarial Image Accuracy (compared with the label of the adversarial example) for attack method {} and victim model {} is {}'.format(attack_name,model_name,clf_acc_perturb[attack_num,model_num]))
attack_num = attack_num + 1
model_num = model_num + 1
clf_loss_avg = np.mean(clf_loss)
clf_acc_avg = np.mean(clf_acc)
clf_acc_perturb_avg = np.mean(clf_acc_perturb)
writer.add_scalar('validate_loss', clf_loss_avg, epoch)
writer.add_scalar('validate_acc', clf_acc_avg, epoch)
writer.add_scalar('validate_perturb_acc', clf_acc_perturb_avg, epoch)
        print('The average MSE, clean accuracy, and attack success rate across the dataset are {}, {}, {}'.format(clf_loss_avg,clf_acc_avg,clf_acc_perturb_avg))
#-----------------------------------end validation--------------------------------------
if args.objective == 'denoising' and clf_acc_avg > best_acc:
best_acc = clf_acc_avg
torch.save({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': denoiser.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(args.outdir, 'Aug'+str(args.MAEorMSE)+str(args.l1orNOl1)+'mu'+str(args.mu1)+'gamma'+str(args.gamma1)+'lambda'+str(args.lambda1) + 'surrogate' + str(args.surrogate_model) + 'attack_method' + str(args.attack_method) + 'victim_model'+ str(args.victim_model) +'bestpoint.pth.tar'))
after = time.time()
after1 = time.process_time()
if args.objective == 'denoising':
log(logfilename, "{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}".format(
epoch, after - before, after1 - before1,
args.lr, train_loss, clf_loss_avg, clf_acc_avg, clf_acc_perturb_avg))
# if args.objective == 'denoising':
# log(logfilename, "{}\t{:.3}\t{:.3}\t{:.3}\t{:.3}\t{:.3}".format(
# epoch, after - before,
# args.lr, train_loss, test_loss, test_acc))
def train(loader: DataLoader, denoiser: torch.nn.Module, criterion, optimizer: Optimizer, epoch: int, classifier: torch.nn.Module, incptv3:bool):
"""
Function for training denoiser for one epoch
:param loader:DataLoader: training dataloader
:param denoiser:torch.nn.Module: the denoiser being trained
:param criterion: loss function
:param optimizer:Optimizer: optimizer used during trainined
:param epoch:int: the current epoch (for logging)
:param noise_sd:float: the std-dev of the Guassian noise perturbation of the input
:param classifier:torch.nn.Module=None: a ``freezed'' classifier attached to the denoiser
(required classifciation/stability objectives), None for denoising objective
"""
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
MSE = MSELoss(size_average=None, reduce=None, reduction='mean').cuda()
MAE = L1Loss(size_average=None, reduce=None, reduction='mean').cuda()
# switch to train mode
denoiser.train()
# if classifier:
# classifier.eval()
dict_aug = {'rotate': rotation, 'flip': flip, 'translate': translate, 'cutout': cut_out, 'crop&pad':crop_w_padding, 'cutmix': cut_mix}
for i, (cleans, advs) in enumerate(loader):
# measure data loading time
# data_time.update(time.time() - end)
batch_size = cleans.shape[0]
cleans = cleans.cuda().to(dtype=torch.float)
advs = advs.cuda().to(dtype=torch.float)
if args.aug != '' and args.aug2 == '':
cleans_crop, advs_crop = dict_aug[args.aug](cleans, advs)
batch_size = len(cleans)
cleans = torch.cat((cleans, cleans_crop), 0)
advs = torch.cat((advs, advs_crop), 0)
if args.aug2 != '':
cleans_crop, advs_crop = dict_aug[args.aug](cleans, advs)
cleans_crop2, advs_crop2 = dict_aug[args.aug2](cleans, advs)
batch_size = len(cleans)
cleans = torch.cat((cleans, cleans_crop, cleans_crop2), 0)
advs = torch.cat((advs, advs_crop, advs_crop2), 0)
data_time.update(time.time() - end)
# compute output
        batch_size = len(cleans)  # note: after augmentation this is the concatenated (augmented) batch size
denoised = denoiser(advs)
if args.eta>0:
perturbation = advs-cleans
perturbation = perturbation.view(batch_size,-1)
reconstruction = advs-denoised
reconstruction = reconstruction.view(batch_size,-1)
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
simi = cos(perturbation, reconstruction) # the angle at f_adv
mean_simi = torch.mean(simi)
# adv_outputs = classifier(advs - denoised + cleans)
# f_denoised = classifier(denoised)
# clean_label = classifier(cleans)
# adv_label = classifier(advs)
# clean_label = clean_label.argmax(1).detach().clone()
# adv_label = adv_label.argmax(1).detach().clone()
# loss1 = MSE(denoised, cleans)
if args.MAEorMSE == 'MAE':
loss1 = MAE(denoised, cleans)
elif args.MAEorMSE == 'MSE':
loss1 = MSE(denoised, cleans)
elif args.MAEorMSE == 'Stability':
loss1 = 0
# loss = loss1 - args.eta * mean_simi
loss = loss1
# print(MSE(cleans,advs),MSE(cleans,denoised))
if args.gamma1>0:
f_clean = classifier(cleans)
F_clean = f_clean.argmax(1).detach().clone()
f_denoised = classifier(denoised)
if args.reg_data == 'all':
loss2 = criterion(f_denoised, F_clean)
elif args.reg_data =='ori':
loss2 = criterion(f_denoised[:batch_size], F_clean[:batch_size])
elif args.reg_data == 'robust':
F_ori = F_clean[:batch_size]
F_ori = F_ori.repeat(int(F_clean.shape[0]/batch_size))
strong_clean = (F_clean == F_ori)
loss2 = criterion(f_denoised[strong_clean], F_clean[strong_clean])
loss = loss + args.gamma1 * loss2
if args.lambda1>0:
f_adv = classifier(advs)
F_adv = f_adv.argmax(1).detach().clone()
f_reon_adv = classifier(advs - denoised + cleans)
if args.reg_data == 'all':
loss3 = criterion(f_reon_adv, F_adv)
elif args.reg_data == 'ori':
loss3 = criterion(f_reon_adv[:batch_size], F_adv[:batch_size])
elif args.reg_data == 'robust':
F_adv_ori = F_adv[:batch_size]
F_adv_ori = F_adv_ori.repeat(int(F_adv.shape[0]/batch_size))
strong_adv = (F_adv == F_adv_ori)
loss3 = criterion(f_reon_adv[strong_adv], F_adv[strong_adv])
loss = loss + args.lambda1 * loss3
losses.update(loss.item(), cleans.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
epoch, i, len(loader), batch_time=batch_time,
data_time=data_time, loss=losses))
return losses.avg
def frozen_module(module):
for param in module.parameters():
param.requires_grad = False
# 1. random flip
def vflip(clean,adv):
return TF.vflip(clean), TF.vflip(adv)
def hflip(clean,adv):
return TF.hflip(clean), TF.hflip(adv)
def flip(clean,adv):
if random.random() > 0.5:
return vflip(clean,adv)
else:
return hflip(clean,adv)
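# Each paired augmentation in this file is applied with identical parameters to
# the clean image and its adversarial counterpart, so the pixel-wise
# correspondence (and hence the perturbation advs - cleans) is preserved.
# Illustrative:
#   cleans_aug, advs_aug = flip(cleans, advs)      # same flip for both tensors
#   cleans_aug, advs_aug = rotation(cleans, advs)  # same random angle for both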
# 2. random rotation
def rotation(clean, adv):
angle = transforms.RandomRotation.get_params([-180, 180])
return TF.rotate(clean,angle), TF.rotate(adv,angle)
# def crop_resize(clean, adv):
# clean_crop = TF.resized_crop(clean, 10, 10, 200, 200, 224)
# adv_crop = TF.resized_crop(adv, 10, 10, 200, 200, 224)
# return clean_crop, adv_crop
# 3. cut out
class Cutout(object):
def __init__(self, n_holes, length, random=True):
self.n_holes = n_holes
self.length = length
self.random = random
def __call__(self, img, adv):
h = img.size(2)
w = img.size(3)
length = random.randint(1, self.length)
mask = np.ones((h, w), np.float32)
for n in range(self.n_holes):
y =
|
np.random.randint(h)
|
numpy.random.randint
|
import astropy.units as u
import numpy as np
from astropy.time import Time
from sora.config.decorators import deprecated_alias
__all__ = ['fit_ellipse']
@deprecated_alias(pos_angle='position_angle', dpos_angle='dposition_angle', log='verbose') # remove this line for v1.0
def fit_ellipse(*args, equatorial_radius, dequatorial_radius=0, center_f=0, dcenter_f=0, center_g=0,
dcenter_g=0, oblateness=0, doblateness=0, position_angle=0, dposition_angle=0,
loop=10000000, number_chi=10000, dchi_min=None, verbose=False, ellipse_error=0, sigma_result=1):
"""Fits an ellipse to given occultation using given parameters.
Parameters
----------
center_f : `int`, `float`, default=0
The coordinate in f of the ellipse center.
center_g : `int`, `float`, default=0
The coordinate in g of the ellipse center.
equatorial_radius : `int`, `float`
The Equatorial radius (semi-major axis) of the ellipse.
oblateness : `int`, `float`, default=0
The oblateness of the ellipse.
position_angle : `int`, `float`, default=0
The pole position angle of the ellipse in degrees.
Zero is in the North direction ('g-positive'). Positive clockwise.
dcenter_f : `int`, `float`
Interval for coordinate f of the ellipse center.
dcenter_g : `int`, `float`
Interval for coordinate g of the ellipse center.
    dequatorial_radius : `int`, `float`
Interval for the Equatorial radius (semi-major axis) of the ellipse.
doblateness : `int`, `float`
Interval for the oblateness of the ellipse
dposition_angle : `int`, `float`
Interval for the pole position angle of the ellipse in degrees.
loop : `int`, default=10000000
The number of ellipses to attempt fitting.
dchi_min : `int`, `float`
        If given, only ellipses whose chi-square values are smaller than
        chi_min + dchi_min will be saved.
number_chi : `int`, default=10000
If dchi_min is given, the procedure is repeated until number_chi is
reached.
verbose : `bool`, default=False
If True, it prints information while fitting.
ellipse_error : `int`, `float`
Model uncertainty to be considered in the fit, in km.
sigma_result : `int`, `float`
Sigma value to be considered as result.
Returns
-------
chisquare : `sora.ChiSquare`
A ChiSquare object with all parameters.
Important
---------
Each occultation is added as the first argument(s) directly.
Mandatory input parameters: 'center_f', 'center_g', 'equatorial_radius',
'oblateness', and 'position_angle'.
Parameters fitting interval: 'dcenter_f', 'dcenter_g', 'dequatorial_radius',
'doblateness', and 'dposition_angle'. Default values are set to zero.
Search done between (value - dvalue) and (value + dvalue).
Examples
--------
To fit the ellipse to the chords of occ1 Occultation object:
>>> fit_ellipse(occ1, **kwargs)
To fit the ellipse to the chords of occ1 and occ2 Occultation objects together:
>>> fit_ellipse(occ1, occ2, **kwargs)
"""
from sora.extra import ChiSquare
from sora.config.visuals import progressbar_show
from astropy.coordinates import Angle
from .core import Occultation
v = {'dcenter_f': dcenter_f, 'dcenter_g': dcenter_g, 'doblateness': doblateness, 'dposition_angle': dposition_angle,
'dequatorial_radius': dequatorial_radius, 'ellipse_error': ellipse_error, 'sigma_result': sigma_result,
'dchi_min': dchi_min}
for key, item in v.items():
if item is not None and item < 0:
raise ValueError("{} must be a positive number.".format(key))
values = []
chord_name = []
if len(args) == 0:
        raise ValueError('No occultation has been given as input.')
for occ in args:
if not isinstance(occ, Occultation):
raise TypeError('Given argument must be an Occultation object.')
for name, chord in occ.chords.items():
if chord.status() == 'positive':
if chord.is_able['immersion']:
f, g, vf, vg = chord.get_fg(time='immersion', vel=True)
err = np.linalg.norm([vf, vg])*chord.lightcurve.immersion_err
values.append([f, g, err])
chord_name.append(name + '_immersion')
if chord.is_able['emersion']:
f, g, vf, vg = chord.get_fg(time='emersion', vel=True)
err = np.linalg.norm([vf, vg])*chord.lightcurve.emersion_err
values.append([f, g, err])
chord_name.append(name + '_emersion')
controle_f0 = Time.now()
f0_chi = np.array([])
g0_chi = np.array([])
a_chi = np.array([])
obla_chi = np.array([])
posang_chi = np.array([])
chi2_best = np.array([])
while len(f0_chi) < number_chi:
progressbar_show(len(f0_chi), number_chi, prefix='Ellipse fit:')
chi2 = np.zeros(loop)
f0 = center_f + dcenter_f*(2*np.random.random(loop) - 1)
g0 = center_g + dcenter_g*(2*np.random.random(loop) - 1)
a = equatorial_radius + dequatorial_radius*(2*np.random.random(loop) - 1)
obla = oblateness + doblateness*(2*np.random.random(loop) - 1)
obla[obla < 0], obla[obla > 1] = 0, 1
phi_deg = position_angle + dposition_angle*(2*np.random.random(loop) - 1)
controle_f1 = Time.now()
for fi, gi, si in values:
b = a - a*obla
phi = phi_deg*(np.pi/180.0)
dfi = fi-f0
dgi = gi-g0
theta = np.arctan2(dgi, dfi)
ang = theta+phi
r_model = (a*b)/np.sqrt((a*np.sin(ang))**2 + (b*np.cos(ang))**2)
f_model = f0 + r_model*np.cos(theta)
g_model = g0 + r_model*np.sin(theta)
chi2 += ((fi - f_model)**2 + (gi - g_model)**2)/(si**2 + ellipse_error**2)
controle_f2 = Time.now()
if dchi_min is not None:
region = np.where(chi2 < chi2.min() + dchi_min)[0]
else:
region = np.arange(len(chi2))
chi2_best = np.append(chi2_best, chi2[region])
if verbose:
print('Elapsed time: {:.3f} seconds.'.format((controle_f2 - controle_f1).sec))
print(len(chi2[region]), len(chi2_best))
f0_chi = np.append(f0_chi, f0[region])
g0_chi = np.append(g0_chi, g0[region])
a_chi = np.append(a_chi, a[region])
obla_chi = np.append(obla_chi, obla[region])
posang_chi = np.append(posang_chi, phi_deg[region])
progressbar_show(number_chi, number_chi, prefix='Ellipse fit:')
chisquare = ChiSquare(chi2_best, len(values), center_f=f0_chi, center_g=g0_chi, equatorial_radius=a_chi,
oblateness=obla_chi, position_angle=posang_chi)
controle_f4 = Time.now()
if verbose:
print('Total elapsed time: {:.3f} seconds.'.format((controle_f4 - controle_f0).sec))
result_sigma = chisquare.get_nsigma(sigma=sigma_result)
a = result_sigma['equatorial_radius'][0]
f0 = result_sigma['center_f'][0]
g0 = result_sigma['center_g'][0]
obla = result_sigma['oblateness'][0]
phi_deg = result_sigma['position_angle'][0]
radial_dispersion = np.array([])
error_bar = np.array([])
position_angle_point = np.array([])
for fi, gi, si in values:
b = a - a*obla
phi = phi_deg*(np.pi/180.0)
dfi = fi-f0
dgi = gi-g0
r =
|
np.sqrt(dfi**2 + dgi**2)
|
numpy.sqrt
|
from SurfaceTopography import make_sphere
import ContactMechanics as Solid
from NuMPI.Optimization import ccg_with_restart, ccg_without_restart
import numpy as np
import scipy.optimize as optim
# import pytest
def test_using_primal_obj():
nx = ny = 128
sx, sy = 1., 1.
R = 10.
gtol = 1e-6
surface = make_sphere(R, (nx, ny), (sx, sy), kind="paraboloid")
Es = 50.
substrate = Solid.PeriodicFFTElasticHalfSpace((nx, ny), young=Es,
physical_sizes=(sx, sy))
system = Solid.Systems.NonSmoothContactSystem(substrate, surface)
offset = 0.005
lbounds = np.zeros((nx, ny))
bnds = system._reshape_bounds(lbounds, )
init_gap = np.zeros((nx, ny))
disp = np.zeros((nx, ny))
init_gap = disp - surface.heights() - offset
# ####################POLONSKY-KEER##############################
res = ccg_with_restart.constrained_conjugate_gradients(
system.primal_objective(offset, gradient=True),
system.primal_hessian_product, x0=init_gap, gtol=gtol)
assert res.success
polonsky_gap = res.x.reshape((nx, ny))
# ####################BUGNICOURT###################################
res = ccg_without_restart.constrained_conjugate_gradients(
system.primal_objective(offset, gradient=True),
system.primal_hessian_product, x0=init_gap, mean_val=None, gtol=gtol)
assert res.success
bugnicourt_gap = res.x.reshape((nx, ny))
# #####################LBFGSB#####################################
res = optim.minimize(system.primal_objective(offset, gradient=True),
init_gap,
method='L-BFGS-B', jac=True,
bounds=bnds,
options=dict(gtol=gtol, ftol=1e-20))
assert res.success
lbfgsb_gap = res.x.reshape((nx, ny))
np.testing.assert_allclose(polonsky_gap, bugnicourt_gap, atol=1e-3)
np.testing.assert_allclose(polonsky_gap, lbfgsb_gap, atol=1e-3)
np.testing.assert_allclose(lbfgsb_gap, bugnicourt_gap, atol=1e-3)
# ##########TEST MEAN VALUES#######################################
mean_val = np.mean(lbfgsb_gap)
# ####################POLONSKY-KEER##############################
res = ccg_with_restart.constrained_conjugate_gradients(
system.primal_objective(offset, gradient=True),
system.primal_hessian_product, init_gap, gtol=gtol,
mean_value=mean_val)
assert res.success
polonsky_gap_mean_cons = res.x.reshape((nx, ny))
# ####################BUGNICOURT###################################
    res = ccg_without_restart.constrained_conjugate_gradients(
        system.primal_objective(offset, gradient=True),
        system.primal_hessian_product,
        x0=init_gap,
        mean_val=mean_val,
        gtol=gtol)
assert res.success
bugnicourt_gap_mean_cons = res.x.reshape((nx, ny))
np.testing.assert_allclose(polonsky_gap_mean_cons, lbfgsb_gap, atol=1e-3)
np.testing.assert_allclose(bugnicourt_gap_mean_cons, lbfgsb_gap, atol=1e-3)
np.testing.assert_allclose(lbfgsb_gap, bugnicourt_gap, atol=1e-3)
np.testing.assert_allclose(lbfgsb_gap, bugnicourt_gap_mean_cons, atol=1e-3)
def test_using_dual_obj():
nx = ny = 128
sx, sy = 1., 1.
R = 10.
gtol = 1e-7
surface = make_sphere(R, (nx, ny), (sx, sy), kind="paraboloid")
Es = 50.
substrate = Solid.PeriodicFFTElasticHalfSpace((nx, ny), young=Es,
physical_sizes=(sx, sy))
system = Solid.Systems.NonSmoothContactSystem(substrate, surface)
offset = 0.005
lbounds = np.zeros((nx, ny))
bnds = system._reshape_bounds(lbounds, )
init_gap = np.zeros((nx, ny))
disp = init_gap + surface.heights() + offset
init_pressure = substrate.evaluate_force(disp)
# ####################LBFGSB########################################
res = optim.minimize(system.dual_objective(offset, gradient=True),
init_pressure,
method='L-BFGS-B', jac=True,
bounds=bnds,
options=dict(gtol=gtol, ftol=1e-20))
assert res.success
CA_lbfgsb = res.x.reshape((nx, ny)) > 0 # Contact area
CA_lbfgsb = CA_lbfgsb.sum() / (nx * ny)
lbfgsb_force = res.x.reshape((nx, ny))
fun = system.dual_objective(offset, gradient=True)
gap_lbfgsb = fun(res.x)[1]
gap_lbfgsb = gap_lbfgsb.reshape((nx, ny))
# ###################BUGNICOURT########################################
    res = ccg_without_restart.constrained_conjugate_gradients(
system.dual_objective(offset, gradient=True),
system.dual_hessian_product, init_pressure, mean_val=None, gtol=gtol)
assert res.success
bugnicourt_force = res.x.reshape((nx, ny))
CA_bugnicourt = res.x.reshape((nx, ny)) > 0 # Contact area
CA_bugnicourt = CA_bugnicourt.sum() / (nx * ny)
gap_bugnicourt = fun(res.x)[1]
gap_bugnicourt = gap_bugnicourt.reshape((nx, ny))
#
# # ##################POLONSKY-KEER#####################################
res = ccg_with_restart.constrained_conjugate_gradients(
system.dual_objective(offset, gradient=True),
system.dual_hessian_product, init_pressure, gtol=gtol)
assert res.success
polonsky_force = res.x.reshape((nx, ny))
CA_polonsky = res.x.reshape((nx, ny)) > 0 # Contact area
CA_polonsky = CA_polonsky.sum() / (nx * ny)
gap_polonsky = fun(res.x)[1]
gap_polonsky = gap_polonsky.reshape((nx, ny))
np.testing.assert_allclose(gap_lbfgsb, gap_polonsky, atol=1e-3)
np.testing.assert_allclose(gap_lbfgsb, gap_bugnicourt, atol=1e-3)
np.testing.assert_allclose(gap_bugnicourt, gap_polonsky, atol=1e-3)
np.testing.assert_allclose(CA_lbfgsb, CA_polonsky, atol=1e-3)
np.testing.assert_allclose(CA_lbfgsb, CA_bugnicourt, atol=1e-3)
np.testing.assert_allclose(bugnicourt_force, polonsky_force, atol=1e-3)
np.testing.assert_allclose(lbfgsb_force, polonsky_force, atol=1e-3)
np.testing.assert_allclose(lbfgsb_force, bugnicourt_force, atol=1e-3)
# ##########TEST MEAN VALUES#######################################
mean_val =
|
np.mean(lbfgsb_force)
|
numpy.mean
|
import numpy as np
import scipy as sp
from ffthompy.trigpol import Grid, get_Nodd, mean_index, fft_form_default
from ffthompy.matvecs import Matrix
from ffthompy.tensors import Tensor
import itertools
def scalar(N, Y, NyqNul=True, tensor=True, fft_form=fft_form_default):
"""
Assembly of discrete kernels in Fourier space for scalar elliptic problems.
Parameters
----------
N : numpy.ndarray
no. of discretization points
Y : numpy.ndarray
size of periodic unit cell
Returns
-------
G1l : numpy.ndarray
discrete kernel in Fourier space; provides projection
on curl-free fields with zero mean
G2l : numpy.ndarray
discrete kernel in Fourier space; provides projection
on divergence-free fields with zero mean
"""
if fft_form in ['r']:
fft_form_r=True
fft_form=0
else:
fft_form_r=False
d = np.size(N)
    N = np.array(N, dtype=int)  # np.int is a removed alias for the builtin int
if NyqNul:
Nred = get_Nodd(N)
else:
Nred = N
xi = Grid.get_xil(Nred, Y, fft_form=fft_form)
xi2 = []
for m in np.arange(d):
xi2.append(xi[m]**2)
G0l = np.zeros(np.hstack([d, d, Nred]))
G1l = np.zeros(np.hstack([d, d, Nred]))
G2l = np.zeros(np.hstack([d, d, Nred]))
num = np.zeros(np.hstack([d, d, Nred]))
denom = np.zeros(Nred)
ind_center = mean_index(Nred, fft_form=fft_form)
for m in np.arange(d): # diagonal components
        Nshape = np.ones(d, dtype=int)
Nshape[m] = Nred[m]
Nrep = np.copy(Nred)
Nrep[m] = 1
a = np.reshape(xi2[m], Nshape)
num[m][m] = np.tile(a, Nrep) # numerator
denom = denom + num[m][m]
G0l[m, m][ind_center] = 1
for m in np.arange(d): # upper diagonal components
for n in np.arange(m+1, d):
            NshapeM = np.ones(d, dtype=int)
NshapeM[m] = Nred[m]
NrepM = np.copy(Nred)
NrepM[m] = 1
            NshapeN = np.ones(d, dtype=int)
NshapeN[n] = Nred[n]
NrepN = np.copy(Nred)
NrepN[n] = 1
num[m][n] = np.tile(np.reshape(xi[m], NshapeM), NrepM) \
* np.tile(np.reshape(xi[n], NshapeN), NrepN)
# avoiding a division by zero
denom[ind_center] = 1
# calculation of projections
for m in np.arange(d):
for n in np.arange(m, d):
G1l[m][n] = num[m][n]/denom
G2l[m][n] = (m == n)*np.ones(Nred) - G1l[m][n]
G2l[m][n][ind_center] = 0
# symmetrization
for m in np.arange(1, d):
for n in np.arange(m):
G1l[m][n] = G1l[n][m]
G2l[m][n] = G2l[n][m]
if tensor:
G0l = Tensor(name='hG0', val=G0l, order=2, N=N, multype=21, Fourier=True, fft_form=fft_form)
G1l = Tensor(name='hG1', val=G1l, order=2, N=N, multype=21, Fourier=True, fft_form=fft_form)
G2l = Tensor(name='hG2', val=G2l, order=2, N=N, multype=21, Fourier=True, fft_form=fft_form)
else:
G0l = Matrix(name='hG0', val=G0l, Fourier=True)
G1l = Matrix(name='hG1', val=G1l, Fourier=True)
G2l = Matrix(name='hG2', val=G2l, Fourier=True)
if NyqNul:
G0l = G0l.enlarge(N)
G1l = G1l.enlarge(N)
G2l = G2l.enlarge(N)
if fft_form_r:
        for G in [G0l, G1l, G2l]:  # avoid shadowing the `tensor` argument
            G.set_fft_form(fft_form='r')
            G.val /= np.prod(G.N)
return G0l, G1l, G2l
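# Illustrative usage of scalar() (a sketch with made-up sizes):
#   G0, G1, G2 = scalar(N=[5, 5], Y=[1., 1.])
# G1 projects onto curl-free fields with zero mean, G2 onto divergence-free
# fields with zero mean, and G0 keeps only the mean (zero-frequency) component.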
def elasticity(N, Y, NyqNul=True, tensor=True, fft_form=fft_form_default):
"""
Projection matrix on a space of admissible strain fields
INPUT =
    N : ndarray of grid sizes (no. of discretization points per dimension)
d : dimension; d = 2
D : dimension in engineering notation; D = 3
Y : the size of periodic unit cell
OUTPUT =
G1h,G1s,G2h,G2s : projection matrices of size DxDxN
"""
if fft_form in ['r']:
fft_form_r=True
fft_form=0
else:
fft_form_r=False
xi = Grid.get_xil(N, Y, fft_form=fft_form)
    N = np.array(N, dtype=int)
d = N.size
D = int(d*(d+1)/2)
if NyqNul:
Nred = get_Nodd(N)
else:
Nred = N
xi2 = []
for ii in range(d):
xi2.append(xi[ii]**2)
num = np.zeros(np.hstack([d, d, Nred]))
norm2_xi = np.zeros(Nred)
for mm in np.arange(d): # diagonal components
        Nshape = np.ones(d, dtype=int)
Nshape[mm] = Nred[mm]
Nrep = np.copy(Nred)
Nrep[mm] = 1
num[mm][mm] = np.tile(np.reshape(xi2[mm], Nshape), Nrep) # numerator
norm2_xi += num[mm][mm]
norm4_xi = norm2_xi**2
ind_center = mean_index(Nred, fft_form=fft_form)
# avoid division by zero
norm2_xi[ind_center] = 1
norm4_xi[ind_center] = 1
for m in np.arange(d): # upper diagonal components
for n in np.arange(m+1, d):
            NshapeM = np.ones(d, dtype=int)
NshapeM[m] = Nred[m]
NrepM = np.copy(Nred)
NrepM[m] = 1
            NshapeN = np.ones(d, dtype=int)
NshapeN[n] = Nred[n]
NrepN = np.copy(Nred)
NrepN[n] = 1
num[m][n] = np.tile(np.reshape(xi[m], NshapeM), NrepM) \
* np.tile(np.reshape(xi[n], NshapeN), NrepN)
# G1h = np.zeros([D,D]).tolist()
G1h = np.zeros(np.hstack([D, D, Nred]))
G1s = np.zeros(np.hstack([D, D, Nred]))
IS0 = np.zeros(np.hstack([D, D, Nred]))
mean = np.zeros(np.hstack([D, D, Nred]))
Lamh = np.zeros(np.hstack([D, D, Nred]))
S = np.zeros(np.hstack([D, D, Nred]))
W = np.zeros(
|
np.hstack([D, D, Nred])
|
numpy.hstack
|
__author__ = '<NAME>'
import numpy as np
from numpy import linalg
class Lasso:
def __init__(self, lam=1., lr=1., tol=1e-5, logistic=False, weighted=False):
self.lam = lam
self.lr = lr
self.tol = tol
self.decay = 0.5
self.maxIter = 500
self.logistic = logistic
self.weighted = weighted
def setLambda(self, lam):
self.lam = lam
def setLogisticFlag(self, logistic):
self.logistic = logistic
def setWeightedFlag(self, weighted):
self.weighted = weighted
def setLearningRate(self, lr):
self.lr = lr
def setMaxIter(self, a):
self.maxIter = a
def setTol(self, t):
self.tol = t
def fit(self, X, y):
if self.logistic:
if self.weighted:
self.weights = np.ones_like(y)
c1 = len(np.where(y == 1)[0])
c0 = len(np.where(y == 0)[0])
w1 = float(c1) / (c1 + c0)
w0 = 1 - w1
self.weights[y == 1] = w0
self.weights[y == 0] = w1
X0 = np.ones(len(y)).reshape(len(y), 1)
X = np.hstack([X, X0])
shp = X.shape
self.beta = np.zeros([shp[1], 1])
resi_prev = np.inf
resi = self.cost(X, y)
step = 0
while
|
np.abs(resi_prev - resi)
|
numpy.abs
|
# stdlib
from typing import Any
from typing import Callable
from typing import List
from typing import Type
# third party
import numpy as np
import pytest
import syft as sy
import torch
from sympc.module import MAP_TORCH_TO_SYMPC
from sympc.session import Session
from sympc.session import SessionManager
from sympc.tensor import MPCTensor
class LinearNet(sy.Module):
def __init__(self, torch_ref):
super(LinearNet, self).__init__(torch_ref=torch_ref)
self.fc1 = self.torch_ref.nn.Linear(3, 10)
self.fc2 = self.torch_ref.nn.Linear(10, 1)
def forward(self, x):
x = self.fc1(x)
x = self.torch_ref.nn.functional.relu(x)
x = self.fc2(x)
x = self.torch_ref.nn.functional.relu(x)
return x
class ConvNet(sy.Module):
def __init__(self, torch_ref, kernel_size=5):
super(ConvNet, self).__init__(torch_ref=torch_ref)
self.conv1 = self.torch_ref.nn.Conv2d(
in_channels=1,
out_channels=5,
kernel_size=kernel_size,
stride=2,
padding=(2, 1),
)
self.fc1 = self.torch_ref.nn.Linear(910, 10)
self.fc2 = self.torch_ref.nn.Linear(10, 5)
def forward(self, x):
x = self.conv1(x)
x = self.torch_ref.nn.functional.relu(x)
x = x.view([-1, 910])
x = self.fc1(x)
x = self.fc2(x)
x = self.torch_ref.nn.functional.relu(x)
return x
def test_run_linear_model(get_clients: Callable[[int], List[Any]]):
model = LinearNet(torch)
clients = get_clients(2)
session = Session(parties=clients)
SessionManager.setup_mpc(session)
mpc_model = model.share(session=session)
x_secret = torch.randn(2, 3)
x_mpc = MPCTensor(secret=x_secret, session=session)
model.eval()
# For the moment we have only inference
expected = model(x_secret)
res_mpc = mpc_model(x_mpc)
assert isinstance(res_mpc, MPCTensor)
res = res_mpc.reconstruct()
expected = expected.detach().numpy()
assert np.allclose(res, expected, atol=1e-3)
@pytest.mark.order(1)
def test_run_conv_model(get_clients: Callable[[int], List[Any]]):
model = ConvNet(torch)
clients = get_clients(2)
session = Session(parties=clients)
SessionManager.setup_mpc(session)
mpc_model = model.share(session=session)
x_secret = torch.randn((1, 1, 28, 28))
x_mpc = MPCTensor(secret=x_secret, session=session)
model.eval()
# For the moment we have only inference
expected = model(x_secret)
res_mpc = mpc_model(x_mpc)
assert isinstance(res_mpc, MPCTensor)
res = res_mpc.reconstruct()
expected = expected.detach().numpy()
assert
|
np.allclose(res, expected, atol=1e-2)
|
numpy.allclose
|
import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.ndimage import binary_dilation
from scipy.stats import gaussian_kde
from utils import prediction_output_to_trajectories
import visualization
from matplotlib import pyplot as plt
import pdb
def compute_ade(predicted_trajs, gt_traj):
error = np.linalg.norm(predicted_trajs - gt_traj, axis=-1)
ade = np.mean(error, axis=-1)
return ade.flatten()
def compute_fde(predicted_trajs, gt_traj):
final_error = np.linalg.norm(predicted_trajs[:, :, -1] - gt_traj[-1], axis=-1)
return final_error.flatten()
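# Worked example for the two metrics above (illustrative): for predictions of
# shape (num_samples, num_modes, horizon, 2) and a ground truth of shape
# (horizon, 2), ADE averages the per-timestep Euclidean error while FDE keeps
# only the final timestep:
#   pred = np.zeros((1, 1, 3, 2))
#   gt = np.array([[1., 0.], [1., 0.], [1., 0.]])
#   compute_ade(pred, gt)  # -> array([1.])  (mean over the 3 steps)
#   compute_fde(pred, gt)  # -> array([1.])  (error at the last step only)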
def compute_kde_nll(predicted_trajs, gt_traj):
kde_ll = 0.
log_pdf_lower_bound = -20
num_timesteps = gt_traj.shape[0]
num_batches = predicted_trajs.shape[0]
for batch_num in range(num_batches):
for timestep in range(num_timesteps):
try:
kde = gaussian_kde(predicted_trajs[batch_num, :, timestep].T)
pdf = np.clip(kde.logpdf(gt_traj[timestep].T), a_min=log_pdf_lower_bound, a_max=None)[0]
kde_ll += pdf / (num_timesteps * num_batches)
except np.linalg.LinAlgError:
kde_ll = np.nan
return -kde_ll
def compute_obs_violations(predicted_trajs, map):
obs_map = map.data
interp_obs_map = RectBivariateSpline(range(obs_map.shape[1]),
range(obs_map.shape[0]),
binary_dilation(obs_map.T, iterations=4),
kx=1, ky=1)
old_shape = predicted_trajs.shape
pred_trajs_map = map.to_map_points(predicted_trajs.reshape((-1, 2)))
traj_obs_values = interp_obs_map(pred_trajs_map[:, 0], pred_trajs_map[:, 1], grid=False)
traj_obs_values = traj_obs_values.reshape((old_shape[0], old_shape[1]))
num_viol_trajs = np.sum(traj_obs_values.max(axis=1) > 0, dtype=float)
return num_viol_trajs
def compute_batch_statistics(prediction_output_dict,
dt,
max_hl,
ph,
node_type_enum,
kde=True,
obs=False,
map=None,
prune_ph_to_future=False,
best_of=False):
(prediction_dict,
_,
futures_dict) = prediction_output_to_trajectories(prediction_output_dict,
dt,
max_hl,
ph,
prune_ph_to_future=prune_ph_to_future)
# pdb.set_trace()
batch_error_dict = dict()
for node_type in node_type_enum:
batch_error_dict[node_type] = {'ade': list(), 'fde': list(), 'kde': list(), 'obs_viols': list()}
for t in prediction_dict.keys():
for node in prediction_dict[t].keys():
ade_errors = compute_ade(prediction_dict[t][node], futures_dict[t][node])
fde_errors = compute_fde(prediction_dict[t][node], futures_dict[t][node])
if kde:
kde_ll = compute_kde_nll(prediction_dict[t][node], futures_dict[t][node])
else:
kde_ll = 0
if obs:
obs_viols = compute_obs_violations(prediction_dict[t][node], map)
else:
obs_viols = 0
if best_of:
ade_errors = np.min(ade_errors, keepdims=True)
fde_errors = np.min(fde_errors, keepdims=True)
kde_ll = np.min(kde_ll)
batch_error_dict[node.type]['ade'].extend(list(ade_errors))
batch_error_dict[node.type]['fde'].extend(list(fde_errors))
batch_error_dict[node.type]['kde'].extend([kde_ll])
batch_error_dict[node.type]['obs_viols'].extend([obs_viols])
return batch_error_dict
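# The returned structure is a nested dict keyed by node type and then metric,
# e.g. batch_error_dict[node_type]['ade'] is a flat list with one entry per
# evaluated trajectory sample, ready to be aggregated by log_batch_errors below.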
def log_batch_errors(batch_errors_list, log_writer, namespace, curr_iter, bar_plot=[], box_plot=[]):
for node_type in batch_errors_list[0].keys():
for metric in batch_errors_list[0][node_type].keys():
metric_batch_error = []
for batch_errors in batch_errors_list:
metric_batch_error.extend(batch_errors[node_type][metric])
if len(metric_batch_error) > 0:
log_writer.add_histogram(f"{node_type.name}/{namespace}/{metric}", metric_batch_error, curr_iter)
log_writer.add_scalar(f"{node_type.name}/{namespace}/{metric}_mean", np.mean(metric_batch_error), curr_iter)
log_writer.add_scalar(f"{node_type.name}/{namespace}/{metric}_median",
|
np.median(metric_batch_error)
|
numpy.median
|
#!/usr/bin/env python3
"""
Module containing maths functions for working with ProteinNet datasets
"""
import numpy as np
from Bio.SeqUtils import seq1
__all__ = ["calc_dihedral", "calc_chi1", "softmin_sample", "softmax_sample"]
# the 4 atoms that define the CA-CB rotational angle
CHI1_ATOMS = dict(ALA=None, GLY=None,
ARG=['N', 'CA', 'CB', 'CG'], ASN=['N', 'CA', 'CB', 'CG'],
ASP=['N', 'CA', 'CB', 'CG'], CYS=['N', 'CA', 'CB', 'SG'],
GLN=['N', 'CA', 'CB', 'CG'], GLU=['N', 'CA', 'CB', 'CG'],
HIS=['N', 'CA', 'CB', 'CG'], ILE=['N', 'CA', 'CB', 'CG1'],
LEU=['N', 'CA', 'CB', 'CG'], LYS=['N', 'CA', 'CB', 'CG'],
MET=['N', 'CA', 'CB', 'CG'], PHE=['N', 'CA', 'CB', 'CG'],
PRO=['N', 'CA', 'CB', 'CG'], SER=['N', 'CA', 'CB', 'OG'],
THR=['N', 'CA', 'CB', 'OG1'], TRP=['N', 'CA', 'CB', 'CG'],
TYR=['N', 'CA', 'CB', 'CG'], VAL=['N', 'CA', 'CB', 'CG1'])
"""
Atoms required to calculate chiral dihedral angles from each amino acid
"""
def calc_dihedral(p):
#pylint: disable=invalid-name
"""
Calculate dihedral angles between 4 cartesian points.
Calculate dihedral angles between 4 cartesian points, meaning the angle between the plane defined by ABC and that defined by BCD from the four points passed (ABCD).
The points should be on the first axis of the numpy array (i.e. in rows with coordinates as columns as displayed).
    The Phi, Psi and Omega backbone angles in proteins are dihedral angles between the planes defined by different combinations of backbone atoms (Phi: C-N-CA-C, Psi: N-CA-C-N, Omega: CA-C-N-CA).
Parameters
----------
p : ndarray
Numpy array of points. Different points are on the first axis with coordinates along the second axis. The points are ordered ABCD.
Returns
-------
Float
The calculated dihedral angle.
References
----------
This code is adapted from user Praxeolitic's `StackOverflow answer <https://stackoverflow.com/a/34245697>`_.
"""
b0 = p[0] - p[1]
b1 = p[2] - p[1]
b2 = p[3] - p[2]
# normalize b1 so that it does not influence magnitude of vector
# rejections that come next
b1 /= np.linalg.norm(b1)
# vector rejections
# v = projection of b0 onto plane perpendicular to b1
# = b0 minus component that aligns with b1
# w = projection of b2 onto plane perpendicular to b1
# = b2 minus component that aligns with b1
v = b0 - np.dot(b0, b1)*b1
w = b2 - np.dot(b2, b1)*b1
# angle between v and w in a plane is the torsion angle
# v and w may not be normalized but that's fine since tan is y/x
x = np.dot(v, w)
y = np.dot(np.cross(b1, v), w)
return np.arctan2(y, x)
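# Quick sanity check (illustrative): four points whose ABC and BCD planes are
# perpendicular should give a dihedral of -pi/2.
#   p = np.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 0.], [0., 1., 1.]])
#   calc_dihedral(p)  # -> -1.5707963... (= -pi/2)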
def calc_chi1(mmcif, target_chain, seq):
"""
Calculate Chi1 angle for each residue in an mmCIF dictionary.
Calculate side chain rotamer chiral angle for each residue in an mmCIF dictionary.
This angle is the dihedral angle (see `calc_dihedral`) between the backbone and side chain (specific atoms used can be found in `CHI1_ATOMS`).
Parameters
----------
mmcif : dict
mmCIF dictionary, as generated by Biopythons Bio.PDB.MMCIF2Dict.
target_chain : str
Chain to calculate angles for.
seq : Iterable of str
Expected target chain sequence.
Returns
-------
ndarray
The calculated Chi1 angles for each position in the sequence.
References
----------
This function is adapted from `code by <NAME> <https://bitbucket.org/uperron/proteinnet_vep/src/master/continuous_angle_features.py>`_.
"""
chain = np.array(mmcif.get("_atom_site.label_asym_id"))
group = np.array(mmcif.get("_atom_site.group_PDB"))
residue_name = np.array(mmcif.get("_atom_site.label_comp_id"))
# Filter to correct atoms
ind = ((chain == target_chain) &\
(group == 'ATOM') &\
(np.isin(residue_name, list(CHI1_ATOMS.keys()))))
residue_name = residue_name[ind]
residue_number = np.array(mmcif.get("_atom_site.label_seq_id"))[ind].astype(int)
atom = np.array(mmcif.get("_atom_site.label_atom_id"))[ind]
coords = np.vstack([np.array(mmcif.get("_atom_site.Cartn_x"), dtype=float)[ind],
np.array(mmcif.get("_atom_site.Cartn_y"), dtype=float)[ind],
np.array(mmcif.get("_atom_site.Cartn_z"), dtype=float)[ind]]
).T
chi = []
mask = []
for i, pn_residue in enumerate(seq, 1):
# Not all residues have structure
if i not in residue_number:
chi.append(0)
mask.append(0)
continue
# Ala and Gly don't have Chi1
if pn_residue in ['A', 'G']:
chi.append(0)
mask.append(1)
continue
# Select correct atom coords
        # mmCIF dict automatically generates these with atoms correctly ordered for this operation
ind = residue_number == i
res_name = residue_name[ind][0]
res_atom = atom[ind]
res_coords = coords[ind][np.isin(res_atom, CHI1_ATOMS[res_name])]
if not seq1(res_name) == pn_residue:
raise ValueError(f'PDB seq does not match ProteinNet seq at position {i}')
# Don't have correct data
if not res_coords.shape == (4, 3):
chi.append(0)
mask.append(0)
continue
chi.append(calc_dihedral(res_coords))
mask.append(1)
return
|
np.array(chi, dtype=np.float)
|
numpy.array
|
import numpy as np
import os
import parmap
import scipy
def remove_duplicates(fname_templates, fname_weights,
save_dir, CONFIG, units_in=None, units_to_process=None,
multi_processing=False, n_processors=1):
# output folder
if not os.path.exists(save_dir):
os.makedirs(save_dir)
# load weights
weights = np.load(fname_weights)
# units_in is all units if none
if units_in is None:
units_in = np.arange(len(weights))
if units_to_process is None:
units_to_process = np.copy(units_in)
    # this allows units not in units_to_process to avoid being killed
units_to_not_process = np.arange(len(weights))
units_to_not_process = units_to_not_process[
~np.in1d(units_to_not_process, units_to_process)]
weights[units_to_not_process] = np.max(weights) + 10
# compute overlapping units
fname_units_to_compare = os.path.join(save_dir, 'units_to_compare.npy')
if os.path.exists(fname_units_to_compare):
        units_to_compare = np.load(fname_units_to_compare, allow_pickle=True)[()]  # saved as a pickled dict
else:
units_to_compare = compute_units_to_compare(
fname_templates, units_in, units_to_process, CONFIG)
# save it
np.save(fname_units_to_compare,
units_to_compare)
## partition templates
#save_dir_partition = os.path.join(save_dir, 'partition')
#if not os.path.exists(save_dir_partition):
# os.makedirs(save_dir_partition)
#fnames_in = partition_templates(fname_templates,
# units_to_compare,
# save_dir_partition)
#find duplicates
#save_dir_result = os.path.join(save_dir, 'result')
#if not os.path.exists(save_dir_result):
# os.makedirs(save_dir_result)
fname_duplicates = os.path.join(save_dir, 'duplicates.npy')
if os.path.exists(fname_duplicates):
        duplicates = np.load(fname_duplicates, allow_pickle=True)[()]  # saved as a pickled dict
else:
up_factor = 5
max_diff_threshold = CONFIG.clean_up.abs_max_diff
max_diff_rel_threshold = CONFIG.clean_up.rel_max_diff
# find duplicates
if multi_processing:
# divide keys
units = list(units_to_compare.keys())
n_units = len(units)
sub_units_to_compare = []
for j in range(n_processors):
sub_keys = units[slice(j, n_units, n_processors)]
sub_units_to_compare.append({k: units_to_compare[k] for k in sub_keys})
# run duplicate detector
duplicates_list = parmap.map(run_duplicate_detector,
sub_units_to_compare,
fname_templates,
up_factor,
max_diff_threshold,
max_diff_rel_threshold,
processes=n_processors)
duplicates = {}
for sub_list in duplicates_list:
for unit in sub_list:
duplicates[unit] = sub_list[unit]
else:
duplicates = run_duplicate_detector(
units_to_compare, fname_templates,
up_factor, max_diff_threshold,
max_diff_rel_threshold)
# save it
np.save(fname_duplicates, duplicates)
fname_units_killed = os.path.join(save_dir, 'units_killed.npy')
if os.path.exists(fname_units_killed):
units_killed = np.load(fname_units_killed)
else:
units_killed = kill_duplicates(duplicates, weights)
np.save(fname_units_killed, units_killed)
return np.setdiff1d(units_in, units_killed)
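# Illustrative call (a sketch; the file names and CONFIG object are assumptions):
#   kept_units = remove_duplicates('templates.npy', 'weights.npy',
#                                  'dedup', CONFIG)
# The returned array holds the unit ids from units_in that survive duplicate
# removal; when two templates match, the lower-weight unit is killed (which is
# why units outside units_to_process get their weights inflated above).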
def compute_units_to_compare(fname_templates, units_in,
units_to_process, CONFIG):
# threshold on ptp diff
diff_threshold = CONFIG.clean_up.abs_max_diff
diff_rel_threshold = CONFIG.clean_up.rel_max_diff
# load templates
templates = np.load(fname_templates)
#templates = templates[units_in]
#n_units = templates.shape[0]
# get ptps
max_val = templates.max(1)
min_val = templates.min(1)
ptps = (max_val - min_val).max(1)
ptps_higher = np.maximum(ptps[:, None], ptps[None])
units_to_compare = {}
idx_process = np.in1d(units_in, units_to_process)
units_in_process = units_in[idx_process]
units_in_dont_process = units_in[~idx_process]
for ii, j in enumerate(units_in_process):
if ii < len(units_in_process) - 1:
            # compare against the remaining (later) units in units_in_process:
max_val_diff = np.max(np.abs(max_val[units_in_process[ii+1:]] - max_val[[j]]), axis=1)
min_val_diff = np.max(np.abs(min_val[units_in_process[ii+1:]] - min_val[[j]]), axis=1)
abs_diff = np.maximum(max_val_diff, min_val_diff)
abs_diff_rel = abs_diff/ptps_higher[j, units_in_process[ii+1:]]
units_to_compare_1 = units_in_process[ii+1:][np.logical_or(
abs_diff < diff_threshold, abs_diff_rel < diff_rel_threshold)]
else:
            units_to_compare_1 = np.zeros(0, 'int32')  # empty: no later units left to compare
#
max_val_diff = np.max(np.abs(max_val[units_in_dont_process] - max_val[[j]]), axis=1)
min_val_diff = np.max(np.abs(min_val[units_in_dont_process] - min_val[[j]]), axis=1)
abs_diff = np.maximum(max_val_diff, min_val_diff)
abs_diff_rel = abs_diff/ptps_higher[j, units_in_dont_process]
units_to_compare_2 = units_in_dont_process[np.logical_or(
abs_diff < diff_threshold, abs_diff_rel < diff_rel_threshold)]
# nearby units
units_to_compare[j] = np.hstack((units_to_compare_1, units_to_compare_2))
return units_to_compare
def run_duplicate_detector(
units_to_compare, fname_templates,
up_factor=5, max_diff_threshold=1.2,
max_diff_rel_threshold=0.12):
templates = np.load(fname_templates)
duplicates = {}
for unit in units_to_compare:
# skip if already run
#fname_out = os.path.join(save_dir, 'unit_{}.npz'.format(unit))
#if os.path.exists(fname_out):
# continue
# candidates
candidates = units_to_compare[unit]
# skip if no candidates
if len(candidates) == 0:
continue
duplicates_ = abs_max_dist(
templates[unit],
templates[candidates],
up_factor,
max_diff_threshold,
max_diff_rel_threshold)
duplicates_ = candidates[duplicates_]
duplicates[unit] = duplicates_
## save duplicates
#np.savez(fname_out,
# unit=unit,
# duplicates=duplicates)
return duplicates
def abs_max_dist(template_unit, templates_candidates,
up_factor=5, max_diff_threshold=1.2,
max_diff_rel_threshold=0.12):
# find shift
mc = template_unit.ptp(0).argmax()
min_unit = template_unit[:, mc].argmin()
min_candidates = templates_candidates[:, :, mc].argmin(1)
shifts = min_candidates - min_unit
# find vis chans
vis_chan_unit = np.max(
|
np.abs(template_unit)
|
numpy.abs
|
# coding: utf-8
# In[ ]:
import scipy
from scipy import optimize
import scipy.misc
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
class ProportionalDiffusion(object):
def __init__(self, rt=None, accuracy=None, stimulus_strength=None, required_accuracy=None):
""" Initalizes
Parameters
-----------
rt: list-like (list or np.array or pd.Series; anything that pd.DataFrame understands)
Reaction times to fit
accuracy: list-like
Accuracies to fit
stimulus_strength: list-like
Stimulus strength corresponding to rt and accuracy arguments
required_accuracy: float
Accuracy you want to get the corresponding stimulus strength for
"""
if rt is not None and accuracy is not None and stimulus_strength is not None:
self.data = pd.DataFrame({'rt': rt, 'accuracy': accuracy, 'stimulus_strength': stimulus_strength})
# remove null responses
self.data = self.data.loc[pd.notnull(self.data['rt'])]
else:
self.data = None
self.kfit = None
self.t0fit = None
self.Aprimefit = None
self.required_accuracy = required_accuracy
def Pc(self, x, k, Aprime):
""" Proportion correct """
return 1/(1+np.exp(-2*Aprime*k*np.abs(x)))
def Tt(self, x, k, Aprime, t0):
""" Mean RT """
return (Aprime/(k*x)) * np.tanh(Aprime * k * x) + t0
def get_stimulus_strength_for_accuracy(self, accuracy, k=None, Aprime=None, lower=None, upper=None):
""" Solve eq. 1 for x to get stimulus strength for a given accuracy level """
if k is None and Aprime is None:
k = self.kfit
Aprime = self.Aprimefit
x = np.log((1-accuracy)/accuracy) / (-2*Aprime*k)
if lower is not None:
if x < lower:
x = lower
if upper is not None:
if x > upper:
x = upper
return x
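    # Derivation behind the inversion above (see Pc): setting
    # Pc = 1 / (1 + exp(-2*Aprime*k*x)) and solving for x gives
    # exp(-2*Aprime*k*x) = (1 - Pc) / Pc, hence x = log((1-Pc)/Pc) / (-2*Aprime*k).
    # Illustrative round trip with made-up parameters:
    #   m = ProportionalDiffusion()
    #   x = m.get_stimulus_strength_for_accuracy(0.8, k=10., Aprime=1.)
    #   m.Pc(x, k=10., Aprime=1.)  # -> 0.8 (up to floating point)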
def obj(self, pars):
""" Objective function for fitting """
k = pars[0]
Aprime = pars[1]
t0 = pars[2]
observed_means = self.data.groupby('stimulus_strength').mean()
unique_strengths = observed_means.index.values
predicted_sems = self.data.groupby('stimulus_strength').sem() # predicted SE of mean
predicted_means = self.Tt(unique_strengths, k, Aprime, t0)
observed_means = observed_means['rt'].values
predicted_sems = predicted_sems['rt'].values
# Eq 3
dev = np.divide(1, np.multiply(predicted_sems, np.sqrt(2*np.pi)))
        exponent = np.exp(-(predicted_means - observed_means)**2 / (2*predicted_sems**2))  # the division belongs inside the exp for a Gaussian density
likelihood_rts = np.multiply(dev, exponent)
# Eq 4
n_total = self.data.groupby('stimulus_strength')['accuracy'].size().values
n_correct = self.data.groupby('stimulus_strength')['accuracy'].sum().values
likelihood_accs = scipy.special.comb(n_total, n_correct) * self.Pc(x=unique_strengths, Aprime=Aprime, k=k)**n_correct * (1-self.Pc(x=unique_strengths, Aprime=Aprime, k=k))**(n_total-n_correct)
negLL = -np.sum(np.concatenate((np.log(likelihood_accs), np.log(likelihood_rts))))
return(negLL)
def fit(self):
""" Fits model to provided data """
print('Fitting...')
if self.data is None:
raise(IOError('No data provided to be fitted...'))
bounds = [(0, 200), (0, 100), (0, 2)] # k, Aprime, t0
opt = scipy.optimize.differential_evolution(func=self.obj,
bounds=bounds)
self.kfit = opt.x[0]
self.Aprimefit = opt.x[1]
self.t0fit = opt.x[2]
self.opt = opt
if not opt.success:
print('WARNING! Convergence was unsuccessful!')
print('done')
def simulate_diffusion(self, n=1000, a=None, v=None, t0=None, required_accuracy=None):
""" Function to simulate the Diffusion model without z, sz, sv, st.
Aka EZ diffusion / proportional drift rate diffusion model.
**MUCH** more efficient simulation methods are available in, e.g., HDDM; this is included here only to be able to
simulate within psychopy and not rely on external packages (HDDM is not part of the standalone version of PsychoPy)
a = boundary sep
v = drift
t0 = non-decision time
Note that this simulates with bounds parametrized as [0, a] instead of [-a, a] as the prop drift rate model uses.
Therefore, multiply Aprimefit by 2
"""
if a is None and v is None and t0 is None:
if required_accuracy is None:
if self.required_accuracy is None:
raise(IOError('I need to know what the required accuracy is to determine the drift rate...'))
else:
required_accuracy = self.required_accuracy
a = self.Aprimefit*2
v = self.kfit*self.get_stimulus_strength_for_accuracy(accuracy=required_accuracy, k=self.kfit, Aprime=self.Aprimefit)
t0 = self.t0fit
si=1 #scaling factor
M = np.pi*si**2/a**2 * (np.exp(a*v/(2*si**2))+np.exp(-a*v/(2*si**2))) * 1/ (v**2/(2*si**2)+np.pi**2*si**2 / (2*a**2))
lmb = v**2/(2*si**2) + np.pi**2*si**2/(2*a**2)
eps=1e-15
ou=[]
rej=0
while len(ou) < n:
w=
|
np.random.uniform(0, 1, 1)
|
numpy.random.uniform
|
'''
File: surfacemesh.py
License: MIT
Author: <NAME>
Created: 09/07/2016
Python Version: 3.5
========================
This module involves computations related to the boundary surface of the
tetrahedral mesh. The boundary surface is extracted as a triangle mesh, and
normals/curvatures are computed.
'''
from utils import normalize
from bidict import bidict
import math
import meshpy.tet
import numpy as np
class SurfaceMesh(object):
def __init__(self, tet_mesh):
"""Extract surface triangle mesh from volumetric tetrahedral mesh."""
# Vertices as array of coordinates.
# Faces as triplets of vertex indices.
self.vertices = []
self.faces = []
# Bidirectional volume-to-surface index maps.
self.vertex_map = bidict()
self.face_map = bidict()
# Loop through all faces.
for fi, face in enumerate(tet_mesh.faces):
# If face marker is 0, face is internal.
if (tet_mesh.face_markers[fi] == 0):
continue
# Otherwise, face is at boundary.
for vi in face:
# If vertex is currently not mapped
if vi not in self.vertex_map:
# Keep track of volume-to-surface index
self.vertex_map[vi] = len(self.vertices)
# Append to the surface vertex list
self.vertices.append(np.array(tet_mesh.points[vi]))
# Store surface vertex indices.
face = [ self.vertex_map[vi] for vi in face ]
self.face_map[fi] = len(self.faces)
self.faces.append(face)
# Normal vectors.
self.vertex_normals = [
|
np.zeros(3)
|
numpy.zeros
|
#!/usr/bin/env python
from LLC_Membranes.analysis.markov_state_dependent_dynamics import States
from LLC_Membranes.llclib import timeseries, fitting_functions, file_rw
from scipy.stats import levy_stable
import matplotlib.pyplot as plt
import numpy as np
res = 'URE'
directory = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11/%s/10wt" % res
ntraj = 24 # number of trajectories to simulate
nboot = 200 # number of bootstrap trials when getting errorbars on MSD
max_k = 15
equil = {'GCL': 2400, 'URE': 2000, 'MET': 7000, 'ACH': 8800} # frame number, not ns. (multiply ns by 2)
truncate = {'GCL': 0.8, 'URE': 0.8, 'MET': 1.0, 'ACH': 1.0}
traj = '5ms_nojump.xtc'
gro = 'em.gro'
first_frame = equil[res] # frame at which to start reading trajectory
# probably easier to just re-run these calculations in the appropriate directory.
# Doesn't matter which dwell/hop is used as they will be re-fit below
states = file_rw.load_object('%s/states.pl' % directory)
nstates = states.count_matrix.shape[0]
ntransitions = 0
for i in range(nstates):
for j in range(nstates):
if i != j:
ntransitions += states.count_matrix[i, j]
print(states.count_matrix)
print(states.count_matrix.sum())
print(ntransitions)
percent_transitions = 100 * (ntransitions / states.count_matrix.sum())
print('Percentage transitions: %.2f' % percent_transitions)
acf = states.transition_autocorrelation()
timeseries.plot_autocorrelation(acf, max_k=max_k, nboot=200, show=False, label='Empirical autocorrelation')
H = timeseries.hurst(acf, nboot=nboot, max_k=max_k).mean()
plt.plot(np.arange(max_k + 1), fitting_functions.hurst_autocovariance(
|
np.arange(max_k + 1)
|
numpy.arange
|
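# --- Hedged aside (assumption about fitting_functions.hurst_autocovariance) -
# For fractional Gaussian noise, the lag-k autocovariance is commonly written
# gamma(k) = 0.5*(|k+1|^(2H) - 2|k|^(2H) + |k-1|^(2H)); the call above
# plausibly evaluates something of this form at lags 0..max_k for the fitted H.
import numpy as np

def fgn_autocovariance(k, H):
    k = np.asarray(k, dtype=float)
    return 0.5 * (np.abs(k + 1) ** (2 * H)
                  - 2 * np.abs(k) ** (2 * H)
                  + np.abs(k - 1) ** (2 * H))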
import os
import numpy as np
import time
from shutil import copyfile, rmtree, move
from ase.io import read, write
from ase.neb import NEB
from pymofscreen.janitor import clean_files
def nebmake(initial_atoms,final_atoms,n_images):
"""
Make interpolated images for NEB
Args:
initial_atoms (ASE Atoms object): initial MOF structure
final_atoms (ASE Atoms object): final MOF structure
n_images (int): number of NEB images
"""
pwd = os.getcwd()
neb_path = os.path.join(pwd,'neb')
if os.path.exists(neb_path):
rmtree(neb_path)
os.makedirs(neb_path)
os.chdir(neb_path)
images = [initial_atoms]
for i in range(n_images):
images.append(initial_atoms.copy())
images.append(final_atoms)
neb = NEB(images)
neb.interpolate('idpp',mic=True)
for i, neb_image in enumerate(neb.images):
        ii = str(i).zfill(2)  # zero-pad image index to two digits
os.mkdir(os.path.join(neb_path,ii))
write(os.path.join(neb_path,ii,'POSCAR'),neb_image,format='vasp')
    # `ii` still holds the final image's zero-padded index from the loop above
    write_dummy_outcar(os.path.join(neb_path,'00','OUTCAR'),initial_atoms.get_potential_energy())
    write_dummy_outcar(os.path.join(neb_path,ii,'OUTCAR'),final_atoms.get_potential_energy())
def write_dummy_outcar(name,E):
"""
Construct a dummy OUTCAR for images 0 and n
Args:
name (string): name of file to write
E (float): energy to write out in dummy OUTCAR
"""
with open(name,'w') as wf:
wf.write(' energy without entropy= energy(sigma->0) = '+str(E)+'\n')
def neb2dim():
"""
Construct initial dimer job from NEB
"""
pwd = os.getcwd()
neb_path = os.path.join(pwd,'neb')
os.chdir(neb_path)
os.system('vfin.pl neb_fin')
time.sleep(5)
neb_fin_path = os.path.join(neb_path,'neb_fin')
os.chdir(neb_fin_path)
os.system('nebresults.pl')
copyfile(os.path.join(neb_fin_path,'exts.dat'),os.path.join(neb_path,'exts.dat'))
os.chdir(neb_path)
if os.stat(os.path.join(neb_path,'exts.dat')).st_size == 0:
raise ValueError('Error with exts.dat file')
os.system('neb2dim.pl')
old_dim_path = os.path.join(neb_path,'dim')
new_dim_path = os.path.join(pwd,'dim')
move(old_dim_path,new_dim_path)
os.chdir(new_dim_path)
mof = read('POSCAR')
max_F = 0
high_i = 0
if os.stat(os.path.join(neb_fin_path,'nebef.dat')).st_size == 0:
raise ValueError('nebef.dat not written')
with open(os.path.join(neb_fin_path,'nebef.dat'),'r') as rf:
for i, line in enumerate(rf):
line = line.strip()
max_F_temp = np.fromstring(line,dtype=float,sep=' ')[1]
if max_F_temp > max_F:
max_F = max_F_temp
high_i = i
try:
        str_high_i = str(high_i).zfill(2)  # zero-pad image index
move(os.path.join(neb_fin_path,str_high_i,'WAVECAR.gz'),os.path.join(new_dim_path,'WAVECAR.gz'))
os.system('gunzip WAVECAR.gz')
    except OSError:  # WAVECAR.gz may not exist for this image
pass
return mof
def dimmins(dis):
"""
Run dimmins.pl
Args:
dis (float): displacement vector
"""
os.system('vfin.pl dim_fin')
rmtree('dim_fin')
os.system('dimmins.pl POSCAR MODECAR '+str(dis))
def nebef(ediffg):
"""
Run nebef.pl
Args:
        ediffg (float): specified EDIFFG value in VASP
Returns:
neb_conv (bool): True if NEB converged within EDIFFG
"""
ediffg = abs(ediffg)
clean_files(['POSCAR'])
open('nebef.dat','w').close()
os.system('nebef.pl > nebef.dat')
max_F = 0
if os.stat('nebef.dat').st_size == 0:
raise ValueError('nebef.dat not written')
with open('nebef.dat','r') as rf:
for line in rf:
line = line.strip()
max_F_temp =
|
np.fromstring(line,dtype=float,sep=' ')
|
numpy.fromstring
|
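# --- Hedged aside (not from the original source) -----------------------------
# np.fromstring(line, dtype=float, sep=' ') parses one whitespace-separated
# row of numbers into a float array; the same parsing spelled out with plain
# string splitting (hypothetical nebef.dat-style row shown):
import numpy as np

line = "3  0.123  -45.6"                    # hypothetical nebef.dat row
values = np.array(line.split(), dtype=float)
max_F_temp = values[1]                      # second column, as used in nebef()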
import sys
import os
sys.path = [os.path.join(os.path.dirname(__file__), "..")] + sys.path
from physt import binnings
import numpy as np
import pytest
# TODO: Enable in Python < 3.3
class TestCalculateBinsNd(object):
def test_range(self):
data1 = np.linspace(0, 10, 100)
data = np.array([data1, data1]).T
bins1, bins2 = binnings.calculate_bins_nd(data, range=(4, 5))
assert bins1.first_edge == 4
assert bins1.last_edge == 5
assert bins2.first_edge == 4
assert bins2.last_edge == 5
def test_range_partly_none(self):
data1 = np.linspace(0, 10, 100)
data = np.array([data1, data1]).T
bins1, bins2 = binnings.calculate_bins_nd(data, range=((4, 5), None))
assert bins1.first_edge == 4
assert bins1.last_edge == 5
assert bins2.first_edge == 0
assert bins2.last_edge == 10
class TestNumpyBins(object):
def test_int_behaviour(self):
data = np.random.rand(100)
the_binning = binnings.numpy_binning(data, 10)
assert np.allclose(the_binning.numpy_bins, np.histogram(data, 10)[1])
the_binning = binnings.numpy_binning(data, 10, range=(0.2, 1.0))
assert np.allclose(the_binning.numpy_bins, np.histogram(data, 10, range=(0.2, 1.0))[1])
def test_bin_list_behaviour(self):
data = np.random.rand(100)
edges = [0.3, 4.5, 5.3, 8.6]
the_binning = binnings.numpy_binning(data, edges)
assert np.allclose(the_binning.numpy_bins, edges)
assert np.allclose(the_binning.numpy_bins, np.histogram(data, edges)[1])
class TestFixedWidthBins(object):
def test_without_alignment(self):
data = np.asarray([4.6, 7.3])
the_binning = binnings.fixed_width_binning(data, 1.0, align=False)
assert np.allclose(the_binning.numpy_bins, [4.6, 5.6, 6.6, 7.6])
def test_with_alignment(self):
data = np.asarray([4.6, 7.3])
the_binning = binnings.fixed_width_binning(data, 1.0, align=True)
assert np.allclose(the_binning.numpy_bins, [4, 5, 6, 7, 8])
def test_adapt_extension(self):
b = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b2 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=0, adaptive=True)
m1, m2 = b2.adapt(b)
assert tuple(m1) == ((0, 0), (1, 1))
assert m2 is None
assert np.array_equal(b2.numpy_bins, [0, 10, 20, 30])
assert b2.bin_count == 3
def test_adapt_left(self):
b = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b3 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=50, adaptive=True)
m1, m2 = b3.adapt(b)
assert tuple(m1) == ((0, 5), (1, 6))
assert tuple(m2) == ((0, 0), (1, 1), (2, 2))
assert b3.bin_count == 7
def test_adapt_right(self):
b = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b4 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=-30, adaptive=True)
m1, m2 = b4.adapt(b)
assert tuple(m1) == ((0, 0), (1, 1))
assert tuple(m2) == ((0, 3), (1, 4), (2, 5))
assert b4.bin_count == 6
def test_adapt_intersection1(self):
b = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b5 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=-10, adaptive=True)
m1, m2 = b5.adapt(b)
assert tuple(m1) == ((0, 0), (1, 1))
assert tuple(m2) == ((0, 1), (1, 2), (2, 3))
assert b5.bin_count == 4
def test_adapt_intersection2(self):
b = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b6 = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=10, adaptive=True)
m1, m2 = b6.adapt(b)
assert tuple(m1) == ((0, 1), (1, 2), (2, 3))
assert tuple(m2) == ((0, 0), (1, 1), (2, 2))
assert b6.bin_count == 4
def test_adapt_internal(self):
b1 = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
b2 = binnings.FixedWidthBinning(bin_width=10, bin_count=1, min=10, adaptive=True)
m1, m2 = b1.adapt(b2)
assert m1 is None
assert tuple(m2) == ((0, 1),)
def test_adapt_external(self):
b1 = binnings.FixedWidthBinning(bin_width=10, bin_count=1, min=10, adaptive=True)
b2 = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
m1, m2 = b1.adapt(b2)
assert tuple(m1) == ((0, 1),)
assert m2 is None
assert b1.bin_count == 3
def test_adapt_wrong(self):
b1 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=0, adaptive=True)
b2 = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=1, adaptive=True)
with pytest.raises(RuntimeError):
b1.adapt(b2)
with pytest.raises(RuntimeError):
b2.adapt(b1)
b3 = binnings.FixedWidthBinning(bin_width=5, bin_count=6, min=0, adaptive=True)
with pytest.raises(RuntimeError):
b1.adapt(b3)
with pytest.raises(RuntimeError):
b3.adapt(b1)
class TestHumanBins(object):
def test_exact(self):
data = np.random.rand(1000)
the_binning = binnings.human_binning(data, 10)
assert np.allclose(the_binning.numpy_bins, np.linspace(0, 1, 11))
the_binning = binnings.human_binning(data, 9)
assert np.allclose(the_binning.numpy_bins, np.linspace(0, 1, 11))
the_binning = binnings.human_binning(data, 11)
assert np.allclose(the_binning.numpy_bins, np.linspace(0, 1, 11))
class TestIntegerBins(object):
def test_dice(self):
data = np.asarray([1, 2, 3, 5, 6, 2, 4, 3, 2, 3, 4, 5, 6, 6, 1, 2, 5])
the_binning = binnings.integer_binning(data)
assert np.allclose(the_binning.numpy_bins, [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5])
the_binning = binnings.integer_binning(data, range=(1, 6))
assert np.allclose(the_binning.numpy_bins, [0.5, 1.5, 2.5, 3.5, 4.5, 5.5])
class TestExponentialBins(object):
def test_data(self):
data = np.asarray([0.1, 0.3, 0.4, 0.7, 1.0, 2.0, 2.5, 3.5, 10.0])
the_binning = binnings.exponential_binning(data, 2)
assert np.allclose(the_binning.numpy_bins, [0.1, 1.0, 10.0])
the_binning = binnings.exponential_binning(data, 2, range=(1.0, 100.0))
assert
|
np.allclose(the_binning.numpy_bins, [1.0, 10.0, 100.0])
|
numpy.allclose
|
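# --- Hedged usage sketch (mirrors test_adapt_extension above) ----------------
# adapt() grows an adaptive binning in place and returns bin-index maps that
# tell callers how to re-bin existing counts.
from physt import binnings

def _adapt_demo():
    b_big = binnings.FixedWidthBinning(bin_width=10, bin_count=3, min=0, adaptive=True)
    b_small = binnings.FixedWidthBinning(bin_width=10, bin_count=2, min=0, adaptive=True)
    m_small, m_big = b_small.adapt(b_big)
    # b_small now spans [0, 30); m_small == ((0, 0), (1, 1)) maps its old
    # bins to unchanged positions, and m_big is None because b_big's bins
    # already line up with the extended binning.
    return m_small, m_big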
"""This submodule contains the class definitions of the the main five classes
svgpathtools is built around: Path, Line, QuadraticBezier, CubicBezier, and
Arc."""
# External dependencies
from __future__ import division, absolute_import, print_function
import re
try:
from collections.abc import MutableSequence # noqa
except ImportError:
from collections import MutableSequence # noqa
from warnings import warn
from operator import itemgetter
import numpy as np
from itertools import tee
# these imports were originally from math and cmath, now are from numpy
# in order to encourage code that generalizes to vector inputs
from numpy import sqrt, cos, sin, tan, arccos as acos, arcsin as asin, \
degrees, radians, log, pi, ceil
from numpy import exp, sqrt as csqrt, angle as phase
try:
from scipy.integrate import quad
_quad_available = True
except ImportError:
_quad_available = False
# Internal dependencies
from .bezier import (bezier_intersections, bezier_bounding_box, split_bezier,
bezier_by_line_intersections, polynomial2bezier,
bezier2polynomial)
from .misctools import BugException
from .polytools import rational_limit, polyroots, polyroots01, imag, real
# To maintain forward/backward compatibility
try:
str = basestring
except NameError:
pass
COMMANDS = set('MmZzLlHhVvCcSsQqTtAa')
UPPERCASE = set('MZLHVCSQTA')
COMMAND_RE = re.compile("([MmZzLlHhVvCcSsQqTtAa])")
FLOAT_RE = re.compile(r"[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?")
# Default Parameters ##########################################################
# path segment .length() parameters for arc length computation
LENGTH_MIN_DEPTH = 5
LENGTH_ERROR = 1e-12
USE_SCIPY_QUAD = True # for elliptic Arc segment arc length computation
# path segment .ilength() parameters for inverse arc length computation
ILENGTH_MIN_DEPTH = 5
ILENGTH_ERROR = 1e-12
ILENGTH_S_TOL = 1e-12
ILENGTH_MAXITS = 10000
# compatibility/implementation related warnings and parameters
CLOSED_WARNING_ON = True
_NotImplemented4ArcException = \
Exception("This method has not yet been implemented for Arc objects.")
# _NotImplemented4QuadraticException = \
# Exception("This method has not yet been implemented for QuadraticBezier "
# "objects.")
_is_smooth_from_warning = \
("The name of this method is somewhat misleading (yet kept for "
"compatibility with scripts created using svg.path 2.0). This method "
"is meant only for d-string creation and should NOT be used to check "
"for kinks. To check a segment for differentiability, use the "
"joins_smoothly_with() method instead or the kinks() function (in "
"smoothing.py).\nTo turn off this warning, set "
"warning_on=False.")
# Miscellaneous ###############################################################
def bezier_segment(*bpoints):
if len(bpoints) == 2:
return Line(*bpoints)
elif len(bpoints) == 4:
return CubicBezier(*bpoints)
elif len(bpoints) == 3:
return QuadraticBezier(*bpoints)
else:
        raise ValueError("A Bezier segment needs 2, 3, or 4 control points.")
def is_bezier_segment(seg):
return (isinstance(seg, Line) or
isinstance(seg, QuadraticBezier) or
isinstance(seg, CubicBezier))
def is_path_segment(seg):
return is_bezier_segment(seg) or isinstance(seg, Arc)
def is_bezier_path(path):
"""Checks that all segments in path are a Line, QuadraticBezier, or
CubicBezier object."""
return isinstance(path, Path) and all(map(is_bezier_segment, path))
def concatpaths(list_of_paths):
"""Takes in a sequence of paths and returns their concatenations into a
single path (following the order of the input sequence)."""
return Path(*[seg for path in list_of_paths for seg in path])
def bbox2path(xmin, xmax, ymin, ymax):
"""Converts a bounding box 4-tuple to a Path object."""
b = Line(xmin + 1j*ymin, xmax + 1j*ymin)
t = Line(xmin + 1j*ymax, xmax + 1j*ymax)
r = Line(xmax + 1j*ymin, xmax + 1j*ymax)
l = Line(xmin + 1j*ymin, xmin + 1j*ymax)
return Path(b, r, t.reversed(), l.reversed())
def polyline(*points):
"""Converts a list of points to a Path composed of lines connecting those
points (i.e. a linear spline or polyline). See also `polygon()`."""
return Path(*[Line(points[i], points[i+1])
for i in range(len(points) - 1)])
def polygon(*points):
"""Converts a list of points to a Path composed of lines connecting those
points, then closes the path by connecting the last point to the first.
See also `polyline()`."""
return Path(*[Line(points[i], points[(i + 1) % len(points)])
for i in range(len(points))])
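# Hedged usage sketch (not part of the original module): polygon() closes
# the loop that polyline() leaves open.
def _example_polygon_usage():
    square = polygon(0 + 0j, 1 + 0j, 1 + 1j, 0 + 1j)
    assert square.isclosed()
    open_square = polyline(0 + 0j, 1 + 0j, 1 + 1j, 0 + 1j)
    assert not open_square.isclosed()
    return square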
# Conversion###################################################################
def bpoints2bezier(bpoints):
"""Converts a list of length 2, 3, or 4 to a CubicBezier, QuadraticBezier,
or Line object, respectively.
See also: poly2bez."""
order = len(bpoints) - 1
if order == 3:
return CubicBezier(*bpoints)
elif order == 2:
return QuadraticBezier(*bpoints)
elif order == 1:
return Line(*bpoints)
else:
        raise ValueError("A Bezier curve needs 2, 3, or 4 control points.")
def poly2bez(poly, return_bpoints=False):
"""Converts a cubic or lower order Polynomial object (or a sequence of
coefficients) to a CubicBezier, QuadraticBezier, or Line object as
appropriate. If return_bpoints=True then this will instead only return
the control points of the corresponding Bezier curve.
Note: The inverse operation is available as a method of CubicBezier,
QuadraticBezier and Line objects."""
bpoints = polynomial2bezier(poly)
if return_bpoints:
return bpoints
else:
return bpoints2bezier(bpoints)
def bez2poly(bez, numpy_ordering=True, return_poly1d=False):
"""Converts a Bezier object or tuple of Bezier control points to a tuple
of coefficients of the expanded polynomial.
return_poly1d : returns a numpy.poly1d object. This makes computations
of derivatives/anti-derivatives and many other operations quite quick.
numpy_ordering : By default (to accommodate numpy) the coefficients will
be output in reverse standard order.
Note: This function is redundant thanks to the .poly() method included
with all bezier segment classes."""
if is_bezier_segment(bez):
bez = bez.bpoints()
return bezier2polynomial(bez,
numpy_ordering=numpy_ordering,
return_poly1d=return_poly1d)
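# Hedged sanity check (not part of the original module): a Bezier segment
# and its polynomial form agree at the endpoints t = 0 and t = 1.
def _example_bez2poly_roundtrip():
    seg = CubicBezier(0j, 1 + 2j, 3 + 1j, 4 + 0j)
    p = bez2poly(seg, return_poly1d=True)
    assert np.isclose(p(0), seg.start) and np.isclose(p(1), seg.end)
    return p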
# Geometric####################################################################
def transform_segments_together(path, transformation):
"""Makes sure that, if joints were continuous, they're kept that way."""
transformed_segs = [transformation(seg) for seg in path]
    for i, (sa, sb) in enumerate(path.joints()):
if sa.end == sb.start:
transformed_segs[i].end = transformed_segs[(i + 1) % len(path)].start
return Path(*transformed_segs)
def rotate(curve, degs, origin=None):
"""Returns curve rotated by `degs` degrees (CCW) around the point `origin`
(a complex number). By default origin is either `curve.point(0.5)`, or in
the case that curve is an Arc object, `origin` defaults to `curve.center`.
"""
def transform(z):
return exp(1j*radians(degs))*(z - origin) + origin
if origin is None:
if isinstance(curve, Arc):
origin = curve.center
else:
origin = curve.point(0.5)
if isinstance(curve, Path):
transformation = lambda seg: rotate(seg, degs, origin=origin)
return transform_segments_together(curve, transformation)
elif is_bezier_segment(curve):
return bpoints2bezier([transform(bpt) for bpt in curve.bpoints()])
elif isinstance(curve, Arc):
new_start = transform(curve.start)
new_end = transform(curve.end)
new_rotation = curve.rotation + degs
return Arc(new_start, radius=curve.radius, rotation=new_rotation,
large_arc=curve.large_arc, sweep=curve.sweep, end=new_end)
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
def translate(curve, z0):
"""Shifts the curve by the complex quantity z such that
translate(curve, z0).point(t) = curve.point(t) + z0"""
if isinstance(curve, Path):
transformation = lambda seg: translate(seg, z0)
return transform_segments_together(curve, transformation)
elif is_bezier_segment(curve):
return bpoints2bezier([bpt + z0 for bpt in curve.bpoints()])
elif isinstance(curve, Arc):
new_start = curve.start + z0
new_end = curve.end + z0
return Arc(new_start, radius=curve.radius, rotation=curve.rotation,
large_arc=curve.large_arc, sweep=curve.sweep, end=new_end)
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
def scale(curve, sx, sy=None, origin=0j):
"""Scales `curve`, about `origin`, by diagonal matrix `[[sx,0],[0,sy]]`.
Notes:
------
* If `sy` is not specified, it is assumed to be equal to `sx` and
a scalar transformation of `curve` about `origin` will be returned.
I.e.
scale(curve, sx, origin).point(t) ==
((curve.point(t) - origin) * sx) + origin
"""
if sy is None:
isy = 1j*sx
else:
isy = 1j*sy
def _scale(z):
if sy is None:
return sx*z
return sx*z.real + isy*z.imag
def scale_bezier(bez):
p = [_scale(c) for c in bez2poly(bez)]
p[-1] += origin - _scale(origin)
return poly2bez(p)
if isinstance(curve, Path):
transformation = lambda seg: scale(seg, sx, sy, origin)
return transform_segments_together(curve, transformation)
elif is_bezier_segment(curve):
return scale_bezier(curve)
elif isinstance(curve, Arc):
if sy is None or sy == sx:
return Arc(start=sx*(curve.start - origin) + origin,
radius=sx*curve.radius,
rotation=curve.rotation,
large_arc=curve.large_arc,
sweep=curve.sweep,
end=sx*(curve.end - origin) + origin)
else:
raise Exception("\nFor `Arc` objects, only scale transforms "
"with sx==sy are implemented.\n")
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
def transform(curve, tf):
"""Transforms the curve by the homogeneous transformation matrix tf"""
def to_point(p):
return np.array([[p.real], [p.imag], [1.0]])
def to_vector(z):
return np.array([[z.real], [z.imag], [0.0]])
def to_complex(v):
return v.item(0) + 1j * v.item(1)
if isinstance(curve, Path):
transformation = lambda seg: transform(seg, tf)
return transform_segments_together(curve, transformation)
elif is_bezier_segment(curve):
return bpoints2bezier([to_complex(tf.dot(to_point(p)))
for p in curve.bpoints()])
elif isinstance(curve, Arc):
new_start = to_complex(tf.dot(to_point(curve.start)))
new_end = to_complex(tf.dot(to_point(curve.end)))
new_radius = to_complex(tf.dot(to_vector(curve.radius)))
if tf[0][0] * tf[1][1] >= 0.0:
new_sweep = curve.sweep
else:
new_sweep = not curve.sweep
return Arc(new_start, radius=new_radius, rotation=curve.rotation,
large_arc=curve.large_arc, sweep=new_sweep, end=new_end)
else:
raise TypeError("Input `curve` should be a Path, Line, "
"QuadraticBezier, CubicBezier, or Arc object.")
def bezier_unit_tangent(seg, t):
"""Returns the unit tangent of the segment at t.
Notes
-----
If you receive a RuntimeWarning, try the following:
>>> import numpy
>>> old_numpy_error_settings = numpy.seterr(invalid='raise')
This can be undone with:
>>> numpy.seterr(**old_numpy_error_settings)
"""
dseg = seg.derivative(t)
# Note: dseg might be numpy value, use np.seterr(invalid='raise')
try:
unit_tangent = dseg/abs(dseg)
except (ZeroDivisionError, FloatingPointError):
# This may be a removable singularity, if so we just need to compute
# the limit.
# Note: limit{{dseg / abs(dseg)} = sqrt(limit{dseg**2 / abs(dseg)**2})
dseg_poly = seg.poly().deriv()
dseg_abs_squared_poly = (real(dseg_poly) ** 2 +
imag(dseg_poly) ** 2)
try:
unit_tangent = csqrt(rational_limit(dseg_poly**2,
dseg_abs_squared_poly, t))
except ValueError:
bef = seg.poly().deriv()(t - 1e-4)
aft = seg.poly().deriv()(t + 1e-4)
mes = ("Unit tangent appears to not be well-defined at "
"t = {}, \n".format(t) +
"seg.poly().deriv()(t - 1e-4) = {}\n".format(bef) +
"seg.poly().deriv()(t + 1e-4) = {}".format(aft))
raise ValueError(mes)
return unit_tangent
def segment_curvature(self, t, use_inf=False):
"""returns the curvature of the segment at t.
Notes
-----
If you receive a RuntimeWarning, run command
>>> old = np.seterr(invalid='raise')
This can be undone with
>>> np.seterr(**old)
"""
dz = self.derivative(t)
ddz = self.derivative(t, n=2)
dx, dy = dz.real, dz.imag
ddx, ddy = ddz.real, ddz.imag
old_np_seterr = np.seterr(invalid='raise')
try:
kappa = abs(dx*ddy - dy*ddx)/sqrt(dx*dx + dy*dy)**3
except (ZeroDivisionError, FloatingPointError):
# tangent vector is zero at t, use polytools to find limit
p = self.poly()
dp = p.deriv()
ddp = dp.deriv()
dx, dy = real(dp), imag(dp)
ddx, ddy = real(ddp), imag(ddp)
f2 = (dx*ddy - dy*ddx)**2
g2 = (dx*dx + dy*dy)**3
lim2 = rational_limit(f2, g2, t)
if lim2 < 0: # impossible, must be numerical error
return 0
kappa = sqrt(lim2)
finally:
np.seterr(**old_np_seterr)
return kappa
def bezier_radialrange(seg, origin, return_all_global_extrema=False):
"""returns the tuples (d_min, t_min) and (d_max, t_max) which minimize and
maximize, respectively, the distance d = |self.point(t)-origin|.
return_all_global_extrema: Multiple such t_min or t_max values can exist.
By default, this will only return one. Set return_all_global_extrema=True
to return all such global extrema."""
def _radius(tau):
return abs(seg.point(tau) - origin)
shifted_seg_poly = seg.poly() - origin
r_squared = real(shifted_seg_poly) ** 2 + \
imag(shifted_seg_poly) ** 2
extremizers = [0, 1] + polyroots01(r_squared.deriv())
extrema = [(_radius(t), t) for t in extremizers]
if return_all_global_extrema:
raise NotImplementedError
else:
seg_global_min = min(extrema, key=itemgetter(0))
seg_global_max = max(extrema, key=itemgetter(0))
return seg_global_min, seg_global_max
def closest_point_in_path(pt, path):
"""returns (|path.seg.point(t)-pt|, t, seg_idx) where t and seg_idx
minimize the distance between pt and curve path[idx].point(t) for 0<=t<=1
and any seg_idx.
Warning: Multiple such global minima can exist. This will only return
one."""
return path.radialrange(pt)[0]
def farthest_point_in_path(pt, path):
"""returns (|path.seg.point(t)-pt|, t, seg_idx) where t and seg_idx
maximize the distance between pt and curve path[idx].point(t) for 0<=t<=1
and any seg_idx.
:rtype : object
:param pt:
:param path:
Warning: Multiple such global maxima can exist. This will only return
one."""
return path.radialrange(pt)[1]
def path_encloses_pt(pt, opt, path):
"""returns true if pt is a point enclosed by path (which must be a Path
object satisfying path.isclosed==True). opt is a point you know is
NOT enclosed by path."""
assert path.isclosed()
intersections = Path(Line(pt, opt)).intersect(path)
if len(intersections) % 2:
return True
else:
return False
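# Hedged usage sketch (not part of the original module): the parity rule
# above on a unit square; opt must be a point known to lie outside the path.
def _example_path_encloses_pt():
    square = polygon(0 + 0j, 1 + 0j, 1 + 1j, 0 + 1j)
    assert path_encloses_pt(0.5 + 0.5j, 5 + 0.5j, square)
    assert not path_encloses_pt(2 + 0.5j, 5 + 0.5j, square)
    return square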
def segment_length(curve, start, end, start_point, end_point,
error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0):
"""Recursively approximates the length by straight lines"""
mid = (start + end)/2
mid_point = curve.point(mid)
length = abs(end_point - start_point)
first_half = abs(mid_point - start_point)
second_half = abs(end_point - mid_point)
length2 = first_half + second_half
if (length2 - length > error) or (depth < min_depth):
# Calculate the length of each segment:
depth += 1
return (segment_length(curve, start, mid, start_point, mid_point,
error, min_depth, depth) +
segment_length(curve, mid, end, mid_point, end_point,
error, min_depth, depth))
# This is accurate enough.
return length2
def inv_arclength(curve, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
"""INPUT: curve should be a CubicBezier, Line, of Path of CubicBezier
and/or Line objects.
OUTPUT: Returns a float, t, such that the arc length of curve from 0 to
t is approximately s.
s_tol - exit when |s(t) - s| < s_tol where
s(t) = seg.length(0, t, error, min_depth) and seg is either curve or,
if curve is a Path object, then seg is a segment in curve.
error - used to compute lengths of cubics and arcs
min_depth - used to compute lengths of cubics and arcs
Note: This function is not designed to be efficient, but if it's slower
than you need, make sure you have scipy installed."""
curve_length = curve.length(error=error, min_depth=min_depth)
assert curve_length > 0
if not 0 <= s <= curve_length:
raise ValueError("s is not in interval [0, curve.length()].")
if s == 0:
return 0
if s == curve_length:
return 1
if isinstance(curve, Path):
seg_lengths = [seg.length(error=error, min_depth=min_depth)
for seg in curve]
lsum = 0
# Find which segment the point we search for is located on
for k, len_k in enumerate(seg_lengths):
if lsum <= s <= lsum + len_k:
t = inv_arclength(curve[k], s - lsum, s_tol=s_tol,
maxits=maxits, error=error,
min_depth=min_depth)
return curve.t2T(k, t)
lsum += len_k
return 1
elif isinstance(curve, Line):
return s / curve.length(error=error, min_depth=min_depth)
elif (isinstance(curve, QuadraticBezier) or
isinstance(curve, CubicBezier) or
isinstance(curve, Arc)):
t_upper = 1
t_lower = 0
iteration = 0
while iteration < maxits:
iteration += 1
t = (t_lower + t_upper)/2
s_t = curve.length(t1=t, error=error, min_depth=min_depth)
if abs(s_t - s) < s_tol:
return t
elif s_t < s: # t too small
t_lower = t
else: # s < s_t, t too big
t_upper = t
if t_upper == t_lower:
warn("t is as close as a float can be to the correct value, "
"but |s(t) - s| = {} > s_tol".format(abs(s_t-s)))
return t
raise Exception("Maximum iterations reached with s(t) - s = {}."
"".format(s_t - s))
else:
raise TypeError("First argument must be a Line, QuadraticBezier, "
"CubicBezier, Arc, or Path object.")
# Operations###################################################################
def crop_bezier(seg, t0, t1):
"""returns a cropped copy of this segment which starts at self.point(t0)
and ends at self.point(t1)."""
assert t0 < t1
if t0 == 0:
cropped_seg = seg.split(t1)[0]
elif t1 == 1:
cropped_seg = seg.split(t0)[1]
else:
pt1 = seg.point(t1)
# trim off the 0 <= t < t0 part
trimmed_seg = crop_bezier(seg, t0, 1)
# find the adjusted t1 (i.e. the t1 such that
# trimmed_seg.point(t1) ~= pt))and trim off the t1 < t <= 1 part
t1_adj = trimmed_seg.radialrange(pt1)[0][1]
cropped_seg = crop_bezier(trimmed_seg, 0, t1_adj)
return cropped_seg
# Main Classes ################################################################
class Line(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __repr__(self):
return 'Line(start=%s, end=%s)' % (self.start, self.end)
def __eq__(self, other):
if not isinstance(other, Line):
return NotImplemented
return self.start == other.start and self.end == other.end
def __ne__(self, other):
if not isinstance(other, Line):
return NotImplemented
return not self == other
def __getitem__(self, item):
return self.bpoints()[item]
def __len__(self):
return 2
def joins_smoothly_with(self, previous, wrt_parameterization=False):
"""Checks if this segment joins smoothly with previous segment. By
default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same position) as previous stopped
moving (at t=1). To check if the tangent magnitudes also match, set
wrt_parameterization=True."""
if wrt_parameterization:
return self.start == previous.end and np.isclose(
self.derivative(0), previous.derivative(1))
else:
return self.start == previous.end and np.isclose(
self.unit_tangent(0), previous.unit_tangent(1))
def point(self, t):
"""returns the coordinates of the Bezier curve evaluated at t."""
distance = self.end - self.start
return self.start + distance*t
def points(self, ts):
"""Faster than running Path.point many times."""
return self.poly(ts)
def length(self, t0=0, t1=1, error=None, min_depth=None):
"""returns the length of the line segment between t0 and t1."""
return abs(self.end - self.start)*(t1-t0)
def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
"""Returns a float, t, such that self.length(0, t) is approximately s.
See the inv_arclength() docstring for more details."""
return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
min_depth=min_depth)
def bpoints(self):
"""returns the Bezier control points of the segment."""
return self.start, self.end
def poly(self, return_coeffs=False):
"""returns the line as a Polynomial object."""
p = self.bpoints()
        coeffs = (p[1] - p[0], p[0])
if return_coeffs:
return coeffs
else:
return np.poly1d(coeffs)
def derivative(self, t=None, n=1):
"""returns the nth derivative of the segment at t."""
assert self.end != self.start
if n == 1:
return self.end - self.start
elif n > 1:
return 0
else:
raise ValueError("n should be a positive integer.")
def unit_tangent(self, t=None):
"""returns the unit tangent of the segment at t."""
assert self.end != self.start
dseg = self.end - self.start
return dseg/abs(dseg)
def normal(self, t=None):
"""returns the (right hand rule) unit normal vector to self at t."""
return -1j*self.unit_tangent(t)
def curvature(self, t):
"""returns the curvature of the line, which is always zero."""
return 0
# def icurvature(self, kappa):
# """returns a list of t-values such that 0 <= t<= 1 and
# seg.curvature(t) = kappa."""
# if kappa:
# raise ValueError("The .icurvature() method for Line elements will "
# "return an empty list if kappa is nonzero and "
# "will raise this exception when kappa is zero as "
# "this is true at every point on the line.")
# return []
def reversed(self):
"""returns a copy of the Line object with its orientation reversed."""
return Line(self.end, self.start)
def intersect(self, other_seg, tol=None):
"""Finds the intersections of two segments.
returns a list of tuples (t1, t2) such that
self.point(t1) == other_seg.point(t2).
Note: This will fail if the two segments coincide for more than a
finite collection of points.
tol is not used."""
if isinstance(other_seg, Line):
assert other_seg.end != other_seg.start and self.end != self.start
assert self != other_seg
# Solve the system [p1-p0, q1-q0]*[t1, t2]^T = q0 - p0
# where self == Line(p0, p1) and other_seg == Line(q0, q1)
a = (self.start.real, self.end.real)
b = (self.start.imag, self.end.imag)
c = (other_seg.start.real, other_seg.end.real)
d = (other_seg.start.imag, other_seg.end.imag)
denom = ((a[1] - a[0])*(d[0] - d[1]) -
(b[1] - b[0])*(c[0] - c[1]))
if np.isclose(denom, 0):
return []
t1 = (c[0]*(b[0] - d[1]) -
c[1]*(b[0] - d[0]) -
a[0]*(d[0] - d[1]))/denom
t2 = -(a[1]*(b[0] - d[0]) -
a[0]*(b[1] - d[0]) -
c[0]*(b[0] - b[1]))/denom
if 0 <= t1 <= 1 and 0 <= t2 <= 1:
return [(t1, t2)]
return []
elif isinstance(other_seg, QuadraticBezier):
t2t1s = bezier_by_line_intersections(other_seg, self)
return [(t1, t2) for t2, t1 in t2t1s]
elif isinstance(other_seg, CubicBezier):
t2t1s = bezier_by_line_intersections(other_seg, self)
return [(t1, t2) for t2, t1 in t2t1s]
elif isinstance(other_seg, Arc):
t2t1s = other_seg.intersect(self)
return [(t1, t2) for t2, t1 in t2t1s]
elif isinstance(other_seg, Path):
raise TypeError(
"other_seg must be a path segment, not a Path object, use "
"Path.intersect().")
else:
raise TypeError("other_seg must be a path segment.")
def bbox(self):
"""returns the bounding box for the segment in the form
(xmin, xmax, ymin, ymax)."""
xmin = min(self.start.real, self.end.real)
xmax = max(self.start.real, self.end.real)
ymin = min(self.start.imag, self.end.imag)
ymax = max(self.start.imag, self.end.imag)
return xmin, xmax, ymin, ymax
def point_to_t(self, point):
"""If the point lies on the Line, returns its `t` parameter.
If the point does not lie on the Line, returns None."""
# Single-precision floats have only 7 significant figures of
# resolution, so test that we're within 6 sig figs.
if np.isclose(point, self.start, rtol=0, atol=1e-6):
return 0.0
elif np.isclose(point, self.end, rtol=0, atol=1e-6):
return 1.0
# Finding the point "by hand" here is much faster than calling
# radialrange(), see the discussion on PR #40:
# https://github.com/mathandy/svgpathtools/pull/40#issuecomment-358134261
p = self.poly()
# p(t) = (p_1 * t) + p_0 = point
# t = (point - p_0) / p_1
t = (point - p[0]) / p[1]
if np.isclose(t.imag, 0) and (t.real >= 0.0) and (t.real <= 1.0):
return t.real
return None
def cropped(self, t0, t1):
"""returns a cropped copy of this segment which starts at
self.point(t0) and ends at self.point(t1)."""
return Line(self.point(t0), self.point(t1))
def split(self, t):
"""returns two segments, whose union is this segment and which join at
self.point(t)."""
pt = self.point(t)
return Line(self.start, pt), Line(pt, self.end)
def radialrange(self, origin, **kwargs):
"""compute points in self that are min and max distance to origin.
Args:
origin (complex): the point extremize distance to
Returns:
tuples (d_min, t_min) and (d_max, t_max) which minimize and
maximize, respectively, the distance d = |self.point(t)-origin|.
"""
x, y = origin.real, origin.imag
p0, p1 = self.start, self.end
x0, y0, x1, y1 = p0.real, p0.imag, p1.real, p1.imag
dx, dy = x1 - x0, y1 - y0
numerator, denominator = dx * (x - x0) + dy * (y - y0), dx * dx + dy * dy
t = numerator / denominator
if 0 < t < 1:
# get distance to origin at 0, 1, and t
d0, d1, dt = (
abs(p0 - origin),
abs(p1 - origin),
abs(self.point(t) - origin)
)
if d0 < d1:
return (dt, t), (d1, 1)
return (dt, t), (d0, 0)
else:
# get distance to origin at t = 0 and t = 1
d0, d1 = abs(p0 - origin), abs(p1 - origin)
if d0 < d1:
return (d0, 0), (d1, 1)
return (d1, 1), (d0, 0)
def rotated(self, degs, origin=None):
"""Returns a copy of self rotated by `degs` degrees (CCW) around the
point `origin` (a complex number). By default `origin` is either
`self.point(0.5)`, or in the case that self is an Arc object,
`origin` defaults to `self.center`."""
return rotate(self, degs, origin=origin)
def translated(self, z0):
"""Returns a copy of self shifted by the complex quantity `z0` such
that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
return translate(self, z0)
def scaled(self, sx, sy=None, origin=0j):
"""Scale transform. See `scale` function for further explanation."""
return scale(self, sx=sx, sy=sy, origin=origin)
class QuadraticBezier(object):
# For compatibility with old pickle files.
_length_info = {'length': None, 'bpoints': None}
def __init__(self, start, control, end):
self.start = start
self.end = end
self.control = control
# used to know if self._length needs to be updated
self._length_info = {'length': None, 'bpoints': None}
def __repr__(self):
return 'QuadraticBezier(start=%s, control=%s, end=%s)' % (
self.start, self.control, self.end)
def __eq__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return self.start == other.start and self.end == other.end \
and self.control == other.control
def __ne__(self, other):
if not isinstance(other, QuadraticBezier):
return NotImplemented
return not self == other
def __getitem__(self, item):
return self.bpoints()[item]
def __len__(self):
return 3
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, QuadraticBezier):
return (self.start == previous.end and
(self.control - self.start) == (
previous.end - previous.control))
else:
return self.control == self.start
def joins_smoothly_with(self, previous, wrt_parameterization=False,
error=0):
"""Checks if this segment joins smoothly with previous segment. By
default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same position) as previous stopped
moving (at t=1). To check if the tangent magnitudes also match, set
wrt_parameterization=True."""
if wrt_parameterization:
return self.start == previous.end and abs(
self.derivative(0) - previous.derivative(1)) <= error
else:
return self.start == previous.end and abs(
self.unit_tangent(0) - previous.unit_tangent(1)) <= error
def point(self, t):
"""returns the coordinates of the Bezier curve evaluated at t."""
tc = 1 - t
return tc*tc*self.start + 2*tc*t*self.control + t*t*self.end
def points(self, ts):
"""Faster than running Path.point many times."""
return self.poly(ts)
def length(self, t0=0, t1=1, error=None, min_depth=None):
if t0 == 1 and t1 == 0:
if self._length_info['bpoints'] == self.bpoints():
return self._length_info['length']
a = self.start - 2*self.control + self.end
b = 2*(self.control - self.start)
a_dot_b = a.real*b.real + a.imag*b.imag
if abs(a) < 1e-12:
s = abs(b)*(t1 - t0)
elif abs(a_dot_b + abs(a)*abs(b)) < 1e-12:
tstar = abs(b)/(2*abs(a))
if t1 < tstar:
return abs(a)*(t0**2 - t1**2) - abs(b)*(t0 - t1)
elif tstar < t0:
return abs(a)*(t1**2 - t0**2) - abs(b)*(t1 - t0)
else:
return abs(a)*(t1**2 + t0**2) - abs(b)*(t1 + t0) + \
abs(b)**2/(2*abs(a))
else:
c2 = 4*(a.real**2 + a.imag**2)
c1 = 4*a_dot_b
c0 = b.real**2 + b.imag**2
beta = c1/(2*c2)
gamma = c0/c2 - beta**2
dq1_mag = sqrt(c2*t1**2 + c1*t1 + c0)
dq0_mag = sqrt(c2*t0**2 + c1*t0 + c0)
logarand = (sqrt(c2)*(t1 + beta) + dq1_mag) / \
(sqrt(c2)*(t0 + beta) + dq0_mag)
s = (t1 + beta)*dq1_mag - (t0 + beta)*dq0_mag + \
gamma*sqrt(c2)*log(logarand)
s /= 2
if t0 == 1 and t1 == 0:
self._length_info['length'] = s
self._length_info['bpoints'] = self.bpoints()
return self._length_info['length']
else:
return s
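        # Hedged sanity check (illustrative, not part of the class): when
        # the control point is the chord midpoint, a = start - 2*control + end
        # vanishes, so the abs(a) < 1e-12 branch above applies and the length
        # reduces to that of a straight chord:
        #     q = QuadraticBezier(0j, 1 + 1j, 2 + 2j)
        #     assert np.isclose(q.length(), abs(2 + 2j))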
def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
"""Returns a float, t, such that self.length(0, t) is approximately s.
See the inv_arclength() docstring for more details."""
return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
min_depth=min_depth)
def bpoints(self):
"""returns the Bezier control points of the segment."""
return self.start, self.control, self.end
def poly(self, return_coeffs=False):
"""returns the quadratic as a Polynomial object."""
p = self.bpoints()
coeffs = (p[0] - 2*p[1] + p[2], 2*(p[1] - p[0]), p[0])
if return_coeffs:
return coeffs
else:
return np.poly1d(coeffs)
def derivative(self, t, n=1):
"""returns the nth derivative of the segment at t.
Note: Bezier curves can have points where their derivative vanishes.
If you are interested in the tangent direction, use the unit_tangent()
method instead."""
p = self.bpoints()
if n == 1:
return 2*((p[1] - p[0])*(1 - t) + (p[2] - p[1])*t)
elif n == 2:
return 2*(p[2] - 2*p[1] + p[0])
elif n > 2:
return 0
else:
raise ValueError("n should be a positive integer.")
def unit_tangent(self, t):
"""returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number). If the tangent
vector's magnitude is zero, this method will find the limit of
self.derivative(tau)/abs(self.derivative(tau)) as tau approaches t."""
return bezier_unit_tangent(self, t)
def normal(self, t):
"""returns the (right hand rule) unit normal vector to self at t."""
return -1j*self.unit_tangent(t)
def curvature(self, t):
"""returns the curvature of the segment at t."""
return segment_curvature(self, t)
# def icurvature(self, kappa):
# """returns a list of t-values such that 0 <= t<= 1 and
# seg.curvature(t) = kappa."""
# z = self.poly()
# x, y = real(z), imag(z)
# dx, dy = x.deriv(), y.deriv()
# ddx, ddy = dx.deriv(), dy.deriv()
#
# p = kappa**2*(dx**2 + dy**2)**3 - (dx*ddy - ddx*dy)**2
# return polyroots01(p)
def reversed(self):
"""returns a copy of the QuadraticBezier object with its orientation
reversed."""
new_quad = QuadraticBezier(self.end, self.control, self.start)
if self._length_info['length']:
new_quad._length_info = self._length_info
new_quad._length_info['bpoints'] = (
self.end, self.control, self.start)
return new_quad
def intersect(self, other_seg, tol=1e-12):
"""Finds the intersections of two segments.
returns a list of tuples (t1, t2) such that
self.point(t1) == other_seg.point(t2).
Note: This will fail if the two segments coincide for more than a
finite collection of points."""
if isinstance(other_seg, Line):
return bezier_by_line_intersections(self, other_seg)
elif isinstance(other_seg, QuadraticBezier):
assert self != other_seg
longer_length = max(self.length(), other_seg.length())
return bezier_intersections(self, other_seg,
longer_length=longer_length,
tol=tol, tol_deC=tol)
elif isinstance(other_seg, CubicBezier):
longer_length = max(self.length(), other_seg.length())
return bezier_intersections(self, other_seg,
longer_length=longer_length,
tol=tol, tol_deC=tol)
elif isinstance(other_seg, Arc):
t2t1s = other_seg.intersect(self)
return [(t1, t2) for t2, t1 in t2t1s]
elif isinstance(other_seg, Path):
raise TypeError(
"other_seg must be a path segment, not a Path object, use "
"Path.intersect().")
else:
raise TypeError("other_seg must be a path segment.")
def bbox(self):
"""returns the bounding box for the segment in the form
(xmin, xmax, ymin, ymax)."""
return bezier_bounding_box(self)
def split(self, t):
"""returns two segments, whose union is this segment and which join at
self.point(t)."""
bpoints1, bpoints2 = split_bezier(self.bpoints(), t)
return QuadraticBezier(*bpoints1), QuadraticBezier(*bpoints2)
def cropped(self, t0, t1):
"""returns a cropped copy of this segment which starts at
self.point(t0) and ends at self.point(t1)."""
return QuadraticBezier(*crop_bezier(self, t0, t1))
def radialrange(self, origin, return_all_global_extrema=False):
"""returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
and maximize, respectively, the distance d = |self.point(t)-origin|."""
return bezier_radialrange(self, origin,
return_all_global_extrema=return_all_global_extrema)
def rotated(self, degs, origin=None):
"""Returns a copy of self rotated by `degs` degrees (CCW) around the
point `origin` (a complex number). By default `origin` is either
`self.point(0.5)`, or in the case that self is an Arc object,
`origin` defaults to `self.center`."""
return rotate(self, degs, origin=origin)
def translated(self, z0):
"""Returns a copy of self shifted by the complex quantity `z0` such
that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
return translate(self, z0)
def scaled(self, sx, sy=None, origin=0j):
"""Scale transform. See `scale` function for further explanation."""
return scale(self, sx=sx, sy=sy, origin=origin)
class CubicBezier(object):
# For compatibility with old pickle files.
_length_info = {'length': None, 'bpoints': None, 'error': None,
'min_depth': None}
def __init__(self, start, control1, control2, end):
self.start = start
self.control1 = control1
self.control2 = control2
self.end = end
# used to know if self._length needs to be updated
self._length_info = {'length': None, 'bpoints': None, 'error': None,
'min_depth': None}
def __repr__(self):
return 'CubicBezier(start=%s, control1=%s, control2=%s, end=%s)' % (
self.start, self.control1, self.control2, self.end)
def __eq__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return self.start == other.start and self.end == other.end \
and self.control1 == other.control1 \
and self.control2 == other.control2
def __ne__(self, other):
if not isinstance(other, CubicBezier):
return NotImplemented
return not self == other
def __getitem__(self, item):
return self.bpoints()[item]
def __len__(self):
return 4
def is_smooth_from(self, previous, warning_on=True):
"""[Warning: The name of this method is somewhat misleading (yet kept
for compatibility with scripts created using svg.path 2.0). This
method is meant only for d string creation and should not be used to
check for kinks. To check a segment for differentiability, use the
joins_smoothly_with() method instead.]"""
if warning_on:
warn(_is_smooth_from_warning)
if isinstance(previous, CubicBezier):
return (self.start == previous.end and
(self.control1 - self.start) == (
previous.end - previous.control2))
else:
return self.control1 == self.start
def joins_smoothly_with(self, previous, wrt_parameterization=False):
"""Checks if this segment joins smoothly with previous segment. By
default, this only checks that this segment starts moving (at t=0) in
        the same direction (and from the same position) as previous stopped
moving (at t=1). To check if the tangent magnitudes also match, set
wrt_parameterization=True."""
if wrt_parameterization:
return self.start == previous.end and np.isclose(
self.derivative(0), previous.derivative(1))
else:
return self.start == previous.end and np.isclose(
self.unit_tangent(0), previous.unit_tangent(1))
def point(self, t):
"""Evaluate the cubic Bezier curve at t using Horner's rule."""
# algebraically equivalent to
# P0*(1-t)**3 + 3*P1*t*(1-t)**2 + 3*P2*(1-t)*t**2 + P3*t**3
# for (P0, P1, P2, P3) = self.bpoints()
return self.start + t*(
3*(self.control1 - self.start) + t*(
3*(self.start + self.control2) - 6*self.control1 + t*(
-self.start + 3*(self.control1 - self.control2) + self.end
)))
def points(self, ts):
"""Faster than running Path.point many times."""
return self.poly(ts)
def length(self, t0=0, t1=1, error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH):
"""Calculate the length of the path up to a certain position"""
if t0 == 0 and t1 == 1:
if self._length_info['bpoints'] == self.bpoints() \
and self._length_info['error'] >= error \
and self._length_info['min_depth'] >= min_depth:
return self._length_info['length']
# using scipy.integrate.quad is quick
if _quad_available:
s = quad(lambda tau: abs(self.derivative(tau)), t0, t1,
epsabs=error, limit=1000)[0]
else:
s = segment_length(self, t0, t1, self.point(t0), self.point(t1),
error, min_depth, 0)
if t0 == 0 and t1 == 1:
self._length_info['length'] = s
self._length_info['bpoints'] = self.bpoints()
self._length_info['error'] = error
self._length_info['min_depth'] = min_depth
return self._length_info['length']
else:
return s
def ilength(self, s, s_tol=ILENGTH_S_TOL, maxits=ILENGTH_MAXITS,
error=ILENGTH_ERROR, min_depth=ILENGTH_MIN_DEPTH):
"""Returns a float, t, such that self.length(0, t) is approximately s.
See the inv_arclength() docstring for more details."""
return inv_arclength(self, s, s_tol=s_tol, maxits=maxits, error=error,
min_depth=min_depth)
def bpoints(self):
"""returns the Bezier control points of the segment."""
return self.start, self.control1, self.control2, self.end
def poly(self, return_coeffs=False):
"""Returns a the cubic as a Polynomial object."""
p = self.bpoints()
coeffs = (-p[0] + 3*(p[1] - p[2]) + p[3],
3*(p[0] - 2*p[1] + p[2]),
3*(-p[0] + p[1]),
p[0])
if return_coeffs:
return coeffs
else:
return np.poly1d(coeffs)
def derivative(self, t, n=1):
"""returns the nth derivative of the segment at t.
Note: Bezier curves can have points where their derivative vanishes.
If you are interested in the tangent direction, use the unit_tangent()
method instead."""
p = self.bpoints()
if n == 1:
return 3*(p[1] - p[0])*(1 - t)**2 + 6*(p[2] - p[1])*(1 - t)*t + 3*(
p[3] - p[2])*t**2
elif n == 2:
return 6*(
(1 - t)*(p[2] - 2*p[1] + p[0]) + t*(p[3] - 2*p[2] + p[1]))
elif n == 3:
return 6*(p[3] - 3*(p[2] - p[1]) - p[0])
elif n > 3:
return 0
else:
raise ValueError("n should be a positive integer.")
def unit_tangent(self, t):
"""returns the unit tangent vector of the segment at t (centered at
the origin and expressed as a complex number). If the tangent
vector's magnitude is zero, this method will find the limit of
self.derivative(tau)/abs(self.derivative(tau)) as tau approaches t."""
return bezier_unit_tangent(self, t)
def normal(self, t):
"""returns the (right hand rule) unit normal vector to self at t."""
return -1j * self.unit_tangent(t)
def curvature(self, t):
"""returns the curvature of the segment at t."""
return segment_curvature(self, t)
# def icurvature(self, kappa):
# """returns a list of t-values such that 0 <= t<= 1 and
# seg.curvature(t) = kappa."""
# z = self.poly()
# x, y = real(z), imag(z)
# dx, dy = x.deriv(), y.deriv()
# ddx, ddy = dx.deriv(), dy.deriv()
#
# p = kappa**2*(dx**2 + dy**2)**3 - (dx*ddy - ddx*dy)**2
# return polyroots01(p)
def reversed(self):
"""returns a copy of the CubicBezier object with its orientation
reversed."""
new_cub = CubicBezier(self.end, self.control2, self.control1,
self.start)
if self._length_info['length']:
new_cub._length_info = self._length_info
new_cub._length_info['bpoints'] = (
self.end, self.control2, self.control1, self.start)
return new_cub
def intersect(self, other_seg, tol=1e-12):
"""Finds the intersections of two segments.
returns a list of tuples (t1, t2) such that
self.point(t1) == other_seg.point(t2).
Note: This will fail if the two segments coincide for more than a
finite collection of points."""
if isinstance(other_seg, Line):
return bezier_by_line_intersections(self, other_seg)
elif (isinstance(other_seg, QuadraticBezier) or
isinstance(other_seg, CubicBezier)):
assert self != other_seg
longer_length = max(self.length(), other_seg.length())
return bezier_intersections(self, other_seg,
longer_length=longer_length,
tol=tol, tol_deC=tol)
elif isinstance(other_seg, Arc):
t2t1s = other_seg.intersect(self)
return [(t1, t2) for t2, t1 in t2t1s]
elif isinstance(other_seg, Path):
raise TypeError(
"other_seg must be a path segment, not a Path object, use "
"Path.intersect().")
else:
raise TypeError("other_seg must be a path segment.")
def bbox(self):
"""returns the bounding box for the segment in the form
(xmin, xmax, ymin, ymax)."""
return bezier_bounding_box(self)
def split(self, t):
"""returns two segments, whose union is this segment and which join at
self.point(t)."""
bpoints1, bpoints2 = split_bezier(self.bpoints(), t)
return CubicBezier(*bpoints1), CubicBezier(*bpoints2)
def cropped(self, t0, t1):
"""returns a cropped copy of this segment which starts at
self.point(t0) and ends at self.point(t1)."""
return CubicBezier(*crop_bezier(self, t0, t1))
def radialrange(self, origin, return_all_global_extrema=False):
"""returns the tuples (d_min, t_min) and (d_max, t_max) which minimize
and maximize, respectively, the distance d = |self.point(t)-origin|."""
return bezier_radialrange(self, origin,
return_all_global_extrema=return_all_global_extrema)
def rotated(self, degs, origin=None):
"""Returns a copy of self rotated by `degs` degrees (CCW) around the
point `origin` (a complex number). By default `origin` is either
`self.point(0.5)`, or in the case that self is an Arc object,
`origin` defaults to `self.center`."""
return rotate(self, degs, origin=origin)
def translated(self, z0):
"""Returns a copy of self shifted by the complex quantity `z0` such
that self.translated(z0).point(t) = self.point(t) + z0 for any t."""
return translate(self, z0)
def scaled(self, sx, sy=None, origin=0j):
"""Scale transform. See `scale` function for further explanation."""
return scale(self, sx=sx, sy=sy, origin=origin)
class Arc(object):
def __init__(self, start, radius, rotation, large_arc, sweep, end,
autoscale_radius=True):
"""
This should be thought of as a part of an ellipse connecting two
points on that ellipse, start and end.
Parameters
----------
start : complex
The start point of the curve. Note: `start` and `end` cannot be the
same. To make a full ellipse or circle, use two `Arc` objects.
radius : complex
            rx + 1j*ry, where rx and ry are the radii of the ellipse (also
            known as its semi-major and semi-minor axes, or vice-versa if
            rx < ry).
Note: If rx = 0 or ry = 0 then this arc is treated as a
straight line segment joining the endpoints.
Note: If rx or ry has a negative sign, the sign is dropped; the
absolute value is used instead.
Note: If no such ellipse exists, the radius will be scaled so
that one does (unless autoscale_radius is set to False).
rotation : float
This is the CCW angle (in degrees) from the positive x-axis of the
current coordinate system to the x-axis of the ellipse.
large_arc : bool
Given two points on an ellipse, there are two elliptical arcs
connecting those points, the first going the short way around the
ellipse, and the second going the long way around the ellipse. If
`large_arc == False`, the shorter elliptical arc will be used. If
            `large_arc == True`, then the longer elliptical arc will be used.
In other words, `large_arc` should be 0 for arcs spanning less than
or equal to 180 degrees and 1 for arcs spanning greater than 180
degrees.
sweep : bool
For any acceptable parameters `start`, `end`, `rotation`, and
`radius`, there are two ellipses with the given major and minor
            axes (radii) which connect `start` and `end`. One connects
            them in a CCW fashion and the other in a CW fashion. If
            `sweep == True`, the CCW ellipse will be used. If
`sweep == False`, the CW ellipse will be used. See note on curve
orientation below.
end : complex
The end point of the curve. Note: `start` and `end` cannot be the
same. To make a full ellipse or circle, use two `Arc` objects.
autoscale_radius : bool
If `autoscale_radius == True`, then will also scale `self.radius`
in the case that no ellipse exists with the input parameters
(see inline comments for further explanation).
Derived Parameters/Attributes
-----------------------------
self.theta : float
This is the phase (in degrees) of self.u1transform(self.start).
It is $\theta_1$ in the official documentation and ranges from
-180 to 180.
self.delta : float
This is the angular distance (in degrees) between the start and
end of the arc after the arc has been sent to the unit circle
through self.u1transform().
It is $\Delta\theta$ in the official documentation and ranges from
-360 to 360; being positive when the arc travels CCW and negative
otherwise (i.e. is positive/negative when sweep == True/False).
self.center : complex
This is the center of the arc's ellipse.
self.phi : float
The arc's rotation in radians, i.e. `radians(self.rotation)`.
self.rot_matrix : complex
Equal to `exp(1j * self.phi)` which is also equal to
`cos(self.phi) + 1j*sin(self.phi)`.
Note on curve orientation (CW vs CCW)
-------------------------------------
The notions of clockwise (CW) and counter-clockwise (CCW) are reversed
in some sense when viewing SVGs (as the y coordinate starts at the top
of the image and increases towards the bottom).
"""
assert start != end
assert radius.real != 0 and radius.imag != 0
self.start = start
self.radius = abs(radius.real) + 1j*abs(radius.imag)
self.rotation = rotation
self.large_arc = bool(large_arc)
self.sweep = bool(sweep)
self.end = end
self.autoscale_radius = autoscale_radius
self.segment_length_hash = None
self.segment_length = None
# Convenience parameters
self.phi = radians(self.rotation)
self.rot_matrix = exp(1j*self.phi)
# Derive derived parameters
self._parameterize()
def __hash__(self):
return hash((self.start, self.radius, self.rotation, self.large_arc, self.sweep, self.end))
def __repr__(self):
params = (self.start, self.radius, self.rotation,
self.large_arc, self.sweep, self.end)
return ("Arc(start={}, radius={}, rotation={}, "
"large_arc={}, sweep={}, end={})".format(*params))
def __eq__(self, other):
if not isinstance(other, Arc):
return NotImplemented
return self.start == other.start and self.end == other.end \
and self.radius == other.radius \
and self.rotation == other.rotation \
and self.large_arc == other.large_arc and self.sweep == other.sweep
def __ne__(self, other):
if not isinstance(other, Arc):
return NotImplemented
return not self == other
def _parameterize(self):
# See http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
# my notation roughly follows theirs
rx = self.radius.real
ry = self.radius.imag
rx_sqd = rx*rx
ry_sqd = ry*ry
        # Transform z -> z' = x' + 1j*y'
        #     = self.rot_matrix**(-1) * (z - (end + start)/2)
        # This translates the ellipse so that the midpoint between
        # self.end and self.start lies at the origin, and rotates the
        # ellipse so that its axes align with the xy-coordinate axes.
        # Note: this sends self.start to zp1 and self.end to -zp1.
zp1 = (1/self.rot_matrix)*(self.start - self.end)/2
x1p, y1p = zp1.real, zp1.imag
x1p_sqd = x1p*x1p
y1p_sqd = y1p*y1p
# Correct out of range radii
# Note: an ellipse going through start and end with radius and phi
# exists if and only if radius_check is true
radius_check = (x1p_sqd/rx_sqd) + (y1p_sqd/ry_sqd)
if radius_check > 1:
if self.autoscale_radius:
rx *= sqrt(radius_check)
ry *= sqrt(radius_check)
self.radius = rx + 1j*ry
rx_sqd = rx*rx
ry_sqd = ry*ry
else:
raise ValueError("No such elliptic arc exists.")
# Compute c'=(c_x', c_y'), the center of the ellipse in (x', y') coords
        # Noting that, in our new coord system, (x_2', y_2') = (-x_1', -y_1')
        # and our ellipse is cut out of the plane by the algebraic equation
# (x'-c_x')**2 / r_x**2 + (y'-c_y')**2 / r_y**2 = 1,
# we can find c' by solving the system of two quadratics given by
        # plugging our transformed endpoints (x_1', y_1') and (x_2', y_2') into it.
tmp = rx_sqd*y1p_sqd + ry_sqd*x1p_sqd
radicand = (rx_sqd*ry_sqd - tmp) / tmp
radical = 0 if
|
np.isclose(radicand, 0)
|
numpy.isclose
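A minimal sketch of how this completion slots into `_parameterize`: the radicand can dip just below zero from floating-point round-off, so values indistinguishable from zero are clamped before taking the square root. The `else` branch and the stand-in numbers are assumptions, not part of the row above.

import numpy as np
from math import sqrt

# Hypothetical stand-ins for the quantities computed in the prompt.
rx_sqd, ry_sqd = 4.0, 1.0
x1p_sqd, y1p_sqd = 1.0, 0.25

tmp = rx_sqd*y1p_sqd + ry_sqd*x1p_sqd
radicand = (rx_sqd*ry_sqd - tmp) / tmp
# Clamp near-zero radicands so sqrt() never sees a tiny negative value.
radical = 0 if np.isclose(radicand, 0) else sqrt(radicand)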
|
from stable_baselines import DQN
# from stable_baselines.common.evaluation import evaluate_policy
import gym
from gym import spaces
import numpy as np
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.sac.policies import MlpPolicy
from stable_baselines import PPO2, SAC
import hopper_rep
import os
import gym
import os
import numpy as np
import matplotlib.pyplot as plt
from stable_baselines.common import set_global_seeds
from stable_baselines.bench import Monitor
from stable_baselines.results_plotter import load_results, ts2xy
import json
seed = 500
set_global_seeds(seed)
class RandomWalkEnv(gym.Env):
    def __init__(self, total_states=100):
self.past_actions = []
# print("Total states in this environment is: {}".format(total_states))
self.metadata={'render.modes': ['human']}
self.states = np.arange(total_states)
self.total_states = total_states
self.reset()
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(low=np.array([-1.0]),high=np.array([1.0]))
self.cum_rewards = 0
self.observation_dim=1
self.action_dim = 2
def get_int_to_state(self,state):
normalized_state = state/float(self.total_states)-1
return normalized_state
# def get_q(self):
# for state in range(self.total_states):
# normalized_state = state/float(self.total_states)-1
# normalized_state=np.array(normalized_state).reshape(1,1)
# print("State: {}, Action prob:{}",state,)
def reset(self):
# self.task = self.tasks[np.random.choice(len(self.tasks))]
# print("New task is {}".format(self.task))
# print("reset called")
self.state = 0
self.cum_rewards = 0
normalized_state = self.state/float(self.total_states)-1
return np.array([normalized_state]).reshape(1,)
def step(self,action):
# print("Env state:{}",self.state)
reward = 0
done=False
if(self.state == 0 and action==1):
self.state = self.state
reward = 0
elif(self.state == 0 and action==0):
if(self.total_states==1):
done=True
reward = 1000
self.state = self.state+1
elif (self.state==self.total_states-1 and action==1):
self.state = self.total_states+1
done=True
reward=1000
elif action==0:
self.state=self.state-1
elif action==1:
self.state = self.state+1
# if action==0 and self.state%2==0:
# self.state=self.state-1
# elif action==1 and self.state%2==0:
# self.state=self.state+1
# elif action==0 and self.state%2==1:
# self.state=self.state+1
# elif action==1 and self.state%2==1:
# self.state=self.state-1
# if(self.state<0):
# self.state = 0
# if(self.state>=self.total_states):
# self.state = self.total_states
# reward=1000
# done=True
self.cum_rewards +=reward
normalized_state = self.state/float(self.total_states)-1
if(done):
return np.array(normalized_state).reshape(1,),reward,done,{'episode':{'r':self.cum_rewards}}
return np.array([normalized_state]).reshape(1,),reward,done,{}
results = {'horizon':[],'steps':[],'q_values':[]}
for horizon in range(1):
horizon = 50
best_mean_reward, n_steps = -np.inf, 0
max_timesteps=horizon+1
steps_to_solve = 0
q_horizon= []
test_env = RandomWalkEnv(max_timesteps)
print("Starting to learn task with horizon: {}".format(horizon+1))
def callback(_locals, _globals):
"""
        Callback called at each step (for DQN and others) or after n steps (see ACER or PPO2)
:param _locals: (dict)
:param _globals: (dict)
"""
global n_steps, best_mean_reward,max_timesteps,steps_to_solve
# Print stats every 1000 calls
total_reward=0
mean_reward=0
steps_to_solve+=1
        if (n_steps + 1) % 1 == 0:
for i in range(1):
dones=False
timesteps = 0
obs = test_env.reset()
while not dones:
action, q_values = model.predict(np.array(obs).reshape(1,1))
# print("state: {}, action: {} | {}".format(test_env.state,action,q_values))
obs, rewards, dones, info = test_env.step(action)
total_reward+=rewards
timesteps+=1
if(timesteps==max_timesteps*3):
dones=True
if(dones):
break
mean_reward=total_reward
print(max_timesteps)
print("****************************")
state_q = []
for state in range(test_env.total_states):
normalized_state = state/float(test_env.total_states)-1
normalized_state=np.array(normalized_state).reshape(1,1)
state_q.append(model.predict(
|
np.array(normalized_state)
|
numpy.array
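For reference, a standalone sketch of the state-normalization idiom this row completes (the numbers are illustrative):

import numpy as np

total_states = 5
# Integer states 0..total_states-1 map into [-1, 0) for the Box space.
for state in range(total_states):
    normalized_state = state/float(total_states) - 1
    obs = np.array(normalized_state).reshape(1, 1)  # (1, 1) input for model.predict
    print(state, obs)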
|
#! /usr/bin/env python3
"""Test join."""
# --- import --------------------------------------------------------------------------------------
import posixpath
import numpy as np
import WrightTools as wt
from WrightTools import datasets
# --- test ----------------------------------------------------------------------------------------
def test_wm_w2_w1():
col = wt.Collection()
p = datasets.PyCMDS.wm_w2_w1_000
a = wt.data.from_PyCMDS(p)
p = datasets.PyCMDS.wm_w2_w1_001
b = wt.data.from_PyCMDS(p)
joined = wt.data.join([a, b], parent=col, name="join")
assert posixpath.basename(joined.name) == "join"
assert joined.natural_name == "join"
assert joined.shape == (63, 11, 11)
assert joined.d2.shape == (1, 1, 1)
assert not np.isnan(joined.channels[0][:]).any()
assert joined["w1"].label == "1"
joined.print_tree(verbose=True)
a.close()
b.close()
joined.close()
def test_1D_no_overlap():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11))
b.create_variable("x", np.linspace(11, 21, 11))
a.transform("x")
b.transform("x")
joined = wt.data.join([a, b])
assert joined.shape == (22,)
assert np.allclose(joined.x.points, np.linspace(0, 21, 22))
a.close()
b.close()
joined.close()
def test_1D_overlap_identical():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11))
b.create_variable("x", np.linspace(5, 15, 11))
a.transform("x")
b.transform("x")
joined = wt.data.join([a, b])
assert joined.shape == (16,)
assert np.allclose(joined.x.points, np.linspace(0, 15, 16))
a.close()
b.close()
joined.close()
def test_1D_overlap_offset():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11))
b.create_variable("x", np.linspace(5.5, 15.5, 11))
a.transform("x")
b.transform("x")
joined = wt.data.join([a, b])
assert joined.shape == (22,)
assert np.allclose(
joined.x.points,
np.sort(np.concatenate([np.linspace(0, 10, 11), np.linspace(5.5, 15.5, 11)])),
)
a.close()
b.close()
joined.close()
def test_2D_no_overlap_aligned():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :])
b.create_variable("x", np.linspace(11, 21, 11)[:, None])
b.create_variable("y", np.linspace(0, 10, 11)[None, :])
a.transform("x", "y")
b.transform("x", "y")
joined = wt.data.join([a, b])
assert joined.shape == (22, 11)
assert np.allclose(joined.x.points, np.linspace(0, 21, 22))
a.close()
b.close()
joined.close()
def test_2D_no_overlap_offset():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :])
b.create_variable("x", np.linspace(11, 21, 11)[:, None])
b.create_variable("y", np.linspace(0.5, 10.5, 11)[None, :])
a.create_channel("z", np.full(a.shape, 1))
b.create_channel("z", np.full(b.shape, 2))
a.transform("x", "y")
b.transform("x", "y")
joined = wt.data.join([a, b])
assert joined.shape == (22, 22)
assert np.allclose(joined.x.points, np.linspace(0, 21, 22))
assert np.allclose(joined.y.points, np.linspace(0, 10.5, 22))
a.close()
b.close()
joined.close()
def test_2D_overlap_identical():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :])
b.create_variable("x", np.linspace(5, 15, 11)[:, None])
b.create_variable("y", np.linspace(0, 10, 11)[None, :])
a.create_channel("z", np.full(a.shape, 1))
b.create_channel("z", np.full(b.shape, 2))
a.transform("x", "y")
b.transform("x", "y")
joined = wt.data.join([a, b])
assert joined.shape == (16, 11)
assert np.sum(np.isnan(joined.z)) == 0
a.close()
b.close()
joined.close()
def test_2D_overlap_offset():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :])
b.create_variable("x", np.linspace(5, 15, 11)[:, None])
b.create_variable("y", np.linspace(0.5, 10.5, 11)[None, :])
a.create_channel("z", np.full(a.shape, 1))
b.create_channel("z", np.full(b.shape, 2))
a.transform("x", "y")
b.transform("x", "y")
joined = wt.data.join([a, b])
assert joined.shape == (16, 22)
a.close()
b.close()
joined.close()
def test_2D_to_3D_overlap():
x1 = np.arange(-2.5, 2.5, 0.5)
x2 = np.arange(1, 10, 1)
x3 = np.arange(5, 25, 2.5)
y = np.linspace(-1, 1, 11)
z = np.arange(1, 10)
ch1 = lambda x, y, z: 5 * (x / 2.5) ** 2 + y ** 2 + (z / 10) ** 2
ch2 = lambda x, y, z: 4 * np.exp(-x / 10) - 3 * y + z
ch3 = lambda x, y, z: 5 + np.cos(x) - np.sin(z + y)
ds = []
for zi in z:
for xi, chi in zip([x1, x2, x3], [ch1, ch2, ch3]):
d = wt.data.Data()
d.create_variable(name="x", values=xi[:, None], units="fs")
d.create_variable(name="y", values=y[None, :], units="nm")
d.create_variable(name="z", values=np.array([[zi]]))
d.create_channel(name="ch", values=chi(xi[:, None], y[None, :], zi), units="nm")
d.transform("x", "y", "z")
ds.append(d)
joined = wt.data.join(ds)
[d.close() for d in ds]
assert not np.any(np.isnan(joined.ch[:]))
assert joined.shape[0] == len(set(list(x1) + list(x2) + list(x3)))
assert joined.shape[1:] == (y.size, z.size)
joined.close()
def test_1D_to_2D_aligned():
a = wt.Data()
b = wt.Data()
c = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11))
a.create_variable("y", np.array([0.0]))
b.create_variable("x", np.linspace(0, 10, 11))
b.create_variable("y", np.array([1.0]))
c.create_variable("x", np.linspace(0, 10, 11))
c.create_variable("y", np.array([2.0]))
a.transform("x", "y")
b.transform("x", "y")
c.transform("x", "y")
joined = wt.data.join([a, b, c])
assert joined.shape == (11, 3)
assert np.allclose(joined.x.points, np.linspace(0, 10, 11))
assert np.allclose(joined.y.points, np.linspace(0, 2, 3))
a.close()
b.close()
c.close()
joined.close()
def test_1D_to_2D_not_aligned():
a = wt.Data()
b = wt.Data()
c = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11))
a.create_variable("y", np.array([0.0]))
b.create_variable("x", np.linspace(0.5, 10.5, 11))
b.create_variable("y", np.array([1.0]))
c.create_variable("x", np.linspace(0, 10, 9))
c.create_variable("y", np.array([2.0]))
a.transform("x", "y")
b.transform("x", "y")
c.transform("x", "y")
joined = wt.data.join([a, b, c])
assert joined.shape == (26, 3)
assert np.allclose(joined.y.points, np.linspace(0, 2, 3))
a.close()
b.close()
c.close()
joined.close()
def test_2D_plus_1D():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :])
b.create_variable("x", np.linspace(0, 10, 11))
b.create_variable("y", np.array([11.0]))
a.transform("x", "y")
b.transform("x", "y")
joined = wt.data.join([a, b])
assert joined.shape == (11, 12)
assert np.allclose(joined.x.points, np.linspace(0, 10, 11))
assert np.allclose(joined.y.points, np.linspace(0, 11, 12))
joined.close()
joined = wt.data.join([b, a])
assert joined.shape == (11, 12)
assert np.allclose(joined.x.points, np.linspace(0, 10, 11))
assert np.allclose(joined.y.points, np.linspace(0, 11, 12))
a.close()
b.close()
joined.close()
def test_3D_no_overlap_aligned():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :, None])
a.create_variable("z", np.linspace(0, 10, 11)[None, None, :])
b.create_variable("x", np.linspace(11, 21, 11)[:, None, None])
b.create_variable("y", np.linspace(0, 10, 11)[None, :, None])
b.create_variable("z", np.linspace(0, 10, 11)[None, None, :])
a.transform("x", "y", "z")
b.transform("x", "y", "z")
joined = wt.data.join([a, b])
assert joined.shape == (22, 11, 11)
assert np.allclose(joined.x.points, np.linspace(0, 21, 22))
a.close()
b.close()
joined.close()
def test_3D_no_overlap_offset():
a = wt.Data()
b = wt.Data()
a.create_variable("x", np.linspace(0, 10, 11)[:, None, None])
a.create_variable("y", np.linspace(0, 10, 11)[None, :, None])
a.create_variable("z", np.linspace(0, 10, 11)[None, None, :])
b.create_variable("x",
|
np.linspace(0.5, 10.5, 11)
|
numpy.linspace
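A numpy-only sketch of the axis arithmetic behind `test_1D_overlap_offset` above; joining two offset grids yields the sorted union of their points:

import numpy as np

a_x = np.linspace(0, 10, 11)      # 0, 1, ..., 10
b_x = np.linspace(5.5, 15.5, 11)  # 5.5, 6.5, ..., 15.5
# Offset by 0.5, the grids share no points, so the joined axis has all 22.
joined_x = np.sort(np.concatenate([a_x, b_x]))
assert joined_x.size == 22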
|
# Copyright 2016-2020 The <NAME> Lab at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/caliban-toolbox/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import math
import numpy as np
from itertools import product
import xarray as xr
def compute_crop_indices(img_len, crop_size=None, crop_num=None, overlap_frac=0):
"""Determine how to crop the image across one dimension.
Args:
img_len: length of the image for given dimension
crop_size: size in pixels of the crop in given dimension; must be specified if
crop_num not provided
crop_num: number of crops in the given dimension; must be specified if
crop_size not provided
overlap_frac: fraction that adjacent crops will overlap each other on each side
Returns:
numpy.array: coordinates for where each crop will start in given dimension
numpy.array: coordinates for where each crop will end in given dimension
int: number of pixels of padding at start and end of image in given dimension
"""
# compute indices based on fixed number of pixels per crop
if crop_size is not None:
# compute overlap fraction in pixels
overlap_pix = math.floor(crop_size * overlap_frac)
# compute indices based on fixed number of crops
elif crop_num is not None:
# number of pixels in non-overlapping portion of crop
non_overlap_crop_size = np.ceil(img_len / crop_num).astype('int')
        # Technically this is the fraction of the non-overlap, rather than of the whole,
# but we're going to visually crop overlays anyway to make sure value is appropriate
overlap_pix = math.floor(non_overlap_crop_size * overlap_frac)
# total crop size
crop_size = non_overlap_crop_size + overlap_pix
# the crops start at pixel 0, and are spaced crop_size - overlap_pix away from each other
start_indices = np.arange(0, img_len - overlap_pix, crop_size - overlap_pix)
    # each crop ends crop_size pixels after its own start
end_indices = start_indices + crop_size
# the padding for the final image is the amount that the last crop goes beyond the image size
padding = end_indices[-1] - img_len
return start_indices, end_indices, padding
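# Worked example of the index arithmetic above (illustrative numbers):
#   compute_crop_indices(img_len=100, crop_size=40, overlap_frac=0.1)
#   overlap_pix   = floor(40 * 0.1)            = 4
#   start_indices = arange(0, 100 - 4, 40 - 4) = [0, 36, 72]
#   end_indices   = start_indices + 40         = [40, 76, 112]
#   padding       = 112 - 100                  = 12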
def crop_helper(input_data, row_starts, row_ends, col_starts, col_ends, padding):
"""Crops an image into pieces according to supplied coordinates
Args:
input_data: xarray of either X or y data to be cropped
row_starts: list of indices where row crops start
row_ends: list of indices where row crops end
col_starts: list of indices where col crops start
col_ends: list of indices where col crops end
padding: tuple which specifies the amount of padding on the final image
Returns:
numpy.array: 7D tensor of cropped images
tuple: shape of the final padded image
"""
# determine key parameters of crop
fov_len, stack_len, input_crop_num, slice_num, _, _, channel_len = input_data.shape
if input_crop_num > 1:
raise ValueError("Array has already been cropped")
# get name of last dimension from input data to determine if X or y
last_dim_name = input_data.dims[-1]
crop_num = len(row_starts) * len(col_starts)
crop_size_row = row_ends[0] - row_starts[0]
crop_size_col = col_ends[0] - col_starts[0]
# create xarray to hold crops
cropped_stack = np.zeros((fov_len, stack_len, crop_num, slice_num,
crop_size_row, crop_size_col, channel_len), dtype=input_data.dtype)
# labels for each index within a dimension
coordinate_labels = [input_data.fovs, input_data.stacks, range(crop_num), input_data.slices,
range(crop_size_row), range(crop_size_col), input_data[last_dim_name]]
# labels for each dimension
cropped_xr = xr.DataArray(data=cropped_stack, coords=coordinate_labels, dims=input_data.dims)
# pad the input to account for imperfectly overlapping final crop in rows and cols
formatted_padding = ((0, 0), (0, 0), (0, 0), (0, 0), (0, padding[0]), (0, padding[1]), (0, 0))
padded_input = np.pad(input_data, formatted_padding, mode='constant', constant_values=0)
# loop through rows and cols to generate crops
crop_counter = 0
for i in range(len(row_starts)):
for j in range(len(col_starts)):
cropped_xr[:, :, crop_counter, ...] = padded_input[:, :, 0, :,
row_starts[i]:row_ends[i],
col_starts[j]:col_ends[j], :]
crop_counter += 1
return cropped_xr, padded_input.shape
def stitch_crops(crop_stack, log_data):
"""Takes a stack of annotated labels and stitches them together into a single image
Args:
crop_stack: 7D tensor of labels to be stitched together
log_data: dictionary of parameters for reconstructing original image data
Returns:
numpy.array: 7D tensor of reconstructed labels
"""
# Initialize image with single dimension for channels
fov_len, stack_len, _, _, row_size, col_size, _ = log_data['original_shape']
row_padding, col_padding = log_data.get('row_padding', 0), log_data.get('col_padding', 0)
stitched_labels = np.zeros((fov_len, stack_len, 1, 1, row_size + row_padding,
col_size + col_padding, 1), dtype=crop_stack.dtype)
row_starts, row_ends = log_data['row_starts'], log_data['row_ends']
col_starts, col_ends = log_data['col_starts'], log_data['col_ends']
if crop_stack.shape[3] != 1:
raise ValueError('Stacks must be combined before stitching can occur')
# for each fov and stack, loop through rows and columns of crop positions
for fov, stack, row, col in product(range(fov_len), range(stack_len),
range(len(row_starts)), range(len(col_starts))):
# determine what crop # we're currently working on
        crop_counter = row * len(col_starts) + col
# get current crop
crop = crop_stack[fov, stack, crop_counter, 0, :, :, 0]
# increment values to ensure unique labels across final image
lowest_allowed_val = np.amax(stitched_labels[fov, stack, ...])
crop = np.where(crop == 0, crop, crop + lowest_allowed_val)
# get ids of cells in current crop
potential_overlap_cells = np.unique(crop)
potential_overlap_cells = \
potential_overlap_cells[
|
np.nonzero(potential_overlap_cells)
|
numpy.nonzero
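In isolation, the completion's idiom: np.unique on a label crop includes the background label 0, and indexing with np.nonzero drops it. A minimal sketch:

import numpy as np

crop = np.array([[0, 0, 3], [5, 3, 0]])
ids = np.unique(crop)            # array([0, 3, 5]) -- includes background
cell_ids = ids[np.nonzero(ids)]  # array([3, 5])    -- background removed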
|
import torch
import torch.cuda
import torch.nn.functional as F
from typing import Optional, Union, List
from dataclasses import dataclass
import numpy as np
import cv2
from scipy.spatial.transform import Rotation
from scipy.interpolate import CubicSpline
from matplotlib import pyplot as plt
from warnings import warn
@dataclass
class Rays:
origins: Union[torch.Tensor, List[torch.Tensor]]
dirs: Union[torch.Tensor, List[torch.Tensor]]
gt: Union[torch.Tensor, List[torch.Tensor]]
def to(self, *args, **kwargs):
origins = self.origins.to(*args, **kwargs)
dirs = self.dirs.to(*args, **kwargs)
gt = self.gt.to(*args, **kwargs)
return Rays(origins, dirs, gt)
def __getitem__(self, key):
origins = self.origins[key]
dirs = self.dirs[key]
gt = self.gt[key]
return Rays(origins, dirs, gt)
def __len__(self):
return self.origins.size(0)
@dataclass
class Intrin:
fx: Union[float, torch.Tensor]
fy: Union[float, torch.Tensor]
cx: Union[float, torch.Tensor]
cy: Union[float, torch.Tensor]
def scale(self, scaling: float):
return Intrin(
self.fx * scaling, self.fy * scaling, self.cx * scaling, self.cy * scaling
)
def get(self, field: str, image_id: int = 0):
val = self.__dict__[field]
return val if isinstance(val, float) else val[image_id].item()
class Timing:
"""
Timing environment
usage:
with Timing("message"):
your commands here
will print CUDA runtime in ms
"""
def __init__(self, name):
self.name = name
def __enter__(self):
self.start = torch.cuda.Event(enable_timing=True)
self.end = torch.cuda.Event(enable_timing=True)
self.start.record()
def __exit__(self, type, value, traceback):
self.end.record()
torch.cuda.synchronize()
print(self.name, "elapsed", self.start.elapsed_time(self.end), "ms")
def get_expon_lr_func(
lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
):
"""
Continuous learning rate decay function. Adapted from JaxNeRF
The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
is log-linearly interpolated elsewhere (equivalent to exponential decay).
If lr_delay_steps>0 then the learning rate will be scaled by some smooth
function of lr_delay_mult, such that the initial learning rate is
lr_init*lr_delay_mult at the beginning of optimization but will be eased back
to the normal learning rate when steps>lr_delay_steps.
    :param lr_init: float, initial learning rate at step 0.
    :param lr_final: float, final learning rate at step max_steps.
:param max_steps: int, the number of steps during optimization.
:return HoF which takes step as input
"""
def helper(step):
if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
# Disable this parameter
return 0.0
if lr_delay_steps > 0:
# A kind of reverse cosine decay.
delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
)
else:
delay_rate = 1.0
t = np.clip(step / max_steps, 0, 1)
log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
return delay_rate * log_lerp
return helper
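# Usage sketch (assumed values): decay from 1e-2 at step 0 to 1e-4 at
# step 10000. The midpoint lands on the geometric mean of the two rates:
#   lr_fn = get_expon_lr_func(lr_init=1e-2, lr_final=1e-4, max_steps=10000)
#   lr_fn(0) -> 1e-2, lr_fn(5000) -> 1e-3, lr_fn(10000) -> 1e-4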
def viridis_cmap(gray: np.ndarray):
"""
Visualize a single-channel image using matplotlib's viridis color map
yellow is high value, blue is low
:param gray: np.ndarray, (H, W) or (H, W, 1) unscaled
:return: (H, W, 3) float32 in [0, 1]
"""
colored = plt.cm.viridis(plt.Normalize()(gray.squeeze()))[..., :-1]
return colored.astype(np.float32)
def save_img(img: np.ndarray, path: str):
"""Save an image to disk. Image should have values in [0,1]."""
img = np.array((np.clip(img, 0.0, 1.0) * 255.0).astype(np.uint8))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(path, img)
def equirect2xyz(uv, rows, cols):
"""
Convert equirectangular coordinate to unit vector,
inverse of xyz2equirect
Taken from <NAME>
Args:
uv: np.ndarray [..., 2] x, y coordinates in image space in [-1.0, 1.0]
Returns:
xyz: np.ndarray [..., 3] unit vectors
"""
lon = (uv[..., 0] * (1.0 / cols) - 0.5) * (2 * np.pi)
lat = -(uv[..., 1] * (1.0 / rows) - 0.5) * np.pi
coslat = np.cos(lat)
return np.stack(
[
coslat * np.sin(lon),
np.sin(lat),
coslat * np.cos(lon),
],
axis=-1,
)
def xyz2equirect(bearings, rows, cols):
"""
Convert ray direction vectors into equirectangular pixel coordinates.
Inverse of equirect2xyz.
Taken from <NAME>
"""
lat = np.arcsin(bearings[..., 1])
lon = np.arctan2(bearings[..., 0], bearings[..., 2])
x = cols * (0.5 + lon / 2 / np.pi)
y = rows * (0.5 - lat / np.pi)
return np.stack([x, y], axis=-1)
def generate_dirs_equirect(w, h):
x, y = np.meshgrid( # pylint: disable=unbalanced-tuple-unpacking
np.arange(w, dtype=np.float32) + 0.5, # X-Axis (columns)
np.arange(h, dtype=np.float32) + 0.5, # Y-Axis (rows)
indexing="xy",
)
uv = np.stack([x * (2.0 / w) - 1.0, y * (2.0 / h) - 1.0], axis=-1)
    camera_dirs = equirect2xyz(uv, h, w)
return camera_dirs
# Data
def select_or_shuffle_rays(
rays_init: Rays,
permutation: int = False,
epoch_size: Optional[int] = None,
device: Union[str, torch.device] = "cpu",
):
n_rays = rays_init.origins.size(0)
n_samp = n_rays if (epoch_size is None) else epoch_size
if permutation:
print(" Shuffling rays")
indexer = torch.randperm(n_rays, device="cpu")[:n_samp]
else:
print(" Selecting random rays")
indexer = torch.randint(n_rays, (n_samp,), device="cpu")
return rays_init[indexer].to(device=device)
def compute_ssim(
img0,
img1,
max_val=1.0,
filter_size=11,
filter_sigma=1.5,
k1=0.01,
k2=0.03,
return_map=False,
):
"""Computes SSIM from two images.
This function was modeled after tf.image.ssim, and should produce comparable
output.
Args:
img0: torch.tensor. An image of size [..., width, height, num_channels].
img1: torch.tensor. An image of size [..., width, height, num_channels].
max_val: float > 0. The maximum magnitude that `img0` or `img1` can have.
filter_size: int >= 1. Window size.
filter_sigma: float > 0. The bandwidth of the Gaussian used for filtering.
k1: float > 0. One of the SSIM dampening parameters.
k2: float > 0. One of the SSIM dampening parameters.
      return_map: Bool. If True, will cause the per-pixel SSIM "map" to be returned
Returns:
Each image's mean SSIM, or a tensor of individual values if `return_map`.
"""
device = img0.device
ori_shape = img0.size()
width, height, num_channels = ori_shape[-3:]
img0 = img0.view(-1, width, height, num_channels).permute(0, 3, 1, 2)
img1 = img1.view(-1, width, height, num_channels).permute(0, 3, 1, 2)
batch_size = img0.shape[0]
# Construct a 1D Gaussian blur filter.
hw = filter_size // 2
shift = (2 * hw - filter_size + 1) / 2
f_i = ((torch.arange(filter_size, device=device) - hw + shift) / filter_sigma) ** 2
filt = torch.exp(-0.5 * f_i)
filt /= torch.sum(filt)
# Blur in x and y (faster than the 2D convolution).
# z is a tensor of size [B, H, W, C]
filt_fn1 = lambda z: F.conv2d(
z,
filt.view(1, 1, -1, 1).repeat(num_channels, 1, 1, 1),
padding=[hw, 0],
groups=num_channels,
)
filt_fn2 = lambda z: F.conv2d(
z,
filt.view(1, 1, 1, -1).repeat(num_channels, 1, 1, 1),
padding=[0, hw],
groups=num_channels,
)
# Vmap the blurs to the tensor size, and then compose them.
filt_fn = lambda z: filt_fn1(filt_fn2(z))
mu0 = filt_fn(img0)
mu1 = filt_fn(img1)
mu00 = mu0 * mu0
mu11 = mu1 * mu1
mu01 = mu0 * mu1
sigma00 = filt_fn(img0 ** 2) - mu00
sigma11 = filt_fn(img1 ** 2) - mu11
sigma01 = filt_fn(img0 * img1) - mu01
# Clip the variances and covariances to valid values.
# Variance must be non-negative:
sigma00 = torch.clamp(sigma00, min=0.0)
sigma11 = torch.clamp(sigma11, min=0.0)
sigma01 = torch.sign(sigma01) * torch.min(
torch.sqrt(sigma00 * sigma11), torch.abs(sigma01)
)
c1 = (k1 * max_val) ** 2
c2 = (k2 * max_val) ** 2
numer = (2 * mu01 + c1) * (2 * sigma01 + c2)
denom = (mu00 + mu11 + c1) * (sigma00 + sigma11 + c2)
ssim_map = numer / denom
ssim = torch.mean(ssim_map.reshape([-1, num_channels * width * height]), dim=-1)
return ssim_map if return_map else ssim
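# Usage sketch: identical images give an SSIM of 1 everywhere.
#   img = torch.rand(64, 64, 3)
#   compute_ssim(img, img)  # -> tensor([1.])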
def generate_rays(w, h, focal, camtoworlds, equirect=False):
"""
Generate perspective camera rays. Principal point is at center.
Args:
w: int image width
        h: int image height
focal: float real focal length
camtoworlds: jnp.ndarray [B, 4, 4] c2w homogeneous poses
equirect: if true, generates spherical rays instead of pinhole
Returns:
rays: Rays a namedtuple(origins [B, 3], directions [B, 3], viewdirs [B, 3])
"""
x, y = np.meshgrid( # pylint: disable=unbalanced-tuple-unpacking
np.arange(w, dtype=np.float32), # X-Axis (columns)
np.arange(h, dtype=np.float32), # Y-Axis (rows)
indexing="xy",
)
if equirect:
uv = np.stack([x * (2.0 / w) - 1.0, y * (2.0 / h) - 1.0], axis=-1)
        camera_dirs = equirect2xyz(uv, h, w)
else:
camera_dirs = np.stack(
[
(x - w * 0.5) / focal,
-(y - h * 0.5) / focal,
-np.ones_like(x),
],
axis=-1,
)
# camera_dirs = camera_dirs / np.linalg.norm(camera_dirs, axis=-1, keepdims=True)
c2w = camtoworlds[:, None, None, :3, :3]
camera_dirs = camera_dirs[None, Ellipsis, None]
directions = np.matmul(c2w, camera_dirs)[Ellipsis, 0]
origins = np.broadcast_to(camtoworlds[:, None, None, :3, -1], directions.shape)
norms = np.linalg.norm(directions, axis=-1, keepdims=True)
viewdirs = directions / norms
rays = Rays(origins=origins, directions=directions, viewdirs=viewdirs)
return rays
def similarity_from_cameras(c2w):
"""
Get a similarity transform to normalize dataset
from c2w (OpenCV convention) cameras
    :param c2w: (N, 4, 4)
:return T (4,4) , scale (float)
"""
t = c2w[:, :3, 3]
R = c2w[:, :3, :3]
# (1) Rotate the world so that z+ is the up axis
# we estimate the up axis by averaging the camera up axes
ups = np.sum(R * np.array([0, -1.0, 0]), axis=-1)
world_up = np.mean(ups, axis=0)
world_up /= np.linalg.norm(world_up)
up_camspace = np.array([0.0, -1.0, 0.0])
c = (up_camspace * world_up).sum()
cross = np.cross(world_up, up_camspace)
skew = np.array(
[
[0.0, -cross[2], cross[1]],
[cross[2], 0.0, -cross[0]],
[-cross[1], cross[0], 0.0],
]
)
if c > -1:
R_align = np.eye(3) + skew + (skew @ skew) * 1 / (1 + c)
else:
# In the unlikely case the original data has y+ up axis,
# rotate 180-deg about x axis
R_align = np.array([[-1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
# R_align = np.eye(3) # DEBUG
R = R_align @ R
fwds = np.sum(R * np.array([0, 0.0, 1.0]), axis=-1)
t = (R_align @ t[..., None])[..., 0]
# (2) Recenter the scene using camera center rays
# find the closest point to the origin for each camera's center ray
nearest = t + (fwds * -t).sum(-1)[:, None] * fwds
# median for more robustness
translate = -np.median(nearest, axis=0)
# translate = -np.mean(t, axis=0) # DEBUG
transform = np.eye(4)
transform[:3, 3] = translate
transform[:3, :3] = R_align
# (3) Rescale the scene using camera distances
scale = 1.0 / np.median(np.linalg.norm(t + translate, axis=-1))
return transform, scale
def jiggle_and_interp_poses(poses: torch.Tensor, n_inter: int, noise_std: float = 0.0):
"""
For generating a novel trajectory close to known trajectory
:param poses: torch.Tensor (B, 4, 4)
:param n_inter: int, number of views to interpolate in total
:param noise_std: float, default 0
"""
n_views_in = poses.size(0)
poses_np = poses.cpu().numpy().copy()
rot = Rotation.from_matrix(poses_np[:, :3, :3])
trans = poses_np[:, :3, 3]
trans += np.random.randn(*trans.shape) * noise_std
pose_quat = rot.as_quat()
t_in = np.arange(n_views_in, dtype=np.float32)
t_out = np.linspace(t_in[0], t_in[-1], n_inter, dtype=np.float32)
q_new = CubicSpline(t_in, pose_quat)
q_new: np.ndarray = q_new(t_out)
q_new = q_new / np.linalg.norm(q_new, axis=-1)[..., None]
t_new = CubicSpline(t_in, trans)
t_new = t_new(t_out)
rot_new = Rotation.from_quat(q_new)
R_new = rot_new.as_matrix()
Rt_new = np.concatenate([R_new, t_new[..., None]], axis=-1)
bottom = np.array([[0.0, 0.0, 0.0, 1.0]], dtype=np.float32)
bottom = bottom[None].repeat(Rt_new.shape[0], 0)
Rt_new = np.concatenate([Rt_new, bottom], axis=-2)
Rt_new = torch.from_numpy(Rt_new).to(device=poses.device, dtype=poses.dtype)
return Rt_new
# Rather ugly pose generation code, derived from NeRF
def _trans_t(t):
return np.array(
[
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, t],
[0, 0, 0, 1],
],
dtype=np.float32,
)
def _rot_phi(phi):
return np.array(
[
[1, 0, 0, 0],
[0, np.cos(phi), -np.sin(phi), 0],
[0, np.sin(phi),
|
np.cos(phi)
|
numpy.cos
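For reference, the completed helper this row is building; rotation about the x-axis is a standard result, so only the formatting is assumed:

import numpy as np

def _rot_phi(phi):
    # Homogeneous 4x4 rotation about the x-axis by phi radians.
    return np.array(
        [
            [1, 0, 0, 0],
            [0, np.cos(phi), -np.sin(phi), 0],
            [0, np.sin(phi), np.cos(phi), 0],
            [0, 0, 0, 1],
        ],
        dtype=np.float32,
    )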
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base import BaseDetector
from .test_mixins_3d import RPNTestMixin, BBoxTestMixin, MaskTestMixin
from .. import builder
from ..registry import DETECTORS
from mmdet.core import bbox2roi3D, bbox2result3D, build_assigner, build_sampler, tensor2img3D, multiclass_nms_3d
import mmcv
import math
import cv2
import matplotlib.pyplot as plt
import matplotlib.patches as pts
import numpy as np
import os.path
from os import path
@DETECTORS.register_module
class TwoStageDetector3D2ScalesHeadsRefinementHead(BaseDetector, RPNTestMixin, BBoxTestMixin,
MaskTestMixin):
def __init__(self,
backbone,
neck=None,
shared_head=None,
rpn_head=None,
rpn_head_2=None,
bbox_roi_extractor=None,
bbox_head=None,
refinement_head=None,
mask_roi_extractor=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(TwoStageDetector3D2ScalesHeadsRefinementHead, self).__init__()
# for debugging....
self.iteration = 1
self.iterations = []
self.rpn_cls_losses = []
self.rpn_bbox_reg_losses = []
self.total_losses = []
self.backbone = builder.build_backbone(backbone)
# self.backbone_2 = builder.build_backbone(backbone_2)
if neck is not None:
self.neck = builder.build_neck(neck)
if shared_head is not None:
self.shared_head = builder.build_shared_head(shared_head)
if rpn_head is not None:
self.rpn_head = builder.build_head(rpn_head)
if rpn_head_2 is not None:
self.rpn_head_2 = builder.build_head(rpn_head_2)
if bbox_head is not None:
self.bbox_roi_extractor = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_roi_extractor_2 = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_roi_extractor_refinement = builder.build_roi_extractor(
bbox_roi_extractor)
self.bbox_head = builder.build_head(bbox_head)
self.bbox_head_2 = builder.build_head(bbox_head)
if refinement_head is not None:
self.refinement_head = builder.build_head(refinement_head)
# comment out to disable segmentation head
self.mask_head = None; mask_head = None
if mask_head is not None:
if mask_roi_extractor is not None:
self.mask_roi_extractor = builder.build_roi_extractor(
mask_roi_extractor)
# enable second mask head
self.mask_roi_extractor_2 = builder.build_roi_extractor(
mask_roi_extractor)
self.mask_roi_extractor_refinement = builder.build_roi_extractor(
mask_roi_extractor)
self.share_roi_extractor = False
else:
breakpoint()
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = builder.build_head(mask_head)
# enable second mask head
self.mask_head_2 = builder.build_head(mask_head)
self.mask_head_refinement = builder.build_head(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
@property
def with_rpn(self):
return hasattr(self, 'rpn_head') and self.rpn_head is not None
def init_weights(self, pretrained=None):
super(TwoStageDetector3D2ScalesHeadsRefinementHead, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
# self.backbone_2.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
if self.with_shared_head:
self.shared_head.init_weights(pretrained=pretrained)
if self.with_rpn:
self.rpn_head.init_weights()
self.rpn_head_2.init_weights()
if self.with_bbox:
self.bbox_roi_extractor.init_weights()
self.bbox_roi_extractor_2.init_weights()
self.bbox_roi_extractor_refinement.init_weights()
self.bbox_head.init_weights()
self.bbox_head_2.init_weights()
if self.refinement_head:
self.refinement_head.init_weights()
if self.with_mask:
self.mask_head.init_weights()
# enable second mask head
self.mask_head_2.init_weights()
self.mask_head_refinement.init_weights()
if not self.share_roi_extractor:
self.mask_roi_extractor.init_weights()
# enable second mask head
self.mask_roi_extractor_2.init_weights()
self.mask_roi_extractor_refinement.init_weights()
def extract_feat(self, imgs):
x = self.backbone(imgs)
if self.with_neck:
x = self.neck(x)
return x
def extract_feat_2(self, imgs):
x = self.backbone_2(imgs)
if self.with_neck:
x = self.neck(x)
return x
def forward(self, imgs, img_meta, imgs_2, img_meta_2, return_loss=True, **kwargs):
if return_loss:
return self.forward_train(imgs, img_meta, imgs_2, img_meta_2, **kwargs)
else:
return self.forward_test(imgs, img_meta, imgs_2, img_meta_2, **kwargs)
def forward_train(self,
imgs,
img_meta,
imgs_2,
img_meta_2,
gt_bboxes,
gt_bboxes_2,
gt_labels,
gt_labels_2,
gt_bboxes_ignore=None,
gt_masks=None,
gt_masks_2=None,
pp=None,
pp_2=None,
proposals=None):
assert imgs.shape[1] == 3 and imgs_2.shape[1] == 3 # make sure channel size is 3
# Default FPN
x = self.extract_feat(imgs)
x_2 = self.extract_feat(imgs_2)
# x_2 = self.extract_feat_2(imgs_2)
losses = dict()
# RPN forward and loss
if self.with_rpn:
rpn_outs = self.rpn_head(x)
rpn_outs_2 = self.rpn_head_2(x_2)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
rpn_loss_inputs_2 = rpn_outs_2 + (gt_bboxes_2, img_meta,
self.train_cfg.rpn)
rpn_losses = self.rpn_head.loss(
*rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore, iteration=self.iteration)
rpn_losses_2 = self.rpn_head_2.loss(
*rpn_loss_inputs_2, gt_bboxes_ignore=gt_bboxes_ignore, iteration=self.iteration, img_meta_2=img_meta_2)
losses.update(rpn_losses)
losses.update(rpn_losses_2)
proposal_inputs = rpn_outs + (img_meta, self.train_cfg.rpn_proposal)
proposal_inputs_2 = rpn_outs_2 + (img_meta, self.train_cfg.rpn_proposal)
proposal_list, anchors = self.rpn_head.get_bboxes(*proposal_inputs)
proposal_list_2, anchors_2 = self.rpn_head_2.get_bboxes(*proposal_inputs_2, img_meta_2=img_meta_2)
# self.rpn_head.visualize_anchor_boxes(imgs, rpn_outs[0], img_meta, slice_num=45, shuffle=True) # debug only
# self.visualize_proposals(imgs, proposal_list, gt_bboxes, img_meta, slice_num=None, isProposal=True) #debug only
# self.visualize_proposals(imgs, anchors, gt_bboxes, img_meta, slice_num=None, isProposal=False) #debug only
# self.visualize_gt_bboxes(imgs, gt_bboxes, img_meta) #debug only
# breakpoint()
# self.visualize_gt_bboxes(imgs_2, gt_bboxes_2, img_meta_2) #debug only
# breakpoint()
# self.visualize_gt_bboxes_masks(imgs, gt_bboxes, img_meta, gt_masks) # debug only
# breakpoint()
# self.visualize_gt_bboxes_masks(imgs_2, gt_bboxes_2, img_meta_2, gt_masks_2) # debug only
# breakpoint()
else:
proposal_list = proposals
# hard negative mining: determine each proposal's probability of being CMB
# with torch.no_grad():
# proposal_list, scores = self.simple_test_bboxes(x, img_meta, proposal_list, None, rescale=False)
# proposal_list = proposal_list[:, 6:]
# proposal_list = [torch.cat((proposal_list, scores[:,1, None]), dim=1)]
# proposal_list_2, scores_2 = self.simple_test_bboxes_2(x_2, img_meta_2, proposal_list_2, None, rescale=False)
# proposal_list_2 = proposal_list_2[:, 6:]
# proposal_list_2 = [torch.cat((proposal_list_2, scores_2[:,1, None]), dim=1)]
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = imgs.size(0)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
gt_bboxes_cur_pat = gt_bboxes[i]
gt_bboxes_ignore_cur_pat = gt_bboxes_ignore[i]
gt_labels_cur_pat = gt_labels[i]
assign_result = bbox_assigner.assign(
proposal_list[i], gt_bboxes_cur_pat,
gt_bboxes_ignore_cur_pat, gt_labels_cur_pat)
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes_cur_pat,
gt_labels_cur_pat,
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_assigner_2 = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler_2 = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs_2 = imgs_2.size(0)
gt_bboxes_ignore_2 = [None for _ in range(num_imgs_2)]
sampling_results_2 = []
for i in range(num_imgs_2):
gt_bboxes_cur_pat_2 = gt_bboxes_2[i]
gt_bboxes_ignore_cur_pat_2 = gt_bboxes_ignore_2[i]
gt_labels_cur_pat_2 = gt_labels_2[i]
assign_result_2 = bbox_assigner_2.assign(
proposal_list_2[i], gt_bboxes_cur_pat_2,
gt_bboxes_ignore_cur_pat_2, gt_labels_cur_pat_2)
sampling_result_2 = bbox_sampler_2.sample(
assign_result_2,
proposal_list_2[i],
gt_bboxes_cur_pat_2,
gt_labels_cur_pat_2,
feats=[lvl_feat[i][None] for lvl_feat in x_2])
sampling_results_2.append(sampling_result_2)
# bbox head forward and loss
if self.with_bbox:
rois = bbox2roi3D([res.bboxes for res in sampling_results])
rois_2 = bbox2roi3D([res.bboxes for res in sampling_results_2])
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_feats_2 = self.bbox_roi_extractor_2(
x_2[:self.bbox_roi_extractor_2.num_inputs], rois_2)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
cls_score_2, bbox_pred_2 = self.bbox_head_2(bbox_feats_2)
bbox_targets = self.bbox_head.get_target(
sampling_results, gt_bboxes, gt_labels, self.train_cfg.rcnn)
bbox_targets_2 = self.bbox_head_2.get_target(
sampling_results_2, gt_bboxes_2, gt_labels_2, self.train_cfg.rcnn)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred,
*bbox_targets)
loss_bbox_2 = self.bbox_head_2.loss(cls_score_2, bbox_pred_2,
*bbox_targets_2, img_meta_2=img_meta_2)
losses.update(loss_bbox)
losses.update(loss_bbox_2)
# prepare upscaled data for refinement head
upscaled_factor = img_meta_2[0]['ori_shape'][0] / img_meta[0]['ori_shape'][0]
# convert parameterized adjustments to actual bounding boxes coordinates
pred_bboxes_2 = self.bbox_head_2.convert_adjustments_to_bboxes(rois_2, bbox_pred_2, img_meta_2[0]['img_shape'])[:,6:].cpu().detach().numpy() / upscaled_factor
pred_cls_score_2 = cls_score_2[:,1, None].cpu().detach().numpy()
pred_bboxes_2 = np.concatenate((pred_bboxes_2, pred_cls_score_2), axis=1)
pred_bboxes_2 = [torch.from_numpy(pred_bboxes_2).cuda()]
bbox_assigner = build_assigner(self.train_cfg.rcnn.assigner)
bbox_sampler = build_sampler(
self.train_cfg.rcnn.sampler, context=self)
num_imgs = imgs.size(0)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results_refinement = []
for i in range(num_imgs):
gt_bboxes_cur_pat = gt_bboxes[i]
gt_bboxes_ignore_cur_pat = gt_bboxes_ignore[i]
gt_labels_cur_pat = gt_labels[i]
assign_result = bbox_assigner.assign(
pred_bboxes_2[i], gt_bboxes_cur_pat,
gt_bboxes_ignore_cur_pat, gt_labels_cur_pat)
sampling_result = bbox_sampler.sample(
assign_result,
pred_bboxes_2[i],
gt_bboxes_cur_pat,
gt_labels_cur_pat,
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results_refinement.append(sampling_result)
rois_refinement = bbox2roi3D([res.bboxes for res in sampling_results_refinement])
bbox_feats_refinement = self.bbox_roi_extractor_refinement(
x[:self.bbox_roi_extractor_refinement.num_inputs], rois_refinement)
# training refinement head
refined_bbox_pred = self.refinement_head(bbox_feats_refinement)
bbox_targets_refinement = self.refinement_head.get_target(
sampling_results_refinement, gt_bboxes, gt_labels, self.train_cfg.rcnn)
loss_refinement = self.refinement_head.loss(refined_bbox_pred,
*bbox_targets_refinement)
losses.update(loss_refinement)
# mask head forward and loss
if self.with_mask:
# lower resolution mask head
pos_rois = bbox2roi3D(
[res.pos_bboxes for res in sampling_results])
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], pos_rois)
mask_pred = self.mask_head(mask_feats)
mask_targets = self.mask_head.get_target(
sampling_results, gt_masks, self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_pred, mask_targets,
pos_labels)
losses.update(loss_mask)
# higher resolution mask head
pos_rois = bbox2roi3D(
[res.pos_bboxes for res in sampling_results_2])
mask_feats = self.mask_roi_extractor_2(
x_2[:self.mask_roi_extractor_2.num_inputs], pos_rois)
mask_pred = self.mask_head_2(mask_feats)
mask_targets = self.mask_head_2.get_target(
sampling_results_2, gt_masks_2, self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results_2])
loss_mask_2 = self.mask_head_2.loss(mask_pred, mask_targets,
pos_labels, img_meta_2=img_meta_2)
losses.update(loss_mask_2)
# refinement mask head
pos_rois = bbox2roi3D(
[res.pos_bboxes for res in sampling_results_refinement])
mask_feats = self.mask_roi_extractor_refinement(
x[:self.mask_roi_extractor_refinement.num_inputs], pos_rois)
mask_pred = self.mask_head_refinement(mask_feats)
mask_targets = self.mask_head_refinement.get_target(
sampling_results_refinement, gt_masks, self.train_cfg.rcnn)
pos_labels = torch.cat(
[res.pos_gt_labels for res in sampling_results_refinement])
loss_mask_refinement = self.mask_head_refinement.loss(mask_pred, mask_targets,
pos_labels, img_meta_refinement=True)
losses.update(loss_mask_refinement)
# self.save_losses_plt(losses) #debug only...
self.iteration += 1
return losses
def forward_test(self, imgs, img_metas, imgs_2, img_meta_2, **kwargs):
return self.simple_test(imgs, img_metas, imgs_2, img_meta_2, **kwargs)
def simple_test(self, imgs, img_metas, imgs_2, img_metas_2, pp=None, pp_2=None, proposals=None, rescale=False, test_cfg2=None):
"""Test without augmentation."""
assert self.with_bbox, "Bbox head must be implemented."
if test_cfg2 is not None:
test_cfg = test_cfg2
else:
test_cfg = self.test_cfg
img_metas = img_metas[0]
img_metas_2 = img_metas_2[0]
# Default FPN
x = self.extract_feat(imgs)
x_2 = self.extract_feat(imgs_2)
# x_2 = self.extract_feat_2(imgs_2)
# dataset 1
proposal_list = self.simple_test_rpn(
x, img_metas, test_cfg.rpn) if proposals is None else proposals
bboxes, scores = self.simple_test_bboxes(
x, img_metas, proposal_list, None, rescale=rescale)
# dataset 2
proposal_list = self.simple_test_rpn_2(
x_2, img_metas_2, test_cfg.rpn) if proposals is None else proposals
bboxes_2, scores_2 = self.simple_test_bboxes_2(
x_2, img_metas_2, proposal_list, None, rescale=rescale)
# refinement head
bboxes_2_refinement = bboxes_2[:, 6:]
bboxes_2_refinement = [torch.cat((bboxes_2_refinement, scores_2[:,1, None]), dim=1)]
bboxes_2_refinement = self.simple_test_bbox_refinement(
x, img_metas, bboxes_2_refinement, None, rescale=rescale)
# combine non-scaled and upscaled bboxes and scores
bboxes_combined = torch.cat((bboxes, bboxes_2_refinement), 0)
scores_combined = torch.cat((scores, scores_2), 0)
det_bboxes, det_labels = multiclass_nms_3d(bboxes_combined, scores_combined, test_cfg.rcnn.score_thr, test_cfg.rcnn.nms, test_cfg.rcnn.max_per_img)
# return bboxes only
bbox_results = bbox2result3D(det_bboxes, det_labels,
self.bbox_head.num_classes)
return bbox_results
'''
    bboxes from the non-scaled pathway are fed into the non-scaled mask branch, while bboxes
    from the upscaled pathway are fed into the refinement mask branch
'''
# find out which detection box belongs to which resolution
det_bboxes_np = det_bboxes.cpu().numpy()
det_labels_np = det_labels.cpu().numpy()
bboxes_np = bboxes_combined.cpu().numpy()
cutoff_between_res1_res2 = len(bboxes)
nonscaled_bboxes = []
nonscaled_labels = []
upscaled_bboxes = []
upscaled_labels = []
for det_bbox, det_label in zip(det_bboxes_np, det_labels_np):
for index, bbox in enumerate(bboxes_np):
if np.all(det_bbox[:6] == bbox[6:]):
if index >= cutoff_between_res1_res2:
# upscaled bboxes
upscaled_bboxes.append(det_bbox)
upscaled_labels.append(det_label)
else:
# original-scaled bboxes
nonscaled_bboxes.append(det_bbox)
nonscaled_labels.append(det_label)
nonscaled_bboxes_gpu = torch.from_numpy(np.array(nonscaled_bboxes)).cuda()
nonscaled_labels_gpu = torch.from_numpy(
|
np.array(nonscaled_labels)
|
numpy.array
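A minimal sketch of the list-to-GPU-tensor idiom this row completes (the labels are hypothetical, and CUDA is guarded so the sketch also runs on CPU):

import numpy as np
import torch

nonscaled_labels = [1, 0, 1]  # hypothetical per-detection class labels
t = torch.from_numpy(np.array(nonscaled_labels))
if torch.cuda.is_available():
    t = t.cuda()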
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import timeit
import numpy as np
import pandas as pd
import pymc3 as pm
import theano
import theano.tensor as tt
def glm_hierarchical_model(random_seed=123):
"""Sample glm hierarchical model to use in benchmarks"""
np.random.seed(random_seed)
data = pd.read_csv(pm.get_data('radon.csv'))
data['log_radon'] = data['log_radon'].astype(theano.config.floatX)
county_idx = data.county_code.values
n_counties = len(data.county.unique())
with pm.Model() as model:
mu_a = pm.Normal('mu_a', mu=0., sd=100**2)
sigma_a = pm.HalfCauchy('sigma_a', 5)
mu_b = pm.Normal('mu_b', mu=0., sd=100**2)
sigma_b = pm.HalfCauchy('sigma_b', 5)
a = pm.Normal('a', mu=0, sd=1, shape=n_counties)
b = pm.Normal('b', mu=0, sd=1, shape=n_counties)
a = mu_a + sigma_a * a
b = mu_b + sigma_b * b
eps = pm.HalfCauchy('eps', 5)
radon_est = a[county_idx] + b[county_idx] * data.floor.values
pm.Normal('radon_like', mu=radon_est, sd=eps, observed=data.log_radon)
return model
def mixture_model(random_seed=1234):
"""Sample mixture model to use in benchmarks"""
    np.random.seed(random_seed)
size = 1000
w_true = np.array([0.35, 0.4, 0.25])
mu_true = np.array([0., 2., 5.])
sigma = np.array([0.5, 0.5, 1.])
component = np.random.choice(mu_true.size, size=size, p=w_true)
x = np.random.normal(mu_true[component], sigma[component], size=size)
with pm.Model() as model:
w = pm.Dirichlet('w', a=
|
np.ones_like(w_true)
|
numpy.ones_like
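In isolation, the completion builds a flat Dirichlet concentration vector, one unit per mixture component:

import numpy as np

w_true = np.array([0.35, 0.4, 0.25])
a = np.ones_like(w_true)  # array([1., 1., 1.]) -- a uniform prior over weights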
|
"""
Parameters that are the same for all behaviors.
"""
from __future__ import division
import numpy as np
# DIMENSIONALITY
nstates = 6
ncontrols = 3
# BEHAVIOR CONTROL
real_tol = [0.5, 0.5, np.deg2rad(10), np.inf, np.inf, np.inf]
pointshoot_tol =
|
np.deg2rad(20)
|
numpy.deg2rad
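The completion is a plain unit conversion; a one-line check:

import numpy as np

assert np.isclose(np.deg2rad(20), 20*np.pi/180)  # ~0.349 rad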
|
import numpy
import sympy
from ..helpers import backend_to_function
from ..nsimplex import NSimplexScheme, get_vol, transform
class TetrahedronScheme(NSimplexScheme):
def __init__(self, name, weights, points, degree, citation=None):
self.name = name
self.degree = degree
self.citation = citation
if weights.dtype == numpy.float64:
self.weights = weights
else:
assert weights.dtype in [numpy.dtype("O"), numpy.int64]
self.weights = weights.astype(numpy.float64)
self.weights_symbolic = weights
if points.dtype == numpy.float64:
self.points = points
else:
assert points.dtype in [numpy.dtype("O"), numpy.int64]
self.points = points.astype(numpy.float64)
self.points_symbolic = points
return
def show(
self,
tet=numpy.array(
[
[+1, 0, -1.0 / numpy.sqrt(2.0)],
[-1, 0, -1.0 / numpy.sqrt(2.0)],
[0, +1, +1.0 / numpy.sqrt(2.0)],
[0, -1, +1.0 / numpy.sqrt(2.0)],
]
),
backend="vtk",
render=True,
):
edges = numpy.array([[tet[i], tet[j]] for i in range(4) for j in range(i)])
edges = numpy.moveaxis(edges, 1, 2)
backend_to_function[backend](
transform(self.points.T, tet.T).T,
self.weights,
get_vol(tet),
edges,
render=render,
)
return
def _s4(symbolic):
frac = sympy.Rational if symbolic else lambda x, y: x / y
return numpy.full((1, 4), frac(1, 4))
def _s31(a):
b = 1 - 3 * a
return numpy.array([[a, a, a, b], [a, a, b, a], [a, b, a, a], [b, a, a, a]])
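# Example: _s31(0.1) is the S31 symmetry orbit of a = 0.1, i.e. the four
# barycentric points with b = 1 - 3*0.1 = 0.7 cycled through each slot:
#   [[0.1, 0.1, 0.1, 0.7], [0.1, 0.1, 0.7, 0.1],
#    [0.1, 0.7, 0.1, 0.1], [0.7, 0.1, 0.1, 0.1]]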
def _s22(a):
b = 0.5 - a
return numpy.array(
[
[a, a, b, b],
[a, b, a, b],
[b, a, a, b],
[a, b, b, a],
[b, a, b, a],
[b, b, a, a],
]
)
def _s211(a, b):
c = 1.0 - 2 * a - b
return numpy.array(
[
[a, a, b, c],
[a, b, a, c],
[b, a, a, c],
[a, b, c, a],
[b, a, c, a],
[b, c, a, a],
[a, a, c, b],
[a, c, a, b],
[c, a, a, b],
[a, c, b, a],
[c, a, b, a],
[c, b, a, a],
]
)
def _s1111(a, b, c):
d = 1.0 - a - b - c
return numpy.array(
[
[a, b, c, d],
[a, b, d, c],
[a, c, b, d],
[a, c, d, b],
[a, d, b, c],
[a, d, c, b],
[b, a, c, d],
[b, a, d, c],
[b, c, a, d],
[b, c, d, a],
[b, d, a, c],
[b, d, c, a],
[c, a, b, d],
[c, a, d, b],
[c, b, a, d],
[c, b, d, a],
[c, d, a, b],
[c, d, b, a],
[d, a, b, c],
[d, a, c, b],
[d, b, a, c],
[d, b, c, a],
[d, c, a, b],
[d, c, b, a],
]
)
def untangle2(data):
points = []
weights = []
if "s4" in data:
assert len(data["s4"]) == 1
w = numpy.array(data["s4"]).T
points.append(_s4(symbolic=False))
weights.append(w[0])
if "s31" in data:
d = numpy.array(data["s31"]).T
s31_data = numpy.moveaxis(_s31(d[1]), 0, 1)
points.append(_collapse0(s31_data).T)
weights.append(numpy.tile(d[0], 4))
if "s22" in data:
d = numpy.array(data["s22"]).T
s22_data = numpy.moveaxis(_s22(d[1]), 0, 1)
points.append(_collapse0(s22_data).T)
weights.append(numpy.tile(d[0], 6))
if "s211" in data:
d = numpy.array(data["s211"]).T
s211_data = numpy.moveaxis(_s211(*d[1:]), 0, 1)
points.append(_collapse0(s211_data).T)
weights.append(
|
numpy.tile(d[0], 12)
|
numpy.tile
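Sketch of the weight replication in the s211 branch: each orbit contributes 12 points, so the orbit weights are tiled 12 times (the weights here are hypothetical):

import numpy

d0 = numpy.array([0.05, 0.02])  # one weight per s211 orbit
w = numpy.tile(d0, 12)          # repeated once per point in each orbit
assert w.size == 24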
|
"""
datadict.py :
Data classes we use throughout the plottr package, and tools to work on them.
"""
import warnings
import copy as cp
import numpy as np
from functools import reduce
from typing import List, Tuple, Dict, Sequence, Union, Any, Iterator, Optional, TypeVar
from plottr.utils import num, misc
__author__ = '<NAME>'
__license__ = 'MIT'
# TODO: functionality that returns axes values given a set of slices.
# TODO: an easier way to access data and meta values.
# maybe with getattr/setattr?
# TODO: direct slicing of full datasets. implement getitem/setitem?
# TODO: the feature comparing datadicts for equality is not fully tested yet.
def is_meta_key(key: str) -> bool:
if key[:2] == '__' and key[-2:] == '__':
return True
else:
return False
def meta_key_to_name(key: str) -> str:
if is_meta_key(key):
return key[2:-2]
else:
raise ValueError(f'{key} is not a meta key.')
def meta_name_to_key(name: str) -> str:
return '__' + name + '__'
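# The meta-key convention at a glance:
#   meta_name_to_key('units')     -> '__units__'
#   meta_key_to_name('__units__') -> 'units'
#   is_meta_key('__units__')      -> True;  is_meta_key('units') -> False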
T = TypeVar('T', bound='DataDictBase')
class GriddingError(ValueError):
pass
class DataDictBase(dict):
"""
Simple data storage class that is based on a regular dictionary.
This base class does not make assumptions about the structure of the
values. This is implemented in inheriting classes.
"""
def __init__(self, **kw: Any):
super().__init__(self, **kw)
def __eq__(self, other: object) -> bool:
"""Check for content equality of two datadicts."""
if not isinstance(other, DataDictBase):
return NotImplemented
if not self.same_structure(self, other):
# print('structure')
return False
for k, v in self.meta_items():
if k not in [kk for kk, vv in other.meta_items()]:
# print(f'{k} not in {other}')
return False
elif other.meta_val(k) != v:
# print(f'{other.meta_val(k)} != {v}')
return False
for k, v in other.meta_items():
if k not in [kk for kk, vv in self.meta_items()]:
# print(f'{k} not in {self}')
return False
for dn, dv in self.data_items():
# print(dn)
if dn not in [dnn for dnn, dvv in other.data_items()]:
# print(f"{dn} not in {other}")
return False
if self[dn].get('unit', '') != other[dn].get('unit', ''):
# print(f"different units for {dn}")
return False
if self[dn].get('label', '') != other[dn].get('label', ''):
# print(f"different labels for {dn}")
return False
if self[dn].get('axes', []) != other[dn].get('axes', []):
# print(f"different axes for {dn}")
return False
if not num.arrays_equal(
np.array(self.data_vals(dn)),
np.array(other.data_vals(dn)),
):
# print(f"different data for {dn}")
return False
for k, v in self.meta_items(dn):
if k not in [kk for kk, vv in other.meta_items(dn)]:
# print(f"{dn}: {k} not in {other}")
return False
elif v != other.meta_val(k, dn):
# print(f"{v} != {other.meta_val(k, dn)}")
return False
for dn, dv in other.data_items():
# print(dn)
if dn not in [dnn for dnn, dvv in self.data_items()]:
# print(f"{dn} not in {other}")
return False
for k, v in other.meta_items(dn):
if k not in [kk for kk, vv in self.meta_items(dn)]:
# print(f"{dn}: {k} not in {other}")
return False
return True
# Assignment and retrieval of data and meta data
@staticmethod
def _is_meta_key(key: str) -> bool:
return is_meta_key(key)
@staticmethod
def _meta_key_to_name(key: str) -> str:
return meta_key_to_name(key)
@staticmethod
def _meta_name_to_key(name: str) -> str:
return meta_name_to_key(name)
def data_items(self) -> Iterator[Tuple[str, Dict[str, Any]]]:
"""
Generator for data field items.
Like dict.items(), but ignores meta data.
"""
for k, v in self.items():
if not self._is_meta_key(k):
yield k, v
def meta_items(self, data: Union[str, None] = None,
clean_keys: bool = True) -> Iterator[Tuple[str, Dict[str, Any]]]:
"""
Generator for meta items.
Like dict.items(), but yields `only` meta entries.
The keys returned do not contain the underscores used internally.
:param data: if ``None`` iterate over global meta data.
if it's the name of a data field, iterate over the meta
information of that field.
:param clean_keys: if `True`, remove the underscore pre/suffix
"""
if data is None:
for k, v in self.items():
if self._is_meta_key(k):
if clean_keys:
n = self._meta_key_to_name(k)
else:
n = k
yield n, v
else:
for k, v in self[data].items():
if self._is_meta_key(k):
if clean_keys:
n = self._meta_key_to_name(k)
else:
n = k
yield n, v
def data_vals(self, key: str) -> np.ndarray:
"""
Return the data values of field ``key``.
Equivalent to ``DataDict['key'].values``.
:param key: name of the data field
:return: values of the data field
"""
if self._is_meta_key(key):
raise ValueError(f"{key} is a meta key.")
return self[key].get('values', np.array([]))
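# Illustrative sketch (hypothetical field names, not part of the original
# module): given dd = DataDictBase(x=dict(values=[0, 1]), __info__='raw'),
#   list(dd.data_items())  -> [('x', {'values': [0, 1]})]
#   list(dd.meta_items())  -> [('info', 'raw')]
#   dd.data_vals('x')      -> [0, 1]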
def has_meta(self, key: str) -> bool:
"""Check whether meta field exists in the dataset."""
k = self._meta_name_to_key(key)
if k in self:
return True
else:
return False
def meta_val(self, key: str, data: Union[str, None] = None) -> Any:
"""
Return the value of meta field ``key`` (given without underscore).
:param key: name of the meta field
:param data: ``None`` for global meta; name of data field for data meta.
:return: the value of the meta information.
"""
k = self._meta_name_to_key(key)
if data is None:
return self[k]
else:
return self[data][k]
def add_meta(self, key: str, value: Any, data: Union[str, None] = None) -> None:
"""
Add meta info to the dataset.
If the key already exists, meta info will be overwritten.
:param key: Name of the meta field (without underscores)
:param value: Value of the meta information
:param data: if ``None``, meta will be global; otherwise assigned to
data field ``data``.
"""
key = self._meta_name_to_key(key)
if data is None:
self[key] = value
else:
self[data][key] = value
set_meta = add_meta
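# Usage sketch (assumed field name 'x'; illustrative only):
#   dd.add_meta('sample', 'dev-A')             # global: stored as dd['__sample__']
#   dd.add_meta('calibrated', True, data='x')  # field: stored as dd['x']['__calibrated__']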
def delete_meta(self, key: str, data: Union[str, None] = None) -> None:
"""
Remove meta data.
:param key: name of the meta field to remove.
:param data: if ``None``, this affects global meta; otherwise remove
from data field ``data``.
"""
key = self._meta_name_to_key(key)
if data is None:
del self[key]
else:
del self[data][key]
def clear_meta(self, data: Union[str, None] = None) -> None:
"""
Delete meta information.
:param data: if this is not ``None``, delete only meta information from
data field ``data``. Else, delete all top-level meta, as well as
meta for all data fields.
"""
if data is None:
meta_list = [k for k, _ in self.meta_items()]
for m in meta_list:
self.delete_meta(m)
for d, _ in self.data_items():
data_meta_list = [k for k, _ in self.meta_items(d)]
for m in data_meta_list:
self.delete_meta(m, d)
else:
# collect the keys first; deleting while iterating over the generator
# would mutate the dict mid-iteration.
data_meta_list = [k for k, _ in self.meta_items(data)]
for m in data_meta_list:
self.delete_meta(m, data)
def extract(self: T, data: List[str], include_meta: bool = True,
copy: bool = True, sanitize: bool = True) -> T:
"""
Extract data from a dataset.
Return a new datadict with all fields specified in ``data`` included.
Will also take any axes fields along that have not been explicitly
specified.
:param data: data field or list of data fields to be extracted
:param include_meta: if ``True``, include the global meta data.
Data field meta will always be included.
:param copy: if ``True``, data fields will be deep copies of the
original.
:param sanitize: if ``True``, will run DataDictBase.sanitize before
returning.
:return: new DataDictBase containing only requested fields.
"""
if isinstance(data, str):
data = [data]
else:
data = data.copy()
for d in data:
for a in self.axes(d):
if a not in data:
data.append(a)
ret = self.__class__()
for d in data:
if copy:
ret[d] = cp.deepcopy(self[d])
else:
ret[d] = self[d]
if include_meta:
for k, v in self.meta_items():
if copy:
ret.add_meta(k, cp.deepcopy(v))
else:
ret.add_meta(k, v)
if sanitize:
ret = ret.sanitize()
ret.validate()
return ret
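# Extraction sketch (hypothetical fields, illustrative only): if 'signal'
# depends on axis 'time', then
#   sub = dd.extract(['signal'])
# returns a new datadict containing both 'signal' and 'time', because axes of
# the requested dependents are pulled in automatically.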
# info about structure
@staticmethod
def same_structure(*data: T,
check_shape: bool = False) -> bool:
"""
Check if all supplied DataDicts share the same data structure
(i.e., dependents and axes).
Ignores meta info and values. Checks also for matching shapes if
`check_shape` is `True`.
:param data: the data sets to compare
:param check_shape: whether to include a shape check in the comparison
:return: ``True`` if the structure matches for all, else ``False``.
"""
if len(data) < 2:
return True
def empty_structure(d: T) -> T:
s = misc.unwrap_optional(d.structure(include_meta=False, add_shape=check_shape))
for k, v in s.data_items():
if 'values' in v:
del s[k]['values']
return s
s0 = empty_structure(data[0])
for d in data[1:]:
if d is None:
return False
if s0 != empty_structure(d):
return False
return True
def structure(self: T, add_shape: bool = False,
include_meta: bool = True,
same_type: bool = False) -> Optional[T]:
"""
Get the structure of the DataDict.
Return the datadict without values (the ``values`` entry omitted for each data field).
:param add_shape: Deprecated -- ignored.
:param include_meta: if `True`, include the meta information in
the returned dict, else clear it.
:param same_type: if `True`, return type will be the one of the
object this is called on. Else, DataDictBase.
:return: The DataDict containing the structure only. The exact type
is the same as the type of ``self``
"""
if add_shape:
warnings.warn("'add_shape' is deprecated and will be ignored",
DeprecationWarning)
add_shape = False
if self.validate():
s = self.__class__()
for n, v in self.data_items():
v2 = v.copy()
v2.pop('values')
s[n] = v2
if include_meta:
for n, v in self.meta_items():
s.add_meta(n, v)
else:
s.clear_meta()
if same_type:
s = self.__class__(**s)
return s
return None
def label(self, name: str) -> Optional[str]:
"""
Get a label for a data field.
If a label is present, use it; otherwise fall back to the data name.
If a unit is present, it is appended in brackets:
``name (unit)``; if no unit is present, the name is returned as-is.
:param name: name of the data field
:return: labelled name
"""
if self.validate():
if name not in self:
raise ValueError("No field '{}' present.".format(name))
if self[name]['label'] != '':
n = self[name]['label']
else:
n = name
if self[name]['unit'] != '':
n += ' ({})'.format(self[name]['unit'])
return n
return None
def axes_are_compatible(self) -> bool:
"""
Check if all dependent data fields have the same axes.
This includes axes order.
:return: ``True`` or ``False``
"""
axes = []
for i, d in enumerate(self.dependents()):
if i == 0:
axes = self.axes(d)
else:
if self.axes(d) != axes:
return False
return True
def axes(self, data: Union[Sequence[str], str, None] = None) -> List[str]:
"""
Return a list of axes.
:param data: if ``None``, return all axes present in the dataset,
otherwise only the axes of the dependent ``data``.
:return: the list of axes
"""
lst = []
if data is None:
for k, v in self.data_items():
if 'axes' in v:
for n in v['axes']:
if n not in lst and self[n].get('axes', []) == []:
lst.append(n)
else:
if isinstance(data, str):
dataseq: Sequence[str] = (data,)
else:
dataseq = data
for n in dataseq:
if 'axes' not in self[n]:
continue
for m in self[n]['axes']:
if m not in lst and self[m].get('axes', []) == []:
lst.append(m)
return lst
def dependents(self) -> List[str]:
"""
Get all dependents in the dataset.
:return: a list of the names of dependents (data fields that have axes)
"""
ret = []
for n, v in self.data_items():
if len(v.get('axes', [])) != 0:
ret.append(n)
return ret
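# Illustrative sketch (hypothetical structure, not part of the original module):
#   dd = DataDictBase(x=dict(values=[0, 1]),
#                     y=dict(values=[2, 3], axes=['x']))
#   dd.dependents()  -> ['y']   # has axes
#   dd.axes()        -> ['x']   # used as an axis and itself axis-free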
def shapes(self) -> Dict[str, Tuple[int, ...]]:
"""
Get the shapes of all data fields.
:return: a dictionary of the form ``{key : shape}``, where shape is the
np.shape-tuple of the data with name ``key``.
"""
shapes = {}
for k, v in self.data_items():
shapes[k] = np.array(self.data_vals(k)).shape
return shapes
# validation and sanitizing
def validate(self) -> bool:
"""
Check the validity of the dataset.
Checks performed:
* all axes specified with dependents must exist as data fields.
Other tasks performed:
* ``unit`` keys are created if omitted
* ``label`` keys are created if omitted
* ``shape`` meta information is updated with the correct values
(only if present already).
:return: ``True`` if valid.
:raises: ``ValueError`` if invalid.
"""
msg = '\n'
for n, v in self.data_items():
if 'axes' in v:
for na in v['axes']:
if na not in self:
msg += " * '{}' has axis '{}', but no field " \
"with name '{}' registered.\n".format(
n, na, na)
elif na not in self.axes():
msg += " * '{}' has axis '{}', but no independent " \
"with name '{}' registered.\n".format(
n, na, na)
else:
v['axes'] = []
if 'unit' not in v:
v['unit'] = ''
if 'label' not in v:
v['label'] = ''
vals = v.get('values', [])
if type(vals) not in [np.ndarray, np.ma.core.MaskedArray]:
vals = np.array(vals)
v['values'] = vals
if msg != '\n':
raise ValueError(msg)
return True
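# Validation sketch: declaring an axis without a matching field raises, e.g.
#   DataDictBase(y=dict(values=[1], axes=['x'])).validate()  -> ValueError
# whereas a complete structure passes and gets missing 'unit'/'label' keys
# filled in with empty strings.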
def remove_unused_axes(self: T) -> T:
"""
Removes axes not associated with dependents.
:return: cleaned dataset.
"""
dependents = self.dependents()
unused = []
ret = self.copy()
for n, v in self.data_items():
used = False
if n not in dependents:
for m in dependents:
if n in self[m]['axes']:
used = True
else:
used = True
if not used:
unused.append(n)
for u in unused:
del ret[u]
return ret
def sanitize(self: T) -> T:
"""
Clean-up tasks:
* removes unused axes.
:return: sanitized dataset.
"""
return self.remove_unused_axes()
# axes order tools
def reorder_axes_indices(self, name: str,
**pos: int) -> Tuple[Tuple[int, ...], List[str]]:
"""
Get the indices that can reorder axes in a given way.
:param name: name of the data field of which we want to reorder axes
:param pos: new axes position in the form ``axis_name = new_position``.
non-specified axes positions are adjusted automatically.
:return: the tuple of new indices, and the list of axes names in the
new order.
"""
axlist = self.axes(name)
order = misc.reorder_indices_from_new_positions(axlist, **pos)
return order, [axlist[i] for i in order]
def reorder_axes(self: T, data_names: Union[str, Sequence[str], None] = None,
**pos: int) -> T:
"""
Reorder data axes.
:param data_names: data name(s) for which to reorder the axes
if None, apply to all dependents.
:param pos: new axes position in the form ``axis_name = new_position``.
non-specified axes positions are adjusted automatically.
:return: dataset with re-ordered axes.
"""
if data_names is None:
data_names = self.dependents()
if isinstance(data_names, str):
data_names = [data_names]
ret = self.copy()
for n in data_names:
neworder, newaxes = self.reorder_axes_indices(n, **pos)
ret[n]['axes'] = newaxes
ret.validate()
return ret
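# Reordering sketch (hypothetical axes 'x', 'y' on dependent 'z'):
#   dd2 = dd.reorder_axes('z', y=0)   # 'y' becomes the first axis: ['y', 'x']
# In this base class only the 'axes' list is reordered; the values themselves
# are left untouched.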
def copy(self: T) -> T:
"""
Make a copy of the dataset.
:return: A copy of the dataset.
"""
return cp.deepcopy(self)
def astype(self: T, dtype: np.dtype) -> T:
"""
Convert all data values to given dtype.
:param dtype: np dtype.
:return: copy of the dataset, with values as given type.
"""
ret = self.copy()
for k, v in ret.data_items():
vals = v['values']
if type(v['values']) not in [np.ndarray, np.ma.core.MaskedArray]:
vals =
|
np.array(v['values'])
|
numpy.array
|
from __future__ import print_function, division, absolute_import
import math
import copy
import numbers
import sys
import os
import json
import types
import warnings
import numpy as np
import cv2
import imageio
import scipy.spatial.distance
import six
import six.moves as sm
import skimage.draw
import skimage.measure
import collections
from PIL import Image as PIL_Image, ImageDraw as PIL_ImageDraw, ImageFont as PIL_ImageFont
ALL = "ALL"
FILE_DIR = os.path.dirname(os.path.abspath(__file__))
# filepath to the quokka image, its annotations and depth map
QUOKKA_FP = os.path.join(FILE_DIR, "quokka.jpg")
QUOKKA_ANNOTATIONS_FP = os.path.join(FILE_DIR, "quokka_annotations.json")
QUOKKA_DEPTH_MAP_HALFRES_FP = os.path.join(FILE_DIR, "quokka_depth_map_halfres.png")
DEFAULT_FONT_FP = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"DejaVuSans.ttf"
)
# We instantiate a current/global random state here once.
# One can also call np.random, but that is (in contrast to np.random.RandomState)
# a module and hence cannot be copied via deepcopy. That's why we use RandomState
# here (and in all augmenters) instead of np.random.
CURRENT_RANDOM_STATE = np.random.RandomState(42)
SEED_MIN_VALUE = 0
SEED_MAX_VALUE = 2**31-1 # use 2**31 instead of 2**32 here because 2**31 errored on some systems
# to check if a dtype instance is among these dtypes, use e.g. `dtype.type in NP_FLOAT_TYPES`
# do not just use `dtype in NP_FLOAT_TYPES` as that would fail
NP_FLOAT_TYPES = set(np.sctypes["float"])
NP_INT_TYPES = set(np.sctypes["int"])
NP_UINT_TYPES = set(np.sctypes["uint"])
IMSHOW_BACKEND_DEFAULT = "matplotlib"
IMRESIZE_VALID_INTERPOLATIONS = ["nearest", "linear", "area", "cubic",
cv2.INTER_NEAREST, cv2.INTER_LINEAR, cv2.INTER_AREA, cv2.INTER_CUBIC]
def is_np_array(val):
"""
Checks whether a variable is a numpy array.
Parameters
----------
val
The variable to check.
Returns
-------
out : bool
True if the variable is a numpy array. Otherwise False.
"""
# using np.generic here via isinstance(val, (np.ndarray, np.generic)) seems to also fire for scalar numpy values
# even though those are not arrays
return isinstance(val, np.ndarray)
def is_single_integer(val):
"""
Checks whether a variable is an integer.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is an integer. Otherwise False.
"""
return isinstance(val, numbers.Integral) and not isinstance(val, bool)
def is_single_float(val):
"""
Checks whether a variable is a float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a float. Otherwise False.
"""
return isinstance(val, numbers.Real) and not is_single_integer(val) and not isinstance(val, bool)
def is_single_number(val):
"""
Checks whether a variable is a number, i.e. an integer or float.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a number. Otherwise False.
"""
return is_single_integer(val) or is_single_float(val)
def is_iterable(val):
"""
Checks whether a variable is iterable.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is an iterable. Otherwise False.
"""
return isinstance(val, collections.Iterable)
# TODO convert to is_single_string() or rename is_single_integer/float/number()
def is_string(val):
"""
Checks whether a variable is a string.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a string. Otherwise False.
"""
return isinstance(val, six.string_types)
def is_single_bool(val):
"""
Checks whether a variable is a boolean.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a boolean. Otherwise False.
"""
return isinstance(val, bool)
def is_integer_array(val):
"""
Checks whether a variable is a numpy integer array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy integer array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.integer)
def is_float_array(val):
"""
Checks whether a variable is a numpy float array.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a numpy float array. Otherwise False.
"""
return is_np_array(val) and issubclass(val.dtype.type, np.floating)
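# Quick sketch of the array checks above (illustrative):
#   is_np_array([0, 1, 2])                         -> False (plain list)
#   is_integer_array(np.zeros(3, dtype=np.int32))  -> True
#   is_float_array(np.zeros(3, dtype=np.float64))  -> True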
def is_callable(val):
"""
Checks whether a variable is a callable, e.g. a function.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a callable. Otherwise False.
"""
# python 3.x with x <= 2 does not support callable(), apparently
if sys.version_info[0] == 3 and sys.version_info[1] <= 2:
return hasattr(val, '__call__')
else:
return callable(val)
def is_generator(val):
"""
Checks whether a variable is a generator.
Parameters
----------
val
The variable to check.
Returns
-------
bool
True if the variable is a generator. Otherwise False.
"""
return isinstance(val, types.GeneratorType)
def caller_name():
"""
Returns the name of the caller, e.g. a function.
Returns
-------
str
The name of the caller as a string
"""
return sys._getframe(1).f_code.co_name
def seed(seedval):
"""
Set the seed used by the global random state and thereby all randomness
in the library.
This random state is used by default by all augmenters. Under special
circumstances (e.g. when an augmenter is switched to deterministic mode),
the global random state is replaced by another -- local -- one.
The replacement is dependent on the global random state.
Parameters
----------
seedval : int
The seed to use.
"""
CURRENT_RANDOM_STATE.seed(seedval)
def current_random_state():
"""
Returns the current/global random state of the library.
Returns
-------
numpy.random.RandomState
The current/global random state.
"""
return CURRENT_RANDOM_STATE
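# Usage sketch (illustrative): reseeding affects everything drawn from the
# global state afterwards.
#   seed(1234)
#   rs = current_random_state()   # the module-level RandomState instance
#   rs.randint(0, 10)             # reproducible across runs with the same seed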
def new_random_state(seed=None, fully_random=False):
"""
Returns a new random state.
Parameters
----------
seed : None or int, optional
Optional seed value to use.
The same datatypes are allowed as for ``numpy.random.RandomState(seed)``.
fully_random : bool, optional
Whether to use numpy's random initialization for the
RandomState (used if set to True). If False, a seed is sampled from
the global random state, which is a bit faster and hence the default.
Returns
-------
numpy.random.RandomState
The new random state.
"""
if seed is None:
if not fully_random:
# sample manually a seed instead of just RandomState(),
# because the latter one
# is way slower.
seed = CURRENT_RANDOM_STATE.randint(SEED_MIN_VALUE, SEED_MAX_VALUE, 1)[0]
return
|
np.random.RandomState(seed)
|
numpy.random.RandomState
|
"""
Copyright ©2019. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
Author: <NAME>
"""
import os
import skimage
import numpy as np
from mrcnn.utils import Dataset
"""
ImageDataset creates a Matterport dataset for a directory of
images in order to ensure compatibility with benchmarking tools
and image resizing for networks.
Directory structure must be as follows:
$base_path/
test_indices.npy
train_indices.npy
images/ (Train/Test Images here)
image_000000.png
image_000001.png
...
segmasks/ (GT segmasks here, one channel)
image_000000.png
image_000001.png
...
"""
class ImageDataset(Dataset):
def __init__(self, config):
assert config['dataset']['path'] != "", "You must provide the path to a dataset!"
self.dataset_config = config['dataset']
self.base_path = config['dataset']['path']
self.images = config['dataset']['images']
self.masks = config['dataset']['masks']
self._channels = config['model']['settings']['image_channel_count']
super().__init__()
def load(self, indices_file, augment=False):
# Load the indices for imset.
split_file = os.path.join(self.base_path, '{:s}'.format(indices_file))
self.image_id = np.load(split_file)
self.add_class('clutter', 1, 'fg')
flips = [1, 2, 3]
for i in self.image_id:
if 'numpy' in self.images:
p = os.path.join(self.base_path, self.images,
'image_{:06d}.npy'.format(i))
else:
p = os.path.join(self.base_path, self.images,
'image_{:06d}.png'.format(i))
self.add_image('clutter', image_id=i, path=p)
if augment:
for flip in flips:
self.add_image('clutter', image_id=i, path=p, flip=flip)
def flip(self, image, flip):
# flips during training for augmentation
if flip == 1:
image = image[::-1,:,:]
elif flip == 2:
image = image[:,::-1,:]
elif flip == 3:
image = image[::-1,::-1,:]
return image
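# Flip-code sketch (as implemented above): 1 flips along the first axis
# (vertical flip), 2 along the second (horizontal mirror), 3 applies both:
#   flipped = dataset.flip(image, 2)   # mirror left-right, for a dataset instance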
def load_image(self, image_id):
# loads image from path
if 'numpy' in self.images:
image = np.load(self.image_info[image_id]['path']).squeeze()
else:
image = skimage.io.imread(self.image_info[image_id]['path'])
if self._channels < 4 and image.shape[-1] == 4 and image.ndim == 3:
image = image[...,:3]
if self._channels == 1 and image.ndim == 2:
image = image[:,:,np.newaxis]
elif self._channels == 1 and image.ndim == 3:
image = image[:,:,0,np.newaxis]
elif self._channels == 3 and image.ndim == 3 and image.shape[-1] == 1:
image = skimage.color.gray2rgb(image)
elif self._channels == 4 and image.shape[-1] == 3:
concat_image =
|
np.concatenate([image, image[:,:,0:1]], axis=2)
|
numpy.concatenate
|
# -*- coding: utf-8 -*-
"""Test for updater.information module"""
import pytest
import numpy as np
from stonesoup.models.measurement.linear import LinearGaussian
from stonesoup.types.detection import Detection
from stonesoup.types.hypothesis import SingleHypothesis
from stonesoup.types.prediction import GaussianStatePrediction, InformationStatePrediction
from stonesoup.updater.kalman import KalmanUpdater
from stonesoup.updater.information import InformationKalmanUpdater
@pytest.mark.parametrize(
"UpdaterClass, measurement_model, prediction, measurement",
[
( # Standard Information filter
InformationKalmanUpdater,
LinearGaussian(ndim_state=2, mapping=[0],
noise_covar=
|
np.array([[0.04]])
|
numpy.array
|
import numpy as np
import pandas as pd
import pytest
from preprocessy.exceptions import ArgumentsError
from preprocessy.pipelines import Pipeline
from preprocessy.pipelines.config import save_config
from preprocessy.utils import num_of_samples
def custom_read(params):
params["df"] = pd.read_csv(params["df_path"])
params["df_copy"] = params["df"].copy()
def times_two(params):
params["df"][params["col_1"]] *= 2
def squared(params):
params["df"][params["col_2"]] **= 2
def split(params):
n_samples = num_of_samples(params["df"])
params["X_test"] = params["df"].iloc[
: int(params["test_size"] * n_samples)
]
params["X_train"] = params["df"].iloc[
int(params["test_size"] * n_samples) :
]
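# Step contract sketch (inferred from the tests below): each pipeline step is a
# plain function that receives the shared `params` dict and mutates it in
# place; steps communicate purely through the keys of that dict.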
class TestBasePipeline:
def test_pipeline_arguments(self):
with pytest.raises(ArgumentsError):
Pipeline()
with pytest.raises(ArgumentsError):
Pipeline(steps=[custom_read, times_two, squared, split])
with pytest.raises(TypeError):
Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[custom_read, "times_two", squared, split],
params=["hello"],
)
with pytest.raises(TypeError):
Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[custom_read, times_two, squared, split],
params=["hello"],
)
with pytest.raises(TypeError):
Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[times_two, squared, split],
params={"col_1": "A"},
custom_reader="custom_read",
)
def test_pipeline_with_default_reader(self):
df = pd.DataFrame({"A": np.arange(1, 100), "B": np.arange(1, 100)})
_ = df.to_csv("./datasets/configs/dataset.csv", index=False)
params = {
"col_1": "A",
"col_2": "B",
"test_size": 0.2,
}
pipeline = Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[times_two, squared, split],
params=params,
)
pipeline.process()
assert "df" in pipeline.params.keys()
assert "summary" in pipeline.params.keys()
assert "stats" in pipeline.params.keys()
def test_pipeline_with_custom_reader(self):
df = pd.DataFrame({"A": np.arange(1, 100), "B": np.arange(1, 100)})
_ = df.to_csv("./datasets/configs/dataset.csv", index=False)
params = {
"col_1": "A",
"col_2": "B",
"test_size": 0.2,
"df": "./datasets/configs/dataset.csv",
}
pipeline = Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[times_two, squared, split],
params=params,
custom_reader=custom_read,
)
pipeline.process()
assert (
pipeline.params["df"].loc[69, "A"]
== pipeline.params["df_copy"].loc[69, "A"] * 2
)
assert (
pipeline.params["df"].loc[42, "B"]
== pipeline.params["df_copy"].loc[42, "B"] ** 2
)
assert len(pipeline.params["X_train"]) == 80
def test_add(self):
df = pd.DataFrame({"A": np.arange(1, 100), "B": np.arange(1, 100)})
_ = df.to_csv("./datasets/configs/dataset.csv", index=False)
params = {
"col_1": "A",
"test_size": 0.2,
}
pipeline = Pipeline(
df_path="./datasets/configs/dataset.csv",
steps=[times_two, split],
params=params,
)
pipeline.process()
assert pipeline.params["df"].loc[42, "A"] == df.loc[42, "A"] * 2
pipeline.add(
squared,
{
"col_2": "A",
},
before="times_two",
)
pipeline.process()
num_0 = pipeline.params["df"].loc[42, "A"]
num_1 = df.loc[42, "A"]
assert num_0 == (num_1 ** 2) * 2
pipeline.remove("squared")
pipeline.add(squared, {"col_2": "A"}, after="read_file")
pipeline.process()
num_0 = pipeline.params["df"].loc[42, "A"]
num_1 = df.loc[42, "A"]
assert num_0 == (num_1 ** 2) * 2
def test_remove(self):
df = pd.DataFrame({"A":
|
np.arange(1, 100)
|
numpy.arange
|
"""
=======================================================
:mod:`go_benchmark` -- Benchmark optimization functions
=======================================================
This module provides a set of benchmark problems for global optimization.
.. Copyright 2013 <NAME>
.. module:: go_benchmark
.. moduleauthor:: <NAME> <<EMAIL>>
.. modifiedby:: <NAME> <<EMAIL>> 2016
"""
# Array math module implemented in C / C++
import numpy
# Optimized mathematical functions
from numpy import abs, arctan2, cos, dot, exp, floor, inf, log, log10, pi, prod, sin, sqrt, sum, tan, tanh
# Array functions
from numpy import arange, asarray, atleast_1d, ones, roll, seterr, sign, where, zeros, zeros_like
from numpy.random import uniform
from math import factorial
# Tell numpy to ignore errors
seterr(all='ignore')
# -------------------------------------------------------------------------------- #
class Benchmark(object):
"""
Defines a global optimization benchmark problem.
This abstract class defines the basic structure of a global
optimization problem. Subclasses should implement the ``evaluator`` method
for a particular optimization problem.
Public Attributes:
- *dimensions* -- the number of inputs to the problem
- *fun_evals* -- stores the number of function evaluations, as some crappy
optimization frameworks (e.g., `nlopt`) do not return this value
- *change_dimensionality* -- whether we can change the benchmark function `x`
variable length (i.e., the dimensionality of the problem)
- *custom_bounds* -- a set of lower/upper bounds for plot purposes (if needed).
- *spacing* -- the spacing to use to generate evenly spaced samples across the
lower/upper bounds on the variables, for plotting purposes
"""
def __init__(self, dimensions):
self.dimensions = dimensions
self.fun_evals = 0
self.change_dimensionality = False
self.custom_bounds = None
self.record = [] # A record of objective values, one per evaluation
if dimensions == 1:
self.spacing = 1001
else:
self.spacing = 201
def __str__(self):
return "%s (%i dimensions)"%(self.__class__.__name__, self.dimensions)
def __repr__(self):
return self.__class__.__name__
def generator(self):
"""The generator function for the benchmark problem."""
return [uniform(l, u) for l, u in self.bounds]
def evaluator(self, candidates):
"""The evaluator function for the benchmark problem."""
raise NotImplementedError
def set_dimensions(self, ndim):
self.dimensions = ndim
def lower_bounds_constraints(self, x):
lower = asarray([b[0] for b in self.bounds])
return asarray(x) - lower
def upper_bounds_constraints(self, x):
upper = asarray([b[1] for b in self.bounds])
return upper - asarray(x)
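# Minimal driver sketch (illustrative; uses only the interface defined above):
#   bm = Ackley(dimensions=2)
#   x0 = asarray(bm.generator())   # a random point within bm.bounds
#   f0 = bm.evaluator(x0)          # objective value; increments bm.fun_evals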
#-----------------------------------------------------------------------
# SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
class Ackley(Benchmark):
"""
Ackley test objective function.
This class defines the Ackley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ackley}}(\\mathbf{x}) = -20e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{ \\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-32, 32]` for :math:`i=1,...,n`.
.. figure:: figures/Ackley.png
:alt: Ackley function
:align: center
**Two-dimensional Ackley function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-32.0] * self.dimensions,
[ 32.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0; b = 0.2; c = 2.0*pi
return -a*exp(-b*sqrt(1./self.dimensions*sum(x**2)))-exp(1./self.dimensions*sum(cos(c*x)))+a+exp(1.)
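# Sanity-check sketch: at the origin the terms cancel (-a - e + a + e), so
# e.g. Ackley(2).evaluator(zeros(2)) returns 0 up to float rounding, the
# global minimum stated above.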
#-----------------------------------------------------------------------
class Adjiman(Benchmark):
"""
Adjiman test objective function.
This class defines the Adjiman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Adjiman}}(\\mathbf{x}) = \\cos(x_1)\\sin(x_2) - \\frac{x_1}{(x_2^2 + 1)}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-1, 2]` and :math:`x_2 \\in [-1, 1]`.
.. figure:: figures/Adjiman.png
:alt: Adjiman function
:align: center
**Two-dimensional Adjiman function**
*Global optimum*: :math:`f(x_i) = -2.02181` for :math:`\\mathbf{x} = [2, 0.10578]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = [(-1.0, 2.0), (-1.0, 1.0)]
self.global_optimum = [2.0, 0.10578]
self.fglob = -2.02180678
self.change_dimensionality = False
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return cos(x1)*sin(x2) - x1/(x2**2.0 + 1)
# -------------------------------------------------------------------------------- #
class Alpine01(Benchmark):
"""
Alpine 1 test objective function.
This class defines the Alpine 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine01}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\lvert {x_i \\sin \\left( x_i \\right) + 0.1 x_i} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine01.png
:alt: Alpine 1 function
:align: center
**Two-dimensional Alpine 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x*sin(x) + 0.1*x))
# -------------------------------------------------------------------------------- #
class Alpine02(Benchmark):
"""
Alpine 2 test objective function.
This class defines the Alpine 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Alpine02}}(\\mathbf{x}) = \\prod_{i=1}^{n} \\sqrt{x_i} \\sin(x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Alpine02.png
:alt: Alpine 2 function
:align: center
**Two-dimensional Alpine 2 function**
*Global optimum*: :math:`f(x_i) = -6.1295` for :math:`x_i = 7.917` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [7.91705268, 4.81584232]
self.fglob = -6.12950
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return prod(sqrt(x)*sin(x))
# -------------------------------------------------------------------------------- #
class AMGM(Benchmark):
"""
AMGM test objective function.
This class defines the Arithmetic Mean - Geometric Mean Equality global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{AMGM}}(\\mathbf{x}) = \\left ( \\frac{1}{n} \\sum_{i=1}^{n} x_i - \\sqrt[n]{ \\prod_{i=1}^{n} x_i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/AMGM.png
:alt: AMGM function
:align: center
**Two-dimensional Arithmetic Mean - Geometric Mean Equality function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_1 = x_2 = ... = x_n` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [1, 1]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
f1 = sum(x)
f2 = prod(x)
f1 = f1/n
f2 = f2**(1.0/n)
return (f1 - f2)**2
# -------------------------------------------------------------------------------- #
class BartelsConn(Benchmark):
"""
Bartels-Conn test objective function.
This class defines the Bartels-Conn global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BartelsConn}}(\\mathbf{x}) = \\lvert {x_1^2 + x_2^2 + x_1x_2} \\rvert + \\lvert {\\sin(x_1)} \\rvert + \\lvert {\\cos(x_2)} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/BartelsConn.png
:alt: Bartels-Conn function
:align: center
**Two-dimensional Bartels-Conn function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return abs(x1**2.0 + x2**2.0 + x1*x2) + abs(sin(x1)) + abs(cos(x2))
# -------------------------------------------------------------------------------- #
class Beale(Benchmark):
"""
Beale test objective function.
This class defines the Beale global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Beale}}(\\mathbf{x}) = \\left(x_1 x_2 - x_1 + 1.5\\right)^{2} + \\left(x_1 x_2^{2} - x_1 + 2.25\\right)^{2} + \\left(x_1 x_2^{3} - x_1 + 2.625\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Beale.png
:alt: Beale function
:align: center
**Two-dimensional Beale function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 0.5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-4.5] * self.dimensions,
[ 4.5] * self.dimensions))
self.global_optimum = [3.0, 0.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.5 - x[0] + x[0]*x[1])**2 + (2.25 - x[0] + x[0]*x[1]**2)**2 + (2.625 - x[0] + x[0]*x[1]**3)**2
# -------------------------------------------------------------------------------- #
class Bird(Benchmark):
"""
Bird test objective function.
This class defines the Bird global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bird}}(\\mathbf{x}) = \\left(x_1 - x_2\\right)^{2} + e^{\\left[1 - \\sin\\left(x_1\\right) \\right]^{2}} \\cos\\left(x_2\\right) + e^{\\left[1 - \\cos\\left(x_2\\right)\\right]^{2}} \\sin\\left(x_1\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2\\pi, 2\\pi]` for :math:`i=1,2`.
.. figure:: figures/Bird.png
:alt: Bird function
:align: center
**Two-dimensional Bird function**
*Global optimum*: :math:`f(x_i) = -106.7645367198034` for :math:`\\mathbf{x} = [4.701055751981055 , 3.152946019601391]` or
:math:`\\mathbf{x} = [-1.582142172055011, -3.130246799635430]`
"""
def __init__(self):
Benchmark.__init__(self, 2)
self.bounds = list(zip([-2.0*pi] * self.dimensions,
[ 2.0*pi] * self.dimensions))
self.global_optimum = ([4.701055751981055 , 3.152946019601391],
[-1.582142172055011, -3.130246799635430])
self.fglob = -106.7645367198034
def evaluator(self, x, *args):
self.fun_evals += 1
return sin(x[0])*exp((1-cos(x[1]))**2) + cos(x[1])*exp((1-sin(x[0]))**2) + (x[0]-x[1])**2
# -------------------------------------------------------------------------------- #
class Bohachevsky(Benchmark):
"""
Bohachevsky test objective function.
This class defines the Bohachevsky global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bohachevsky}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2x_{i+1}^2 - 0.3\\cos(3\\pi x_i) - 0.4\\cos(4\\pi x_{i+1}) + 0.7\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-15, 15]` for :math:`i=1,...,n`.
.. figure:: figures/Bohachevsky.png
:alt: Bohachevsky function
:align: center
**Two-dimensional Bohachevsky function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-15.0] * self.dimensions,
[ 15.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = roll(x,-1)[:-1]
return sum(x0**2 + 2*x1**2 - 0.3 * cos(3*pi*x0) - 0.4 * cos(4*pi*x1) + 0.7)
# -------------------------------------------------------------------------------- #
class BoxBetts(Benchmark):
"""
BoxBetts test objective function.
This class defines the Box-Betts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{BoxBetts}}(\\mathbf{x}) = \\sum_{i=1}^k g_i(\\mathbf{x})^2
Where, in this exercise:
.. math:: g_i(\\mathbf{x}) = e^{-0.1 i x_1} - e^{-0.1 i x_2} - \\left[e^{-0.1 i} - e^{-i}\\right] x_3
And :math:`k = 10`.
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0.9, 1.2], x_2 \\in [9, 11.2], x_3 \\in [0.9, 1.2]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 10, 1]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.9, 1.2], [9.0, 11.2], [0.9, 1.2])
self.global_optimum = [1.0, 10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(1, 11):
y += (exp(-0.1*i*x[0]) - exp(-0.1*i*x[1]) - (exp(-0.1*i) - exp(-1.0*i))*x[2])**2.0
return y
# -------------------------------------------------------------------------------- #
class Branin01(Benchmark):
"""
Branin 1 test objective function.
This class defines the Branin 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin01}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5 \\frac{x_1}{\\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-5, 10], x_2 \\in [0, 15]`
.. figure:: figures/Branin01.png
:alt: Branin 1 function
:align: center
**Two-dimensional Branin 1 function**
*Global optimum*: :math:`f(x_i) = 0.39788735772973816` for :math:`\\mathbf{x} = [-\\pi, 12.275]` or
:math:`\\mathbf{x} = [\\pi, 2.275]` or :math:`\\mathbf{x} = [9.42478, 2.475]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5., 10.), (0., 15.)]
self.global_optimum = [(-pi, 12.275), (pi, 2.275), (9.42478, 2.475)]
self.fglob = 0.39788735772973816
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])+10
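# Spot-check sketch: at x = (pi, 2.275) the squared term vanishes and the
# remainder is 10*(1 - 1/(8*pi))*cos(pi) + 10 = 0.39788735772973816, matching
# fglob above.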
# -------------------------------------------------------------------------------- #
class Branin02(Benchmark):
"""
Branin 2 test objective function.
This class defines the Branin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Branin02}}(\\mathbf{x}) = \\left(- 1.275 \\frac{x_1^{2}}{\\pi^{2}} + 5 \\frac{x_1}{\\pi} + x_2 -6\\right)^{2} + \\left(10 - \\frac{5}{4 \\pi} \\right) \\cos\\left(x_1\\right) \\cos\\left(x_2\\right) + \\log(x_1^2+x_2^2 +1) + 10
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 15]` for :math:`i=1,2`.
.. figure:: figures/Branin02.png
:alt: Branin 2 function
:align: center
**Two-dimensional Branin 2 function**
*Global optimum*: :math:`f(x_i) = 5.559037` for :math:`\\mathbf{x} = [-3.2, 12.53]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-5.0, 15.0), (-5.0, 15.0)]
self.global_optimum = [-3.2, 12.53]
self.fglob = 5.559037
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[1]-(5.1/(4*pi**2))*x[0]**2+5*x[0]/pi-6)**2+10*(1-1/(8*pi))*cos(x[0])*cos(x[1])+log(x[0]**2.0+x[1]**2.0+1.0)+10
# -------------------------------------------------------------------------------- #
class Brent(Benchmark):
"""
Brent test objective function.
This class defines the Brent global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brent}}(\\mathbf{x}) = (x_1 + 10)^2 + (x_2 + 10)^2 + e^{(-x_1^2-x_2^2)}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Brent.png
:alt: Brent function
:align: center
**Two-dimensional Brent function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-10, 2), (-10, 2)]
self.global_optimum = [-10.0, -10.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] + 10.0)**2.0 + (x[1] + 10.0)**2.0 + exp(-x[0]**2.0 - x[1]**2.0)
# -------------------------------------------------------------------------------- #
class Brown(Benchmark):
"""
Brown test objective function.
This class defines the Brown global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Brown}}(\\mathbf{x}) = \\sum_{i=1}^{n-1}\\left[ \\left(x_i^2\\right)^{x_{i+1}^2+1} + \\left(x_{i+1}^2\\right)^{x_i^2+1} \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 4]` for :math:`i=1,...,n`.
.. figure:: figures/Brown.png
:alt: Brown function
:align: center
**Two-dimensional Brown function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 4.0] * self.dimensions))
self.custom_bounds = [(-1.0, 1.0), (-1.0, 1.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = x[1:]
return sum((x0**2.0)**(x1**2.0 + 1.0) + (x1**2.0)**(x0**2.0 + 1.0))
# -------------------------------------------------------------------------------- #
class Bukin02(Benchmark):
"""
Bukin 2 test objective function.
This class defines the Bukin 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin02}}(\\mathbf{x}) = 100 \\left(x_2 - 0.01 x_1^2 + 1\\right)^2 + 0.01 \\left(x_1 + 10\\right)^2
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin02.png
:alt: Bukin 2 function
:align: center
**Two-dimensional Bukin 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
# squared residual term, so the minimum is 0 at (-10, 0), consistent with
# the recorded global_optimum and fglob above
return 100*(x[1] - 0.01*x[0]**2 + 1.0)**2.0 + 0.01*(x[0] + 10.0)**2.0
# -------------------------------------------------------------------------------- #
class Bukin04(Benchmark):
"""
Bukin 4 test objective function.
This class defines the Bukin 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin04}}(\\mathbf{x}) = 100 x_2^{2} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin04.png
:alt: Bukin 4 function
:align: center
**Two-dimensional Bukin 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*x[1]**2 + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class Bukin06(Benchmark):
"""
Bukin 6 test objective function.
This class defines the Bukin 6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Bukin06}}(\\mathbf{x}) = 100 \\sqrt{ \\lvert{x_2 - 0.01 x_1^{2}} \\rvert} + 0.01 \\lvert{x_1 + 10} \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-15, -5], x_2 \\in [-3, 3]`
.. figure:: figures/Bukin06.png
:alt: Bukin 6 function
:align: center
**Two-dimensional Bukin 6 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-10, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-15.0, -5.0), (-3.0, 3.0)]
self.global_optimum = [-10.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*sqrt(abs(x[1] - 0.01*x[0]**2)) + 0.01*abs(x[0] + 10)
# -------------------------------------------------------------------------------- #
class CarromTable(Benchmark):
"""
CarromTable test objective function.
This class defines the CarromTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CarromTable}}(\\mathbf{x}) = - \\frac{1}{30} e^{2 \\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\pi}}\\right|} \\cos^{2}\\left(x_{1}\\right) \\cos^{2}\\left(x_{2}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CarromTable.png
:alt: CarromTable function
:align: center
**Two-dimensional CarromTable function**
*Global optimum*: :math:`f(x_i) = -24.15681551650653` for :math:`x_i = \\pm 9.646157266348881` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(9.646157266348881 , 9.646134286497169),
(-9.646157266348881, 9.646134286497169),
(9.646157266348881 , -9.646134286497169),
(-9.646157266348881, -9.646134286497169)]
self.fglob = -24.15681551650653
def evaluator(self, x, *args):
self.fun_evals += 1
return -((cos(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))**2)/30
# -------------------------------------------------------------------------------- #
class Chichinadze(Benchmark):
"""
Chichinadze test objective function.
This class defines the Chichinadze global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Chichinadze}}(\\mathbf{x}) = x_{1}^{2} - 12 x_{1} + 8 \\sin\\left(\\frac{5}{2} \\pi x_{1}\\right) + 10 \\cos\\left(\\frac{1}{2} \\pi x_{1}\\right) + 11 - 0.2 \\frac{\\sqrt{5}}{e^{\\frac{1}{2} \\left(x_{2} -0.5\\right)^{2}}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-30, 30]` for :math:`i=1,2`.
.. figure:: figures/Chichinadze.png
:alt: Chichinadze function
:align: center
**Two-dimensional Chichinadze function**
*Global optimum*: :math:`f(x_i) = -42.94438701899098` for :math:`\\mathbf{x} = [6.189866586965680, 0.5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-30.0] * self.dimensions,
[ 30.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [6.189866586965680, 0.5]
self.fglob = -42.94438701899098
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 - 12*x[0] + 11 + 10*cos(pi*x[0]/2) + 8*sin(5*pi*x[0]/2) - 1.0/sqrt(5)*exp(-((x[1] - 0.5)**2)/2)
# -------------------------------------------------------------------------------- #
class Cigar(Benchmark):
"""
Cigar test objective function.
This class defines the Cigar global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Cigar}}(\\mathbf{x}) = x_1^2 + 10^6\\sum_{i=2}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Cigar.png
:alt: Cigar function
:align: center
**Two-dimensional Cigar function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**2 + 1e6*sum(x[1:]**2)
# -------------------------------------------------------------------------------- #
class Cola(Benchmark):
"""
Cola test objective function.
This class defines the Cola global optimization problem. The 17-dimensional function computes
indirectly the formula :math:`f(n, u)` by setting :math:`x_0 = y_0, x_1 = u_0, x_i = u_{2(i−2)}, y_i = u_{2(i−2)+1}` :
.. math::
f_{\\text{Cola}}(\\mathbf{x}) = \\sum_{i<j}^{n} \\left (r_{i,j} - d_{i,j} \\right )^2
Where :math:`r_{i,j}` is given by:
.. math::
r_{i,j} = \\sqrt{(x_i - x_j)^2 + (y_i - y_j)^2}
And :math:`d` is a symmetric matrix given by:
.. math::
\\mathbf{d} = \\left [ d_{ij} \\right ] = \\begin{pmatrix}
1.27 & & & & & & & & \\\\
1.69 & 1.43 & & & & & & & \\\\
2.04 & 2.35 & 2.43 & & & & & & \\\\
3.09 & 3.18 & 3.26 & 2.85 & & & & & \\\\
3.20 & 3.22 & 3.27 & 2.88 & 1.55 & & & & \\\\
2.86 & 2.56 & 2.58 & 2.59 & 3.12 & 3.06 & & & \\\\
3.17 & 3.18 & 3.18 & 3.12 & 1.31 & 1.64 & 3.00 & \\\\
3.21 & 3.18 & 3.18 & 3.17 & 1.70 & 1.36 & 2.95 & 1.32 & \\\\
2.38 & 2.31 & 2.42 & 1.94 & 2.85 & 2.81 & 2.56 & 2.91 & 2.97
\\end{pmatrix}
This function has bounds :math:`0 \\leq x_0 \\leq 4` and :math:`-4 \\leq x_i \\leq 4` for :math:`i = 1,...,n-1`. It
has a global minimum of 11.7464.
"""
def __init__(self, dimensions=17):
Benchmark.__init__(self, dimensions)
self.bounds = [[0.0, 4.0]] + \
list(zip([-4.0] * (self.dimensions-1),
[ 4.0] * (self.dimensions-1)))
self.global_optimum = [0.651906, 1.30194, 0.099242, -0.883791,
-0.8796, 0.204651, -3.28414, 0.851188,
-3.46245, 2.53245, -0.895246, 1.40992,
-3.07367, 1.96257, -2.97872, -0.807849,
-1.68978]
self.fglob = 11.7464
def evaluator(self, x, *args):
self.fun_evals += 1
d = asarray([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[1.27, 0, 0, 0, 0, 0, 0, 0, 0],
[1.69, 1.43, 0, 0, 0, 0, 0, 0, 0],
[2.04, 2.35, 2.43, 0, 0, 0, 0, 0, 0],
[3.09, 3.18, 3.26, 2.85, 0, 0, 0, 0, 0],
[3.20, 3.22, 3.27, 2.88, 1.55, 0, 0, 0, 0],
[2.86, 2.56, 2.58, 2.59, 3.12, 3.06, 0, 0, 0],
[3.17, 3.18, 3.18, 3.12, 1.31, 1.64, 3.00, 0, 0],
[3.21, 3.18, 3.18, 3.17, 1.70, 1.36, 2.95, 1.32, 0],
[2.38, 2.31, 2.42, 1.94, 2.85, 2.81, 2.56, 2.91, 2.97]])
# WARNING: This doesn't seem to follow guidelines above...
x1 = asarray([0.0, x[0]] + list(x[1::2]))
x2 = asarray([0.0, 0.0] + list(x[2::2]))
y = 0.0
for i in range(1, len(x1)):
y += sum((sqrt((x1[i] - x1[0:i])**2.0 +
(x2[i] - x2[0:i])**2.0)
- d[i, 0:i])**2.0)
return y
# -------------------------------------------------------------------------------- #
class Colville(Benchmark):
"""
Colville test objective function.
This class defines the Colville global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Colville}}(\\mathbf{x}) = \\left(x_{1} -1\\right)^{2} + 100 \\left(x_{1}^{2} - x_{2}\\right)^{2} + 10.1 \\left(x_{2} -1\\right)^{2} + \\left(x_{3} -1\\right)^{2} + 90 \\left(x_{3}^{2} - x_{4}\\right)^{2} + 10.1 \\left(x_{4} -1\\right)^{2} + 19.8 \\frac{x_{4} -1}{x_{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[0]**2-x[1])**2+(x[0]-1)**2+(x[2]-1)**2+90*(x[2]**2-x[3])**2+ 10.1*((x[1]-1)**2+(x[3]-1)**2)+19.8*(1/x[1])*(x[3]-1)
# -------------------------------------------------------------------------------- #
class Corana(Benchmark):
"""
Corana test objective function.
This class defines the Corana global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Corana}}(\\mathbf{x}) = \\begin{cases} \\sum_{i=1}^n 0.15 d_i [z_i - 0.05\\textrm{sgn}(z_i)]^2 & \\textrm{if}|x_i-z_i| < 0.05 \\\\
d_ix_i^2 & \\textrm{otherwise}\\end{cases}
Where, in this exercise:
.. math::
z_i = 0.2 \\lfloor |x_i/s_i|+0.49999\\rfloor\\textrm{sgn}(x_i), d_i=(1,1000,10,100, ...)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
d = [1., 1000., 10., 100.]
r = 0
for j in range(4):
zj = floor(abs(x[j]/0.2) + 0.49999)*sign(x[j]) * 0.2
if abs(x[j]-zj) < 0.05:
r += 0.15 * ((zj - 0.05*sign(zj))**2) * d[j]
else:
r += d[j] * x[j] * x[j]
return r
# -------------------------------------------------------------------------------- #
class CosineMixture(Benchmark):
"""
Cosine Mixture test objective function.
This class defines the Cosine Mixture global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CosineMixture}}(\\mathbf{x}) = -0.1 \\sum_{i=1}^n \\cos(5 \\pi x_i) + \\sum_{i=1}^n x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/CosineMixture.png
:alt: Cosine Mixture function
:align: center
**Two-dimensional Cosine Mixture function**
*Global optimum*: :math:`f(x_i) = -0.1N` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.change_dimensionality = True
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -0.1*self.dimensions
def evaluator(self, x, *args):
self.fun_evals += 1
# the quadratic penalty is added (not subtracted), so the minimum is
# -0.1*N at the origin, consistent with fglob above
return -0.1*sum(cos(5.0*pi*x)) + sum(x**2.0)
# -------------------------------------------------------------------------------- #
class CrossInTray(Benchmark):
"""
Cross-in-Tray test objective function.
This class defines the Cross-in-Tray global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossInTray}}(\\mathbf{x}) = - 0.0001 \\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrossInTray.png
:alt: Cross-in-Tray function
:align: center
**Two-dimensional Cross-in-Tray function**
*Global optimum*: :math:`f(x_i) = -2.062611870822739` for :math:`x_i = \\pm 1.349406608602084` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(1.349406685353340 , 1.349406608602084),
(-1.349406685353340, 1.349406608602084),
(1.349406685353340, -1.349406608602084),
(-1.349406685353340, -1.349406608602084)]
self.fglob = -2.062611870822739
def evaluator(self, x, *args):
self.fun_evals += 1
return -0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
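# Illustrative sanity check (not part of the original suite; the helper name
# is ours): the function is symmetric under sign flips of either coordinate,
# so all four listed optima should reproduce fglob.
def _check_cross_in_tray():
    import numpy
    fn = CrossInTray()
    for pt in fn.global_optimum:
        assert abs(fn.evaluator(numpy.asarray(pt)) - fn.fglob) < 1e-7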
# -------------------------------------------------------------------------------- #
class CrossLegTable(Benchmark):
"""
Cross-Leg-Table test objective function.
This class defines the Cross-Leg-Table global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrossLegTable}}(\\mathbf{x}) = - \\frac{1}{\\left(\\left|{e^{\\left|{100 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrossLegTable.png
:alt: Cross-Leg-Table function
:align: center
**Two-dimensional Cross-Leg-Table function**
*Global optimum*: :math:`f(x_i) = -1`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
        # The global minimum is attained anywhere on the planes x1 = 0 or
        # x2 = 0; store one representative point.
        self.global_optimum = [0.0, 2.0]
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
return -(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(-0.1)
# -------------------------------------------------------------------------------- #
class CrownedCross(Benchmark):
"""
Crowned Cross test objective function.
This class defines the Crowned Cross global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{CrownedCross}}(\\mathbf{x}) = 0.0001 \\left(\\left|{e^{\\left|{100- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi}}\\right|} \\sin\\left(x_{1}\\right) \\sin\\left(x_{2}\\right)}\\right| + 1\\right)^{0.1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/CrownedCross.png
:alt: Crowned Cross function
:align: center
**Two-dimensional Crowned Cross function**
*Global optimum*: :math:`f(x_i) = 0.0001`. The global minimum is found on the planes :math:`x_1 = 0` and :math:`x_2 = 0`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0, 0]
self.fglob = 0.0001
def evaluator(self, x, *args):
self.fun_evals += 1
return 0.0001*(abs(sin(x[0])*sin(x[1])*exp(abs(100 - sqrt(x[0]**2 + x[1]**2)/pi))) + 1)**(0.1)
# -------------------------------------------------------------------------------- #
class Csendes(Benchmark):
"""
Csendes test objective function.
This class defines the Csendes global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Csendes}}(\\mathbf{x}) = \\sum_{i=1}^n x_i^6 \\left[ 2 + \\sin \\left( \\frac{1}{x_i} \\right ) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,N`.
.. figure:: figures/Csendes.png
:alt: Csendes function
:align: center
**Two-dimensional Csendes function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 0` for :math:`i=1,...,N`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
        # sin(1/x) is singular at x = 0; the optimum is attained only in the
        # limit, so evaluating at exactly zero yields a division warning/nan.
        return sum((x**6.0)*(2.0 + sin(1.0/x)))
# -------------------------------------------------------------------------------- #
class Cube(Benchmark):
"""
Cube test objective function.
This class defines the Cube global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Cube}}(\\mathbf{x}) = 100(x_2 - x_1^3)^2 + (1 - x_1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,N`.
.. figure:: figures/Cube.png
:alt: Cube function
:align: center
**Two-dimensional Cube function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`\\mathbf{x} = [1, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100.0*(x[1] - x[0]**3.0)**2.0 + (1.0 - x[0])**2.0
# -------------------------------------------------------------------------------- #
class Damavandi(Benchmark):
"""
Damavandi test objective function.
This class defines the Damavandi global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Damavandi}}(\\mathbf{x}) = \\left[ 1 - \\lvert{\\frac{\\sin[\\pi(x_1-2)]\\sin[\\pi(x_2-2)]}{\\pi^2(x_1-2)(x_2-2)}} \\rvert^5 \\right] \\left[2 + (x_1-7)^2 + 2(x_2-7)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 14]` for :math:`i=1,...,n`.
.. figure:: figures/Damavandi.png
:alt: Damavandi function
:align: center
**Two-dimensional Damavandi function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`x_i = 2` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[14.0] * self.dimensions))
self.global_optimum = [2.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
numerator = sin(pi*(x1 - 2.0))*sin(pi*(x2 - 2.0))
        # The quotient is 0/0 at the optimum (2, 2): analytically the ratio
        # tends to 1 there, but evaluating at exactly (2, 2) yields nan.
        denominator = (pi**2)*(x1 - 2.0)*(x2 - 2.0)
        factor1 = 1.0 - (abs(numerator / denominator))**5.0
factor2 = 2 + (x1 - 7.0)**2.0 + 2*(x2 - 7.0)**2.0
return factor1*factor2
# -------------------------------------------------------------------------------- #
class Deb01(Benchmark):
"""
Deb 1 test objective function.
This class defines the Deb 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb01}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6(5 \\pi x_i)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb01.png
:alt: Deb 1 function
:align: center
**Two-dimensional Deb 1 function**
    *Global optimum*: :math:`f(x_i) = -1.0`. The number of global minima is :math:`5^n` that are evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.3, -0.3]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*x)**6.0)
# -------------------------------------------------------------------------------- #
class Deb02(Benchmark):
"""
Deb 2 test objective function.
This class defines the Deb 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deb02}}(\\mathbf{x}) = - \\frac{1}{N} \\sum_{i=1}^n \\sin^6 \\left[ 5 \\pi \\left ( x_i^{3/4} - 0.05 \\right) \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deb02.png
:alt: Deb 2 function
:align: center
**Two-dimensional Deb 2 function**
    *Global optimum*: :math:`f(x_i) = -1.0`. The number of global minima is :math:`5^n` that are evenly spaced
in the function landscape, where :math:`n` represents the dimension of the problem.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.93388314, 0.68141781]
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -(1.0/self.dimensions)*sum(sin(5*pi*(x**0.75 - 0.05))**6.0)
# -------------------------------------------------------------------------------- #
class Decanomial(Benchmark):
"""
Decanomial test objective function.
This class defines the Decanomial function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Decanomial}}(\\mathbf{x}) = 0.001 \\left(\\lvert{x_{2}^{4} + 12 x_{2}^{3} + 54 x_{2}^{2} + 108 x_{2} + 81.0}\\rvert + \\lvert{x_{1}^{10} - 20 x_{1}^{9} + 180 x_{1}^{8} - 960 x_{1}^{7} + 3360 x_{1}^{6} - 8064 x_{1}^{5} + 13340 x_{1}^{4} - 15360 x_{1}^{3} + 11520 x_{1}^{2} - 5120 x_{1} + 2624.0}\\rvert\\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Decanomial.png
:alt: Decanomial function
:align: center
**Two-dimensional Decanomial function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, -3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
        self.custom_bounds = [(0, 2.5), (-4, -2)]  # (min, max) ordering
self.global_optimum = [2.0, -3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
F1 = abs(x[0]**10 - 20*x[0]**9 + 180*x[0]**8 - 960*x[0]**7 + 3360*x[0]**6 - 8064*x[0]**5 + \
13340*x[0]**4 - 15360*x[0]**3 + 11520*x[0]**2 - 5120*x[0] + 2624.0)
F2 = abs(x[1]**4 + 12*x[1]**3 + 54*x[1]**2 + 108*x[1] + 81.0)
return 0.001*(F1 + F2)**2
# -------------------------------------------------------------------------------- #
class Deceptive(Benchmark):
"""
Deceptive test objective function.
This class defines the Deceptive global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Deceptive}}(\\mathbf{x}) = - \\left [\\frac{1}{n} \\sum_{i=1}^{n} g_i(x_i) \\right ]^{\\beta}
Where :math:`\\beta` is a fixed non-linearity factor; in this exercise, :math:`\\beta = 2`. The function :math:`g_i(x_i)`
is given by:
.. math::
        g_i(x_i) = \\begin{cases} - \\frac{x_i}{\\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} 0 \\leq x_i \\leq \\frac{4}{5} \\alpha_i \\\\
        \\frac{5 x_i}{\\alpha_i} - 4 & \\textrm{if} \\hspace{5pt} \\frac{4}{5} \\alpha_i \\le x_i \\leq \\alpha_i \\\\
        \\frac{5(x_i - \\alpha_i)}{\\alpha_i-1} + 1 & \\textrm{if} \\hspace{5pt} \\alpha_i \\le x_i \\leq \\frac{1 + 4\\alpha_i}{5} \\\\
        \\frac{x_i - 1}{1 - \\alpha_i} + \\frac{4}{5} & \\textrm{if} \\hspace{5pt} \\frac{1 + 4\\alpha_i}{5} \\le x_i \\leq 1 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Deceptive.png
:alt: Deceptive function
:align: center
**Two-dimensional Deceptive function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha_i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
n = self.dimensions
self.global_optimum = numpy.arange(1.0, n + 1.0)/(n + 1.0)
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
alpha = numpy.arange(1.0, n + 1.0)/(n + 1.0)
beta = 2.0
g = zeros((n, ))
for i in range(n):
if x[i] <= 0.0:
g[i] = x[i]
elif x[i] < 0.8*alpha[i]:
g[i] = -x[i]/alpha[i] + 0.8
elif x[i] < alpha[i]:
g[i] = 5.0*x[i]/alpha[i] - 4.0
elif x[i] < (1.0 + 4*alpha[i])/5.0:
g[i] = 5.0*(x[i] - alpha[i])/(alpha[i] - 1.0) + 1.0
elif x[i] <= 1.0:
g[i] = (x[i] - 1.0)/(1.0 - alpha[i]) + 4.0/5.0
else:
g[i] = x[i] - 1.0
return -((1.0/n)*sum(g))**beta
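# Illustrative sanity check (not part of the original suite; the helper name
# is ours): at x = alpha every g_i equals exactly 1, so
# f = -((1/n) * n)**beta = -1, matching fglob; the deceptive basins elsewhere
# are what pull searches away from this point.
def _check_deceptive():
    import numpy
    fn = Deceptive()
    x = numpy.asarray(fn.global_optimum, dtype=float)
    assert abs(fn.evaluator(x) - fn.fglob) < 1e-12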
# -------------------------------------------------------------------------------- #
class DeckkersAarts(Benchmark):
"""
Deckkers-Aarts test objective function.
This class defines the Deckkers-Aarts global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeckkersAarts}}(\\mathbf{x}) = 10^5x_1^2 + x_2^2 - (x_1^2 + x_2^2)^2 + 10^{-5}(x_1^2 + x_2^2)^4
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-20, 20]` for :math:`i=1,2`.
.. figure:: figures/DeckkersAarts.png
:alt: DeckkersAarts function
:align: center
**Two-dimensional Deckkers-Aarts function**
    *Global optimum*: :math:`f(x_i) \\approx -24776` for :math:`\\mathbf{x} \\approx [0, \\pm 15]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-20.0] * self.dimensions,
[ 20.0] * self.dimensions))
self.custom_bounds = [(-1, 1), (14, 16)]
self.global_optimum = [0.0, 15.0]
self.fglob = -24776.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 1e5*x1**2.0 + x2**2.0 - (x1**2.0 + x2**2.0)**2.0 + 1e-5*(x1**2.0 + x2**2.0)**4.0
# -------------------------------------------------------------------------------- #
class DeflectedCorrugatedSpring(Benchmark):
"""
DeflectedCorrugatedSpring test objective function.
This class defines the Deflected Corrugated Spring function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{DeflectedCorrugatedSpring}}(\\mathbf{x}) = 0.1\\sum_{i=1}^n (x_i - \\alpha)^2 - \\cos \\left( K \\sqrt {\\sum_{i=1}^n (x_i - \\alpha)^2} \\right )
Where, in this exercise, :math:`K = 5` and :math:`\\alpha = 5`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 2\\alpha]` for :math:`i=1,...,n`.
.. figure:: figures/DeflectedCorrugatedSpring.png
:alt: Deflected Corrugated Spring function
:align: center
**Two-dimensional Deflected Corrugated Spring function**
    *Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = \\alpha` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
alpha = 5.0
self.bounds = list(zip([0] * self.dimensions,
[2*alpha] * self.dimensions))
self.global_optimum = [alpha] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
K, alpha = 5.0, 5.0
return -cos(K*sqrt(sum((x - alpha)**2))) + 0.1*sum((x - alpha)**2)
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser01(Benchmark):
"""
DeVilliers-Glasser 1 test objective function.
This class defines the DeVilliers-Glasser 1 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DeVilliersGlasser01}}(\\mathbf{x}) = \\sum_{i=1}^{24} \\left[ x_1x_2^{t_i} \\sin(x_3t_i + x_4) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 60.137(1.371^{t_i}) \\sin(3.112t_i + 1.761)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 100]` for :math:`i=1,...,n`.
    *Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [60.137, 1.371, 3.112, 1.761]`.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[100.0] * self.dimensions))
self.global_optimum = [60.137, 1.371, 3.112, 1.761]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(24)
y_i = 60.137*(1.371**t_i)*sin(3.112*t_i + 1.761)
x1, x2, x3, x4 = x
return sum((x1*(x2**t_i)*sin(x3*t_i + x4) - y_i)**2.0)
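# Illustrative sanity check (not part of the original suite; the helper name
# is ours): the data y_i are generated by the same model with parameters
# (60.137, 1.371, 3.112, 1.761), so the sum of squared residuals vanishes at
# the optimum.
def _check_de_villiers_glasser01():
    import numpy
    fn = DeVilliersGlasser01()
    x = numpy.asarray(fn.global_optimum)
    assert fn.evaluator(x) < 1e-20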
# -------------------------------------------------------------------------------- #
class DeVilliersGlasser02(Benchmark):
"""
DeVilliers-Glasser 2 test objective function.
This class defines the DeVilliers-Glasser 2 function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{DeVilliersGlasser02}}(\\mathbf{x}) = \\sum_{i=1}^{16} \\left[ x_1x_2^{t_i} \\tanh \\left [x_3t_i + \\sin(x_4t_i) \\right] \\cos(t_ie^{x_5}) - y_i \\right ]^2
Where, in this exercise, :math:`t_i = 0.1(i-1)` and :math:`y_i = 53.81(1.27^{t_i}) \\tanh (3.012t_i + \\sin(2.13t_i)) \\cos(e^{0.507}t_i)`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [1, 60]` for :math:`i=1,...,n`.
    *Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [53.81, 1.27, 3.012, 2.13, 0.507]`.
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 1.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [53.81, 1.27, 3.012, 2.13, 0.507]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
t_i = 0.1*numpy.arange(16)
y_i = 53.81*1.27**t_i*tanh(3.012*t_i + sin(2.13*t_i))*cos(exp(0.507)*t_i)
x1, x2, x3, x4, x5 = x
return sum((x1*(x2**t_i)*tanh(x3*t_i + sin(x4*t_i))*cos(t_i*exp(x5)) - y_i)**2.0)
# -------------------------------------------------------------------------------- #
class DixonPrice(Benchmark):
"""
Dixon and Price test objective function.
This class defines the Dixon and Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{DixonPrice}}(\\mathbf{x}) = (x_1 - 1)^2 + \\sum_{i=2}^n i(2x_i^2 - x_{i-1})^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/DixonPrice.png
:alt: Dixon and Price function
:align: center
**Two-dimensional Dixon and Price function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 2^{- \\frac{(2^i-2)}{2^i}}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 3), (-2, 3)]
self.global_optimum = [2.0**(-(2.0**i-2.0)/2.0**i)
for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(1, self.dimensions):
            # 0-based x[i] is the 1-based variable x_{i+1}, so the documented
            # weight is i + 1; the previous factor i was off by one (the
            # minimum location and value are unaffected).
            s += (i + 1)*(2.0*x[i]**2.0 - x[i-1])**2.0
y = s + (x[0] - 1.0)**2.0
return y
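# Illustrative sanity check (not part of the original suite; the helper name
# is ours): with the closed-form optimum x_i = 2**(-(2**i - 2)/2**i) each
# bracket 2*x_i**2 - x_{i-1} vanishes, so f reduces to (x_1 - 1)**2 = 0 in
# any dimension, regardless of the summation weights.
def _check_dixon_price():
    import numpy
    for n in (2, 3, 5):
        fn = DixonPrice(n)
        x = numpy.asarray(fn.global_optimum, dtype=float)
        assert fn.evaluator(x) < 1e-20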
# -------------------------------------------------------------------------------- #
class Dolan(Benchmark):
"""
Dolan test objective function.
This class defines the Dolan global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Dolan}}(\\mathbf{x}) = \\lvert (x_1 + 1.7x_2)\\sin(x_1) - 1.5x_3 - 0.1x_4\\cos(x_4 + x_5 - x_1) + 0.2x_5^2 - x_2 - 1 \\rvert
    Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,5`.
*Global optimum*: :math:`f(x_i) = 10^{-5}` for :math:`\\mathbf{x} = [8.39045925, 4.81424707, 7.34574133, 68.88246895, 3.85470806]`
"""
def __init__(self, dimensions=5):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [8.39045925, 4.81424707, 7.34574133,
68.88246895, 3.85470806]
self.fglob = 1e-5
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3, x4, x5 = x
return abs((x1 + 1.7*x2)*sin(x1) - 1.5*x3 - 0.1*x4*cos(x4 + x5 - x1) + 0.2*x5**2.0 - x2 - 1.0)
# -------------------------------------------------------------------------------- #
class DropWave(Benchmark):
"""
DropWave test objective function.
This class defines the DropWave global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{DropWave}}(\\mathbf{x}) = - \\frac{1 + \\cos\\left(12 \\sqrt{\\sum_{i=1}^{n} x_i^{2}}\\right)}{2 + 0.5 \\sum_{i=1}^{n} x_i^{2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,2`.
.. figure:: figures/DropWave.png
:alt: DropWave function
:align: center
**Two-dimensional DropWave function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
def evaluator(self, x, *args):
self.fun_evals += 1
norm_x = sum(x**2)
return -(1+cos(12 * sqrt(norm_x)))/(0.5 * norm_x + 2)
# -------------------------------------------------------------------------------- #
class Easom(Benchmark):
"""
Easom test objective function.
This class defines the Easom global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Easom}}(\\mathbf{x}) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)}{n}}
Where, in this exercise, :math:`a = 20, b = 0.2` and :math:`c = 2\\pi`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Easom.png
:alt: Easom function
:align: center
**Two-dimensional Easom function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 20.0
b = 0.2
c = 2*pi
n = self.dimensions
return -a * exp(-b * sqrt(sum(x**2) / n)) - exp(sum(cos(c * x)) / n) + a + exp(1)
# -------------------------------------------------------------------------------- #
class EggCrate(Benchmark):
"""
Egg Crate test objective function.
This class defines the Egg Crate global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggCrate}}(\\mathbf{x}) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/EggCrate.png
:alt: Egg Crate function
:align: center
**Two-dimensional Egg Crate function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return x1**2.0 + x2**2.0 + 25.0*(sin(x1)**2.0 + sin(x2)**2.0)
# -------------------------------------------------------------------------------- #
class EggHolder(Benchmark):
"""
Egg Holder test objective function.
This class defines the Egg Holder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{EggHolder}}(\\mathbf{x}) = - x_{1} \\sin\\left(\\sqrt{\\lvert{x_{1} - x_{2} -47}\\rvert}\\right) - \\left(x_{2} + 47\\right) \\sin\\left(\\sqrt{\\left|{\\frac{1}{2} x_{1} + x_{2} + 47}\\right|}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-512, 512]` for :math:`i=1,2`.
.. figure:: figures/EggHolder.png
:alt: Egg Holder function
:align: center
**Two-dimensional Egg Holder function**
*Global optimum*: :math:`f(x_i) = -959.640662711` for :math:`\\mathbf{x} = [512, 404.2319]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-512.0] * self.dimensions,
[ 512.0] * self.dimensions))
self.global_optimum = [512.0, 404.2319]
self.fglob = -959.640662711
def evaluator(self, x, *args):
self.fun_evals += 1
return -(x[1]+47)*sin(sqrt(abs(x[1]+x[0]/2+47)))-x[0]*sin(sqrt(abs(x[0]-(x[1]+47))))
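# Illustrative sanity check (not part of the original suite; the helper name
# is ours): the optimum is only quoted to four decimals, so compare against
# fglob with a correspondingly loose tolerance.
def _check_egg_holder():
    import numpy
    fn = EggHolder()
    x = numpy.asarray(fn.global_optimum)
    assert abs(fn.evaluator(x) - fn.fglob) < 1e-3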
# -------------------------------------------------------------------------------- #
class ElAttarVidyasagarDutta(Benchmark):
"""
El-Attar-Vidyasagar-Dutta test objective function.
This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{ElAttarVidyasagarDutta}}(\\mathbf{x}) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/ElAttarVidyasagarDutta.png
:alt: El-Attar-Vidyasagar-Dutta function
:align: center
**Two-dimensional El-Attar-Vidyasagar-Dutta function**
*Global optimum*: :math:`f(x_i) = 1.712780354` for :math:`\\mathbf{x} = [3.40918683, -2.17143304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4, 4), (-4, 4)]
self.global_optimum = [3.40918683, -2.17143304]
self.fglob = 1.712780354
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (x1**2.0 + x2 - 10)**2.0 + (x1 + x2**2.0 - 7)**2.0 + (x1**2.0 + x2**3.0 - 1)**2.0
# -------------------------------------------------------------------------------- #
class Exp2(Benchmark):
"""
Exp2 test objective function.
This class defines the Exp2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Exp2}}(\\mathbf{x}) = \\sum_{i=0}^9 \\left ( e^{-ix_1/10} - 5e^{-10ix_2} -e^{-i/10} + 5e^{-i} \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 20]` for :math:`i=1,2`.
.. figure:: figures/Exp2.png
:alt: Exp2 function
:align: center
**Two-dimensional Exp2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = [1, 0.1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[20.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 0.1]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(10):
y += (exp(-i*x[0]/10.0) - 5*exp(-i*x[1]*10) - exp(-i/10.0) + 5*exp(-i))**2.0
return y
# -------------------------------------------------------------------------------- #
class Exponential(Benchmark):
"""
Exponential test objective function.
This class defines the Exponential global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Exponential}}(\\mathbf{x}) = -e^{-0.5 \\sum_{i=1}^n x_i^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Exponential.png
:alt: Exponential function
:align: center
**Two-dimensional Exponential function**
*Global optimum*: :math:`f(x_i) = -1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return -exp(-0.5*sum(x**2.0))
# -------------------------------------------------------------------------------- #
class FreudensteinRoth(Benchmark):
"""
FreudensteinRoth test objective function.
This class defines the Freudenstein & Roth global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{FreudensteinRoth}}(\\mathbf{x}) = \\left\\{x_1 - 13 + \\left[(5 - x_2)x_2 - 2 \\right] x_2 \\right\\}^2 + \\left\\{x_1 - 29 + \\left[(x_2 + 1)x_2 - 14 \\right] x_2 \\right\\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/FreudensteinRoth.png
:alt: FreudensteinRoth function
:align: center
**Two-dimensional FreudensteinRoth function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [5, 4]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-3, 3), (-5, 5)]
self.global_optimum = [5.0, 4.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = (-13.0 + x[0] + ((5.0 - x[1])*x[1] - 2.0)*x[1])**2
f2 = (-29.0 + x[0] + ((x[1] + 1.0)*x[1] - 14.0)*x[1])**2
return f1 + f2
# -------------------------------------------------------------------------------- #
class Gear(Benchmark):
"""
Gear test objective function.
This class defines the Gear global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Gear}}(\\mathbf{x}) = \\left \\{ \\frac{1.0}{6.931} - \\frac{\\lfloor x_1\\rfloor \\lfloor x_2 \\rfloor } {\\lfloor x_3 \\rfloor \\lfloor x_4 \\rfloor } \\right\\}^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [12, 60]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 2.7 \\cdot 10^{-12}` for :math:`\\mathbf{x} = [16, 19, 43, 49]`, where the various
:math:`x_i` may be permuted.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([12.0] * self.dimensions,
[60.0] * self.dimensions))
self.global_optimum = [16, 19, 43, 49]
self.fglob = 2.7e-12
def evaluator(self, x, *args):
self.fun_evals += 1
return (1.0/6.931 - floor(x[0])*floor(x[1])/(floor(x[2])*floor(x[3])))**2
# -------------------------------------------------------------------------------- #
class Giunta(Benchmark):
"""
Giunta test objective function.
This class defines the Giunta global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Giunta}}(\\mathbf{x}) = 0.6 + \\sum_{i=1}^{n} \\left[\\sin^{2}\\left(1 - \\frac{16}{15} x_i\\right) - \\frac{1}{50} \\sin\\left(4 - \\frac{64}{15} x_i\\right) - \\sin\\left(1 - \\frac{16}{15} x_i\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/Giunta.png
:alt: Giunta function
:align: center
**Two-dimensional Giunta function**
*Global optimum*: :math:`f(x_i) = 0.06447042053690566` for :math:`\\mathbf{x} = [0.4673200277395354, 0.4673200169591304]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.4673200277395354, 0.4673200169591304]
self.fglob = 0.06447042053690566
def evaluator(self, x, *args):
self.fun_evals += 1
arg = 16*x/15.0 - 1
return 0.6 + sum(sin(arg) + sin(arg)**2 + sin(4*arg)/50)
# -------------------------------------------------------------------------------- #
class GoldsteinPrice(Benchmark):
"""
Goldstein-Price test objective function.
This class defines the Goldstein-Price global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{GoldsteinPrice}}(\\mathbf{x}) = \\left[ 1+(x_1+x_2+1)^2(19-14x_1+3x_1^2-14x_2+6x_1x_2+3x_2^2) \\right] \\left[ 30+(2x_1-3x_2)^2(18-32x_1+12x_1^2+48x_2-36x_1x_2+27x_2^2) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2, 2]` for :math:`i=1,2`.
.. figure:: figures/GoldsteinPrice.png
:alt: Goldstein-Price function
:align: center
**Two-dimensional Goldstein-Price function**
*Global optimum*: :math:`f(x_i) = 3` for :math:`\\mathbf{x} = [0, -1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-2.0] * self.dimensions,
[ 2.0] * self.dimensions))
self.global_optimum = [0., -1.]
self.fglob = 3.0
def evaluator(self, x, *args):
self.fun_evals += 1
a = 1+(x[0]+x[1]+1)**2*(19-14*x[0]+3*x[0]**2-14*x[1]+6*x[0]*x[1]+3*x[1]**2)
b = 30+(2*x[0]-3*x[1])**2*(18-32*x[0]+12*x[0]**2+48*x[1]-36*x[0]*x[1]+27*x[1]**2)
return a*b
# -------------------------------------------------------------------------------- #
class Griewank(Benchmark):
"""
Griewank test objective function.
This class defines the Griewank global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Griewank}}(\\mathbf{x}) = \\frac{1}{4000}\\sum_{i=1}^n x_i^2 - \\prod_{i=1}^n\\cos\\left(\\frac{x_i}{\\sqrt{i}}\\right) + 1
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-600, 600]` for :math:`i=1,...,n`.
.. figure:: figures/Griewank.png
:alt: Griewank function
:align: center
**Two-dimensional Griewank function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-600.0] * self.dimensions,
[ 600.0] * self.dimensions))
self.custom_bounds = [(-50, 50), (-50, 50)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**2)/4000.0 - prod(cos(x/sqrt(1.0+arange(len(x))))) + 1.0
# -------------------------------------------------------------------------------- #
class Gulf(Benchmark):
"""
Gulf test objective function.
This class defines the Gulf global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Gulf}}(\\mathbf{x}) = \\sum_{i=1}^{30} \\left( e^{-\\frac{\\lvert y_i - x_2 \\rvert^{x_3}}{x_1} } - t_i \\right)^2
Where, in this exercise:
.. math::
t_i = i/100 \\\\
y_i = 25 + [-50 \\log(t_i)]^{2/3}
    Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 50]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[50.0] * self.dimensions))
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
y = 0.0
for i in range(30):
            # t_i = (i+1)/100; starting the index at zero would evaluate
            # log(0) below.
            ti = (i + 1)*0.01
yi = 25.0 + (-50*log(ti))**(2.0/3.0)
ai = yi - x2
y += (exp(-((abs(ai)**x3)/x1)) - ti)**2.0
return y
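# Illustrative sanity check (not part of the original suite; the helper name
# is ours): at x = (50, 25, 1.5) we get |y_i - 25|**1.5 = -50*log(t_i), so
# each exponential collapses to t_i and every residual is essentially zero.
def _check_gulf():
    import numpy
    fn = Gulf()
    x = numpy.asarray(fn.global_optimum, dtype=float)
    assert fn.evaluator(x) < 1e-25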
# -------------------------------------------------------------------------------- #
class Hansen(Benchmark):
"""
Hansen test objective function.
This class defines the Hansen global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Hansen}}(\\mathbf{x}) = \\left[ \\sum_{i=0}^4(i+1)\\cos(ix_1+i+1)\\right ] \\left[ \\sum_{j=0}^4(j+1)\\cos((j+2)x_2+j+1) \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Hansen.png
:alt: Hansen function
:align: center
**Two-dimensional Hansen function**
    *Global optimum*: :math:`f(x_i) = -176.54` for :math:`\\mathbf{x} = [-7.58989583, -7.70831466]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-7.58989583, -7.70831466]
self.fglob = -176.54
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = f2 = 0.0
for i in range(5):
f1 += (i+1)*cos(i*x[0] + i + 1)
f2 += (i+1)*cos((i+2)*x[1] + i + 1)
return f1*f2
# -------------------------------------------------------------------------------- #
class Hartmann3(Benchmark):
"""
Hartmann3 test objective function.
This class defines the Hartmann3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann3}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|ccc|c|ccr}
\\hline
i & & a_{ij}& & c_i & & p_{ij} & \\\\
\\hline
        1 & 3.0 & 10.0 & 30.0 & 1.0 & 0.3689 & 0.1170 & 0.2673 \\\\
2 & 0.1 & 10.0 & 35.0 & 1.2 & 0.4699 & 0.4387 & 0.7470 \\\\
3 & 3.0 & 10.0 & 30.0 & 3.0 & 0.1091 & 0.8732 & 0.5547 \\\\
4 & 0.1 & 10.0 & 35.0 & 3.2 & 0.0381 & 0.5743 & 0.8828 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = -3.86278214782076` for :math:`\\mathbf{x} = [0.1, 0.55592003, 0.85218259]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1, 0.55592003, 0.85218259]
self.fglob = -3.86278214782076
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[3.0, 0.1, 3.0, 0.1],
[10.0, 10.0, 10.0, 10.0],
[30.0, 35.0, 30.0, 35.0]])
p = asarray([[0.36890, 0.46990, 0.10910, 0.03815],
[0.11700, 0.43870, 0.87320, 0.57430],
[0.26730, 0.74700, 0.55470, 0.88280]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
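# Illustrative sanity check (not part of the original suite; the helper name
# is ours): the stored optimum reproduces fglob = -3.86278214782076 to
# roughly 1e-4 (the optimum coordinates are quoted to limited precision).
def _check_hartmann3():
    import numpy
    fn = Hartmann3()
    x = numpy.asarray(fn.global_optimum)
    assert abs(fn.evaluator(x) - fn.fglob) < 1e-4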
# -------------------------------------------------------------------------------- #
class Hartmann6(Benchmark):
"""
Hartmann6 test objective function.
This class defines the Hartmann6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Hartmann6}}(\\mathbf{x}) = -\\sum\\limits_{i=1}^{4} c_i e^{-\\sum\\limits_{j=1}^{n}a_{ij}(x_j - p_{ij})^2}
Where, in this exercise:
.. math::
\\begin{array}{l|cccccc|r}
\\hline
i & & & a_{ij} & & & & c_i \\\\
\\hline
1 & 10.0 & 3.0 & 17.0 & 3.50 & 1.70 & 8.00 & 1.0 \\\\
2 & 0.05 & 10.0 & 17.0 & 0.10 & 8.00 & 14.00 & 1.2 \\\\
3 & 3.00 & 3.50 & 1.70 & 10.0 & 17.00 & 8.00 & 3.0 \\\\
4 & 17.00 & 8.00 & 0.05 & 10.00 & 0.10 & 14.00 & 3.2 \\\\
\\hline
\\end{array}
\\newline
\\\\
\\newline
\\begin{array}{l|cccccr}
\\hline
i & & & p_{ij} & & & \\\\
\\hline
1 & 0.1312 & 0.1696 & 0.5569 & 0.0124 & 0.8283 & 0.5886 \\\\
2 & 0.2329 & 0.4135 & 0.8307 & 0.3736 & 0.1004 & 0.9991 \\\\
3 & 0.2348 & 0.1451 & 0.3522 & 0.2883 & 0.3047 & 0.6650 \\\\
4 & 0.4047 & 0.8828 & 0.8732 & 0.5743 & 0.1091 & 0.0381 \\\\
\\hline
\\end{array}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,6`.
*Global optimum*: :math:`f(x_i) = -3.32236801141551` for :math:`\\mathbf{x} = [0.20168952, 0.15001069, 0.47687398, 0.27533243, 0.31165162, 0.65730054]`
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.20168952, 0.15001069, 0.47687398,
0.27533243, 0.31165162, 0.65730054]
self.fglob = -3.32236801141551
def evaluator(self, x, *args):
self.fun_evals += 1
a = asarray([[10.00, 0.05, 3.00, 17.00],
[3.00, 10.00, 3.50, 8.00],
[17.00, 17.00, 1.70, 0.05],
[3.50, 0.10, 10.00, 10.00],
[1.70, 8.00, 17.00, 0.10],
[8.00, 14.00, 8.00, 14.00]])
p = asarray([[0.1312, 0.2329, 0.2348, 0.4047],
[0.1696, 0.4135, 0.1451, 0.8828],
[0.5569, 0.8307, 0.3522, 0.8732],
[0.0124, 0.3736, 0.2883, 0.5743],
[0.8283, 0.1004, 0.3047, 0.1091],
[0.5886, 0.9991, 0.6650, 0.0381]])
c = asarray([1.0, 1.2, 3.0, 3.2])
d = zeros_like(c)
for i in range(4):
d[i] = sum(a[:, i]*(x - p[:, i])**2)
return -sum(c*exp(-d))
# -------------------------------------------------------------------------------- #
class HelicalValley(Benchmark):
"""
HelicalValley test objective function.
This class defines the HelicalValley global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{HelicalValley}}(\\mathbf{x}) = 100\\left\\{\\left[x_3 - 10\\Psi(x_1, x_2)\\right]^2 + \\left(\\sqrt{x_1^2 + x_2^2} - 1\\right)^2\\right\\} + x_3^2
Where, in this exercise:
.. math::
2\\pi\\Psi(x,y) = \\begin{cases} \\arctan(y/x) & \\textrm{for} x > 0 \\\\
\\pi + \\arctan(y/x) & \\textrm{for} x < 0 \\end{cases}
    Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 0, 0]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100] * self.dimensions))
self.global_optimum = [1.0, 0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*((x[2] - 10*arctan2(x[1], x[0])/2/pi)**2 + (sqrt(x[0]**2 + x[1]**2) - 1)**2) + x[2]**2
# -------------------------------------------------------------------------------- #
class HimmelBlau(Benchmark):
"""
HimmelBlau test objective function.
This class defines the HimmelBlau global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HimmelBlau}}(\\mathbf{x}) = (x_1^2 + x_2 - 11)^2 + (x_1 + x_2^2 -7)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-6, 6]` for :math:`i=1,2`.
.. figure:: figures/HimmelBlau.png
:alt: HimmelBlau function
:align: center
**Two-dimensional HimmelBlau function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [3, 2]`, one of four global minima.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-6] * self.dimensions,
[ 6] * self.dimensions))
        # Himmelblau's function has four global minima; store the classical
        # one at (3, 2). The previous value [0, 0] gives f = 170, not 0.
        self.global_optimum = [3.0, 2.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] * x[0] + x[1] - 11)**2 + (x[0] + x[1] * x[1] - 7)**2
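# Illustrative sanity check (not part of the original suite; the helper name
# is ours): all four known minima of Himmelblau's function evaluate to
# (numerically) zero.
def _check_himmelblau():
    import numpy
    fn = HimmelBlau()
    minima = [(3.0, 2.0), (-2.805118, 3.131312),
              (-3.779310, -3.283186), (3.584428, -1.848126)]
    for pt in minima:
        assert fn.evaluator(numpy.asarray(pt)) < 1e-6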
# -------------------------------------------------------------------------------- #
class HolderTable(Benchmark):
"""
HolderTable test objective function.
This class defines the HolderTable global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{HolderTable}}(\\mathbf{x}) = - \\left|{e^{\\left|{1 - \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} }\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/HolderTable.png
:alt: HolderTable function
:align: center
**Two-dimensional HolderTable function**
*Global optimum*: :math:`f(x_i) = -19.20850256788675` for :math:`x_i = \\pm 9.664590028909654` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [(8.055023472141116 , 9.664590028909654),
(-8.055023472141116, 9.664590028909654),
(8.055023472141116 , -9.664590028909654),
(-8.055023472141116, -9.664590028909654)]
self.fglob = -19.20850256788675
def evaluator(self, x, *args):
self.fun_evals += 1
return -abs(sin(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi)))
# -------------------------------------------------------------------------------- #
class Holzman(Benchmark):
"""
Holzman test objective function.
This class defines the Holzman global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Holzman}}(\\mathbf{x}) = \\sum_{i=0}^{99} \\left [ e^{-\\frac{1}{x_1}(u_i-x_2)^{x_3}} - 0.01(i+1) \\right ]^2
Where, in this exercise:
.. math::
u_i = 25 + (-50 \\log{[0.01(i+1)]})^{2/3}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [0, 100], x_2 \\in [0, 25.6], x_3 \\in [0, 5]`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [50, 25, 1.5]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = ([0.0, 100.0], [0.0, 25.6], [0.0, 5.0])
self.global_optimum = [50.0, 25.0, 1.5]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
y = 0.0
for i in range(100):
ui = 25.0 + (-50.0*log(0.01*(i+1)))**(2.0/3.0)
            # Least-squares residual: exp(-(u_i - x2)**x3/x1) equals
            # 0.01*(i+1) exactly at the documented optimum [50, 25, 1.5].
            # The previous unsquared form with exp(+...) and 0.1*(i+1)
            # cannot attain fglob = 0 there.
            y += (exp(-(ui - x[1])**x[2]/x[0]) - 0.01*(i+1))**2.0
return y
# -------------------------------------------------------------------------------- #
class Hosaki(Benchmark):
"""
Hosaki test objective function.
This class defines the Hosaki global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Hosaki}}(\\mathbf{x}) = \\left ( 1 - 8x_1 + 7x_1^2 - \\frac{7}{3}x_1^3 + \\frac{1}{4}x_1^4 \\right )x_2^2e^{-x_2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Hosaki.png
:alt: Hosaki function
:align: center
**Two-dimensional Hosaki function**
*Global optimum*: :math:`f(x_i) = -2.3458` for :math:`\\mathbf{x} = [4, 2]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 5), (0, 5)]
self.global_optimum = [4, 2]
self.fglob = -2.3458
def evaluator(self, x, *args):
self.fun_evals += 1
return (1 + x[0]*(-8 + x[0]*(7 + x[0]*(-7.0/3.0 + x[0] *1.0/4.0))))*x[1]*x[1] * exp(-x[1])
# -------------------------------------------------------------------------------- #
class Infinity(Benchmark):
"""
Infinity test objective function.
This class defines the Infinity global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Infinity}}(\\mathbf{x}) = \\sum_{i=1}^{n} x_i^{6} \\left [ \\sin\\left ( \\frac{1}{x_i} \\right )+2 \\right ]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Infinity.png
:alt: Infinity function
:align: center
**Two-dimensional Infinity function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [1e-16] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**6.0*(sin(1.0/x) + 2.0))
# -------------------------------------------------------------------------------- #
class JennrichSampson(Benchmark):
"""
Jennrich-Sampson test objective function.
This class defines the Jennrich-Sampson global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{JennrichSampson}}(\\mathbf{x}) = \\sum_{i=1}^{10} \\left [2 + 2i - (e^{ix_1} + e^{ix_2}) \\right ]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,2`.
.. figure:: figures/JennrichSampson.png
:alt: Jennrich-Sampson function
:align: center
**Two-dimensional Jennrich-Sampson function**
*Global optimum*: :math:`f(x_i) = 124.3621824` for :math:`\\mathbf{x} = [0.257825, 0.257825]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.custom_bounds = [(-1, 0.34), (-1, 0.34)]
self.global_optimum = [0.257825, 0.257825]
self.fglob = 124.3621824
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
rng = numpy.arange(1.0, 11.0)
return sum((2.0 + 2.0*rng - (exp(rng*x1) + exp(rng*x2)))**2.0)
# -------------------------------------------------------------------------------- #
class Judge(Benchmark):
"""
Judge test objective function.
This class defines the Judge global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Judge}}(\\mathbf{x}) = \\sum_{i=1}^{20} \\left [ \\left (x_1 + B_i x_2 + C_i x_2^2 \\right ) - A_i \\right ]^2
Where, in this exercise:
.. math::
\\begin{cases} A = [4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145, 3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179, 2.858, 1.388, 1.651, 1.593, 1.046, 2.152] \\\\
B = [0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957, 0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828, 0.399, 0.617, 0.939, 0.784, 0.072, 0.889] \\\\
C = [0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259, 0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180, 0.842, 0.039, 0.103, 0.620, 0.158, 0.704] \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Judge.png
:alt: Judge function
:align: center
**Two-dimensional Judge function**
*Global optimum*: :math:`f(x_i) = 16.0817307` for :math:`\\mathbf{x} = [0.86479, 1.2357]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.86479, 1.2357]
self.fglob = 16.0817307
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
Y = asarray([4.284, 4.149, 3.877, 0.533, 2.211, 2.389, 2.145,
3.231, 1.998, 1.379, 2.106, 1.428, 1.011, 2.179,
2.858, 1.388, 1.651, 1.593, 1.046, 2.152])
X2 = asarray([0.286, 0.973, 0.384, 0.276, 0.973, 0.543, 0.957,
0.948, 0.543, 0.797, 0.936, 0.889, 0.006, 0.828,
0.399, 0.617, 0.939, 0.784, 0.072, 0.889])
X3 = asarray([0.645, 0.585, 0.310, 0.058, 0.455, 0.779, 0.259,
0.202, 0.028, 0.099, 0.142, 0.296, 0.175, 0.180,
0.842, 0.039, 0.103, 0.620, 0.158, 0.704])
return sum(((x1 + x2*X2 + (x2**2.0)*X3) - Y)**2.0)
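# Illustrative sanity check (not part of the original suite; the helper name
# is ours): the quoted least-squares minimum reproduces fglob = 16.0817307
# to displayed precision.
def _check_judge():
    import numpy
    fn = Judge()
    x = numpy.asarray(fn.global_optimum)
    assert abs(fn.evaluator(x) - fn.fglob) < 1e-4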
# -------------------------------------------------------------------------------- #
class Katsuura(Benchmark):
"""
Katsuura test objective function.
This class defines the Katsuura global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Katsuura}}(\\mathbf{x}) = \\prod_{i=0}^{n-1} \\left [ 1 + (i+1) \\sum_{k=1}^{d} \\textrm{round}\\left(2^k x_i\\right) 2^{-k} \\right ]
Where, in this exercise, :math:`d = 32`.
    Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Katsuura.png
:alt: Katsuura function
:align: center
**Two-dimensional Katsuura function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
d = 32
prod = 1.0
for i in range(self.dimensions):
s = 0.0
for k in range(1, d+1):
pow2 = 2.0**k
s += round(pow2*x[i])/pow2
prod = prod*(1.0 + (i+1.0)*s)
return prod
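# A vectorized form of the inner sum (an illustrative sketch, not part of the
# original suite; the helper name is ours):
# sum_{k=1..d} round(2**k * x_i)/2**k computed without the explicit k-loop.
# At x = 0 every inner sum vanishes, so the product equals fglob = 1.
def _katsuura_inner_sum(xi, d=32):
    import numpy
    pow2 = 2.0**numpy.arange(1, d + 1)
    return numpy.sum(numpy.round(pow2*xi)/pow2)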
# -------------------------------------------------------------------------------- #
class Keane(Benchmark):
"""
Keane test objective function.
This class defines the Keane global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Keane}}(\\mathbf{x}) = -\\frac{\\sin^2(x_1 - x_2)\\sin^2(x_1 + x_2)}{\\sqrt{x_1^2 + x_2^2}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Keane.png
:alt: Keane function
:align: center
**Two-dimensional Keane function**
    *Global optimum*: :math:`f(x_i) = -0.673668` for :math:`\\mathbf{x} = [0.0, 1.39325]` (and, by symmetry, :math:`[1.39325, 0.0]`).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
        self.custom_bounds = [(0, 2), (0, 2)]  # keep plots inside the [0, 10] domain
self.global_optimum = [0.0, 1.39325]
        self.fglob = -0.673668
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
        # Keane's function is conventionally maximized; it is negated here so
        # that the stated optimum is a minimum (the unnegated ratio is zero
        # everywhere along x1 = x2 and peaks at 0.673668).
        return -(sin(x1 - x2)**2.0*sin(x1 + x2)**2.0)/sqrt(x1**2.0 + x2**2.0)
# -------------------------------------------------------------------------------- #
class Kowalik(Benchmark):
"""
Kowalik test objective function.
This class defines the Kowalik global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Kowalik}}(\\mathbf{x}) = \\sum_{i=0}^{10} \\left [ a_i - \\frac{x_1(b_i^2+b_ix_2)}{b_i^2 + b_ix_3 + x_4} \\right ]^2
Where:
.. math::
        \\mathbf{a} = [0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627, 0.0456, 0.0342, 0.0323, 0.0235, 0.0246] \\\\
        \\mathbf{b} = [4, 2, 1, 1/2, 1/4, 1/6, 1/8, 1/10, 1/12, 1/14, 1/16]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0.00030748610` for :math:`\\mathbf{x} = [0.192833, 0.190836, 0.123117, 0.135766]`.
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0.192833, 0.190836, 0.123117, 0.135766]
self.fglob = 0.00030748610
def evaluator(self, x, *args):
self.fun_evals += 1
b = asarray([4.0, 2.0, 1.0, 1/2.0, 1/4.0, 1/6.0, 1/8.0,
1/10.0, 1/12.0, 1/14.0, 1/16.0])
a = asarray([0.1957, 0.1947, 0.1735, 0.1600, 0.0844, 0.0627,
0.0456, 0.0342, 0.0323, 0.0235, 0.0246])
y = 0.0
for i in range(11):
bb = b[i]*b[i]
t = a[i] - (x[0]*(bb + b[i]*x[1])/(bb + b[i]*x[2]+x[3]))
y += t*t
return y
# -------------------------------------------------------------------------------- #
class Langermann(Benchmark):
"""
Langermann test objective function.
This class defines the Langermann global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Langermann}}(\\mathbf{x}) = - \\sum_{i=1}^{5} \\frac{c_i \\cos\\left\{\\pi \\left[\\left(x_{1}- a_i\\right)^{2} + \\left(x_{2} - b_i \\right)^{2}\\right]\\right\}}{e^{\\frac{\\left( x_{1} - a_i\\right)^{2} + \\left( x_{2} - b_i\\right)^{2}}{\\pi}}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2`.
.. figure:: figures/Langermann.png
:alt: Langermann function
:align: center
**Two-dimensional Langermann function**
*Global optimum*: :math:`f(x_i) = -5.1621259` for :math:`\\mathbf{x} = [2.00299219, 1.006096]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [2.00299219, 1.006096]
self.fglob = -5.1621259
def evaluator(self, x, *args):
self.fun_evals += 1
a = [3,5,2,1,7]
b = [5,2,1,4,9]
c = [1,2,5,2,3]
return -sum(c*exp(-(1/pi)*((x[0]-a)**2 + (x[1]-b)**2))*cos(pi*((x[0]-a)**2 + (x[1]-b)**2)))
# -------------------------------------------------------------------------------- #
class LennardJones(Benchmark):
"""
LennardJones test objective function.
This class defines the Lennard-Jones global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{LennardJones}}(\\mathbf{x}) = \\sum_{i=0}^{n-2}\\sum_{j=i+1}^{n-1}\\left(\\frac{1}{r_{ij}^{12}} - \\frac{2}{r_{ij}^{6}}\\right)
Where, in this exercise:
.. math::
        r_{ij} = \\sqrt{(x_{3i}-x_{3j})^2 + (x_{3i+1}-x_{3j+1})^2 + (x_{3i+2}-x_{3j+2})^2}
Valid for any dimension, :math:`n = 3*k, k=2,3,4,...,20`. :math:`k` is the number of atoms in 3-D space
constraints: unconstrained type: multi-modal with one global minimum; non-separable
Value-to-reach: :math:`minima[k-2] + 0.0001`. See array of minima below; additional minima available at
the Cambridge cluster database:
http://www-wales.ch.cam.ac.uk/~jon/structures/LJ/tables.150.html
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-4, 4]` for :math:`i=1,...,n`.
*Global optimum*:
.. math::
minima = [-1.,-3.,-6.,-9.103852,-12.712062,-16.505384,-19.821489,-24.113360, \\\\
-28.422532,-32.765970,-37.967600,-44.326801,-47.845157,-52.322627, \\\\
-56.815742,-61.317995, -66.530949,-72.659782,-77.1777043]
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-4.0] * self.dimensions,
[ 4.0] * self.dimensions))
minima = [-1.0, -3.0, -6.0, -9.103852, -12.712062, -16.505384,
-19.821489, -24.113360, -28.422532, -32.765970,
-37.967600, -44.326801, -47.845157, -52.322627,
-56.815742, -61.317995, -66.530949, -72.659782,
-77.1777043]
        # Minimum-energy configurations are degenerate under rotation and
        # translation, so no single optimum coordinate vector is stored.
        self.global_optimum = []
k = dimensions//3
self.fglob = minima[k-2]
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
k = self.dimensions//3
s = 0.0
for i in range(k-1):
for j in range(i+1, k):
a = 3*i
b = 3*j
xd = x[a] - x[b]
yd = x[a+1] - x[b+1]
zd = x[a+2] - x[b+2]
ed = xd*xd + yd*yd + zd*zd
ud = ed*ed*ed
if ed > 0.0:
s += (1.0/ud-2.0)/ud
return s
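# Illustrative sanity check (not part of the original suite; the helper name
# is ours): two atoms at unit separation sit at the pair-potential minimum
# 1/r**12 - 2/r**6 = -1, which is exactly minima[0] for the 6-dimensional
# (two-atom) problem.
def _check_lennard_jones_dimer():
    import numpy
    fn = LennardJones(6)
    x = numpy.asarray([0.0, 0.0, 0.0, 1.0, 0.0, 0.0])
    assert abs(fn.evaluator(x) - fn.fglob) < 1e-12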
# -------------------------------------------------------------------------------- #
class Leon(Benchmark):
"""
Leon test objective function.
This class defines the Leon global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Leon}}(\\mathbf{x}) = \\left(1 - x_{1}\\right)^{2} + 100 \\left(x_{2} - x_{1}^{2} \\right)^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1.2, 1.2]` for :math:`i=1,2`.
.. figure:: figures/Leon.png
:alt: Leon function
:align: center
**Two-dimensional Leon function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.2] * self.dimensions,
[ 1.2] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 100*(x[1] - x[0]**2.0)**2.0 + (1 - x[0])**2.0
# -------------------------------------------------------------------------------- #
class Levy03(Benchmark):
"""
Levy 3 test objective function.
This class defines the Levy 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Levy03}}(\\mathbf{x}) = \\sin^2(\\pi y_1)+\\sum_{i=1}^{n-1}(y_i-1)^2[1+10\\sin^2(\\pi y_{i+1})]+(y_n-1)^2[1+\\sin^2(2\\pi y_n)]
Where, in this exercise:
.. math::
y_i=1+\\frac{x_i-1}{4}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Levy03.png
:alt: Levy 3 function
:align: center
**Two-dimensional Levy 3 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = len(x)
z = zeros_like(x)
for i in range(n):
z[i] = 1+(x[i]-1)/4
s = sin(pi*z[0])**2
for i in range(n-1):
            s = s + (z[i]-1)**2*(1+10*(sin(pi*z[i+1]))**2)
y = s+(z[n-1]-1)**2*(1+(sin(2*pi*z[n-1]))**2)
return y
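
# Vectorized sketch equivalent to the loops in Levy03.evaluator above (an
# addition; relies only on numpy broadcasting and gives the same result):
def _levy03_vectorized(x):
    z = 1.0 + (asarray(x) - 1.0)/4.0
    return (sin(pi*z[0])**2
            + sum((z[:-1] - 1.0)**2*(1.0 + 10.0*sin(pi*z[1:])**2))
            + (z[-1] - 1.0)**2*(1.0 + sin(2.0*pi*z[-1])**2))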
# -------------------------------------------------------------------------------- #
class Levy05(Benchmark):
"""
Levy 5 test objective function.
This class defines the Levy 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Levy05}}(\\mathbf{x}) = \\sum_{i=1}^{5} i \\cos \\left[(i-1)x_1 + i \\right] \\times \\sum_{j=1}^{5} j \\cos \\left[(j+1)x_2 + j \\right] + (x_1 + 1.42513)^2 + (x_2 + 0.80032)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Levy05.png
:alt: Levy 5 function
:align: center
**Two-dimensional Levy 5 function**
*Global optimum*: :math:`f(x_i) = -176.1375` for :math:`\\mathbf{x} = [-1.3068, -1.4248]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [-1.30685, -1.42485]
self.fglob = -176.1375
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
rng = numpy.arange(1.0, 6.0)
return sum(rng*cos((rng-1.0)*x1 + rng))*sum(rng*cos((rng+1.0)*x2 + rng)) + (x1 + 1.42513)**2.0 + (x2 + 0.80032)**2.0
# -------------------------------------------------------------------------------- #
class Levy13(Benchmark):
"""
Levy13 test objective function.
This class defines the Levy13 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Levy13}}(\\mathbf{x}) = \\left(x_{1} -1\\right)^{2} \\left[\\sin^{2}\\left(3 \\pi x_{2}\\right) + 1\\right] + \\left(x_{2} -1\\right)^{2} \\left[\\sin^{2}\\left(2 \\pi x_{2}\\right) + 1\\right] + \\sin^{2}\\left(3 \\pi x_{1}\\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Levy13.png
:alt: Levy13 function
:align: center
**Two-dimensional Levy13 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return (sin(3*pi*x[0]))**2 + ((x[0]-1)**2)*(1 + (sin(3*pi*x[1]))**2) + ((x[1]-1)**2)*(1 + (sin(2*pi*x[1]))**2)
# -------------------------------------------------------------------------------- #
class Matyas(Benchmark):
"""
Matyas test objective function.
This class defines the Matyas global optimization problem. This
    is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Matyas}}(\\mathbf{x}) = 0.26(x_1^2 + x_2^2) - 0.48x_1x_2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Matyas.png
:alt: Matyas function
:align: center
**Two-dimensional Matyas function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
return 0.26*(x[0]**2 + x[1]**2) - 0.48*x[0]*x[1]
# -------------------------------------------------------------------------------- #
class McCormick(Benchmark):
"""
McCormick test objective function.
This class defines the McCormick global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{McCormick}}(\\mathbf{x}) = \\sin\\left(x_{1} + x_{2}\\right) + \\left(x_{1} - x_{2}\\right)^{2} - 1.5 x_{1} + 2.5 x_{2} + 1
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-1.5, 4]`, :math:`x_2 \\in [-3, 4]`.
.. figure:: figures/McCormick.png
:alt: McCormick function
:align: center
**Two-dimensional McCormick function**
*Global optimum*: :math:`f(x_i) = -1.913222954981037` for :math:`\\mathbf{x} = [-0.5471975602214493, -1.547197559268372]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-1.5, 4.0), (-3.0, 4.0)]
self.global_optimum = [-0.5471975602214493, -1.547197559268372]
self.fglob = -1.913222954981037
def evaluator(self, x, *args):
self.fun_evals += 1
return sin(x[0] + x[1]) + (x[0] - x[1])**2 - 1.5*x[0] + 2.5*x[1] + 1
# -------------------------------------------------------------------------------- #
class Michalewicz(Benchmark):
"""
Michalewicz test objective function.
This class defines the Michalewicz global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Michalewicz}}(\\mathbf{x}) = - \\sum_{i=1}^{n} \\sin\\left(x_i\\right) \\sin^{2 m}\\left(\\frac{i x_i^{2}}{\\pi}\\right)
Where, in this exercise, :math:`m = 10`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, \\pi]` for :math:`i=1,2`.
.. figure:: figures/Michalewicz.png
:alt: Michalewicz function
:align: center
**Two-dimensional Michalewicz function**
    *Global optimum*: :math:`f(x_i) = -1.8013` for :math:`\\mathbf{x} = [2.20290552, 1.57079633]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[pi] * self.dimensions))
        self.global_optimum = [2.20290552, 1.57079633]
self.fglob = -1.8013
def evaluator(self, x, *args):
self.fun_evals += 1
m = 10.0
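        # m controls the steepness of the valleys; m = 10 is the standard
        # setting for this benchmark.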
i = arange(1, self.dimensions+1)
return -sum(sin(x) * (sin(i*x**2/pi))**(2*m))
# -------------------------------------------------------------------------------- #
class MieleCantrell(Benchmark):
"""
Miele-Cantrell test objective function.
This class defines the Miele-Cantrell global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{MieleCantrell}}(\\mathbf{x}) = (e^{-x_1} - x_2)^4 + 100(x_2 - x_3)^6 + \\tan^4(x_3 - x_4) + x_1^8
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 1, 1, 1]`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0, 1.0, 1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3, x4 = x
return (exp(-x1) - x2)**4.0 + 100.0*(x2 - x3)**6.0 + (tan(x3 - x4))**4.0 + x1**8.0
# -------------------------------------------------------------------------------- #
class Mishra01(Benchmark):
"""
Mishra 1 test objective function.
This class defines the Mishra 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra01}}(\\mathbf{x}) = (1 + x_n)^{x_n} \\hspace{10pt} ; \\hspace{10pt} x_n = n - \\sum_{i=1}^{n-1} x_i
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra01.png
:alt: Mishra 1 function
:align: center
**Two-dimensional Mishra 1 function**
*Global optimum*: :math:`f(x_i) = 2` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0 + 1e-9] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 2.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
xn = n - sum(x[0:-1])
return (1 + xn)**xn
# -------------------------------------------------------------------------------- #
class Mishra02(Benchmark):
"""
Mishra 2 test objective function.
This class defines the Mishra 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra02}}(\\mathbf{x}) = (1 + x_n)^{x_n} \\hspace{10pt} ; \\hspace{10pt} x_n = n - \\sum_{i=1}^{n-1} \\frac{(x_i + x_{i+1})}{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra02.png
:alt: Mishra 2 function
:align: center
**Two-dimensional Mishra 2 function**
*Global optimum*: :math:`f(x_i) = 2` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0 + 1e-9] * self.dimensions))
self.global_optimum = [1.0] * self.dimensions
self.fglob = 2.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
xn = n - sum((x[0:-1] + x[1:])/2.0)
return (1 + xn)**xn
# -------------------------------------------------------------------------------- #
class Mishra03(Benchmark):
"""
Mishra 3 test objective function.
This class defines the Mishra 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra03}}(\\mathbf{x}) = \\sqrt{\\lvert \\cos{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra03.png
:alt: Mishra 3 function
:align: center
**Two-dimensional Mishra 3 function**
*Global optimum*: :math:`f(x_i) = -0.18467` for :math:`x_i = -10` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-10.0, -10.0]
self.fglob = -0.18467
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return sqrt(abs(cos(sqrt(abs(x1**2.0 + x2**2.0))))) + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra04(Benchmark):
"""
Mishra 4 test objective function.
This class defines the Mishra 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra04}}(\\mathbf{x}) = \\sqrt{\\lvert \\sin{\\sqrt{\\lvert x_1^2 + x_2^2 \\rvert}} \\rvert} + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra04.png
:alt: Mishra 4 function
:align: center
**Two-dimensional Mishra 4 function**
*Global optimum*: :math:`f(x_i) = -0.199409` for :math:`x_i = -10` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-10.0, -10.0]
self.fglob = -0.199409
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return sqrt(abs(sin(sqrt(abs(x1**2.0 + x2**2.0))))) + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra05(Benchmark):
"""
Mishra 5 test objective function.
This class defines the Mishra 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra05}}(\\mathbf{x}) = \\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) + \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2 + 0.01(x_1 + x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra05.png
:alt: Mishra 5 function
:align: center
**Two-dimensional Mishra 5 function**
*Global optimum*: :math:`f(x_i) = -0.119829` for :math:`\\mathbf{x} = [-1.98682, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-1.98682, -10.0]
self.fglob = -0.119829
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return ((sin((cos(x1) + cos(x2))**2.0)**2.0) + (cos((sin(x1) + sin(x2))**2.0)**2.0) + x1)**2.0 + 0.01*(x1 + x2)
# -------------------------------------------------------------------------------- #
class Mishra06(Benchmark):
"""
Mishra 6 test objective function.
This class defines the Mishra 6 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Mishra06}}(\\mathbf{x}) = -\\log{\\left [ \\sin^2 ((\\cos(x_1) + \\cos(x_2))^2) - \\cos^2 ((\\sin(x_1) + \\sin(x_2))^2) + x_1 \\right ]^2} + 0.1 \\left[(x_1 -1)^2 + (x_2 - 1)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra06.png
:alt: Mishra 6 function
:align: center
**Two-dimensional Mishra 6 function**
*Global optimum*: :math:`f(x_i) = -2.28395` for :math:`\\mathbf{x} = [2.88631, 1.82326]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [2.88631, 1.82326]
self.fglob = -2.28395
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -log(((sin((cos(x1) + cos(x2))**2.0)**2.0) - (cos((sin(x1) + sin(x2))**2.0)**2.0) + x1)**2.0) + 0.1*((x1 - 1.0)**2.0 + (x2 - 1.0)**2.0)
# -------------------------------------------------------------------------------- #
class Mishra07(Benchmark):
"""
Mishra 7 test objective function.
This class defines the Mishra 7 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra07}}(\\mathbf{x}) = \\left [\\prod_{i=1}^{n} x_i - n! \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra07.png
:alt: Mishra 7 function
:align: center
**Two-dimensional Mishra 7 function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\sqrt[n]{n!}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
        self.global_optimum = [factorial(self.dimensions)**(1.0/self.dimensions)] * self.dimensions
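        # At this point prod(x) equals n! exactly, so f = 0 (e.g. x_i =
        # 6**(1.0/3) for n = 3); sqrt(n) coincides with this only for n = 2.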
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return (prod(x) - factorial(self.dimensions))**2.0
# -------------------------------------------------------------------------------- #
class Mishra08(Benchmark):
"""
Mishra 8 test objective function.
This class defines the Mishra 8 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra08}}(\\mathbf{x}) = 0.001 \\left[\\lvert x_1^{10} - 20x_1^9 + 180x_1^8 - 960 x_1^7 + 3360x_1^6 - 8064x_1^5 + 13340x_1^4 - 15360x_1^3 + 11520x_1^2 - 5120x_1 + 2624 \\rvert \\lvert x_2^4 + 12x_2^3 + 54x_2^2 + 108x_2 + 81 \\rvert \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra08.png
:alt: Mishra 8 function
:align: center
**Two-dimensional Mishra 8 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, -3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(1.0, 2.0), (-4.0, 1.0)]
self.global_optimum = [2.0, -3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
F1 = abs(x[0]**10-20*x[0]**9+180*x[0]**8-960*x[0]**7+3360*x[0]**6-8064*x[0]**5+13340*x[0]**4-15360*x[0]**3+11520*x[0]**2-5120*x[0]+2624)
F2 = abs(x[1]**4+12*x[1]**3+54*x[1]**2+108*x[1]+81.0)
return 0.001*(F1+F2)**2
# -------------------------------------------------------------------------------- #
class Mishra09(Benchmark):
"""
Mishra 9 test objective function.
This class defines the Mishra 9 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra09}}(\\mathbf{x}) = \\left[ ab^2c + abc^2 + b^2 + (x_1 + x_2 - x_3)^2 \\right]^2
Where, in this exercise:
.. math::
        \\begin{cases} a = 2x_1^3 + 5x_1x_2 + 4x_3 - 2x_1^2x_3 - 18 \\\\
        b = x_1 + x_2^3 + x_1x_2^2 + x_1x_3^2 - 22 \\\\
c = 8x_1^2 + 2x_2x_3 + 2x_2^2 + 3x_2^3 - 52 \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 2, 3]`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [1.0, 2.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
F1 = 2*x1**3+5*x1*x2+4*x3-2*x1**2*x3-18.0
F2 = x1+x2**3+x1*x2**2+x1*x3**2-22.0
F3 = 8*x1**2+2*x2*x3+2*x2**2+3*x2**3-52.0
return (F1*F3*F2**2+F1*F2*F3**2+F2**2+(x1+x2-x3)**2)**2
# -------------------------------------------------------------------------------- #
class Mishra10(Benchmark):
"""
Mishra 10 test objective function.
This class defines the Mishra 10 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Mishra10}}(\\mathbf{x}) = \\left[ \\lfloor x_1 \\rfloor + \\lfloor x_2 \\rfloor - \\lfloor x_1 \\rfloor \\lfloor x_2 \\rfloor \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Mishra10.png
:alt: Mishra 10 function
:align: center
**Two-dimensional Mishra 10 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [2, 2]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [2.0, 2.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
        x1, x2 = floor(x[0]), floor(x[1])
f1 = x1 + x2
f2 = x1*x2
return (f1 - f2)**2.0
# -------------------------------------------------------------------------------- #
class Mishra11(Benchmark):
"""
Mishra 11 test objective function.
This class defines the Mishra 11 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Mishra11}}(\\mathbf{x}) = \\left [ \\frac{1}{n} \\sum_{i=1}^{n} \\lvert x_i \\rvert - \\left(\\prod_{i=1}^{n} \\lvert x_i \\rvert \\right )^{\\frac{1}{n}} \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Mishra11.png
:alt: Mishra 11 function
:align: center
**Two-dimensional Mishra 11 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-3, 3), (-3, 3)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
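        # f is the squared gap between the arithmetic and geometric means of
        # |x_i|; it vanishes whenever all |x_i| are equal, not only at 0.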
        return ((1.0/n)*sum(abs(x)) - (prod(abs(x)))**(1.0/n))**2.0
# -------------------------------------------------------------------------------- #
class MultiModal(Benchmark):
"""
MultiModal test objective function.
This class defines the MultiModal global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{MultiModal}}(\\mathbf{x}) = \\left( \\sum_{i=1}^n \\lvert x_i \\rvert \\right) \\left( \\prod_{i=1}^n \\lvert x_i \\rvert \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/MultiModal.png
:alt: MultiModal function
:align: center
**Two-dimensional MultiModal function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x))*prod(abs(x))
# -------------------------------------------------------------------------------- #
class NeedleEye(Benchmark):
"""
NeedleEye test objective function.
This class defines the Needle-Eye global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NeedleEye}}(\\mathbf{x}) = \\begin{cases} 1 & \\textrm{if} \\hspace{5pt} \\lvert x_i \\rvert < eye \\hspace{5pt} \\forall i \\\\
\\sum_{i=1}^n (100 + \\lvert x_i \\rvert) & \\textrm{if} \\hspace{5pt} \\lvert x_i \\rvert > eye \\\\
0 & \\textrm{otherwise} \\end{cases}
Where, in this exercise, :math:`eye = 0.0001`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/NeedleEye.png
:alt: NeedleEye function
:align: center
**Two-dimensional NeedleEye function**
    *Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
        # Any point with all |x_i| < eye (here, the origin) attains the optimum.
        self.global_optimum = [0.0] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
f = 0.0
fp = False
eye = 0.0001
for i in range(self.dimensions):
# WARNING: Changing this code, ambiguous variable "fp"
if abs(x[i]) >= eye:
fp = True
f += 100.0 + abs(x[i])
else:
f += 1.0
if not fp:
f = f/self.dimensions
return f
# -------------------------------------------------------------------------------- #
class NewFunction01(Benchmark):
"""
NewFunction01 test objective function.
This class defines the NewFunction01 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NewFunction01}}(\\mathbf{x}) = \\left | {\\cos\\left(\\sqrt{\\left|{x_{1}^{2} + x_{2}}\\right|}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction01.png
:alt: NewFunction01 function
:align: center
**Two-dimensional NewFunction01 function**
*Global optimum*: :math:`f(x_i) = -0.17894509347721144` for :math:`\\mathbf{x} = [-8.4666, -9.9988]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-8.4666, -9.9988]
self.fglob = -0.17894509347721144
def evaluator(self, x, *args):
self.fun_evals += 1
return abs(cos(sqrt(abs(x[0]**2 + x[1]))))**0.5 + 0.01*x[0] + 0.01*x[1]
# -------------------------------------------------------------------------------- #
class NewFunction02(Benchmark):
"""
NewFunction02 test objective function.
This class defines the NewFunction02 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{NewFunction02}}(\\mathbf{x}) = \\left | {\\sin\\left(\\sqrt{\\lvert{x_{1}^{2} + x_{2}}\\rvert}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction02.png
:alt: NewFunction02 function
:align: center
**Two-dimensional NewFunction02 function**
*Global optimum*: :math:`f(x_i) = -0.1971881059905` for :math:`\\mathbf{x} = [-9.94112, -9.99952]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-9.94112, -9.99952]
self.fglob = -0.1971881059905
def evaluator(self, x, *args):
self.fun_evals += 1
return abs(sin(sqrt(abs(x[0]**2 + x[1]))))**0.5 + 0.01*x[0] + 0.01*x[1]
# -------------------------------------------------------------------------------- #
class NewFunction03(Benchmark):
"""
NewFunction03 test objective function.
This class defines the NewFunction03 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{NewFunction03}}(\\mathbf{x}) = 0.01 x_{1} + 0.1 x_{2} + \\left\\{x_{1} + \\sin^{2}\\left[\\left(\\cos\\left(x_{1}\\right) + \\cos\\left(x_{2}\\right)\\right)^{2}\\right] + \\cos^{2}\\left[\\left(\\sin\\left(x_{1}\\right) + \\sin\\left(x_{2}\\right)\\right)^{2}\\right]\\right\\}^{2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/NewFunction03.png
:alt: NewFunction03 function
:align: center
**Two-dimensional NewFunction03 function**
*Global optimum*: :math:`f(x_i) = -1.019829` for :math:`\\mathbf{x} = [-1.98682, -10]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-1.98682, -10.0]
self.fglob = -1.019829
def evaluator(self, x, *args):
self.fun_evals += 1
f1 = sin((cos(x[0]) + cos(x[1]))**2)**2
f2 = cos((sin(x[0]) + sin(x[1]))**2)**2
f = (f1 + f2 + x[0])**2
f = f + 0.01*x[0] + 0.1*x[1]
return f
# -------------------------------------------------------------------------------- #
class OddSquare(Benchmark):
"""
Odd Square test objective function.
This class defines the Odd Square global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{OddSquare}}(\\mathbf{x}) = -e^{-\\frac{d}{2\\pi}} \\cos(\\pi d) \\left( 1 + \\frac{0.02h}{d + 0.01} \\right )
Where, in this exercise:
.. math::
        \\begin{cases} d = n \\cdot \\smash{\\displaystyle\\max_{1 \\leq i \\leq n}} \\left[ (x_i - b_i)^2 \\right ] \\\\
\\\\
h = \\sum_{i=1}^{n} (x_i - b_i)^2 \\end{cases}
And :math:`\\mathbf{b} = [1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4, 1, 1.3, 0.8, -0.4, -1.3, 1.6, -0.2, -0.6, 0.5, 1.4]`
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5 \\pi, 5 \\pi]` for :math:`i=1,...,n` and :math:`n \\leq 20`.
.. figure:: figures/OddSquare.png
:alt: Odd Square function
:align: center
**Two-dimensional Odd Square function**
*Global optimum*: :math:`f(x_i) = -1.0084` for :math:`\\mathbf{x} \\approx b`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0*pi] * self.dimensions,
[ 5.0*pi] * self.dimensions))
self.custom_bounds = [(-2.0, 4.0), (-2.0, 4.0)]
# WARNING: Max dimensions of 2 * 10 is implicit
self.a = asarray([1, 1.3, 0.8, -0.4, -1.3,
1.6, -0.2, -0.6, 0.5, 1.4]*2)
self.global_optimum = self.a[0:self.dimensions]
self.fglob = -1.0084
        if self.dimensions > len(self.a):
print("WARNING: Too many dimensions to calculate global"+
" optimum for function: OddSquare")
def evaluator(self, x, *args):
self.fun_evals += 1
c = 0.02
b = self.a[0:self.dimensions]
d = self.dimensions*max((x - b)**2.0)
h = sum((x - b)**2.0)
        return -exp(-d/(2.0*pi))*cos(pi*d)*(1.0 + c*h/(d + 0.01))
# -------------------------------------------------------------------------------- #
class Parsopoulos(Benchmark):
"""
Parsopoulos test objective function.
This class defines the Parsopoulos global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Parsopoulos}}(\\mathbf{x}) = \\cos(x_1)^2 + \\sin(x_2)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/Parsopoulos.png
:alt: Parsopoulos function
:align: center
**Two-dimensional Parsopoulos function**
    *Global optimum*: This function has an infinite number of global minima in :math:`\\mathbb{R}^2`, at the points :math:`\\left(k\\frac{\\pi}{2}, \\lambda \\pi \\right)`,
    where :math:`k = \\pm1, \\pm3, ...` and :math:`\\lambda = 0, \\pm1, \\pm2, ...`
    Within the given domain, the function has 12 global minima, all equal to zero.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [pi/2.0, pi]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return cos(x1)**2.0 + sin(x2)**2.0
# -------------------------------------------------------------------------------- #
class Pathological(Benchmark):
"""
Pathological test objective function.
This class defines the Pathological global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Pathological}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\frac{\\sin^{2}\\left(\\sqrt{x_{i+1}^{2} + 100 x_{i}^{2}}\\right) - 0.5}{0.001 \\left[\\left(x_{i} - x_{i+1}\\right)^{4} + 1\\right] + 0.5} \\hspace{10pt} ; \\hspace{10pt} x_{n+1} = x_1
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Pathological.png
:alt: Pathological function
:align: center
**Two-dimensional Pathological function**
*Global optimum*: :math:`f(x_i) = -1.99600798403` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = -1.99600798403
def evaluator(self, x, *args):
self.fun_evals += 1
x_ = roll(x, -1)
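        # roll(x, -1) pairs each x_i with x_{i+1} cyclically, so the sum also
        # includes the wrap-around term (x_n with x_1); fglob above reflects
        # this.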
return sum((sin(sqrt(x_**2 + 100*x**2))**2 - 0.5) / (0.001 * ((x_ - x)**4 + 1.0) + 0.5))
# -------------------------------------------------------------------------------- #
class Paviani(Benchmark):
"""
Paviani test objective function.
This class defines the Paviani global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Paviani}}(\\mathbf{x}) = \\sum_{i=1}^{10} \\left[\\log^{2}\\left(10 - x_i\\right) + \\log^{2}\\left(x_i -2\\right)\\right] - \\left(\\prod_{i=1}^{10} x_i \\right)^{0.2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [2.001, 9.999]` for :math:`i=1,...,n`.
*Global optimum*: :math:`f(x_i) = -45.7784684040686` for :math:`x_i = 9.350266` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=10):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([2.001] * self.dimensions,
[9.999] * self.dimensions))
self.global_optimum = [9.350266] * self.dimensions
self.fglob = -45.7784684040686
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(log(x-2)**2.0 + log(10.0 - x)**2.0) - prod(x)**0.2
# -------------------------------------------------------------------------------- #
class Penalty01(Benchmark):
"""
Penalty 1 test objective function.
This class defines the Penalty 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Penalty01}}(\\mathbf{x}) = \\frac{\\pi}{30} \\left\\{10 \\sin^2(\\pi y_1) + \\sum_{i=1}^{n-1} (y_i - 1)^2 \\left[1 + 10 \\sin^2(\\pi y_{i+1}) \\right ] + (y_n - 1)^2 \\right \\} + \\sum_{i=1}^n u(x_i, 10, 100, 4)
Where, in this exercise:
.. math::
y_i = 1 + \\frac{1}{4}(x_i + 1)
And:
.. math::
u(x_i, a, k, m) = \\begin{cases} k(x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i > a \\\\
0 & \\textrm{if} \\hspace{5pt} -a \\leq x_i \\leq a \\\\
k(-x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i < -a \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/Penalty01.png
:alt: Penalty 1 function
:align: center
**Two-dimensional Penalty 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = -1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(-5.0, 5.0), (-5.0, 5.0)]
self.global_optimum = [-1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a, b, c = 10.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b*(xx - a)**c, 0.0)
y = 1.0 + (x + 1.0)/4.0
return sum(u) + (pi/30.0)*(10.0*sin(pi*y[0])**2.0 + sum((y[0:-1] - 1.0)**2.0 *(1.0 + 10.0*sin(pi*y[1:])**2.0)) + (y[-1] - 1)**2.0)
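
# Standalone sketch of the three-branch penalty u(x_i, a, k, m) from the
# docstring above (an addition): `where` vectorizes the piecewise definition
# exactly as the Penalty evaluators do.
def _penalty_u(x, a=10.0, k=100.0, m=4.0):
    xx = abs(asarray(x))
    return where(xx > a, k*(xx - a)**m, 0.0)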
# -------------------------------------------------------------------------------- #
class Penalty02(Benchmark):
"""
Penalty 2 test objective function.
This class defines the Penalty 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Penalty02}}(\\mathbf{x}) = 0.1 \\left\\{\\sin^2(3\\pi x_1) + \\sum_{i=1}^{n-1} (x_i - 1)^2 \\left[1 + \\sin^2(3\\pi x_{i+1}) \\right ] + (x_n - 1)^2 \\left [1 + \\sin^2(2 \\pi x_n) \\right ]\\right \\} + \\sum_{i=1}^n u(x_i, 5, 100, 4)
Where, in this exercise:
.. math::
u(x_i, a, k, m) = \\begin{cases} k(x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i > a \\\\
0 & \\textrm{if} \\hspace{5pt} -a \\leq x_i \\leq a \\\\
k(-x_i - a)^m & \\textrm{if} \\hspace{5pt} x_i < -a \\end{cases}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,...,n`.
.. figure:: figures/Penalty02.png
:alt: Penalty 2 function
:align: center
**Two-dimensional Penalty 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
a, b, c = 5.0, 100.0, 4.0
xx = abs(x)
u = where(xx > a, b*(xx - a)**c, 0.0)
        return sum(u) + 0.1*(sin(3.0*pi*x[0])**2.0 + sum((x[0:-1] - 1.0)**2.0 *(1.0 + sin(3.0*pi*x[1:])**2.0)) + (x[-1] - 1)**2.0*(1 + sin(2*pi*x[-1])**2.0))
# -------------------------------------------------------------------------------- #
class PenHolder(Benchmark):
"""
PenHolder test objective function.
This class defines the PenHolder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PenHolder}}(\\mathbf{x}) = -e^{\\left|{e^{\\left|{- \\frac{\\sqrt{x_{1}^{2} + x_{2}^{2}}}{\\pi} + 1}\\right|} \\cos\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right|^{-1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-11, 11]` for :math:`i=1,2`.
.. figure:: figures/PenHolder.png
:alt: PenHolder function
:align: center
**Two-dimensional PenHolder function**
*Global optimum*: :math:`f(x_i) = -0.9635348327265058` for :math:`x_i = \\pm 9.646167671043401` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-11.0] * self.dimensions,
[ 11.0] * self.dimensions))
        self.global_optimum = [-9.646167671043401, 9.646167671043401]
self.fglob = -0.9635348327265058
def evaluator(self, x, *args):
self.fun_evals += 1
return -exp(-(abs(cos(x[0])*cos(x[1])*exp(abs(1 - sqrt(x[0]**2 + x[1]**2)/pi))))**(-1))
# -------------------------------------------------------------------------------- #
class PermFunction01(Benchmark):
"""
PermFunction 1 test objective function.
This class defines the Perm Function 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PermFunction01}}(\\mathbf{x}) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j^k + \\beta) \\left[ \\left(\\frac{x_j}{j}\\right)^k - 1 \\right] \\right\\}^2
    Where, in this exercise, :math:`\\beta = 0.5`. Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-n, n+1]` for :math:`i=1,...,n`.
.. figure:: figures/PermFunction01.png
:alt: PermFunction 1 function
:align: center
**Two-dimensional PermFunction 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-self.dimensions] * self.dimensions,
[self.dimensions+1] * self.dimensions))
        self.global_optimum = list(range(1, self.dimensions+1))
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
b = 0.5
s_out = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for j in range(1, self.dimensions+1):
s_in += (j**k + b)*((x[j-1]/j)**k - 1)
s_out += s_in**2
return s_out
# -------------------------------------------------------------------------------- #
class PermFunction02(Benchmark):
"""
PermFunction 2 test objective function.
This class defines the Perm Function 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{PermFunction02}}(\\mathbf{x}) = \\sum_{k=1}^n \\left\\{ \\sum_{j=1}^n (j + \\beta) \\left[ x_j^k - \\frac{1}{j^k} \\right] \\right\\}^2
    Where, in this exercise, :math:`\\beta = 10`. Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-n, n+1]` for :math:`i=1,...,n`.
.. figure:: figures/PermFunction02.png
:alt: PermFunction 2 function
:align: center
**Two-dimensional PermFunction 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\frac{1}{i}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-self.dimensions] * self.dimensions,
[ self.dimensions+1] * self.dimensions))
self.custom_bounds = [(0, 1.5), (0, 1.0)]
        self.global_optimum = [1.0/i for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
b = 10.0
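        # b is the beta of the docstring; at x_j = 1/j every inner term
        # (j + b)*((1/j)**k - (1/j)**k) vanishes, giving f = 0.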
s_out = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for j in range(1, self.dimensions+1):
s_in += (j + b)*(x[j-1]**k - (1.0/j)**k)
s_out += s_in**2
return s_out
# -------------------------------------------------------------------------------- #
class Pinter(Benchmark):
"""
Pinter test objective function.
This class defines the Pinter global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Pinter}}(\\mathbf{x}) = \\sum_{i=1}^n ix_i^2 + \\sum_{i=1}^n 20i \\sin^2 A + \\sum_{i=1}^n i \\log_{10} (1 + iB^2)
Where, in this exercise:
.. math::
\\begin{cases} A = x_{i-1} \\sin x_i + \\sin x_{i+1} \\\\
B = x_{i-1}^2 - 2x_i + 3x_{i+1} - \\cos x_i + 1 \\end{cases}
Where :math:`x_0 = x_n` and :math:`x_{n+1} = x_1`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Pinter.png
:alt: Pinter function
:align: center
**Two-dimensional Pinter function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
n = self.dimensions
f = 0.0
for i in range(n):
x_i = x[i]
if i == 0:
x_mi = x[-1]
x_pi = x[i+1]
elif i == n - 1:
x_mi = x[i-1]
x_pi = x[0]
else:
x_mi = x[i-1]
x_pi = x[i+1]
A = x_mi*sin(x_i) + sin(x_pi)
B = x_mi**2.0 - 2*x_i + 3*x_pi - cos(x_i) + 1.0
f += (i + 1.0)*x_i**2.0 + 20.0*(i + 1.0)*sin(A)**2.0 + (i + 1.0)*log10(1.0 + (i + 1.0)*B**2.0)
return f
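
# Vectorized sketch of Pinter.evaluator (an addition): numpy's roll reproduces
# the cyclic convention x_0 = x_n, x_{n+1} = x_1 used in the loop above.
def _pinter_vectorized(x):
    x = asarray(x)
    i = arange(1.0, len(x) + 1.0)
    x_mi, x_pi = roll(x, 1), roll(x, -1)
    A = x_mi*sin(x) + sin(x_pi)
    B = x_mi**2.0 - 2.0*x + 3.0*x_pi - cos(x) + 1.0
    return sum(i*x**2.0 + 20.0*i*sin(A)**2.0 + i*log10(1.0 + i*B**2.0))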
# -------------------------------------------------------------------------------- #
class Plateau(Benchmark):
"""
Plateau test objective function.
This class defines the Plateau global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Plateau}}(\\mathbf{x}) = 30 + \\sum_{i=1}^n \\lfloor \\lvert x_i \\rvert \\rfloor
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Plateau.png
:alt: Plateau function
:align: center
**Two-dimensional Plateau function**
*Global optimum*: :math:`f(x_i) = 30` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 30.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 30.0 + sum(floor(abs(x)))
# -------------------------------------------------------------------------------- #
class Powell(Benchmark):
"""
Powell test objective function.
This class defines the Powell global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Powell}}(\\mathbf{x}) = (x_1 + 10x_2)^2 + 5(x_3 - x_4)^2 + (x_2 - 2x_3)^4 + 10(x_1 - x_4)^4
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-4, 5]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-4.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [0, 0, 0, 0]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
return (x[0] + 10*x[1])**2 + 5*(x[2] - x[3])**2 + (x[1] - 2*x[2])**4 + 10*(x[0] - x[3])**4
# -------------------------------------------------------------------------------- #
class PowerSum(Benchmark):
"""
Power sum test objective function.
This class defines the Power Sum global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{PowerSum}}(\\mathbf{x}) = \\sum_{k=1}^n\\left[\\left(\\sum_{i=1}^n x_i^k \\right) - b_k \\right]^2
Where, in this exercise, :math:`\\mathbf{b} = [8, 18, 44, 114]`
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 4]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 2, 2, 3]`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[float(self.dimensions)] * self.dimensions))
self.global_optimum = [1.0, 2.0, 2.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
b = [8.0, 18.0, 44.0, 114.0]
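        # NOTE: b is defined for the default four dimensions only; evaluating
        # with a different dimensionality would require a matching target
        # vector.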
y = 0.0
for k in range(1, self.dimensions+1):
s_in = 0.0
for i in range(self.dimensions):
s_in = s_in + x[i]**k
y = y + (s_in - b[k-1])**2.0
return y
# -------------------------------------------------------------------------------- #
class Price01(Benchmark):
"""
Price 1 test objective function.
This class defines the Price 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price01}}(\\mathbf{x}) = (\\lvert x_1 \\rvert - 5)^2 + (\\lvert x_2 \\rvert - 5)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/Price01.png
:alt: Price 1 function
:align: center
**Two-dimensional Price 1 function**
*Global optimum*: :math:`f(x_i) = 0.0` for :math:`\\mathbf{x} = [5, 5]` or :math:`\\mathbf{x} = [5, -5]`
or :math:`\\mathbf{x} = [-5, 5]` or :math:`\\mathbf{x} = [-5, -5]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [5.0, 5.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (abs(x1) - 5.0)**2.0 + (abs(x2) - 5.0)**2.0
# -------------------------------------------------------------------------------- #
class Price02(Benchmark):
"""
Price 2 test objective function.
This class defines the Price 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price02}}(\\mathbf{x}) = 1 + \\sin^2(x_1) + \\sin^2(x_2) - 0.1e^{(-x_1^2 - x_2^2)}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Price02.png
:alt: Price 2 function
:align: center
**Two-dimensional Price 2 function**
*Global optimum*: :math:`f(x_i) = 0.9` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [0.0, 0.0]
self.fglob = 0.9
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 1.0 + sin(x1)**2.0 + sin(x2)**2.0 - 0.1*exp(-x1**2.0 - x2**2.0)
# -------------------------------------------------------------------------------- #
class Price03(Benchmark):
"""
Price 3 test objective function.
This class defines the Price 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price03}}(\\mathbf{x}) = 100(x_2 - x_1^2)^2 + \\left[6.4(x_2 - 0.5)^2 - x_1 - 0.6 \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,2`.
.. figure:: figures/Price03.png
:alt: Price 3 function
:align: center
**Two-dimensional Price 3 function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 1]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [1.0, 1.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 100.0*(x2 - x1**2.0)**2.0 + (6.4*(x2 - 0.5)**2.0 - x1 - 0.6)**2.0
# -------------------------------------------------------------------------------- #
class Price04(Benchmark):
"""
Price 4 test objective function.
This class defines the Price 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Price04}}(\\mathbf{x}) = (2x_1^3x_2 - x_2^3)^2 + (6x_1 - x_2^2 + x_2)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-50, 50]` for :math:`i=1,2`.
.. figure:: figures/Price04.png
:alt: Price 4 function
:align: center
**Two-dimensional Price 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`, :math:`\\mathbf{x} = [2, 4]` and
:math:`\\mathbf{x} = [1.464, -2.506]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-50.0] * self.dimensions,
[ 50.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [2.0, 4.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return (2.0*x2*x1**3.0 - x2**3.0)**2.0 + (6.0*x1 - x2**2.0 + x2)**2.0
# -------------------------------------------------------------------------------- #
class Qing(Benchmark):
"""
Qing test objective function.
This class defines the Qing global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Qing}}(\\mathbf{x}) = \\sum_{i=1}^{n} (x_i^2 - i)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,...,n`.
.. figure:: figures/Qing.png
:alt: Qing function
:align: center
**Two-dimensional Qing function**
    *Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = \\pm \\sqrt{i}` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [sqrt(i) for i in range(1, self.dimensions+1)]
self.fglob = 0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rng = numpy.arange(1, self.dimensions+1)
return sum((x**2.0 - rng)**2.0)
# -------------------------------------------------------------------------------- #
class Quadratic(Benchmark):
"""
Quadratic test objective function.
This class defines the Quadratic global optimization problem. This
    is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Quadratic}}(\\mathbf{x}) = -3803.84 - 138.08x_1 - 232.92x_2 + 128.08x_1^2 + 203.64x_2^2 + 182.25x_1x_2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Quadratic.png
:alt: Quadratic function
:align: center
**Two-dimensional Quadratic function**
*Global optimum*: :math:`f(x_i) = -3873.72418` for :math:`\\mathbf{x} = [0.19388, 0.48513]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(0, 1), (0, 1)]
self.global_optimum = [0.19388, 0.48513]
self.fglob = -3873.72418
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -3803.84 - 138.08*x1 - 232.92*x2 + 128.08*x1**2.0 + 203.64*x2**2.0 + 182.25*x1*x2
# -------------------------------------------------------------------------------- #
class Quintic(Benchmark):
"""
Quintic test objective function.
This class defines the Quintic global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Quintic}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left|{x_{i}^{5} - 3 x_{i}^{4} + 4 x_{i}^{3} + 2 x_{i}^{2} - 10 x_{i} -4}\\right|
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Quintic.png
:alt: Quintic function
:align: center
**Two-dimensional Quintic function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = -1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [-1.0] * self.dimensions
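        # x = 2 is also a root of the quintic (32 - 48 + 32 + 8 - 20 - 4 = 0),
        # so each coordinate may independently be -1 or 2 at a global minimum.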
self.fglob = 0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x**5 - 3*x**4 + 4*x**3 + 2*x**2 - 10*x - 4))
# -------------------------------------------------------------------------------- #
class Rana(Benchmark):
"""
Rana test objective function.
This class defines the Rana global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Rana}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left[x_{i} \\sin\\left(\\sqrt{\\lvert{x_{1} - x_{i} + 1}\\rvert}\\right) \\cos\\left(\\sqrt{\\lvert{x_{1} + x_{i} + 1}\\rvert}\\right) + \\left(x_{1} + 1\\right) \\sin\\left(\\sqrt{\\lvert{x_{1} + x_{i} + 1}\\rvert}\\right) \\cos\\left(\\sqrt{\\lvert{x_{1} - x_{i} + 1}\\rvert}\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500.000001, 500.000001]` for :math:`i=1,...,n`.
.. figure:: figures/Rana.png
:alt: Rana function
:align: center
**Two-dimensional Rana function**
*Global optimum*: :math:`f(x_i) = -928.5478` for :math:`x_i = -500` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.000001] * self.dimensions,
[ 500.000001] * self.dimensions))
self.global_optimum = [-500.0] * self.dimensions
self.fglob = -928.5478
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
E = x + 1
return sum(E*cos(sqrt(abs(E-x)))*sin(sqrt(abs(E+x))) + x*cos(sqrt(abs(E+x)))*sin(sqrt(abs(E-x))))
# -------------------------------------------------------------------------------- #
class Rastrigin(Benchmark):
"""
Rastrigin test objective function.
This class defines the Rastrigin global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
        f_{\\text{Rastrigin}}(\\mathbf{x}) = 10n + \\sum_{i=1}^n \\left[ x_i^2 - 10 \\cos(2\\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Rastrigin.png
:alt: Rastrigin function
:align: center
**Two-dimensional Rastrigin function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 10.0*self.dimensions + sum(x**2.0 - 10.0*cos(2.0*pi*x))
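
# Usage sketch (an addition, not part of the original suite): the origin
# evaluates exactly to fglob.
def _check_rastrigin_origin(n=5):
    r = Rastrigin(dimensions=n)
    return r.evaluator(zeros(n))  # -> 0.0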
# -------------------------------------------------------------------------------- #
class Ripple01(Benchmark):
"""
Ripple 1 test objective function.
This class defines the Ripple 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ripple01}}(\\mathbf{x}) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) + 0.1\\cos^2(500 \\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Ripple01.png
:alt: Ripple 1 function
:align: center
**Two-dimensional Ripple 1 function**
*Global optimum*: :math:`f(x_i) = -2.2` for :math:`x_i = 0.1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1] * self.dimensions
self.fglob = -2.2
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(-exp(-2.0*log(2.0)*((x - 0.1)/0.8)**2.0)*(sin(5.0*pi*x)**6.0 + 0.1*cos(500.0*pi*x)**2.0))
# -------------------------------------------------------------------------------- #
class Ripple25(Benchmark):
"""
Ripple 25 test objective function.
This class defines the Ripple 25 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ripple25}}(\\mathbf{x}) = \\sum_{i=1}^2 -e^{-2 \\log 2 (\\frac{x_i-0.1}{0.8})^2} \\left[\\sin^6(5 \\pi x_i) \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Ripple25.png
:alt: Ripple 25 function
:align: center
**Two-dimensional Ripple 25 function**
*Global optimum*: :math:`f(x_i) = -2` for :math:`x_i = 0.1` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[1.0] * self.dimensions))
self.global_optimum = [0.1] * self.dimensions
self.fglob = -2.0
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(-exp(-2.0*log(2.0)*((x - 0.1)/0.8)**2.0)*(sin(5.0*pi*x)**6.0))
# -------------------------------------------------------------------------------- #
class Rosenbrock(Benchmark):
"""
Rosenbrock test objective function.
This class defines the Rosenbrock global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Rosenbrock}}(\\mathbf{x}) = \\sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Rosenbrock.png
:alt: Rosenbrock function
:align: center
**Two-dimensional Rosenbrock function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
        # Standard Rosenbrock form, consistent with the documentation above.
        return sum(100.0*(x[1:] - x[:-1]**2.0)**2.0 + (1 - x[:-1])**2.0)
# -------------------------------------------------------------------------------- #
class RosenbrockModified(Benchmark):
"""
Modified Rosenbrock test objective function.
This class defines the Modified Rosenbrock global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{RosenbrockModified}}(\\mathbf{x}) = 74 + 100(x_2 - x_1^2)^2 + (1 - x_1)^2 - 400 e^{-\\frac{(x_1+1)^2 + (x_2 + 1)^2}{0.1}}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-2, 2]` for :math:`i=1,2`.
.. figure:: figures/RosenbrockModified.png
:alt: Modified Rosenbrock function
:align: center
**Two-dimensional Modified Rosenbrock function**
*Global optimum*: :math:`f(x_i) = 34.37` for :math:`\\mathbf{x} = [-0.9, -0.95]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-2.0] * self.dimensions,
[ 2.0] * self.dimensions))
self.custom_bounds = [(-1.0, 0.5), (-1.0, 1.0)]
self.global_optimum = [-0.9, -0.95]
self.fglob = 34.37
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 74.0 + 100.0*(x2 - x1**2.0)**2.0 + (1.0 - x1)**2.0 - 400.0*exp(-((x1 + 1.0)**2.0 + (x2 + 1.0)**2.0)/0.1)
# -------------------------------------------------------------------------------- #
class RotatedEllipse01(Benchmark):
"""
Rotated Ellipse 1 test objective function.
This class defines the Rotated Ellipse 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{RotatedEllipse01}}(\\mathbf{x}) = 7x_1^2 - 6 \\sqrt{3} x_1x_2 + 13x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/RotatedEllipse01.png
:alt: Rotated Ellipse 1 function
:align: center
**Two-dimensional Rotated Ellipse 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 7.0*x1**2.0 - 6.0*sqrt(3)*x1*x2 + 13*x2**2.0
# -------------------------------------------------------------------------------- #
class RotatedEllipse02(Benchmark):
"""
Rotated Ellipse 2 test objective function.
This class defines the Rotated Ellipse 2 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{RotatedEllipse02}}(\\mathbf{x}) = x_1^2 - x_1x_2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,2`.
.. figure:: figures/RotatedEllipse02.png
:alt: Rotated Ellipse 2 function
:align: center
**Two-dimensional Rotated Ellipse 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(-2.0, 2.0), (-2.0, 2.0)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return x1**2.0 - x1*x2 + x2**2.0
# -------------------------------------------------------------------------------- #
class Salomon(Benchmark):
"""
Salomon test objective function.
This class defines the Salomon global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Salomon}}(\\mathbf{x}) = 1 - \\cos \\left (2 \\pi \\sqrt{\\sum_{i=1}^{n} x_i^2} \\right) + 0.1 \\sqrt{\\sum_{i=1}^n x_i^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Salomon.png
:alt: Salomon function
:align: center
**Two-dimensional Salomon function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-50, 50), (-50, 50)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 1.0 - cos(2.0*pi*sqrt(sum(x**2.0))) + 0.1*sqrt(sum(x**2.0))
# -------------------------------------------------------------------------------- #
class Sargan(Benchmark):
"""
Sargan test objective function.
This class defines the Sargan global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Sargan}}(\\mathbf{x}) = \\sum_{i=1}^{n} n \\left (x_i^2 + 0.4 \\sum_{i \\neq j}^{n} x_ix_j \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Sargan.png
:alt: Sargan function
:align: center
**Two-dimensional Sargan function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
x0 = x[:-1]
x1 = roll(x,-1)[:-1]
return sum(self.dimensions*(x**2 + 0.4*sum(x0*x1)))
# -------------------------------------------------------------------------------- #
class Schaffer01(Benchmark):
"""
Schaffer 1 test objective function.
This class defines the Schaffer 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer01}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 (x_1^2 + x_2^2)^2 - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer01.png
:alt: Schaffer 1 function
:align: center
**Two-dimensional Schaffer 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(x1**2.0 + x2**2.0)**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer02(Benchmark):
"""
Schaffer 2 test objective function.
This class defines the Schaffer 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer02}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 (x_1^2 - x_2^2)^2 - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer02.png
:alt: Schaffer 2 function
:align: center
**Two-dimensional Schaffer 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(x1**2.0 - x2**2.0)**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer03(Benchmark):
"""
Schaffer 3 test objective function.
This class defines the Schaffer 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer03}}(\\mathbf{x}) = 0.5 + \\frac{\\sin^2 \\left( \\cos \\lvert x_1^2 - x_2^2 \\rvert \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer03.png
:alt: Schaffer 3 function
:align: center
**Two-dimensional Schaffer 3 function**
*Global optimum*: :math:`f(x_i) = 0.00156685` for :math:`\\mathbf{x} = [0, 1.253115]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0, 1.253115]
self.fglob = 0.00156685
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (sin(cos(abs(x1**2.0 - x2**2.0)))**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schaffer04(Benchmark):
"""
Schaffer 4 test objective function.
This class defines the Schaffer 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schaffer04}}(\\mathbf{x}) = 0.5 + \\frac{\\cos^2 \\left( \\sin(x_1^2 - x_2^2) \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schaffer04.png
:alt: Schaffer 4 function
:align: center
**Two-dimensional Schaffer 4 function**
*Global optimum*: :math:`f(x_i) = 0.292579` for :math:`\\mathbf{x} = [0, 1.253115]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10, 10), (-10, 10)]
self.global_optimum = [0.0, 1.253115]
self.fglob = 0.292579
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 0.5 + (cos(sin(x1**2.0 - x2**2.0))**2.0 - 0.5)/(1 + 0.001*(x1**2.0 + x2**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class SchmidtVetters(Benchmark):
"""
Schmidt-Vetters test objective function.
This class defines the Schmidt-Vetters global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SchmidtVetters}}(\\mathbf{x}) = \\frac{1}{1 + (x_1 - x_2)^2} + \\sin \\left(\\frac{\\pi x_2 + x_3}{2} \\right) + e^{\\left(\\frac{x_1+x_2}{x_2} - 2\\right)^2}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,2,3`.
*Global optimum*: :math:`f(x_i) = 3` for :math:`x_i = 0.78547` for :math:`i=1,2,3`
"""
def __init__(self, dimensions=3):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [0.78547] * self.dimensions
self.fglob = 3.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2, x3 = x
return 1.0/(1.0 + (x1 - x2)**2.0) + sin((pi*x2 + x3)/2.0) + exp(((x1 + x2)/x2 - 2)**2.0)
# -------------------------------------------------------------------------------- #
class Schwefel01(Benchmark):
"""
Schwefel 1 test objective function.
This class defines the Schwefel 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel01}}(\\mathbf{x}) = \\left(\\sum_{i=1}^n x_i^2 \\right)^{\\alpha}
Where, in this exercise, :math:`\\alpha = \\sqrt{\\pi}`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel01.png
:alt: Schwefel 1 function
:align: center
**Two-dimensional Schwefel 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
alpha = sqrt(pi)
return (sum(x**2.0))**alpha
# -------------------------------------------------------------------------------- #
class Schwefel02(Benchmark):
"""
Schwefel 2 test objective function.
This class defines the Schwefel 2 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel02}}(\\mathbf{x}) = \\sum_{i=1}^n \\left(\\sum_{j=1}^{i} x_j \\right)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel02.png
:alt: Schwefel 2 function
:align: center
**Two-dimensional Schwefel 2 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-4.0, 4.0), (-4.0, 4.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(self.dimensions):
temp = 0.0
for j in range(i+1):  # inclusive prefix sum, matching sum_{j=1}^{i}
temp += x[j]
s += temp**2.0
return s
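# A vectorized sketch equivalent to the double loop above (assumes numpy's
# cumsum is in scope like the other array helpers used in this module):
#
#     return sum(cumsum(x)**2.0)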
# -------------------------------------------------------------------------------- #
class Schwefel04(Benchmark):
"""
Schwefel 4 test objective function.
This class defines the Schwefel 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel04}}(\\mathbf{x}) = \\sum_{i=1}^n \\left[(x_i - 1)^2 + (x_1 - x_i^2)^2 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel04.png
:alt: Schwefel 4 function
:align: center
**Two-dimensional Schwefel 4 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.custom_bounds = [(0.0, 2.0), (0.0, 2.0)]
self.global_optimum = [1.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x - 1.0)**2.0 + (x[0] - x**2.0)**2.0)
# -------------------------------------------------------------------------------- #
class Schwefel06(Benchmark):
"""
Schwefel 6 test objective function.
This class defines the Schwefel 6 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel06}}(\\mathbf{x}) = \\max(\\lvert x_1 + 2x_2 - 7 \\rvert, \\lvert 2x_1 + x_2 - 5 \\rvert)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Schwefel06.png
:alt: Schwefel 6 function
:align: center
**Two-dimensional Schwefel 6 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [1, 3]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [1.0, 3.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
vector = [abs(x1 + 2*x2 - 7), abs(2*x1 + x2 - 5)]
return max(vector)
# -------------------------------------------------------------------------------- #
class Schwefel20(Benchmark):
"""
Schwefel 20 test objective function.
This class defines the Schwefel 20 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel20}}(\\mathbf{x}) = \\sum_{i=1}^n \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel20.png
:alt: Schwefel 20 function
:align: center
**Two-dimensional Schwefel 20 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel21(Benchmark):
"""
Schwefel 21 test objective function.
This class defines the Schwefel 21 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel21}}(\\mathbf{x}) = \\smash{\\displaystyle\\max_{1 \\leq i \\leq n}} \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel21.png
:alt: Schwefel 21 function
:align: center
**Two-dimensional Schwefel 21 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return max(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel22(Benchmark):
"""
Schwefel 22 test objective function.
This class defines the Schwefel 22 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel22}}(\\mathbf{x}) = \\sum_{i=1}^n \\lvert x_i \\rvert + \\prod_{i=1}^n \\lvert x_i \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel22.png
:alt: Schwefel 22 function
:align: center
**Two-dimensional Schwefel 22 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-10.0, 10.0), (-10.0, 10.0)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(abs(x)) + prod(abs(x))
# -------------------------------------------------------------------------------- #
class Schwefel26(Benchmark):
"""
Schwefel 26 test objective function.
This class defines the Schwefel 26 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel26}}(\\mathbf{x}) = 418.9829n - \\sum_{i=1}^n x_i \\sin(\\sqrt{|x_i|})
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-512, 512]` for :math:`i=1,...,n`.
.. figure:: figures/Schwefel26.png
:alt: Schwefel 26 function
:align: center
**Two-dimensional Schwefel 26 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 420.968746` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-512.0] * self.dimensions,
[ 512.0] * self.dimensions))
self.global_optimum = [420.968746] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 418.982887 * self.dimensions - sum(x * sin(sqrt(abs(x))))
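# Sanity-check sketch (illustration only): at the known optimum the value is
# approximately 0, limited by the precision of the 418.982887 constant:
#
#     f = Schwefel26()
#     f.evaluator(asarray(f.global_optimum))  # ~0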
# -------------------------------------------------------------------------------- #
class Schwefel36(Benchmark):
"""
Schwefel 36 test objective function.
This class defines the Schwefel 36 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Schwefel36}}(\\mathbf{x}) = -x_1x_2(72 - 2x_1 - 2x_2)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 500]` for :math:`i=1,2`.
.. figure:: figures/Schwefel36.png
:alt: Schwefel 36 function
:align: center
**Two-dimensional Schwefel 36 function**
*Global optimum*: :math:`f(x_i) = -3456` for :math:`\\mathbf{x} = [12, 12]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[500.0] * self.dimensions))
self.custom_bounds = [(0.0, 20.0), (0.0, 20.0)]
self.global_optimum = [12.0, 12.0]
self.fglob = -3456.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -x1*x2*(72.0 - 2.0*x1 - 2.0*x2)
# -------------------------------------------------------------------------------- #
class Shekel05(Benchmark):
"""
Shekel 5 test objective function.
This class defines the Shekel 5 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel05}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2}
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\ 6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.6 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.1527` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.1527
def evaluator(self, x, *args):
self.fun_evals += 1
m = 5
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.6])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
# -------------------------------------------------------------------------------- #
class Shekel07(Benchmark):
"""
Shekel 7 test objective function.
This class defines the Shekel 7 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel07}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2}
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\
6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\\\ 2.0 & 9.0 & 2.0 & 9.0 \\\\ 5.0 & 5.0 & 3.0 & 3.0 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.4 \\\\ 0.6 \\\\ 0.3 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.3999` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.3999
def evaluator(self, x, *args):
self.fun_evals += 1
m = 7
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0],
[2.0, 9.0, 2.0, 9.0],
[5.0, 5.0, 3.0, 3.0]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
# -------------------------------------------------------------------------------- #
class Shekel10(Benchmark):
"""
Shekel 10 test objective function.
This class defines the Shekel 10 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shekel10}}(\\mathbf{x}) = \\sum_{i=1}^{m} \\frac{1}{c_{i} + \\sum_{j=1}^{n} (x_{j} - a_{ij})^2}
Where, in this exercise:
.. math::
\\mathbf{a} = \\begin{bmatrix} 4.0 & 4.0 & 4.0 & 4.0 \\\\ 1.0 & 1.0 & 1.0 & 1.0 \\\\ 8.0 & 8.0 & 8.0 & 8.0 \\\\
6.0 & 6.0 & 6.0 & 6.0 \\\\ 3.0 & 7.0 & 3.0 & 7.0 \\\\ 2.0 & 9.0 & 2.0 & 9.0 \\\\ 5.0 & 5.0 & 3.0 & 3.0 \\\\
8.0 & 1.0 & 8.0 & 1.0 \\\\ 6.0 & 2.0 & 6.0 & 2.0 \\\\ 7.0 & 3.6 & 7.0 & 3.6 \\end{bmatrix}
.. math::
\\mathbf{c} = \\begin{bmatrix} 0.1 \\\\ 0.2 \\\\ 0.2 \\\\ 0.4 \\\\ 0.4 \\\\ 0.6 \\\\ 0.3 \\\\ 0.7 \\\\ 0.5 \\\\ 0.5 \\end{bmatrix}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 10]` for :math:`i=1,...,4`.
*Global optimum*: :math:`f(x_i) = -10.5319` for :math:`x_i = 4` for :math:`i=1,...,4`
"""
def __init__(self, dimensions=4):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[10.0] * self.dimensions))
self.global_optimum = [4.0] * self.dimensions
self.fglob = -10.5319
def evaluator(self, x, *args):
self.fun_evals += 1
m = 10
A = asarray([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, 1.0],
[8.0, 8.0, 8.0, 8.0],
[6.0, 6.0, 6.0, 6.0],
[3.0, 7.0, 3.0, 7.0],
[2.0, 9.0, 2.0, 9.0],
[5.0, 5.0, 3.0, 3.0],
[8.0, 1.0, 8.0, 1.0],
[6.0, 2.0, 6.0, 2.0],
[7.0, 3.6, 7.0, 3.6]])
C = asarray([0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5])
return -sum(1.0/(dot(x-a, x-a)+c) for a, c in zip(A, C))
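# The three Shekel variants above differ only in how many rows of A and C
# they use. A generic sketch of the shared evaluation (assumes asarray/dot
# from numpy are in scope, as in the classes above):
#
#     def shekel(x, A, C):
#         return -sum(1.0/(dot(x - a, x - a) + c) for a, c in zip(A, C))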
# -------------------------------------------------------------------------------- #
class Shubert01(Benchmark):
"""
Shubert 1 test objective function.
This class defines the Shubert 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert01}}(\\mathbf{x}) = \\left( \\sum\\limits_{i=1}^{5} i\\cos[(i+1)x_1 + i] \\right) \\left( \\sum\\limits_{i=1}^{5} i\\cos[(i+1)x_2 + i] \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert01.png
:alt: Shubert 1 function
:align: center
**Two-dimensional Shubert 1 function**
*Global optimum*: :math:`f(x_i) = -186.7309` for :math:`\\mathbf{x} = [-7.0835, 4.8580]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-7.0835, 4.8580]
self.fglob = -186.7309
def evaluator(self, x, *args):
self.fun_evals += 1
s1 = s2 = 0.0
for i in range(1, 6):
s1 = s1+i*cos((i+1)*x[0]+i)
s2 = s2+i*cos((i+1)*x[1]+i)
y = s1*s2
return y
# -------------------------------------------------------------------------------- #
class Shubert03(Benchmark):
"""
Shubert 3 test objective function.
This class defines the Shubert 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert03}}(\\mathbf{x}) = -\\sum_{i=1}^n \\sum_{j=1}^5 j \\sin \\left[(j+1)x_i + j \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert03.png
:alt: Shubert 3 function
:align: center
**Two-dimensional Shubert 3 function**
*Global optimum*: :math:`f(x_i) = -24.062499` for :math:`\\mathbf{x} = [5.791794, 5.791794]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [5.791794, 5.791794]
self.fglob = -24.062499
def evaluator(self, x, *args):
self.fun_evals += 1
return -sin(2.0*x[0]+1.0) - 2.0*sin(3.0*x[0]+2.0) - 3.0*sin(4.0*x[0]+3.0) - 4.0*sin(5.0*x[0]+4.0) \
-5.0*sin(6.0*x[0]+5.0) - sin(2.0*x[1]+1.0) - 2.0*sin(3.0*x[1]+2.0) - 3.0*sin(4.0*x[1]+3.0) \
-4.0*sin(5.0*x[1]+4.0) - 5.0*sin(6.0*x[1]+5.0)
# -------------------------------------------------------------------------------- #
class Shubert04(Benchmark):
"""
Shubert 4 test objective function.
This class defines the Shubert 4 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Shubert04}}(\\mathbf{x}) = -\\sum_{i=1}^n \\sum_{j=1}^5 j \\cos \\left[(j+1)x_i + j \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/Shubert04.png
:alt: Shubert 4 function
:align: center
**Two-dimensional Shubert 4 function**
*Global optimum*: :math:`f(x_i) = -29.016015` for :math:`\\mathbf{x} = [-0.80032121, -7.08350592]` (and many others).
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-0.80032121, -7.08350592]
self.fglob = -29.016015
def evaluator(self, x, *args):
self.fun_evals += 1
return -cos(2.0*x[0]+1.0) - 2.0*cos(3.0*x[0]+2.0) - 3.0*cos(4.0*x[0]+3.0) - 4.0*cos(5.0*x[0]+4.0) \
-5.0*cos(6.0*x[0]+5.0) - cos(2.0*x[1]+1.0) - 2.0*cos(3.0*x[1]+2.0) - 3.0*cos(4.0*x[1]+3.0) \
-4.0*cos(5.0*x[1]+4.0) - 5.0*cos(6.0*x[1]+5.0)
# -------------------------------------------------------------------------------- #
class SineEnvelope(Benchmark):
"""
SineEnvelope test objective function.
This class defines the SineEnvelope global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SineEnvelope}}(\\mathbf{x}) = \\sum_{i=1}^{n-1} \\left[ \\frac{\\sin^2 \\left( \\sqrt{x_{i+1}^2 + x_{i}^2} \\right) - 0.5}{\\left( 0.001(x_{i+1}^2 + x_{i}^2) + 1 \\right)^2} + 0.5 \\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/SineEnvelope.png
:alt: SineEnvelope function
:align: center
**Two-dimensional SineEnvelope function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-20, 20), (-20, 20)]
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
X1 = x[0:-1]
X2 = x[1:]
X12X22 = X1**2 + X2**2
return sum((sin(sqrt(X12X22))**2 - 0.5)/(1 + 0.001*X12X22)**2 + 0.5)
# -------------------------------------------------------------------------------- #
class SixHumpCamel(Benchmark):
"""
Six Hump Camel test objective function.
This class defines the Six Hump Camel global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{SixHumpCamel}}(\\mathbf{x}) = 4x_1^2+x_1x_2-4x_2^2-2.1x_1^4+4x_2^4+\\frac{1}{3}x_1^6
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/SixHumpCamel.png
:alt: Six Hump Camel function
:align: center
**Two-dimensional Six Hump Camel function**
*Global optimum*: :math:`f(x_i) = -1.031628453489877` for :math:`\\mathbf{x} = [0.08984201368301331 , -0.7126564032704135]`
or :math:`\\mathbf{x} = [-0.08984201368301331, 0.7126564032704135]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [(0.08984201368301331 , -0.7126564032704135),
(-0.08984201368301331, 0.7126564032704135)]
self.fglob = -1.031628
def evaluator(self, x, *args):
self.fun_evals += 1
return (4 - 2.1*x[0]**2 + x[0]**4/3)*x[0]**2 + x[0]*x[1] + (4*x[1]**2 - 4)*x[1]**2
# -------------------------------------------------------------------------------- #
class Sodp(Benchmark):
"""
Sodp test objective function.
This class defines the Sum Of Different Powers global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Sodp}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\lvert{x_{i}}\\rvert^{i + 1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-1, 1]` for :math:`i=1,...,n`.
.. figure:: figures/Sodp.png
:alt: Sodp function
:align: center
**Two-dimensional Sum Of Different Powers function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-1.0] * self.dimensions,
[ 1.0] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
i = arange(1, self.dimensions+1)
return sum(abs(x) ** (i+1))
# -------------------------------------------------------------------------------- #
class Sphere(Benchmark):
"""
Sphere test objective function.
This class defines the Sphere global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Sphere}}(\\mathbf{x}) = \\sum_{i=1}^{n} x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5.12, 5.12]` for :math:`i=1,...,n`.
.. figure:: figures/Sphere.png
:alt: Sphere function
:align: center
**Two-dimensional Sphere function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.12] * self.dimensions,
[ 5.12] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**2)
# -------------------------------------------------------------------------------- #
class Step(Benchmark):
"""
Step test objective function.
This class defines the Step global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Step}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left ( \\lfloor x_i \\rfloor + 0.5 \\right )^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,...,n`.
.. figure:: figures/Step.png
:alt: Step function
:align: center
**Two-dimensional Step function**
*Global optimum*: :math:`f(x_i) = 0.25n` for :math:`x_i = 0.5` for :math:`i=1,...,n` (0.5 in the default two-dimensional case)
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [0.5] * self.dimensions
self.fglob = 0.5
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((floor(x) + 0.5)**2.0)
# -------------------------------------------------------------------------------- #
class Stochastic(Benchmark):
"""
Stochastic test objective function.
This class defines a Stochastic global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Stochastic}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\epsilon_i \\left | {x_i - \\frac{1}{i}} \\right |
The variable :math:`\\epsilon_i, (i=1,...,n)` is a random variable uniformly distributed in :math:`[0, 1]`.
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,n`.
.. figure:: figures/Stochastic.png
:alt: Stochastic function
:align: center
**Two-dimensional Stochastic function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 1/i` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [1.0/i for i in range(1, self.dimensions+1)]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rnd = uniform(0.0, 1.0, size=(self.dimensions, ))
rng = arange(1, self.dimensions+1)
return sum(rnd*abs(x - 1.0/rng))
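# Note: each call draws fresh uniform noise, so repeated evaluations at the
# same x differ. For reproducible runs one can seed numpy's generator first
# (a usage sketch, not part of the benchmark itself; assumes numpy.random
# is importable here):
#
#     numpy.random.seed(0)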
# -------------------------------------------------------------------------------- #
class StretchedV(Benchmark):
"""
StretchedV test objective function.
This class defines the Stretched V global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{StretchedV}}(\\mathbf{x}) = \\sum_{i=1}^{n-1} t^{1/4} \\sin^2 \\left(50 t^{0.1} + 1 \\right)
Where, in this exercise:
.. math::
t = x_{i+1}^2 + x_i^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,...,n`.
.. figure:: figures/StretchedV.png
:alt: StretchedV function
:align: center
**Two-dimensional StretchedV function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-9.38723188, 9.34026753]` when :math:`n = 2`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10] * self.dimensions,
[ 10] * self.dimensions))
self.global_optimum = [-9.38723188, 9.34026753]
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
s = 0.0
for i in range(self.dimensions-1):
t = x[i+1]*x[i+1] + x[i]*x[i]
s += t**0.25 * (sin(50.0*t**0.1 + 1.0))**2.0
return s
# -------------------------------------------------------------------------------- #
class StyblinskiTang(Benchmark):
"""
StyblinskiTang test objective function.
This class defines the Styblinski-Tang global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{StyblinskiTang}}(\\mathbf{x}) = \\frac{1}{2} \\sum_{i=1}^{n} \\left(x_i^4 - 16x_i^2 + 5x_i \\right)
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,...,n`.
.. figure:: figures/StyblinskiTang.png
:alt: StyblinskiTang function
:align: center
**Two-dimensional Styblinski-Tang function**
*Global optimum*: :math:`f(x_i) = -39.16616570377142n` for :math:`x_i = -2.903534018185960` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.global_optimum = [-2.903534018185960] * self.dimensions
self.fglob = -39.16616570377142*self.dimensions
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return sum(x**4 - 16*x**2 + 5*x)/2
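# Worked check (sketch): for n=1, (x**4 - 16*x**2 + 5*x)/2 evaluated at
# x = -2.903534018185960 gives about -39.16616570377142, matching fglob
# per dimension.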
# -------------------------------------------------------------------------------- #
class TestTubeHolder(Benchmark):
"""
TestTubeHolder test objective function.
This class defines the TestTubeHolder global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{TestTubeHolder}}(\\mathbf{x}) = - 4 \\left | {e^{\\left|{\\cos\\left(\\frac{1}{200} x_{1}^{2} + \\frac{1}{200} x_{2}^{2}\\right)}\\right|} \\sin\\left(x_{1}\\right) \\cos\\left(x_{2}\\right)}\\right |
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/TestTubeHolder.png
:alt: TestTubeHolder function
:align: center
**Two-dimensional TestTubeHolder function**
*Global optimum*: :math:`f(x_i) = -10.872299901558` for :math:`\\mathbf{x} = [-\\pi/2, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.global_optimum = [-pi/2, 0.0]
self.fglob = -10.87229990155800
def evaluator(self, x, *args):
self.fun_evals += 1
return -4*abs(sin(x[0])*cos(x[1])*exp(abs(cos((x[0]**2 + x[1]**2)/200))))
# -------------------------------------------------------------------------------- #
class Treccani(Benchmark):
"""
Treccani test objective function.
This class defines the Treccani global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Treccani}}(\\mathbf{x}) = x_1^4 + 4x_1^3 + 4x_1^2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/Treccani.png
:alt: Treccani function
:align: center
**Two-dimensional Treccani function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [-2, 0]` or :math:`\\mathbf{x} = [0, 0]`.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-2, 2)]
self.global_optimum = [-2.0, 0.0]
self.fglob = 0
def evaluator(self, x, *args):
self.fun_evals += 1
return x[0]**4 + 4.0*x[0]**3 + 4.0*x[0]**2 + x[1]**2
# -------------------------------------------------------------------------------- #
class Trefethen(Benchmark):
"""
Trefethen test objective function.
This class defines the Trefethen global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trefethen}}(\\mathbf{x}) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2} + e^{\\sin\\left(50 x_{1}\\right)} - \\sin\\left(10 x_{1} + 10 x_{2}\\right) + \\sin\\left(60 e^{x_{2}}\\right) + \\sin\\left[70 \\sin\\left(x_{1}\\right)\\right] + \\sin\\left[\\sin\\left(80 x_{2}\\right)\\right]
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-10, 10]` for :math:`i=1,2`.
.. figure:: figures/Trefethen.png
:alt: Trefethen function
:align: center
**Two-dimensional Trefethen function**
*Global optimum*: :math:`f(x_i) = -3.3068686474` for :math:`\\mathbf{x} = [-0.02440307923, 0.2106124261]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-10.0] * self.dimensions,
[ 10.0] * self.dimensions))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [-0.02440307923, 0.2106124261]
self.fglob = -3.3068686474
def evaluator(self, x, *args):
self.fun_evals += 1
F = exp(sin(50*x[0])) + sin(60*exp(x[1])) + sin(70*sin(x[0])) + \
sin(sin(80*x[1])) - sin(10*(x[0]+x[1])) + 1.0/4*(x[0]**2 + x[1]**2)
return F
# -------------------------------------------------------------------------------- #
class ThreeHumpCamel(Benchmark):
"""
Three Hump Camel test objective function.
This class defines the Three Hump Camel global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{ThreeHumpCamel}}(\\mathbf{x}) = 2x_1^2 - 1.05x_1^4 + \\frac{x_1^6}{6} + x_1x_2 + x_2^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-5, 5]` for :math:`i=1,2`.
.. figure:: figures/ThreeHumpCamel.png
:alt: Three Hump Camel function
:align: center
**Two-dimensional Three Hump Camel function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, 0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-5.0] * self.dimensions,
[ 5.0] * self.dimensions))
self.custom_bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [0.0, 0.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return 2.0*x1**2.0 - 1.05*x1**4.0 + x1**6/6.0 + x1*x2 + x2**2.0
# -------------------------------------------------------------------------------- #
class Trid(Benchmark):
"""
Trid test objective function.
This class defines the Trid global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trid}}(\\mathbf{x}) = \\sum_{i=1}^{n}(x_i - 1)^2 - \\sum_{i=2}^{n} x_ix_{i-1}
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, 20]` for :math:`i=1,...,6`.
*Global optimum*: :math:`f(x_i) = -50` for :math:`\\mathbf{x} = [6, 10, 12, 12, 10, 6]`
"""
def __init__(self, dimensions=6):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([ 0.0] * self.dimensions,
[20.0] * self.dimensions))
self.global_optimum = [6, 10, 12, 12, 10, 6]
self.fglob = -50.0
def evaluator(self, x, *args):
self.fun_evals += 1
return sum((x - 1.0)**2.0) - sum(x[1:]*x[0:-1])
# -------------------------------------------------------------------------------- #
class Trigonometric01(Benchmark):
"""
Trigonometric 1 test objective function.
This class defines the Trigonometric 1 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trigonometric01}}(\\mathbf{x}) = \\sum_{i=1}^{n} \\left[n - \\sum_{j=1}^{n} \\cos(x_j) + i \\left(1 - \\cos(x_i) - \\sin(x_i) \\right) \\right]^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [0, \\pi]` for :math:`i=1,...,n`.
.. figure:: figures/Trigonometric01.png
:alt: Trigonometric 1 function
:align: center
**Two-dimensional Trigonometric 1 function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`x_i = 0` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([0.0] * self.dimensions,
[ pi] * self.dimensions))
self.global_optimum = [0.0] * self.dimensions
self.fglob = 0.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
rng = arange(1.0, self.dimensions+1)
# Squares each bracketed term per the documented formula, then sums over i
return sum((self.dimensions - sum(cos(x)) + rng*(1.0 - cos(x) - sin(x)))**2.0)
# -------------------------------------------------------------------------------- #
class Trigonometric02(Benchmark):
"""
Trigonometric 2 test objective function.
This class defines the Trigonometric 2 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Trigonometric2}}(\\mathbf{x}) = 1 + \\sum_{i=1}^{n} 8 \\sin^2 \\left[7(x_i - 0.9)^2 \\right] + 6 \\sin^2 \\left[14(x_i - 0.9)^2 \\right] + (x_i - 0.9)^2
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-500, 500]` for :math:`i=1,...,n`.
.. figure:: figures/Trigonometric02.png
:alt: Trigonometric 2 function
:align: center
**Two-dimensional Trigonometric 2 function**
*Global optimum*: :math:`f(x_i) = 1` for :math:`x_i = 0.9` for :math:`i=1,...,n`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-500.0] * self.dimensions,
[ 500.0] * self.dimensions))
self.custom_bounds = [(0, 2), (0, 2)]
self.global_optimum = [0.9] * self.dimensions
self.fglob = 1.0
self.change_dimensionality = True
def evaluator(self, x, *args):
self.fun_evals += 1
return 1.0 + sum(8.0*(sin(7.0*(x - 0.9)**2.0)**2.0) + 6.0*(sin(14.0*(x - 0.9)**2.0)**2.0) + (x - 0.9)**2.0)
# -------------------------------------------------------------------------------- #
class Tripod(Benchmark):
"""
Tripod test objective function.
This class defines the Tripod global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Tripod}}(\\mathbf{x}) = p(x_2) \\left[1 + p(x_1) \\right] + \\lvert x_1 + 50p(x_2) \\left[1 - 2p(x_1) \\right] \\rvert + \\lvert x_2 + 50\\left[1 - 2p(x_2)\\right] \\rvert
Here, :math:`n` represents the number of dimensions and :math:`x_i \\in [-100, 100]` for :math:`i=1,2`.
.. figure:: figures/Tripod.png
:alt: Tripod function
:align: center
**Two-dimensional Tripod function**
*Global optimum*: :math:`f(x_i) = 0` for :math:`\\mathbf{x} = [0, -50]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = list(zip([-100.0] * self.dimensions,
[ 100.0] * self.dimensions))
self.global_optimum = [0.0, -50.0]
self.fglob = 0.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
p1 = float(x1 >= 0)
p2 = float(x2 >= 0)
return p2*(1.0 + p1) + abs(x1 + 50.0*p2*(1.0-2.0*p1)) + abs(x2 + 50.0*(1.0-2.0*p2))
# -------------------------------------------------------------------------------- #
class Ursem01(Benchmark):
"""
Ursem 1 test objective function.
This class defines the Ursem 1 global optimization problem. This
is a unimodal minimization problem defined as follows:
.. math::
f_{\\text{Ursem01}}(\\mathbf{x}) = - \\sin(2x_1 - 0.5 \\pi) - 3 \\cos(x_2) - 0.5x_1
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-2.5, 3]`, :math:`x_2 \\in [-2, 2]`.
.. figure:: figures/Ursem01.png
:alt: Ursem 1 function
:align: center
**Two-dimensional Ursem 1 function**
*Global optimum*: :math:`f(x_i) = -4.8168` for :math:`\\mathbf{x} = [1.69714, 0.0]`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-2.5, 3.0), (-2.0, 2.0)]
self.global_optimum = [1.69714, 0.0]
self.fglob = -4.8168
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -sin(2*x1 - 0.5*pi) - 3.0*cos(x2) - 0.5*x1
# -------------------------------------------------------------------------------- #
class Ursem03(Benchmark):
"""
Ursem 3 test objective function.
This class defines the Ursem 3 global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\\text{Ursem03}}(\\mathbf{x}) = - \\sin(2.2 \\pi x_1 + 0.5 \\pi) \\frac{2 - \\lvert x_1 \\rvert}{2} \\frac{3 - \\lvert x_1 \\rvert}{2} - \\sin(2.2 \\pi x_2 + 0.5 \\pi) \\frac{2 - \\lvert x_2 \\rvert}{2} \\frac{3 - \\lvert x_2 \\rvert}{2}
Here, :math:`n` represents the number of dimensions and :math:`x_1 \\in [-2, 2]`, :math:`x_2 \\in [-1.5, 1.5]`.
.. figure:: figures/Ursem03.png
:alt: Ursem 3 function
:align: center
**Two-dimensional Ursem 3 function**
*Global optimum*: :math:`f(x_i) = -3` for :math:`x_i = 0` for :math:`i=1,2`
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self.bounds = [(-2, 2), (-1.5, 1.5)]
self.global_optimum = [0.0 for _ in range(self.dimensions)]
self.fglob = -3.0
def evaluator(self, x, *args):
self.fun_evals += 1
x1, x2 = x
return -sin(2.2*pi*x1 + 0.5*pi)*((2.0 - abs(x1))/2.0)*((3.0 -
|
abs(x1)
|
numpy.abs
|
import numpy
from tkp.utility import nice_format
from scipy.stats import norm
from sqlalchemy.sql.expression import desc
from tkp.db.model import Image
from tkp.db.quality import reject_reasons
def rms_invalid(rms, noise, low_bound=1, high_bound=50):
"""
Is the RMS value of an image outside the plausible range?
:param rms: RMS value of an image, can be computed with
tkp.quality.statistics.rms
:param noise: Theoretical noise level of instrument, can be calculated with
tkp.lofar.noise.noise_level
:param low_bound: multiplied with noise to define lower threshold
:param high_bound: multiplied with noise to define upper threshold
:returns: True/False
"""
if (rms < noise * low_bound) or (rms > noise * high_bound):
ratio = rms / noise
return "rms value (%s) is %s times theoretical noise (%s)" % \
(nice_format(rms), nice_format(ratio), nice_format(noise))
else:
return False
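# Usage sketch (numbers are illustrative assumptions, not real image values):
#
#     msg = rms_invalid(rms=20.0, noise=0.3, low_bound=1, high_bound=50)
#     if msg:
#         print("image rejected:", msg)   # ratio ~66 exceeds high_bound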
def rms(data):
"""Returns the RMS of the data about the median.
Args:
data: a numpy array
"""
data -= numpy.median(data)
return numpy.sqrt(
|
numpy.power(data, 2)
|
numpy.power
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 5 10:23:43 2019
@author: bzfkostr
"""
from __future__ import division
import numpy as np
import matplotlib
matplotlib.use("agg")
import time
import multiprocessing
from multiprocessing import Pool
from functools import partial
from Reaction import *
from Injection import *
from Parameters_LV import *
'''
This main file couples a particle-based simulation (PBS) to a mean-field concentration and returns trajectories of particles for different simulations.
Before running this file it is necessary to calculate the continuous solution with FD.py. The functions for reactions come from Reaction.py and
the one for injection from Injection.py.
The input is:
D1, D2=diffusion coefficients of A and B
timesteps=number of time steps
deltat=time-step size
l=number of cells in the FD scheme
a=vertical length of the domain
L=horizontal length of the domain
r1=first-order microscopic rate A-> 2A
r2_macro=second-order macroscopic rate that corresponds to the rate in the FD scheme
r2=second-order microscopic rate A+B-> 2B
sigma=reaction radius
r3=zero-order microscopic rate B->0
deltar=width of the boundary domain (one boundary cell has the length and width of deltar)
The code consists of the following components:
1) Calculate the boundary concentration for EACH time step
2) Iteration with Strang splitting using the functions from Reaction.py and Injection.py: Injection, Reaction, Diffusion, Reaction, Injection.
3) Multiprocessing that runs many simulations at the same time
'''1. Calculate the boundary concentration 'Boundaryconcentration' from the FD solution '''
x0 = np.array([L+1, L]) #location of source
dx_hist =a/l_coupling # length of histogram cell edge (squares of dx_hist by dx_hist)
yarray1 = np.arange(0,a,deltar1) # Array to locate boundary cells for species A
yarray2 = np.arange(0,a,deltar2) # Array to locate boundary cells for species B
# Simulation parameters
dtshould1 = deltar1*deltar1/(2.0*D1) # time-step size
dtshould2 = deltar2*deltar2/(2.0*D2) # time-step size
print(dtshould1, dtshould2, deltat, 'Should be equal')
maxtime = deltat*(timesteps-1) # maximum time simulation can reach
Time=np.linspace(0, maxtime, timesteps)
listC1=np.load('./Solutions/FDSolution1.npy') # gets Data from continuous solution of A
listC2=np.load('./Solutions/FDSolution2.npy') # gets Data from continuous solution of B
averageNumberParticles1 = np.zeros((len(yarray1),timesteps))
averageNumberParticles2 = np.zeros((len(yarray2),timesteps))
xlimits1 = [a/2,a/2+deltar1]
xlimits2 = [a/2,a/2+deltar2]
gamma1=D1/((deltar1)**2) #injection rate
gamma2=D2/((deltar2)**2)
'''Boundary concentration for A'''
for i in range(len(yarray1)):
ylimits1 = [yarray1[i], yarray1[i] + deltar1]
for k in range(timesteps):
if k == 0:
averageNumberParticles1[i,k] = 0.0
else:
averageNumberParticles1[i,k] = (deltar1) * (deltar1) * listC1[k][int(i/(len(yarray1)/l_coupling)),int(l_coupling/2) ]
'''Boundary concentration for B'''
for i in range(len(yarray2)):
ylimits2 = [yarray2[i], yarray2[i] + deltar2]
for k in range(timesteps):
if k == 0:
averageNumberParticles2[i,k] = 0.0
else:
averageNumberParticles2[i,k] = (deltar2) * (deltar2) * listC2[k][int(i/(len(yarray2)/l_coupling)),int(l_coupling/2) ]
Boundaryconcentration1=averageNumberParticles1
Boundaryconcentration2=averageNumberParticles2
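# A vectorized sketch of the two filling loops above (same index mapping,
# kept as a comment for reference; assumes the arrays defined above):
#
#     rows1 = (np.arange(len(yarray1))/(len(yarray1)/l_coupling)).astype(int)
#     for k in range(1, timesteps):
#         averageNumberParticles1[:, k] = deltar1**2 * listC1[k][rows1, int(l_coupling/2)]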
''' 2. Iteration'''
def functionsimulation(simulations, ts):
'''
Returns, for A and B, lists of trajectories at every desired time step for every simulation, together with the reference solutions.
simulations=number of simulations
ts=which time steps we save: 0, ts, ts*2, ...
PreySimulation=saves all simulations
PreyPosition=updated list containing the current positions of prey as a
2D array
PreyPositionHalfTime=contains the positions at each desired time step
Structure: PreySimulation=[[Simulation_1], [Simulation_2]...], Simulation_1=[[Time_1], [Time_2],..], for example Time_1=[[1,2], [3,4],[1.4,4]] (positions)
Analogous for the predator
'''
PreySimulation=simulations*[None]
PredatorSimulation=simulations*[None]
for s in range(simulations):
PreyPosition=[]
PreyPositionHalfTime=[]
PredatorPosition=[]
PredatorPositionHalfTime=[]
Reference1=[]
Reference2=[]
for t in range(timesteps): #Prey has a position
'''Injection'''
PreyChildrenInj= concentrationmovement0(Boundaryconcentration1[:,t], deltat*1/2,deltar1,L,np.zeros(len(Boundaryconcentration1[:,t])), gamma1)
PredChildrenInj= concentrationmovement0(Boundaryconcentration2[:,t], deltat*1/2,deltar2,L,np.zeros(len(Boundaryconcentration2[:,t])), gamma2)
'''Reaction'''
PreyChildrenProlif=proliferation(PreyPosition, r1, deltat*0.25)
PredatorPosition=dying(PredatorPosition,r3, deltat*0.25, PredatorPosition)
PreyPosition, PredChildrenReact, PredB=eatcompact(PreyPosition, PredatorPosition, L, deltat*0.5,Boundaryconcentration1[:,t],Boundaryconcentration2[:,t], r2, sigma, deltat)
PreyChildrenProlif=proliferation(PreyPosition, r1, deltat*0.25)
PredatorPosition=dying(PredatorPosition,r3, deltat*0.25, PredB)
'''Put them all together'''
PreyPosition=PreyChildrenInj+PreyChildrenProlif+PreyPosition
PredatorPosition=PredChildrenInj+PredatorPosition+PredChildrenReact
'''Diffusion'''
PreyPosition=movement(PreyPosition, deltat, D1, L, a)
PredatorPosition=movement(PredatorPosition, deltat, D2, L, a)
"Reaction"
PreyChildrenProlif=proliferation(PreyPosition, r1, deltat*0.25)
PredatorPosition=dying(PredatorPosition,r3, deltat*0.25, PredatorPosition)
PreyPosition, PredChildrenReact, PredB=eatcompact(PreyPosition, PredatorPosition, L, deltat*0.5,Boundaryconcentration1[:,t],Boundaryconcentration2[:,t], r2, sigma, deltat)
PreyChildrenProlif=proliferation(PreyPosition, r1, deltat*0.25)
PredatorPosition=dying(PredatorPosition,r3, deltat*0.25, PredB)
PreyChildrenInj= concentrationmovement0(Boundaryconcentration1[:,t], deltat*1/2,deltar1,L,np.zeros(len(Boundaryconcentration1[:,t])), gamma1)
PredChildrenInj= concentrationmovement0(Boundaryconcentration2[:,t], deltat*1/2,deltar2,L,np.zeros(len(Boundaryconcentration2[:,t])), gamma2)
'''Put them all together'''
PreyPosition=PreyChildrenInj+PreyChildrenProlif+PreyPosition
PredatorPosition=PredChildrenInj+PredatorPosition+PredChildrenReact
if np.round(t*deltat,10) in np.round(np.arange(0, maxtime, ts),10):
PreyPositionHalfTime.append(PreyPosition)
PredatorPositionHalfTime.append(PredatorPosition)
print(t)
if s==simulations-1:
Reference1.append(listC1[t])
Reference2.append(listC2[t])
print( s, len(PreyPosition), len(PredatorPosition),t)
PreySimulation[s]=PreyPositionHalfTime
PredatorSimulation[s]=PredatorPositionHalfTime
return PreySimulation, Reference1, PredatorSimulation, Reference2
'''3. Multi-Processing'''
PreySimulation, Reference1, PredatorSimulation, Reference2=functionsimulation(1, s)
np.save( './Solutions/Reference1.npy', Reference1) # saves reference solution at the CORRECT time-step
np.save( './Solutions/Reference2.npy', Reference2) # saves reference solution at the CORRECT time-step
def runParallelSims(simnumber):
# Define seed
|
np.random.seed(simnumber)
|
numpy.random.seed
|
#
# In this short script, we show how to use RobotWrapper
# integrating different kinds of viewers
#
import pinocchio as pin
from pinocchio.robot_wrapper import RobotWrapper
from pinocchio.visualize import (GepettoVisualizer, MeshcatVisualizer)
from sys import argv
import time
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy.linalg import norm, solve
from pinocchio.utils import *
import os
from os.path import dirname, join, abspath
# If you want to visualize the robot in this example,
# you can choose which visualizer to employ
# by specifying an option from the command line:
# GepettoVisualizer: -g
# MeshcatVisualizer: -m
VISUALIZER = GepettoVisualizer
if len(argv)>1:
opt = argv[1]
if opt == '-g':
VISUALIZER = GepettoVisualizer
elif opt == '-m':
VISUALIZER = MeshcatVisualizer
#else:
# raise ValueError("Unrecognized option: " + opt)
# Load the URDF model with RobotWrapper
# Conversion with str seems to be necessary when executing this file with ipython
pinocchio_model_dir = join(dirname(dirname(str(abspath(__file__)))),"models")
model_path = join(pinocchio_model_dir,"others/robots")
mesh_dir = model_path
#urdf_filename = "2DOF_description.urdf"
#urdf_model_path = join(join(model_path,"2DOF_description/urdf"),urdf_filename)
urdf_filename = "double_pendulum.urdf"
urdf_model_path = join(join(model_path,"double_pendulum_description/urdf"),urdf_filename)
robot = RobotWrapper.BuildFromURDF(urdf_model_path, mesh_dir, pin.JointModelFreeFlyer())
model = robot.model
data = robot.data
#motion
q0 = np.array([0, 0 , 0 , 0 , 0 , 0 , 0 , 0, 0])
v = zero(6)
#velocity = robot.velocity(q0,v,3)
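# Sketch (commented out; not part of the original excerpt): attaching and
# using the selected viewer through RobotWrapper's standard API:
#   robot.setVisualizer(VISUALIZER())
#   robot.initViewer()
#   robot.loadViewerModel()
#   robot.display(q0)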
#generate waypoints
q1 = np.array([[0, 0 , 0 , 0 , 0 , 0 , 0 , 0, 0]])
t = 0
eps = 1e-4
a = 0
b = 3
c = 2
d = 2
q_l2 = q_l1 = 0.0
Q = np.array([[0,0,0]])
for i in range(1000):
q_l1 = a + b*t + c*t*t + d*t*t*t
q_l2 = a + b*t + c*t*t + d*t*t*t
v_l1 = b + 2*c*t + 3*d*t*t
v_l2 = b + 2*c*t + 3*d*t*t
a_l1 = 2*c + 6*d*t
a_l2 = 2*c + 6*d*t
t = t+ eps
    q1 = np.append(q1, np.array([[0,0,0, 0,0,0, 0,q_l1,q_l2]]), axis=0)  # axis=0 (assumed) stacks waypoints as rows
import numpy as np
from copy import deepcopy
from scipy import ndimage
from scipy.ndimage.interpolation import shift
import scipy.fftpack as sf
import math
import matplotlib.pyplot as plt
from scipy.signal import medfilt2d, medfilt
from pystackreg import StackReg
#import scipy.ndimage as sn
#from scipy.ndimage.filters import median_filter as medfilt
def rm_abnormal(img):
tmp = img.copy()
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
tmp[tmp < 0] = 0
return tmp
def img_smooth(img, kernal_size, axis=0):
s = img.shape
if len(s) == 2:
img_stack = img.reshape(1, s[0], s[1])
else:
img_stack = img.copy()
if axis == 0:
for i in range(img_stack.shape[0]):
img_stack[i] = medfilt2d(img_stack[i], kernal_size)
elif axis == 1:
for i in range(img_stack.shape[1]):
img_stack[:, i] = medfilt2d(img_stack[:,i], kernal_size)
elif axis == 2:
for i in range(img_stack.shape[2]):
img_stack[:, :, i] = medfilt2d(img_stack[:,:, i], kernal_size)
return img_stack
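# Example (sketch): 3x3 median filter applied slice-by-slice to a stack.
#   stack = np.random.rand(5, 64, 64).astype(np.float32)
#   smoothed = img_smooth(stack, 3)   # filters each image along axis 0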
def img_fillhole(img, binary_threshold=0.5):
img_b = img.copy()
img_b[np.isnan(img_b)] = 0
img_b[np.isinf(img_b)] = 0
img_b[img_b > binary_threshold] = 1
img_b[img_b < 1] = 0
struct = ndimage.generate_binary_structure(2, 1)
mask = ndimage.binary_fill_holes(img_b, structure=struct).astype(img.dtype)
img_fillhole = img * mask
return mask, img_fillhole
def img_dilation(img, binary_threshold=0.5, iterations=2):
img_b = img.copy()
img_b[np.isnan(img_b)] = 0
img_b[np.isinf(img_b)] = 0
img_b[img_b > binary_threshold] = 1
img_b[img_b < 1] = 0
struct = ndimage.generate_binary_structure(2, 1)
mask = ndimage.binary_dilation(img_b, structure=struct, iterations=iterations).astype(img.dtype)
img_dilated = img * mask
return mask, img_dilated
def img_erosion(img, binary_threshold=0.5, iterations=2):
img_b = img.copy()
img_b[np.isnan(img_b)] = 0
img_b[np.isinf(img_b)] = 0
img_b[img_b > binary_threshold] = 1
img_b[img_b < 1] = 0
struct = ndimage.generate_binary_structure(2, 1)
mask = ndimage.binary_erosion(img_b, structure=struct, iterations=iterations).astype(img.dtype)
img_erosion = img * mask
return mask, img_erosion
def rm_noise(img, noise_level=2e-3, filter_size=3):
img_s = medfilt2d(img, filter_size)
img_diff = np.abs(img - img_s)
index = img_diff > noise_level
img_m = img.copy()
img_m[index] = img_s[index]
return img_m
def _get_mask(dx, dy, ratio):
"""
Calculate 2D boolean circular mask.
Parameters
----------
dx, dy : int
Dimensions of the 2D mask.
    ratio : float
Ratio of the circle's diameter in pixels to
the smallest mask dimension.
Returns
-------
ndarray
2D boolean array.
"""
rad1 = dx / 2.
rad2 = dy / 2.
if dx > dy:
r2 = rad1 * rad1
else:
r2 = rad2 * rad2
y, x = np.ogrid[0.5 - rad1:0.5 + rad1, 0.5 - rad2:0.5 + rad2]
return x * x + y * y < ratio * ratio * r2
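# Example (sketch): a centered circular boolean mask for a 100x120 image.
#   m = _get_mask(100, 120, ratio=1)   # m.shape == (100, 120), True inside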
def circ_mask(img, axis, ratio=1, val=0):
im = np.float32(img)
s = im.shape
if len(s) == 2:
m = _get_mask(s[0], s[1], ratio)
m_out = (1 - m) * val
        im_m = np.array(m, dtype=np.int32) * im + m_out  # np.int was removed from modern numpy
else:
im = im.swapaxes(0, axis)
dx, dy, dz = im.shape
m = _get_mask(dy, dz, ratio)
m_out = (1 - m) * val
        im_m = np.array(m, dtype=np.int32) * im + m_out  # np.int was removed from modern numpy
im_m = im_m.swapaxes(0, axis)
return im_m
def pad(img, thick, direction):
"""
    Symmetrically pad the image with zeros.
Parameters:
-----------
img: 2d or 3d array
2D or 3D images
thick: int
padding thickness for all directions
        if thick is odd, it is automatically increased to thick+1
direction: int
0: padding in axes = 0 (2D or 3D image)
1: padding in axes = 1 (2D or 3D image)
2: padding in axes = 2 (3D image)
Return:
-------
2d or 3d array
"""
thick = np.int32(thick)
if thick%2 == 1:
thick = thick + 1
print('Increasing padding thickness to: {}'.format(thick))
img = np.array(img)
s = np.array(img.shape)
if thick == 0 or direction > 3 or s.size > 3:
return img
hf = np.int32(np.ceil(abs(thick)+1) / 2) # half size of padding thickness
if thick > 0:
if s.size < 3: # 2D image
if direction == 0: # padding row
pad_image = np.zeros([s[0]+thick, s[1]])
pad_image[hf:(s[0]+hf), :] = img
            else: # direction == 1, padding column
pad_image = np.zeros([s[0], s[1]+thick])
pad_image[:, hf:(s[1]+hf)] = img
else: # s.size ==3, 3D image
if direction == 0: # padding slice
pad_image = np.zeros([s[0]+thick, s[1], s[2]])
pad_image[hf:(s[0]+hf), :, :] = img
elif direction ==1: # padding row
pad_image = np.zeros([s[0], s[1]+thick, s[2]])
pad_image[:, hf:(s[1]+hf), :] = img
            else: # padding column
pad_image = np.zeros([s[0],s[1],s[2]+thick])
pad_image[:, :, hf:(s[2]+hf)] = img
else: # thick < 0: shrink the image
if s.size < 3: # 2D image
if direction == 0: # shrink row
pad_image = img[hf:(s[0]-hf), :]
            else: pad_image = img[:, hf:(s[1]-hf)] # shrink column
else: # s.size == 3, 3D image
if direction == 0: # shrink slice
pad_image = img[hf:(s[0]-hf), :, :]
elif direction == 1: # shrink row
pad_image = img[:, hf:(s[1]-hf),:]
            else: # shrink column
pad_image = img[:, :, hf:(s[2]-hf)]
return pad_image
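# Example (sketch): pad 4 rows of zeros symmetrically, then undo it.
#   a = np.ones((8, 8))
#   b = pad(a, 4, direction=0)    # b.shape == (12, 8)
#   c = pad(b, -4, direction=0)   # back to (8, 8)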
def img_analysis(img, n_comp=2):
from sklearn.cluster import KMeans
img_reshape = img.transpose(1, 2, 0)
s = img_reshape.shape
img_reshape_flat = img_reshape.reshape(-1, s[2])
result = {}
kmeans = KMeans(n_clusters=n_comp, random_state=0).fit(img_reshape_flat)
img_values = kmeans.cluster_centers_
img_labels = kmeans.labels_
img_compress = np.zeros([s[0] * s[1], s[2]])
for i in range(s[2]):
img_compress[:, i] = np.choose(img_labels, img_values[:, i])
img_labels = img_labels.reshape(s[0], s[1])
img_compress = img_compress.reshape(s)
img_compress = img_compress.transpose(2, 0, 1)
result['method'] = 'kmean'
result['img_compress'] = img_compress
result['img_labels'] = img_labels
result['img_values'] = img_values
return result
def kmean_mask(img, n_comp=2, index_select=-1):
import numpy as np
from scipy import ndimage
flag_3d_image = 1
s = img.shape
if len(s) == 2:
img3D = img.reshape([1, s[0], s[1]])
s = img3D.shape
flag_3d_image = 0
else:
img3D = img.copy()
res = img_analysis(img3D, n_comp=n_comp)
img_compress = res['img_compress'] # shape = (s[0], s[1], s[1]), e.g., (91, 750, 750)
img_values = res['img_values'] # shape = (n_comp, s[0]) e.g., (2, 91)
img_labels = res['img_labels'] # shape = (s[1], s[1]) e.g., (750, 750)
mask_comp = np.zeros([n_comp, s[1], s[2]])
try:
img_labels_copy = img_labels.copy()
val = img_values[:, index_select]
val_sort = np.sort(val)[::-1]
struct = ndimage.generate_binary_structure(2, 1)
struct = ndimage.iterate_structure(struct, 2).astype(int)
for i in range(n_comp):
id_mask = np.squeeze(np.where(val == val_sort[i]))
mask = np.zeros(img_labels.shape)
mask[img_labels_copy == id_mask] = 1
img_labels[img_labels_copy == id_mask] = n_comp - i - 1
#mask, _ = img_fill_holes(mask, struct=struct)
mask_comp[i] = mask
except:
pass
if flag_3d_image == 0:
return mask_comp, img_labels, img_compress
else:
return mask_comp, img_labels
def bin_ndarray(ndarray, new_shape=None, operation='mean'):
"""
Bins an ndarray in all axes based on the target shape, by summing or
averaging.
Number of output dimensions must match number of input dimensions and
new axes must divide old ones.
Example
-------
>>> m = np.arange(0,100,1).reshape((10,10))
>>> n = bin_ndarray(m, new_shape=(5,5), operation='sum')
>>> print(n)
[[ 22 30 38 46 54]
[102 110 118 126 134]
[182 190 198 206 214]
[262 270 278 286 294]
[342 350 358 366 374]]
"""
    if new_shape is None:
s = np.array(ndarray.shape)
s1 = np.int32(s/2)
new_shape = tuple(s1)
operation = operation.lower()
if not operation in ['sum', 'mean']:
raise ValueError("Operation not supported.")
if ndarray.ndim != len(new_shape):
raise ValueError("Shape mismatch: {} -> {}".format(ndarray.shape,
new_shape))
compression_pairs = [(d, c//d) for d,c in zip(new_shape,
ndarray.shape)]
flattened = [l for p in compression_pairs for l in p]
ndarray = ndarray.reshape(flattened)
for i in range(len(new_shape)):
op = getattr(ndarray, operation)
ndarray = op(-1*(i+1))
return ndarray
def draw_circle(cen, r, theta=[0, 360.0]):
th = np.linspace(theta[0]/180.*np.pi, theta[1]/180.*np.pi, 361)
x = r * np.cos(th) + cen[0]
y = r * np.sin(th) + cen[1]
plt.plot(x,y,'r')
def get_circle_line_from_img(img, cen, r, pix_size=17.1, theta=[0, 360.0], f_out='circle_profile_with_fft.txt'):
d_th = 1 / 10.0 / r
th = np.arange(theta[0]/180.*np.pi, theta[1]/180.0*np.pi+d_th, d_th)
num_data = len(th)
x = r * np.sin(th) + cen[1]
y = r * np.cos(th) + cen[0]
x_int = np.int32(np.floor(x)); x_frac = x - x_int
y_int = np.int32(np.floor(y)); y_frac = y - y_int
data = []
for i in range(num_data):
t1 = img[x_int[i], y_int[i]] * (1 - x_frac[i]) * (1 - y_frac[i])
t2 = img[x_int[i], y_int[i]+1] * (1 - x_frac[i]) * y_frac[i]
t3 = img[x_int[i]+1, y_int[i]] * x_frac[i] * (1 - y_frac[i])
t4 = img[x_int[i]+1, y_int[i]+1] * x_frac[i] * y_frac[i]
t = t1 + t2 + t3 + t4
data.append(t)
line = th * r * pix_size
plt.figure()
plt.subplot(221)
plt.imshow(img)
draw_circle(cen, r, theta)
plt.subplot(223);plt.plot(line, data)
plt.title('line_profile: r={} pixels'.format(r))
data_fft = np.fft.fftshift(np.fft.fft(data))
fs = 1/(pix_size/10)
f = fs/2 * np.linspace(-1, 1, len(data_fft))
plt.subplot(224);plt.plot(f, np.abs(data_fft))
plt.xlim([-0.04,0.04])
plt.ylim([-10, 300])
plt.title('fft of line_profile')
    # combine data into a single variable and save it
data_comb = np.zeros([len(data), 4])
data_comb[:,0] = line
data_comb[:,1] = data
data_comb[:,2] = f
data_comb[:,3] = np.abs(data_fft)
np.savetxt(f_out, data_comb, fmt='%3.4e')
return data_comb
class IndexTracker(object):
def __init__(self, ax, X):
self.ax = ax
self._indx_txt = ax.set_title(' ', loc='center')
self.X = X
self.slices, rows, cols = X.shape
self.ind = self.slices//2
self.im = ax.imshow(self.X[self.ind, :, :], cmap='gray')
self.update()
def onscroll(self, event):
if event.button == 'up':
self.ind = (self.ind + 1) % self.slices
else:
self.ind = (self.ind - 1) % self.slices
self.update()
def update(self):
self.im.set_data(self.X[self.ind, :, :])
#self.ax.set_ylabel('slice %s' % self.ind)
self._indx_txt.set_text(f"frame {self.ind + 1} of {self.slices}")
self.im.axes.figure.canvas.draw()
def image_movie(data, *, ax=None):
# show a movie of image in python environment
if ax is None:
fig, ax = plt.subplots()
else:
fig = ax.figure
tracker = IndexTracker(ax, data)
# monkey patch the tracker onto the figure to keep it alive
fig._tracker = tracker
fig.canvas.mpl_connect('scroll_event', tracker.onscroll)
return tracker
from PIL import Image
_errstr = "Mode is unknown or incompatible with input array shape."
def bytescale(data, cmin=None, cmax=None, high=255, low=0):
"""
Byte scales an array (image).
Byte scaling means converting the input image to uint8 dtype and scaling
the range to ``(low, high)`` (default 0-255).
If the input image already has dtype uint8, no scaling is done.
This function is only available if Python Imaging Library (PIL) is installed.
Parameters
----------
data : ndarray
PIL image data array.
cmin : scalar, optional
Bias scaling of small values. Default is ``data.min()``.
cmax : scalar, optional
Bias scaling of large values. Default is ``data.max()``.
high : scalar, optional
Scale max value to `high`. Default is 255.
low : scalar, optional
Scale min value to `low`. Default is 0.
Returns
-------
img_array : uint8 ndarray
The byte-scaled array.
Examples
--------
>>> from scipy.misc import bytescale
>>> img = np.array([[ 91.06794177, 3.39058326, 84.4221549 ],
... [ 73.88003259, 80.91433048, 4.88878881],
... [ 51.53875334, 34.45808177, 27.5873488 ]])
>>> bytescale(img)
array([[255, 0, 236],
[205, 225, 4],
[140, 90, 70]], dtype=uint8)
>>> bytescale(img, high=200, low=100)
array([[200, 100, 192],
[180, 188, 102],
[155, 135, 128]], dtype=uint8)
>>> bytescale(img, cmin=0, cmax=255)
array([[91, 3, 84],
[74, 81, 5],
[52, 34, 28]], dtype=uint8)
"""
if data.dtype == np.uint8:
return data
if high > 255:
raise ValueError("`high` should be less than or equal to 255.")
if low < 0:
raise ValueError("`low` should be greater than or equal to 0.")
if high < low:
raise ValueError("`high` should be greater than or equal to `low`.")
if cmin is None:
cmin = data.min()
if cmax is None:
cmax = data.max()
cscale = cmax - cmin
if cscale < 0:
raise ValueError("`cmax` should be larger than `cmin`.")
elif cscale == 0:
cscale = 1
scale = float(high - low) / cscale
bytedata = (data - cmin) * scale + low
return (bytedata.clip(low, high) + 0.5).astype(np.uint8)
def toimage(arr, high=255, low=0, cmin=None, cmax=None, pal=None,
mode=None, channel_axis=None):
"""Takes a numpy array and returns a PIL image.
This function is only available if Python Imaging Library (PIL) is installed.
The mode of the PIL image depends on the array shape and the `pal` and
`mode` keywords.
For 2-D arrays, if `pal` is a valid (N,3) byte-array giving the RGB values
(from 0 to 255) then ``mode='P'``, otherwise ``mode='L'``, unless mode
is given as 'F' or 'I' in which case a float and/or integer array is made.
.. warning::
This function uses `bytescale` under the hood to rescale images to use
the full (0, 255) range if ``mode`` is one of ``None, 'L', 'P', 'l'``.
It will also cast data for 2-D images to ``uint32`` for ``mode=None``
(which is the default).
Notes
-----
For 3-D arrays, the `channel_axis` argument tells which dimension of the
array holds the channel data.
For 3-D arrays if one of the dimensions is 3, the mode is 'RGB'
by default or 'YCbCr' if selected.
The numpy array must be either 2 dimensional or 3 dimensional.
"""
data = np.asarray(arr)
if np.iscomplexobj(data):
raise ValueError("Cannot convert a complex-valued array.")
shape = list(data.shape)
valid = len(shape) == 2 or ((len(shape) == 3) and
((3 in shape) or (4 in shape)))
if not valid:
raise ValueError("'arr' does not have a suitable array shape for "
"any mode.")
if len(shape) == 2:
shape = (shape[1], shape[0]) # columns show up first
if mode == 'F':
data32 = data.astype(np.float32)
image = Image.frombytes(mode, shape, data32.tostring())
return image
if mode in [None, 'L', 'P']:
bytedata = bytescale(data, high=high, low=low,
cmin=cmin, cmax=cmax)
image = Image.frombytes('L', shape, bytedata.tostring())
if pal is not None:
image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())
# Becomes a mode='P' automagically.
elif mode == 'P': # default gray-scale
pal = (np.arange(0, 256, 1, dtype=np.uint8)[:, np.newaxis] *
np.ones((3,), dtype=np.uint8)[np.newaxis, :])
image.putpalette(np.asarray(pal, dtype=np.uint8).tostring())
return image
if mode == '1': # high input gives threshold for 1
bytedata = (data > high)
image = Image.frombytes('1', shape, bytedata.tostring())
return image
if cmin is None:
cmin = np.amin(np.ravel(data))
if cmax is None:
cmax = np.amax(np.ravel(data))
data = (data*1.0 - cmin)*(high - low)/(cmax - cmin) + low
if mode == 'I':
data32 = data.astype(np.uint32)
image = Image.frombytes(mode, shape, data32.tostring())
else:
raise ValueError(_errstr)
return image
# if here then 3-d array with a 3 or a 4 in the shape length.
# Check for 3 in datacube shape --- 'RGB' or 'YCbCr'
if channel_axis is None:
if (3 in shape):
ca = np.flatnonzero(np.asarray(shape) == 3)[0]
else:
ca = np.flatnonzero(np.asarray(shape) == 4)
if len(ca):
ca = ca[0]
else:
raise ValueError("Could not find channel dimension.")
else:
ca = channel_axis
numch = shape[ca]
if numch not in [3, 4]:
raise ValueError("Channel axis dimension is not valid.")
bytedata = bytescale(data, high=high, low=low, cmin=cmin, cmax=cmax)
if ca == 2:
strdata = bytedata.tostring()
shape = (shape[1], shape[0])
elif ca == 1:
strdata = np.transpose(bytedata, (0, 2, 1)).tostring()
shape = (shape[2], shape[0])
elif ca == 0:
strdata = np.transpose(bytedata, (1, 2, 0)).tostring()
shape = (shape[2], shape[1])
if mode is None:
if numch == 3:
mode = 'RGB'
else:
mode = 'RGBA'
if mode not in ['RGB', 'RGBA', 'YCbCr', 'CMYK']:
raise ValueError(_errstr)
if mode in ['RGB', 'YCbCr']:
if numch != 3:
raise ValueError("Invalid array shape for mode.")
if mode in ['RGBA', 'CMYK']:
if numch != 4:
raise ValueError("Invalid array shape for mode.")
# Here we know data and mode is correct
image = Image.frombytes(mode, shape, strdata)
return image
###################################################################
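# The registration code below calls idxmax(), which is not defined in this
# excerpt. The following minimal stand-in matches its usage (peak value plus
# its (row, col) index); it is an assumption, not the original helper.
def idxmax(data):
    ind = np.unravel_index(np.argmax(data), data.shape)  # flat argmax -> (row, col)
    return data[ind], ind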
def dftregistration(buf1ft,buf2ft,usfac=100):
"""
# function [output Greg] = dftregistration(buf1ft,buf2ft,usfac);
# Efficient subpixel image registration by crosscorrelation. This code
# gives the same precision as the FFT upsampled cross correlation in a
# small fraction of the computation time and with reduced memory
# requirements. It obtains an initial estimate of the
crosscorrelation peak
# by an FFT and then refines the shift estimation by upsampling the DFT
# only in a small neighborhood of that estimate by means of a
# matrix-multiply DFT. With this procedure all the image points
are used to
# compute the upsampled crosscorrelation.
# <NAME> - Dec 13, 2007
# Portions of this code were taken from code written by <NAME>
# and <NAME>.
# <NAME> and <NAME>, "Phase retrieval for a complex-valued
# object by using a low-resolution image," J. Opt. Soc. Am. A 7, 450-458
# (1990).
# Citation for this algorithm:
# <NAME>, <NAME>, and <NAME>,
# "Efficient subpixel image registration algorithms," Opt. Lett. 33,
# 156-158 (2008).
# Inputs
# buf1ft Fourier transform of reference image,
# DC in (1,1) [DO NOT FFTSHIFT]
# buf2ft Fourier transform of image to register,
# DC in (1,1) [DO NOT FFTSHIFT]
# usfac Upsampling factor (integer). Images will be registered to
# within 1/usfac of a pixel. For example usfac = 20 means the
# images will be registered within 1/20 of a pixel.
(default = 1)
# Outputs
# output = [error,diffphase,net_row_shift,net_col_shift]
# error Translation invariant normalized RMS error between f and g
# diffphase Global phase difference between the two images (should be
# zero if images are non-negative).
# net_row_shift net_col_shift Pixel shifts between images
# Greg (Optional) Fourier transform of registered version of buf2ft,
# the global phase difference is compensated for.
"""
# Compute error for no pixel shift
if usfac == 0:
CCmax = np.sum(buf1ft*np.conj(buf2ft))
rfzero = np.sum(abs(buf1ft)**2)
rgzero = np.sum(abs(buf2ft)**2)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax))
return error, diffphase
# Whole-pixel shift - Compute crosscorrelation by an IFFT and locate the
# peak
elif usfac == 1:
ndim = np.shape(buf1ft)
m = ndim[0]
n = ndim[1]
CC = sf.ifft2(buf1ft*np.conj(buf2ft))
max1,loc1 = idxmax(CC)
rloc = loc1[0]
cloc = loc1[1]
CCmax=CC[rloc,cloc]
rfzero = np.sum(np.abs(buf1ft)**2)/(m*n)
rgzero = np.sum(np.abs(buf2ft)**2)/(m*n)
error = 1.0 - CCmax*np.conj(CCmax)/(rgzero*rfzero)
error = np.sqrt(np.abs(error))
diffphase=np.arctan2(np.imag(CCmax),np.real(CCmax))
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
ndim = np.shape(buf2ft)
nr = int(round(ndim[0]))
nc = int(round(ndim[1]))
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#return error,diffphase,row_shift,col_shift
return error,diffphase,row_shift,col_shift, image_reg
# Partial-pixel shift
else:
# First upsample by a factor of 2 to obtain initial estimate
# Embed Fourier data in a 2x larger array
ndim = np.shape(buf1ft)
m = int(round(ndim[0]))
n = int(round(ndim[1]))
mlarge=m*2
nlarge=n*2
CC=np.zeros([mlarge,nlarge],dtype=np.complex128)
CC[int(m-np.fix(m/2)):int(m+1+np.fix((m-1)/2)),int(n-np.fix(n/2)):int(n+1+np.fix((n-1)/2))] = (sf.fftshift(buf1ft)*np.conj(sf.fftshift(buf2ft)))[:,:]
# Compute crosscorrelation and locate the peak
CC = sf.ifft2(sf.ifftshift(CC)) # Calculate cross-correlation
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
# Obtain shift in original pixel grid from the position of the
# crosscorrelation peak
ndim = np.shape(CC)
m = ndim[0]
n = ndim[1]
md2 = np.fix(m/2)
nd2 = np.fix(n/2)
if rloc > md2:
row_shift = rloc - m
else:
row_shift = rloc
if cloc > nd2:
col_shift = cloc - n
else:
col_shift = cloc
row_shift=row_shift/2
col_shift=col_shift/2
# If upsampling > 2, then refine estimate with matrix multiply DFT
if usfac > 2:
### DFT computation ###
# Initial shift estimate in upsampled grid
row_shift = 1.*np.round(row_shift*usfac)/usfac;
col_shift = 1.*np.round(col_shift*usfac)/usfac;
dftshift = np.fix(np.ceil(usfac*1.5)/2); ## Center of output array at dftshift+1
# Matrix multiply DFT around the current shift estimate
CC = np.conj(dftups(buf2ft*np.conj(buf1ft),np.ceil(usfac*1.5),np.ceil(usfac*1.5),usfac,\
dftshift-row_shift*usfac,dftshift-col_shift*usfac))/(md2*nd2*usfac**2)
# Locate maximum and map back to original pixel grid
max1,loc1 = idxmax(np.abs(CC))
rloc = int(round(loc1[0]))
cloc = int(round(loc1[1]))
CCmax = CC[rloc,cloc]
rg00 = dftups(buf1ft*np.conj(buf1ft),1,1,usfac)/(md2*nd2*usfac**2)
rf00 = dftups(buf2ft*np.conj(buf2ft),1,1,usfac)/(md2*nd2*usfac**2)
rloc = rloc - dftshift
cloc = cloc - dftshift
row_shift = 1.*row_shift + 1.*rloc/usfac
col_shift = 1.*col_shift + 1.*cloc/usfac
# If upsampling = 2, no additional pixel shift refinement
else:
rg00 = np.sum(buf1ft*np.conj(buf1ft))/m/n;
rf00 = np.sum(buf2ft*np.conj(buf2ft))/m/n;
error = 1.0 - CCmax*np.conj(CCmax)/(rg00*rf00);
error = np.sqrt(np.abs(error));
diffphase = np.arctan2(np.imag(CCmax),np.real(CCmax));
# If its only one row or column the shift along that dimension has no
# effect. We set to zero.
if md2 == 1:
row_shift = 0
if nd2 == 1:
col_shift = 0;
# Compute registered version of buf2ft
if usfac > 0:
ndim = np.shape(buf2ft)
nr = ndim[0]
nc = ndim[1]
Nr = sf.ifftshift(np.arange(-np.fix(1.*nr/2),np.ceil(1.*nr/2)))
Nc = sf.ifftshift(np.arange(-np.fix(1.*nc/2),np.ceil(1.*nc/2)))
Nc,Nr = np.meshgrid(Nc,Nr)
Greg = buf2ft*np.exp(1j*2*np.pi*(-1.*row_shift*Nr/nr-1.*col_shift*Nc/nc))
Greg = Greg*np.exp(1j*diffphase)
        elif usfac == 0:
            # 'nargout' in the original line is a MATLAB leftover that is
            # undefined in Python; usfac == 0 already returned above, so this
            # branch is kept only for parity with the MATLAB source.
            Greg = np.dot(buf2ft, np.exp(1j*diffphase))
#plt.figure(3)
image_reg = sf.ifft2(Greg) * np.sqrt(nr*nc)
#imgplot = plt.imshow(np.abs(image_reg))
#a_ini = np.zeros((100,100))
#a_ini[40:59,40:59] = 1.
#a = a_ini * np.exp(1j*15.)
#plt.figure(6)
#imgplot = plt.imshow(np.abs(a))
#plt.figure(3)
#imgplot = plt.imshow(np.abs(a)-np.abs(image_reg))
#plt.colorbar()
# return error,diffphase,row_shift,col_shift,Greg
return error,diffphase,row_shift,col_shift, image_reg
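# Usage sketch (assumes f and g are 2-D numpy arrays of the same shape):
#   f_ft, g_ft = sf.fft2(f), sf.fft2(g)           # DC in (0,0), no fftshift
#   err, dphi, dr, dc, g_reg = dftregistration(f_ft, g_ft, usfac=100)
#   # dr, dc are the row/column shifts to 1/100 pixel; g_reg is g registered to f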
def dftups(inp,nor,noc,usfac=1,roff=0,coff=0):
"""
# function out=dftups(in,nor,noc,usfac,roff,coff);
# Upsampled DFT by matrix multiplies, can compute an upsampled
DFT in just
# a small region.
# usfac Upsampling factor (default usfac = 1)
# [nor,noc] Number of pixels in the output upsampled DFT, in
# units of upsampled pixels (default = size(in))
# roff, coff Row and column offsets, allow to shift the
output array to
# a region of interest on the DFT (default = 0)
# Recieves DC in upper left corner, image center must be in (1,1)
# <NAME> - Dec 13, 2007
# Modified from dftus, by <NAME> 7/31/06
# This code is intended to provide the same result as if the following
# operations were performed
# - Embed the array "in" in an array that is usfac times larger in each
# dimension. ifftshift to bring the center of the image to (1,1).
# - Take the FFT of the larger array
# - Extract an [nor, noc] region of the result. Starting with the
# [roff+1 coff+1] element.
# It achieves this result by computing the DFT in the output
array without
# the need to zeropad. Much faster and memory efficient than the
# zero-padded FFT approach if [nor noc] are much smaller than
[nr*usfac nc*usfac]
"""
ndim = np.shape(inp)
nr = int(round(ndim[0]))
nc = int(round(ndim[1]))
noc = int(round(noc))
nor = int(round(nor))
# Compute kernels and obtain DFT by matrix products
a = np.zeros([nc,1])
    a[:, 0] = sf.ifftshift(np.arange(nc)) - np.floor(nc / 2.)  # DC-centered frequency index; completion of the truncated line follows the published dftups kernel
from vivarium.library.schema import array_from, array_to
import numpy as np
def one_to_one_map(bsp1, bsp2, species_map):
    # Takes two Bioscrape Processes, bsp1 and bsp2, and
    # species_map: dictionary {species in bsp1: species in bsp2}.
    # Produces the mapping species1[i] --> species2[i].
all_species1 = bsp1.get_model_species_ids()
all_species2 = bsp2.get_model_species_ids()
projection = np.zeros((len(all_species1), len(all_species2)))
#Create the projection matrix
for s1 in species_map:
if s1 not in all_species1:
raise ValueError(f"{s1} not found in Bioscrape Process {bsp1.sbml_file}.")
else:
ind1 = all_species1.index(s1)
s2 = species_map[s1]
if s2 not in all_species2:
raise ValueError(f"{s2} not found in Bioscrape Process {bsp2.sbml_file}.")
else:
ind2 = all_species2.index(s2)
projection[ind1, ind2] = 1
def map_function(states):
source_delta_array = array_from(states['source_deltas'])
output_delta_array = np.dot(source_delta_array, projection)
return array_to(all_species2, output_delta_array)
return map_function
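# Usage sketch (hypothetical species names):
#   m = one_to_one_map(bsp1, bsp2, {'A': 'X', 'B': 'Y'})
#   # deltas on 'A'/'B' in bsp1 are routed onto 'X'/'Y' in bsp2 through the
#   # len(species1) x len(species2) 0/1 projection built above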
def one_to_many_map(bsp1, bsp2, species_map, proportional = True):
    # Takes two Bioscrape Processes, bsp1 and bsp2, and
    # species_map: {species in bsp1: [list of species in bsp2]}.
    # Produces the mapping species1[i] --> [species2[i]].
    # proportional: True/False
    #   if True, the amount of species1[i] mapped to species2[i][j] is proportional to the amount of species2[i][j]:
    #     one_to_many_map(species1[i]) ~ species2[i][j]/sum(species2[i])
    #   if False, the amount of species1[i] mapped to species2[i][j] ~ 1/len(species2[i])
all_species1 = bsp1.get_model_species_ids()
all_species2 = bsp2.get_model_species_ids()
projection = np.zeros((len(all_species1), len(all_species2)))
#Create the projection matrix
for s1 in species_map:
if s1 not in all_species1:
raise ValueError(f"{s1} not found in Bioscrape Process {bsp1.sbml_file}.")
else:
ind1 = all_species1.index(s1)
for s2 in species_map[s1]:
if s2 not in all_species2:
raise ValueError(f"{s2} not found in Bioscrape Process {bsp2.sbml_file}.")
else:
ind2 = all_species2.index(s2)
projection[ind1, ind2] = 1
def map_function(states):
source_delta_array = array_from(states['source_deltas'])
if not proportional:
normalizer = 1/np.sum(projection, 1)
normalized_proj = normalizer[:, np.newaxis]*projection
else:
#Normalize the projection matrix based upon the total concentrations of each target species
output_array = array_from(states['target_state'])
normalized_proj = projection*output_array
normalizer = np.sum(normalized_proj, 1)[:, np.newaxis]*projection
normalized_proj = normalized_proj/(normalizer+(normalizer == 0))
#Cycle through projections, find degenerate cases
#Degenerate cases occur when none of the many species are present
#In this case, the normalizer is uniform.
for i in range(projection.shape[0]):
            if np.sum(normalizer[i, :]) == 0:
                # Assumed completion of the truncated source: when none of the
                # mapped target species are present, fall back to a uniform
                # split across them.
                row_sum = np.sum(projection[i, :])
                if row_sum > 0:
                    normalized_proj[i, :] = projection[i, :] / row_sum
        return array_to(all_species2, np.dot(source_delta_array, normalized_proj))
    return map_function
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : multi_processing.py
@Modify Time @Author @Version @Desciption
------------ ------- -------- -----------
2021/9/15 1:13 lintean 1.0 None
'''
import math
import time
import random
import numpy as np
import pandas as pd
from dotmap import DotMap
from utils import cart2sph, pol2cart, makePath
from keras.layers import Input, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Sequential
import keras.backend as K
from sklearn.preprocessing import scale
from scipy.interpolate import griddata
from keras.utils import np_utils
from scipy.io import loadmat
import keras
import os
from importlib import reload
np.set_printoptions(suppress=True)
def get_logger(name, log_path):
import logging
reload(logging)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logfile = makePath(log_path) + "/Train_" + name + ".log"
fh = logging.FileHandler(logfile, mode='w')
fh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(levelname)s: %(message)s")
fh.setFormatter(formatter)
logger.addHandler(fh)
if log_path == "./result/test":
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
def azim_proj(pos):
"""
Computes the Azimuthal Equidistant Projection of input point in 3D Cartesian Coordinates.
Imagine a plane being placed against (tangent to) a globe. If
a light source inside the globe projects the graticule onto
the plane the result would be a planar, or azimuthal, map
projection.
:param pos: position in 3D Cartesian coordinates
:return: projected coordinates using Azimuthal Equidistant Projection
"""
[r, elev, az] = cart2sph(pos[0], pos[1], pos[2])
return pol2cart(az, math.pi / 2 - elev)
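# Example (sketch, assuming the cart2sph convention in utils returns
# (r, elevation, azimuth)): the pole of the unit sphere maps to the origin.
#   azim_proj([0.0, 0.0, 1.0])   # -> approximately (0.0, 0.0)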
def gen_images(data, args):
locs = loadmat('locs_orig.mat')
locs_3d = locs['data']
locs_2d = []
for e in locs_3d:
locs_2d.append(azim_proj(e))
locs_2d_final = np.array(locs_2d)
grid_x, grid_y = np.mgrid[
min(np.array(locs_2d)[:, 0]):max(np.array(locs_2d)[:, 0]):args.image_size * 1j,
min(np.array(locs_2d)[:, 1]):max(np.array(locs_2d)[:, 1]):args.image_size * 1j]
images = []
for i in range(data.shape[0]):
images.append(griddata(locs_2d_final, data[i, :], (grid_x, grid_y), method='cubic', fill_value=np.nan))
images = np.stack(images, axis=0)
    images[~np.isnan(images)] = scale(images[~np.isnan(images)])  # standardize valid pixels (assumed completion of the truncated line)
    images = np.nan_to_num(images)  # assumption: zero-fill points outside the electrode hull
    return images
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Matrix Factorization that learns not only the UI matrix but also attributes.
"""
import numpy as np
import sys
from sub_module import util
class MF:
def __init__(self, n_latent_factor=200, learning_rate=0.005,
regularization_weight=0.02, n_epochs=20,
global_bias=True, id_bias=True,
verbose=False, random_seed=None):
"""
        Collaborative Filtering, so-called Matrix Factorization.
Arguments:
- n_latent_factor [int]:
number of latent dimensions
- learning_rate [float]:
learning rate
- regularization_weight [float]:
regularization parameter
- global_bias [True/False]:
set bias of global.
- id_bias [True/False]:
set bias of user_id, item_id.
        - n_epochs [int]:
            number of epochs of training (SGD)
- random_seed [int]:
random seed to set in np.random.seed()
"""
# set random_seed
if random_seed:
np.random.seed(random_seed)
self.n_latent_factor = n_latent_factor
self.learning_rate = learning_rate
self.regularization_weight = regularization_weight
self.global_bias = global_bias
self.id_bias = id_bias
self.n_epochs = n_epochs
self.verbose = verbose
def fit(self, user_ids, item_ids, ratings,
user_attributes=None, item_attributes=None):
"""
Arguments:
- user_ids [array-like-object]:
the array of user id.
- item_ids [array-like-object]:
the array of item id.
- ratings [array-like-object]:
the array of rating.
        - user_attributes [dictionary]:
            dictionary whose key is user_id and value is a vector of user attributes.
            if None, doesn't train on the attributes.
            ex) {'user00' : [0,1,0], 'user01': [.5,0,.5]}
        - item_attributes [dictionary]:
            dictionary whose key is item_id and value is a vector of item attributes.
            if None, doesn't train on the attributes.
            ex) {'item00' : [0,1,0], 'item01': [.5,0,.5]}
"""
# Set up before fit
self._fit_setup(
user_ids, item_ids, ratings,
user_attributes, item_attributes
)
# Initialize coefficents of attributes.
if user_attributes:
            self.a_u = np.zeros(self.n_dim_user_attributes, np.double)
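        # Usage sketch (hypothetical data; the fit method above is excerpted
        # mid-implementation in this dump):
        #   mf = MF(n_latent_factor=50, n_epochs=10, random_seed=0)
        #   mf.fit(user_ids=['u0', 'u1'], item_ids=['i0', 'i1'], ratings=[4.0, 2.0])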
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#PF_PO Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for oil palm plantation. Source: Khasanah et al. (2015)
tf_palmoil = 26
a_nucleus = 2.8167
b_nucleus = 6.8648
a_plasma = 2.5449
b_plasma = 5.0007
c_cont_po_nucleus = 0.5448 #carbon content in biomass
c_cont_po_plasma = 0.5454
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
c_firewood_energy_S1nu = df1nu['Firewood_other_energy_use'].values
c_firewood_energy_S1pl = df1pl['Firewood_other_energy_use'].values
c_firewood_energy_Enu = df3nu['Firewood_other_energy_use'].values
c_firewood_energy_Epl = df3pl['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
c_pellets_Enu = dfEnu['Wood_pellets'].values
c_pellets_Epl = dfEpl['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp(t,remainAGB):
return (1-(1-np.exp(-a*t))**b)*remainAGB
#set zero matrix
output_decomp = np.zeros((len(t),len(df['C_remainAGB'].values)))
for i,remain_part in enumerate(df['C_remainAGB'].values):
#print(i,remain_part)
output_decomp[i:,i] = decomp(t[:len(t)-i],remain_part)
print(output_decomp[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix = np.zeros((len(t)-1, len(df['C_remainAGB'].values)))  # fixed misplaced parenthesis: the column count is the number of series
i = 0
while i < tf:
subs_matrix[:,i] = np.diff(output_decomp[:,i])
i = i + 1
print(subs_matrix[:,:4])
print(len(subs_matrix))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix = subs_matrix.clip(max=0)
print(subs_matrix[:,:4])
#make the results as absolute values
subs_matrix = abs(subs_matrix)
print(subs_matrix[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix = np.zeros((len(t)-200,len(df['C_remainAGB'].values)))
print(zero_matrix)
subs_matrix = np.vstack((zero_matrix, subs_matrix))
print(subs_matrix[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot = (tf,1)
decomp_emissions = np.zeros(matrix_tot)
i = 0
while i < tf:
decomp_emissions[:,0] = decomp_emissions[:,0] + subs_matrix[:,i]
i = i + 1
print(decomp_emissions[:,0])
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
df3nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
df3pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
#product lifetime
#building materials
B = 35
TestDSM1nu = DynamicStockModel(t = df1nu['Year'].values, i = df1nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1pl = DynamicStockModel(t = df1pl['Year'].values, i = df1pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3nu = DynamicStockModel(t = df3nu['Year'].values, i = df3nu['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM3pl = DynamicStockModel(t = df3pl['Year'].values, i = df3pl['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1nu, ExitFlag1nu = TestDSM1nu.dimension_check()
CheckStr1pl, ExitFlag1nu = TestDSM1pl.dimension_check()
CheckStr3nu, ExitFlag3nu = TestDSM3nu.dimension_check()
CheckStr3pl, ExitFlag3pl = TestDSM3pl.dimension_check()
Stock_by_cohort1nu, ExitFlag1nu = TestDSM1nu.compute_s_c_inflow_driven()
Stock_by_cohort1pl, ExitFlag1pl = TestDSM1pl.compute_s_c_inflow_driven()
Stock_by_cohort3nu, ExitFlag3nu = TestDSM3nu.compute_s_c_inflow_driven()
Stock_by_cohort3pl, ExitFlag3pl = TestDSM3pl.compute_s_c_inflow_driven()
S1nu, ExitFlag1nu = TestDSM1nu.compute_stock_total()
S1pl, ExitFlag1pl = TestDSM1pl.compute_stock_total()
S3nu, ExitFlag3nu = TestDSM3nu.compute_stock_total()
S3pl, ExitFlag3pl = TestDSM3pl.compute_stock_total()
O_C1nu, ExitFlag1nu = TestDSM1nu.compute_o_c_from_s_c()
O_C1pl, ExitFlag1pl = TestDSM1pl.compute_o_c_from_s_c()
O_C3nu, ExitFlag3nu = TestDSM3nu.compute_o_c_from_s_c()
O_C3pl, ExitFlag3pl = TestDSM3pl.compute_o_c_from_s_c()
O1nu, ExitFlag1nu = TestDSM1nu.compute_outflow_total()
O1pl, ExitFlag1pl = TestDSM1pl.compute_outflow_total()
O3nu, ExitFlag3nu = TestDSM3nu.compute_outflow_total()
O3pl, ExitFlag3pl = TestDSM3pl.compute_outflow_total()
DS1nu, ExitFlag1nu = TestDSM1nu.compute_stock_change()
DS1pl, ExitFlag1pl = TestDSM1pl.compute_stock_change()
DS3nu, ExitFlag3nu = TestDSM3nu.compute_stock_change()
DS3pl, ExitFlag3pl = TestDSM3pl.compute_stock_change()
Bal1nu, ExitFlag1nu = TestDSM1nu.check_stock_balance()
Bal1pl, ExitFlag1pl = TestDSM1pl.check_stock_balance()
Bal3nu, ExitFlag3nu = TestDSM3nu.check_stock_balance()
Bal3pl, ExitFlag3pl = TestDSM3pl.check_stock_balance()
#print output flow
print(TestDSM1nu.o)
print(TestDSM1pl.o)
print(TestDSM3nu.o)
print(TestDSM3pl.o)
#plt.plot(TestDSM1.s)
#plt.xlim([0, 100])
#plt.ylim([0,50])
#plt.show()
#%%
#Step (5): Biomass growth
A = range(0,tf_palmoil,1)
#calculate the biomass and carbon content of palm oil trees over time
def Y_nucleus(A):
return (44/12*1000*c_cont_po_nucleus*(a_nucleus*A + b_nucleus))
output_Y_nucleus = np.array([Y_nucleus(Ai) for Ai in A])
print(output_Y_nucleus)
def Y_plasma(A):
return (44/12*1000*c_cont_po_plasma*(a_plasma*A + b_plasma))
output_Y_plasma = np.array([Y_plasma(Ai) for Ai in A])
print(output_Y_plasma)
##8 times 25-year cycle of new AGB of oil palm, one year gap between the cycle
#nucleus
counter = range(0,8,1)
y_nucleus = []
for i in counter:
y_nucleus.append(output_Y_nucleus)
flat_list_nucleus = []
for sublist in y_nucleus:
for item in sublist:
flat_list_nucleus.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_nucleus = flat_list_nucleus[:len(flat_list_nucleus)-7]
#plasma
y_plasma = []
for i in counter:
y_plasma.append(output_Y_plasma)
flat_list_plasma = []
for sublist in y_plasma:
for item in sublist:
flat_list_plasma.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_plasma = flat_list_plasma[:len(flat_list_plasma)-7]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_nucleus)
plt.plot(t, flat_list_plasma, color='seagreen')
plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha=0.4)
plt.xlabel('Time (year)')
plt.ylabel('AGB (tCO2-eq/ha)')
plt.show()
###Yearly Sequestration
###Nucleus
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_nucleus(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_nucleus = [p - q for q, p in zip(flat_list_nucleus, flat_list_nucleus[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_nuclues' with 0 values
flat_list_nucleus = [0 if i < 0 else i for i in flat_list_nucleus]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_nucleus.insert(0,var)
#make 'flat_list_nucleus' elements negative numbers to denote sequestration
flat_list_nucleus = [ -x for x in flat_list_nucleus]
print(flat_list_nucleus)
#Plasma
#find the yearly sequestration by calculating the differences between elements in list 'flat_list_plasma(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
flat_list_plasma = [t - u for u, t in zip(flat_list_plasma, flat_list_plasma[1:])]
#since there is no sequestration between the replanting year (e.g., year 25 to 26), we have to replace negative numbers in 'flat_list_plasma' with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
flat_list_plasma = [0 if i < 0 else i for i in flat_list_plasma]
#insert 0 value to the list as the first element, because there is no sequestration in year 0
var = 0
flat_list_plasma.insert(0,var)
#make 'flat_list_plasma' elements negative numbers to denote sequestration
flat_list_plasma = [ -x for x in flat_list_plasma]
print(flat_list_plasma)
#%%
#Step(6): post-harvest processing of wood/palm oil
#post-harvest wood processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_HWP_S1nu = df1nu['PH_Emissions_HWP'].values
PH_Emissions_HWP_S1pl = df1pl['PH_Emissions_HWP'].values
PH_Emissions_HWP_Enu = dfEnu['PH_Emissions_HWP'].values  # fixed copy-paste: use the Enu frame loaded above
PH_Emissions_HWP_Epl = dfEpl['PH_Emissions_HWP'].values  # fixed copy-paste: use the Epl frame loaded above
#post-harvest palm oil processing
df1nu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
df1pl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
dfEnu = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
dfEpl = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Epl')
t = range(0,tf,1)
PH_Emissions_PO_S1nu = df1nu['PH_Emissions_PO'].values
PH_Emissions_PO_S1pl = df1pl['PH_Emissions_PO'].values
PH_Emissions_PO_Enu = dfEnu['PH_Emissions_PO'].values  # fixed copy-paste: use the Enu frame loaded above
PH_Emissions_PO_Epl = dfEpl['PH_Emissions_PO'].values  # fixed copy-paste: use the Epl frame loaded above
#%%
#Step (7_1): landfill gas decomposition (CH4)
#CH4 decomposition
hl = 20 # half-life
k = (np.log(2))/hl
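# Sanity check (sketch): with k = ln(2)/hl, np.exp(-k*hl) == 0.5, so the
# decomp_CH4_* functions below halve the remaining mass after one half-life.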
#S1nu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1nu')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1nu(t,remainAGB_CH4_S1nu):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1nu
#set zero matrix
output_decomp_CH4_S1nu = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1nu in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1nu[i:,i] = decomp_CH4_S1nu(t[:len(t)-i],remain_part_CH4_S1nu)
print(output_decomp_CH4_S1nu[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1nu = np.zeros((len(t)-1, len(df['Landfill_decomp_CH4'].values)))  # fixed misplaced parenthesis
i = 0
while i < tf:
subs_matrix_CH4_S1nu[:,i] = np.diff(output_decomp_CH4_S1nu[:,i])
i = i + 1
print(subs_matrix_CH4_S1nu[:,:4])
print(len(subs_matrix_CH4_S1nu))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1nu = subs_matrix_CH4_S1nu.clip(max=0)
print(subs_matrix_CH4_S1nu[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1nu = abs(subs_matrix_CH4_S1nu)
print(subs_matrix_CH4_S1nu[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1nu = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1nu)
subs_matrix_CH4_S1nu = np.vstack((zero_matrix_CH4_S1nu, subs_matrix_CH4_S1nu))
print(subs_matrix_CH4_S1nu[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1nu = (tf,1)
decomp_tot_CH4_S1nu = np.zeros(matrix_tot_CH4_S1nu)
i = 0
while i < tf:
decomp_tot_CH4_S1nu[:,0] = decomp_tot_CH4_S1nu[:,0] + subs_matrix_CH4_S1nu[:,i]
i = i + 1
print(decomp_tot_CH4_S1nu[:,0])
#S1pl
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_S1pl')
tf = 201
t = np.arange(tf)
def decomp_CH4_S1pl(t,remainAGB_CH4_S1pl):
return (1-(1-np.exp(-k*t)))*remainAGB_CH4_S1pl
#set zero matrix
output_decomp_CH4_S1pl = np.zeros((len(t),len(df['Landfill_decomp_CH4'].values)))
for i,remain_part_CH4_S1pl in enumerate(df['Landfill_decomp_CH4'].values):
#print(i,remain_part)
output_decomp_CH4_S1pl[i:,i] = decomp_CH4_S1pl(t[:len(t)-i],remain_part_CH4_S1pl)
print(output_decomp_CH4_S1pl[:,:4])
#find the yearly emissions from decomposition by calculating the differences between elements in list 'decomp_tot_S1'
#(https://stackoverflow.com/questions/5314241/difference-between-consecutive-elements-in-list)
# https://stackoverflow.com/questions/11095892/numpy-difference-between-neighboring-elements
#difference between element,
subs_matrix_CH4_S1pl = np.zeros((len(t)-1, len(df['Landfill_decomp_CH4'].values)))  # fixed misplaced parenthesis
i = 0
while i < tf:
subs_matrix_CH4_S1pl[:,i] = np.diff(output_decomp_CH4_S1pl[:,i])
i = i + 1
print(subs_matrix_CH4_S1pl[:,:4])
print(len(subs_matrix_CH4_S1pl))
#since there is no carbon emission from decomposition at the beginning of the year (esp. from 'year 1' onward),
#we have to replace the positive numbers with 0 values (https://stackoverflow.com/questions/36310897/how-do-i-change-all-negative-numbers-to-zero-in-python/36310913)
subs_matrix_CH4_S1pl = subs_matrix_CH4_S1pl.clip(max=0)
print(subs_matrix_CH4_S1pl[:,:4])
#make the results as absolute values
subs_matrix_CH4_S1pl= abs(subs_matrix_CH4_S1pl)
print(subs_matrix_CH4_S1pl[:,:4])
#insert row of zeros into the first row of the subs_matrix
zero_matrix_CH4_S1pl = np.zeros((len(t)-200,len(df['Landfill_decomp_CH4'].values)))
print(zero_matrix_CH4_S1pl)
subs_matrix_CH4_S1pl = np.vstack((zero_matrix_CH4_S1pl, subs_matrix_CH4_S1pl))
print(subs_matrix_CH4_S1pl[:,:4])
#sum every column of the subs_matrix into one vector matrix
matrix_tot_CH4_S1pl = (tf,1)
decomp_tot_CH4_S1pl = np.zeros(matrix_tot_CH4_S1pl)
i = 0
while i < tf:
decomp_tot_CH4_S1pl[:,0] = decomp_tot_CH4_S1pl[:,0] + subs_matrix_CH4_S1pl[:,i]
i = i + 1
print(decomp_tot_CH4_S1pl[:,0])
#Enu
df = pd.read_excel('C:\\Work\\Programming\\Practice\\PF_PO_EC.xlsx', 'PF_PO_Enu')
tf = 201
t = np.arange(tf)
print('====================================================================================================')
print('== Problem 66. Feed the one-hot encoded t above and each of the probabilities y and y2 into the mean '
      'squared error (cost) function, and print which gives the lower error!')
print('====================================================================================================\n')
import numpy as np
def mean_squared_error(y, t):
return 0.5*np.mean(np.square(y - t), dtype=np.float32)
t = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0.])
y = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print(mean_squared_error(y, t))
def cross_entropy_error(y, t):
delta = 1e-7
return -np.sum(t*np.log(y+delta))
t = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0.])
y = np.array([0.1, 0.05, 0.1, 0.0, 0.05, 0.1, 0.0, 0.6, 0.0, 0.0])
print(cross_entropy_error(y, t))
print('====================================================================================================')
print('== Problem 67. (lunchtime problem) Using the cross-entropy error function and a for loop, find the error '
      'of each of the numpy arrays below in one go.')
print('====================================================================================================\n')
t = np.array([0., 0., 1., 0., 0., 0., 0., 0., 0., 0.])
y = np.array([[0.1,0.05,0.1,0.0,0.05,0.1,0.0,0.1,0.0,0.0],
[0.1,0.05,0.2,0.0,0.05,0.1,0.0,0.6,0.0,0.0],
[0.0,0.05,0.3,0.0,0.05,0.1,0.0,0.6,0.0,0.0],
[0.0,0.05,0.4,0.0,0.05,0.0,0.0,0.5,0.0,0.0],
[0.0,0.05,0.5,0.0,0.05,0.0,0.0,0.4,0.0,0.0],
[0.0,0.05,0.6,0.0,0.05,0.0,0.0,0.3,0.0,0.0],
[0.0,0.05,0.7,0.0,0.05,0.0,0.0,0.2,0.0,0.0],
[0.0,0.1,0.8,0.0,0.1,0.0,0.0,0.2,0.0,0.0],
[0.0,0.05,0.9,0.0,0.05,0.0,0.0,0.0,0.0,0.0]])
for y_ in y:
print(cross_entropy_error(y_, t))
print('====================================================================================================')
print('== Problem 68. Randomly print 10 numbers below 60000.')
print('====================================================================================================\n')
a = np.random.choice(range(0, 60000), 10)
print(a)
import numpy as np
import pickle
from DeepLearningClass.dataset.mnist import load_mnist
from DeepLearningClass.common.functions import sigmoid, softmax
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
train_size = 60000
batch_size = 10
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
y_batch = t_train[batch_mask]
print(len(x_batch))
print(x_batch.shape)
print('====================================================================================================')
print('== Problem 70. Compute the cross-entropy error for a single data point.')
print('====================================================================================================\n')
def cross_entropy_error(y, t):
    delta = 1e-7
    if y.ndim == 1:  # treat a single sample as a batch of one, so the divisor is the batch size
        y, t = y.reshape(1, -1), t.reshape(1, -1)
    return -np.sum(t * np.log(y + delta)) / y.shape[0]
print(cross_entropy_error(y, t))
print('====================================================================================================')
print('== Problem 71. Compute the cross-entropy error for 10 data points.')
print('====================================================================================================\n')
import sys, os
sys.path.append(os.pardir)
import numpy as np
from DeepLearningClass.dataset.mnist import load_mnist
import pickle
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label=True)
train_size = 60000
batch_size = 10
def get_data():
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, flatten=True, one_hot_label=False)
return x_test, t_test
def softmax(a):
c = np.max(a)
exp_a = np.exp(a-c)
sum_exp_a = np.sum(exp_a)
y = exp_a / sum_exp_a
return y
def cross_entropy_error(y, t):
delta = 1e-7
return -np.sum(t * np.log(y+delta)) / len(y)
def init_network():
with open("DeepLearningClass/chapter4/sample_weight.pkl", 'rb') as f:
network = pickle.load(f)
return network
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def predict(network, x):
w1, w2, w3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, w1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, w2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, w3) + b3
y = softmax(a3)
return y
network = init_network()  # load the weights once instead of re-reading the pickle every iteration
cost_list = []
for _ in range(100000):
    batch_mask = np.random.choice(train_size, batch_size)
    x_batch = x_train[batch_mask]
    y_batch = t_train[batch_mask]
    y_ = predict(network, x_batch)
    c = cross_entropy_error(y_, y_batch)
    cost_list.append(c)
print(np.max(cost_list))
print('====================================================================================================')
print('== Problem 72. Implement numerical differentiation (by approximation) as a Python function!')
print('====================================================================================================\n')
def numerical_diff(f, x):
delta = 1e-4
return (f(x+delta)-f(x-delta))/(2*delta)
def func(x):
return 0.001 * x**2 + 0.1 * x
print(numerical_diff(func, 10))
print('====================================================================================================')
print('== NCS Problem 1. What is the slope of the function below when numerically differentiated at x = 7?')
print('====================================================================================================\n')
def numerical_diff(f, x):
delta = 1e-4
return (f(x+delta)-f(x-delta))/(2*delta)
def func(x):
return 3 * x**2 + 4 * x
print(numerical_diff(func, 7))  # evaluate at x = 7, as the problem states
print('====================================================================================================')
print('== NCS Problem 2. Subtract the largest element of each of the 100 rows of the matrix x below and store the result in a variable x2!')
print('====================================================================================================\n')
import numpy as np
def softmax(x):
if x.ndim == 2:
x = x.T
x = x - np.max(x, axis=0)
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
    x = x - np.max(x)  # guard against overflow
return np.exp(x) / np.sum(np.exp(x))
x = np.random.rand(100, 784)
x2 = x - np.max(x, axis=1, keepdims=True)  # subtract each row's max, per the problem statement (the original argmax did not match)
print(x2)
# ■ 4.5 Partial derivatives
#
# Differentiating a function of two or more variables with respect to one of
# them, while holding the remaining variables fixed as constants, is called
# partial differentiation. The graph of f(x0,x1) = x0^2 + x1^2 is a bowl-shaped
# surface (paraboloid).
print('====================================================================================================')
print('== Problem 76. Partially differentiate f(x0,x1) = x0^2 + x1^2 with respect to x0, at x0 = 3 and x1 = 4.')
print('====================================================================================================\n')
func76 = lambda x0: x0**2 + 4.0**2  # x1 held fixed at 4 (the original called samplefunction3 before it was defined)
print(numerical_diff(func76, 3))
print('====================================================================================================')
print('== Problem 77. With x0 = 3 and x1 = 4, partially differentiate the function with respect to x1.')
print('====================================================================================================\n')
func77 = lambda x1: 3.0**2 + x1**2  # x0 held fixed at 3
print(numerical_diff(func77, 4))
print('====================================================================================================')
print('== Problem 78. Partially differentiate the function below with respect to x0.')
print('====================================================================================================\n')
def numerical_diff(f,x):
h=0.0001
return (f(x+h)-f(x-h))/(2*h)
def samplefunction3(x):
return 2*x**2
print(numerical_diff(samplefunction3,3))
print('====================================================================================================')
print('== Problem 79. Partially differentiate the function below with respect to x1 (x0=6, x1=7), using a lambda expression.')
print('====================================================================================================\n')
def numerical_diff(f,x):
h=0.0001
return (f(x+h)-f(x-h))/(2*h)
func = lambda x0:2*x0**2
print(numerical_diff(func,6))
print('====================================================================================================')
print('== Problem 80. (lunchtime problem) Use a for loop to partially differentiate the function below with respect to x0 and with respect to x1.')
print('====================================================================================================\n')
def f1(x):
return 6*x**2
def f2(x):
return 2*x**2
def numerical_diff(f, x):
h = 0.0001
return (f(x+h)-f(x-h))/(2*h)
funcs = [f1, f2]
xs = [6.0, 7.0]
for i in range(2):
print(numerical_diff(funcs[i], xs[i]))
print('====================================================================================================')
print('== Problem 81. Code the partial differentiation above.')
print('====================================================================================================\n')
import numpy as np
def numerical_gradient(f, x):
h = 0.0001
grad = np.zeros_like(x) # creates an array grad with the same shape as x
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = tmp_val + h
fxh1 = f(x)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2*h)
x[idx] = tmp_val
return grad
def samplefunc4(x):
return x[0]**2 + x[1]**2
print(numerical_gradient(samplefunc4, np.array([3.0, 4.0])))
print('====================================================================================================')
print('== Problem 82. Check what np.zeros_like does.')
print('====================================================================================================\n')
def numerical_gradient(f, x):
h = 0.0001
grad = np.zeros_like(x) # creates an array grad with the same shape as x
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = tmp_val + h
fxh1 = f(x)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2 * h)
x[idx] = tmp_val
return grad
def samplefunc4(x):
return x[0] ** 2 + x[1] ** 2
print('====================================================================================================')
print('== Problem 83. Find the gradient vector at x0 = 3.0, x1 = 0.0.')
print('====================================================================================================\n')
print(numerical_gradient(samplefunc4, np.array([3.0, 0.0])))
print('====================================================================================================')
print('== Problem 85. Implement the gradient descent function in Python.')
print('====================================================================================================\n')
def gradient_descent(f, init_x, lr=0.01, step_num=100):
x = init_x.copy() # copy so the caller's array is not mutated in place between calls
for i in range(step_num):
grad = numerical_gradient(f, x)
x -= lr * grad
return x
init_x = np.array([-3.0, 4.0])
def function_2(x):
return x[0]**2 + x[1]**2
print(gradient_descent(function_2, init_x, lr=10)) # lr too large: diverges
print(gradient_descent(function_2, init_x, lr=1e-10)) # lr too small: barely moves
print('====================================================================================================')
print('== Problem 86. Reusing the code above, verify that the values diverge when the learning rate is too large and fail to converge when it is too small.')
print('====================================================================================================\n')
def numerical_gradient(f, x):
h = 0.0001
grad = np.zeros_like(x) # creates an array grad with the same shape as x
for idx in range(x.size):
tmp_val = x[idx]
x[idx] = tmp_val + h
fxh1 = f(x)
x[idx] = tmp_val - h
fxh2 = f(x)
grad[idx] = (fxh1 - fxh2) / (2 * h)
x[idx] = tmp_val
return grad
def gradient_descent(f, init_x, lr=0.01, step_num=100):
x = init_x.copy() # copy so the caller's array is not mutated in place between calls
for i in range(step_num):
grad = numerical_gradient(f, x)
x -= lr * grad
return x
init_x = np.array([-3.0, 4.0])
def function_2(x):
return x[0]**2 + x[1]**2
print(gradient_descent(function_2, init_x, lr=10)) # diverges: the learning rate is far too large
print('====================================================================================================')
print('== Problem 87. With a learning rate of 1e-10, find how large step_num must be for the parameters to converge to 0.')
print('====================================================================================================\n')
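# A minimal sketch (added, not part of the original exercise): with
# f(x0,x1) = x0^2 + x1^2 each step multiplies x by (1 - 2*lr), so shrinking
# |x| from ~3 down to ~1e-8 needs (1 - 2e-10)**n ~ 3e-9, i.e. n on the order
# of 10**11 steps, far more than is practical. The step counts below are
# illustrative and show how little the parameters move:
for step_num in [100, 10000, 100000]:
x = gradient_descent(function_2, np.array([-3.0, 4.0]), lr=1e-10, step_num=step_num)
print(step_num, x)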
print('====================================================================================================')
print('== Problem 88. Generate the 2 x 3 weight matrix randomly and implement a simple neural network in Python that computes its gradient.')
print('====================================================================================================\n')
import numpy as np
from DeepLearningClass.common.gradient import numerical_gradient
class simpleNet:
def __init__(self):
self.W = np.random.randn(2, 3)
def predict(self, x):
return np.dot(x, self.W)
def loss(self, x, t):
z = self.predict(x)
y = self.softmax(z)
loss = self.cross_entropy_error(y, t)
return loss
def softmax(self, x):
if x.ndim == 2:
x = x.T
x = x - np.max(x, axis=0)
y = np.exp(x) / np.sum(np.exp(x), axis=0)
return y.T
x = x - np.max(x) # guard against overflow
return np.exp(x) / np.sum(np.exp(x))
def cross_entropy_error(self, y, t):
delta = 1e-7
if y.ndim == 1: # promote a single sample to a batch of one
y, t = y.reshape(1, y.size), t.reshape(1, t.size)
return -np.sum(t * np.log(y + delta)) / y.shape[0] # average over the batch, not over classes
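# Added note: the loss above is cross-entropy, E = -sum_k t_k * log(y_k); for a
# one-hot target it reduces to -log of the probability assigned to the true class.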
print('====================================================================================================')
print('== Problem 89. Feed the input [0.6, 0.9] into the network from Problem 88 with target [0,0,1], i.e. assuming label 2 is the correct class, and check how large the loss is.')
print('====================================================================================================\n')
nn = simpleNet()
print(nn.loss(np.array([0.6, 0.9]), np.array([0, 0, 1]))) # print the loss so it can actually be checked
print('====================================================================================================')
print('== Problem 90. Pass the cost function of the network above and its 2x3 weights to the numerical differentiation function written yesterday, and compute the 2x3 gradient.')
print('====================================================================================================\n')
net = simpleNet()
x = np.array([0.6, 0.9])
t = np.array([0,0,1])
def f(W):
return net.loss(x, t)
dW = numerical_gradient(f, net.W)
print(dW)
print('====================================================================================================')
print('== Problem 91. Implement the function f from above directly as a lambda expression and run it as below so that the gradient is printed.')
print('====================================================================================================\n')
dW = numerical_gradient(lambda _: net.loss(x, t), net.W)
print(dW)
print('====================================================================================================')
print('== Problem 92. Check the dimensions of w1 below.')
print('====================================================================================================\n')
w1 = np.random.randn(784, 50)
print(w1.shape, w1.ndim)
print('====================================================================================================')
print('== Problem 93. Print the array below and inspect it.')
print('====================================================================================================\n')
b1 = np.zeros(50)
print(b1)
print('====================================================================================================')
print('== Problem 94. Set x (input), t (target), and y (prediction) as below, instantiate the two-layer network built above, and print the shapes of W1, W2, b1 and b2.')
print('====================================================================================================\n')
from DeepLearningClass.chapter4.two_layer_net import TwoLayerNet
net = TwoLayerNet(input_size=784, hidden_size=100, output_size=10)
x = np.random.rand(100, 784)
y = net.predict(x)
t = np.random.rand(100, 10)
print(net.params['W1'].shape, net.params['W2'].shape, net.params['b1'].shape, net.params['b2'].shape)
print('====================================================================================================')
print('== Problem 95. Set x (input), t (target), and y (prediction) as below, instantiate the two-layer network built above, and print the shapes of the gradients of W1, W2, b1 and b2.')
print('====================================================================================================\n')
from DeepLearningClass.chapter4.two_layer_net import TwoLayerNet
net = TwoLayerNet(input_size=784, hidden_size=100, output_size=10)
x =
|
np.random.rand(100, 784)
|
numpy.random.rand
|
# coding: utf-8
import pylab
import scipy.integrate
import fenics as fe
import numpy as np
import matplotlib.pyplot as plt
def setup_vectorspace(mesh):
"""setup"""
V = fe.VectorFunctionSpace(mesh, "CG", 1, dim=3)
v = fe.TestFunction(V)
u = fe.TrialFunction(V)
m = fe.Function(V)
Heff = fe.Function(V)
return m, Heff, u, v, V
# Material parameters
Ms = 8.6e5 # saturation magnetisation (A/m)
alpha = 0.1 # Gilbert damping
gamma = 2.211e5 # gyromagnetic ratio
A = 1e-11 # exchange constant (J/m)
#A = 0
D = 0*1.58e-3 # DMI constant (FeGe: D = 1.58e-3 J/m²)
K = 0*5e3 # anisotropy constant (Co: K = 4.5e5 J/m³)
# External magnetic field.
B = 0.1 # (T)
mu0 = 4 * np.pi * 1e-7 # vacuum permeability
# Zeeman field
H = Ms / 2 * fe.Constant((0,0,1))
# easy axis
ea = fe.Constant((0,1,1))
# mesh parameters
d = 100e-9
thickness = 100e-9
nx = ny = 20
nz = 10
# create mesh
p1 = fe.Point(0, 0, 0)
p2 = fe.Point(d, d, thickness)
mesh = fe.BoxMesh(p1, p2, nx, ny, nz)
m, Heff, u, v, V = setup_vectorspace(mesh)
def effective_field(m, volume=None):
w_Zeeman = - mu0 * Ms * fe.dot(m, H)
w_exchange = A * fe.inner(fe.grad(m), fe.grad(m))
w_DMI = D * fe.inner(m, fe.curl(m))
w_ani = - K * fe.inner(m, ea)**2
w = w_Zeeman + w_exchange + w_DMI + w_ani
return -1/(mu0*Ms) * fe.derivative(w*fe.dx, m)
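# Added note: effective_field() returns the variational derivative of the total
# energy density w = w_Zeeman + w_exchange + w_DMI + w_ani, i.e. the effective
# field H_eff = -1/(mu0*Ms) * delta(E)/delta(m) that drives the dynamics below.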
m_init = fe.Constant((1, 0, 0))
m = fe.interpolate(m_init, V)
# Effective field
Heff_form = effective_field(m)
# Preassemble projection Matrix
Amat = fe.assemble(fe.dot(u, v)*fe.dx)
LU = fe.LUSolver()
LU.set_operator(Amat)
def compute_dmdt(m):
"""Convenience function that does all in one go"""
# Assemble RHS
b = fe.assemble(Heff_form)
# Project onto Heff
LU.solve(Heff.vector(), b)
LLG = -gamma/(1+alpha*alpha)*fe.cross(m, Heff) - alpha*gamma/(1+alpha*alpha)*fe.cross(m, fe.cross(m, Heff))
result = fe.assemble(fe.dot(LLG, v)*fe.dP)
return result.array()
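# Added note: the LLG term assembled above is the Landau-Lifshitz-Gilbert equation
# dm/dt = -gamma/(1+alpha^2) * (m x H_eff) - alpha*gamma/(1+alpha^2) * m x (m x H_eff),
# i.e. precession of m around H_eff plus Gilbert damping that relaxes m toward it.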
# function for integration of system of ODEs
def rhs_micromagnetic(m_vector_array, t, counter=[0]):
assert isinstance(m_vector_array, np.ndarray)
m.vector()[:] = m_vector_array[:]
dmdt = compute_dmdt(m)
return dmdt
ts = np.linspace(0, 1e-11, 100)
# empty call of time integrator, just to get FEniCS to cache all forms etc
rhs_micromagnetic(m.vector().array(), 0)
ms = scipy.integrate.odeint(rhs_micromagnetic, y0=m.vector().array(), t=ts, rtol=1e-10, atol=1e-10)
def macrospin_analytic_solution(alpha, gamma, H, t_array):
"""
Computes the analytic solution of magnetisation x component
as a function of time for the macrospin in applied external
magnetic field H.
Source: PhD Thesis <NAME>,
http://eprints.soton.ac.uk/161207/1.hasCoversheetVersion/thesis.pdf,
Appendix B, page 127
"""
t0 = 1 / (gamma * alpha * H) * np.log(
|
np.sin(np.pi / 2)
|
numpy.sin
|
"""
This module implements the scae core.
Current version is a fast implementation
"""
#sphinx-build -b html rst html
#sphinx-apidoc -o . .
import numpy as np
import tensorflow as tf
import time
from sklearn.decomposition import PCA
class _BaseCyclum:
"""
The base class for all the realizations.
All this class knows is math.
"""
__slots__ = ["Y_value", "N", "P", "Q", "Y", "X", "Y2", "isbuilt"]
def __init__(self, Y, Q, ispreload=True):
"""
:type Y: numpy matrix
:param Y: data matrix of shape (N, P)
:type Q: int
:param Q: dimension of the embedding (pseudotime)
:param ispreload: if True, preload Y as a TF constant; otherwise use a placeholder
"""
self.Y_value = Y
self.N, self.P = Y.shape
self.Q = Q
if ispreload:
self.Y = tf.constant(Y, dtype=tf.float32)
else:
self.Y = tf.placeholder(tf.float32, [None, self.P])
self.isbuilt = False
def linear_encoder(qs):
"""
declare a linear encoder
:param qs: the index of rows in embedding to generate
:return: a dict in a list; concatenating these lists gives the encoder configuration for build()
"""
return [dict(type='linear', qs=qs)]
def nonlinear_encoder(qs, hidden_qs):
"""
declare a nonlinear encoder
:param qs: the index of rows in embedding to generate
:param hidden_qs: the hidden layer dimensions
:return: a dict in a list; concatenating these lists gives the encoder configuration for build()
"""
return [dict(type='nonlinear', qs=qs, hidden_qs=hidden_qs)]
def linear_decoder(qs):
"""
declare a linear decoder
:param qs: the index of rows in embedding to be used
:return: a dict in a list; concatenating these lists gives the decoder configuration for build()
"""
return [dict(type='linear', qs=qs)]
def circular_decoder(qs):
"""
declare a circular decoder
:param qs: the index of rows in embedding to be used
:return: a dict in a list; concatenating these lists gives the decoder configuration for build()
"""
return [dict(type='circular', qs=qs)]
def _make_linear_layer(Z, Q):
"""
make a linear layer
:param Z: the input tensor
:param Q: dimension of the output
:return: the output tensor of this layer
"""
W = tf.Variable(tf.random_normal([int(Z.shape[-1]), Q]) / 4, name='W')
b = tf.Variable(tf.zeros([1, Q]), name='b')
print(W)
return tf.add(Z @ W, b, name='Z')
def _make_nonlinear_layer(Z, Q):
"""
make a nonlinear layer
:param Z: the input tensor
:param Q: dimension of the output
:return: the output tensor of this layer
"""
W = tf.Variable(tf.random_normal([int(Z.shape[-1]), Q]), name='W')
b = tf.Variable(tf.zeros([1, Q]), name='b')
print(W)
return tf.tanh(Z @ W + b, name='Z')
def _make_circular_layer(Z, Q):
"""
make a circular layer
:param Z: the input tensor
:param Q: dimension of the output
:return: the output tensor of this layer
"""
assert Z.shape[1] == 1
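# Added note: cos(Z + 2*pi*i/3) for i = 0, 1, 2 are phase-shifted copies of one
# sinusoid; a weighted sum of them can represent any a*cos(Z) + b*sin(Z), which
# is what makes this layer periodic ("circular") in the pseudotime Z.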
W = tf.Variable(tf.random_normal([3, Q]), name='W')
temp = tf.concat([tf.cos(Z + i * 2 * np.pi / 3) for i in range(3)], 1)
return tf.matmul(temp, W, name='Z')
def _make_nonlinear_encoder(Z, Q, hidden_qs):
"""
sum up the nonlinear layers to make a nonlinear encoder
:param Z: the input tensor
:param Q: the final output dimension
:param hidden_qs: the hidden layer dimensions
:return: the output tensor of this encoder
"""
temp = Z
for i, q in enumerate(hidden_qs):
with tf.name_scope('layer' + str(i)):
temp = _BaseCyclum._make_nonlinear_layer(temp, q)
with tf.name_scope('output'):
return _BaseCyclum._make_linear_layer(temp, Q)
def _make_linear_encoder(Z, Q):
"""
Use one linear layer as a linear encoder.
No need to have hidden layers due to the property of linear transformation.
:param Z: the input tensor
:param Q: the output dimension
:return: the output tensor of this encoder
"""
with tf.name_scope('output'):
return _BaseCyclum._make_linear_layer(Z, Q)
def _make_linear_decoder(Z, P):
"""
Use one linear layer as a linear decoder.
No need to have hidden layers due to the property of linear transformation.
:param Z: the input tensor
:param P: the output dimension
:return: the output tensor of this decoder
"""
with tf.name_scope('output'):
return _BaseCyclum._make_linear_layer(Z, P)
def _make_circular_decoder(Z, P):
"""
Use one circular layer as a circular decoder.
:param Z: the input tensor
:param P: the output dimension
:return: the output tensor of this decoder
"""
with tf.name_scope('output'):
return _BaseCyclum._make_circular_layer(Z, P)
def _make_encoder(Y, Q, encoder):
"""
make the full encoder
:param Y: the input tensor
:param Q: the output dimension
:param encoder: the encoder configuration
:return: the output tensor of the full encoder
"""
temp = [tf.zeros([tf.shape(Y)[0]]) for i in range(Q)]
for i, component in enumerate(encoder):
with tf.name_scope('encoder' + str(i)):
if component['type'] == 'linear':
res = _BaseCyclum._make_linear_encoder(Y, len(component['qs']))
for j, q in enumerate(component['qs']):
temp[q] += res[:, j]
elif component['type'] == 'nonlinear':
res = _BaseCyclum._make_nonlinear_encoder(Y, len(component['qs']), component['hidden_qs'])
for j, q in enumerate(component['qs']):
temp[q] += res[:, j]
else:
assert False # You should never get here
return tf.stack(temp, axis=1, name='neck')
def _make_decoder(X, P, decoder):
"""
make the full decoder
:param X: the input tensor
:param P: the output dimension
:param decoder: the decoder configuration
:return: the output tensor of the full decoder
"""
temp = []
for i, component in enumerate(decoder):
with tf.name_scope('decoder' + str(i)):
if component['type'] == 'linear':
temp.append(_BaseCyclum._make_linear_decoder(tf.gather(X, component['qs'], axis=1), P))
elif component['type'] == 'circular':
temp.append(_BaseCyclum._make_circular_decoder(tf.gather(X, component['qs'], axis=1), P))
else:
assert False
return tf.add_n(temp)
def build(self, encoder, decoder):
"""
build the model
:param encoder: encoder configuration
:param decoder: decoder configuration
:return: None
"""
self.X = _BaseCyclum._make_encoder(self.Y, self.Q, encoder)
self.Y2 = _BaseCyclum._make_decoder(self.X, self.P, decoder)
def train(self):
"""
Train the model. To be implemented in derived classes.
"""
raise NotImplementedError
def predict(self):
raise NotImplementedError
def generate(self):
raise NotImplementedError
class PreloadCyclum2(_BaseCyclum):
def __init__(self, Y):
super().__init__(Y, 2)
encoder = _BaseCyclum.nonlinear_encoder([0], [30, 20]) + _BaseCyclum.linear_encoder([1])
decoder = _BaseCyclum.circular_decoder([0]) + _BaseCyclum.linear_decoder([1])
self.build(encoder, decoder)
def _get_initial_value(self, n_candidate=5):
"""
Get initial value by running on the first few PCs.
:param n_candidate: number of PCs.
:return: proposed initial value.
"""
pc = PCA(n_components=10, copy=True, whiten=False, svd_solver="auto",
tol=0.0, iterated_power="auto", random_state=None).fit_transform(self.Y_value)
spc = pc / np.std(pc, axis=0)
unit_score_1d = []
uniform_score_1d = []
ind_1d = [(i, j) for i in range(n_candidate) for j in range(i)]
for i, j in ind_1d:
temp = np.sqrt(spc[:, i] ** 2 + spc[:, j] ** 2)
temp = temp / np.mean(temp)
unit_score_1d.append(np.mean(np.abs(temp - 1)))
temp = np.angle(spc[:, i] + spc[:, j] * 1j)
temp.sort()
diff = np.append(
|
np.diff(temp)
|
numpy.diff
|
import importlib.resources
import numpy as np
import warnings
from hexrd.imageutil import snip1d, snip1d_quad
from hexrd.crystallography import PlaneData
from hexrd.material import Material
from hexrd.valunits import valWUnit
from hexrd.spacegroup import Allowed_HKLs
from hexrd.utils.multiprocess_generic import GenericMultiprocessing
from hexrd import spacegroup as SG
from hexrd import symmetry, symbols, constants
import hexrd.resources
import lmfit
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline, interp1d
from scipy import signal
from scipy.ndimage import gaussian_filter1d # used by Spectrum.data's smoothing branch
import yaml
from os import path
import pickle
import time
import h5py
from pathlib import Path
from pylab import plot, ginput, show, \
axis, close, title, xlabel, ylabel
import copy
class Parameters:
"""
==================================================================================
==================================================================================
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab,
<EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: this is the parameter class which handles all refinement parameters
for both the Rietveld and the LeBail refinement problems
===============================================================================
===============================================================================
"""
def __init__(self,
name=None,
vary=False,
value=0.0,
lb=-np.Inf,
ub=np.Inf):
self.param_dict = {}
if(name is not None):
self.add(name=name,
vary=vary,
value=value,
lb=lb,
ub=ub)
def add(self,
name,
vary=False,
value=0.0,
lb=-np.Inf,
ub=np.Inf):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: add a single named parameter
"""
self[name] = Parameter(name=name, vary=vary, value=value, lb=lb, ub=ub)
def add_many(self,
names,
varies,
values,
lbs,
ubs):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: load a list of named parameters
"""
assert len(names) == len(varies), "lengths of tuples not consistent"
assert len(names) == len(values), "lengths of tuples not consistent"
assert len(names) == len(lbs), "lengths of tuples not consistent"
assert len(names) == len(ubs), "lengths of tuples not consistent"
for i, n in enumerate(names):
self.add(n, vary=varies[i], value=values[i], lb=lbs[i], ub=ubs[i])
def load(self, fname):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: load parameters from yaml file
"""
with open(fname) as file:
dic = yaml.load(file, Loader=yaml.FullLoader)
for k in dic.keys():
v = dic[k]
self.add(k, value=float(v[0]), lb=float(v[1]),
ub=float(v[2]), vary=bool(v[3])) # plain float/bool: np.float and np.bool are removed in modern NumPy
def dump(self, fname):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: dump the class to a yaml looking file. name is the key and the list
has [value, lb, ub, vary] in that order
"""
dic = {}
for k in self.param_dict.keys():
dic[k] = [self[k].value, self[k].lb, self[k].ub, self[k].vary]
with open(fname, 'w') as f:
data = yaml.dump(dic, f, sort_keys=False)
def dump_hdf5(self, file):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 01/15/2021 SS 1.0 original
>> @DETAILS: dump the class to a hdf5 file. the file argument could either be a
string or a h5.File instance. If it is a filename, then HDF5 file
is created, a parameter group is created and data is written out
with data names being the parameter name. Else data written to Parameter
group in existing file object
"""
if(isinstance(file, str)):
fexist = path.isfile(file)
if(fexist):
fid = h5py.File(file, 'r+')
else:
fid = h5py.File(file, 'x')
elif(isinstance(file, h5py.File)):
fid = file
else:
raise RuntimeError(
'Parameters: dump_hdf5 Pass in a \
filename string or h5py.File object')
if("/Parameters" in fid):
del(fid["Parameters"])
gid_top = fid.create_group("Parameters")
for p in self:
param = self[p]
gid = gid_top.create_group(p)
# write the value, lower and upper bounds and vary status
did = gid.create_dataset("value", (1, ), dtype=np.float64)
did.write_direct(np.array(param.value, dtype=np.float64))
did = gid.create_dataset("lb", (1, ), dtype=np.float64)
did.write_direct(np.array(param.lb, dtype=np.float64))
did = gid.create_dataset("ub", (1, ), dtype=np.float64)
did.write_direct(np.array(param.ub, dtype=np.float64))
did = gid.create_dataset("vary", (1, ), dtype=np.bool)
did.write_direct(np.array(param.vary, dtype=np.bool))
def __getitem__(self, key):
if(key in self.param_dict.keys()):
return self.param_dict[key]
else:
raise ValueError('variable with name not found')
def __setitem__(self, key, parm_cls):
if(key in self.param_dict.keys()):
warnings.warn(
'variable already in parameter list. overwriting ...')
if(isinstance(parm_cls, Parameter)):
self.param_dict[key] = parm_cls
else:
raise ValueError('input not a Parameter class')
def __iter__(self):
self.n = 0
return self
def __next__(self):
if(self.n < len(self.param_dict.keys())):
res = list(self.param_dict.keys())[self.n]
self.n += 1
return res
else:
raise StopIteration
def __str__(self):
retstr = 'Parameters{\n'
for k in self.param_dict.keys():
retstr += self[k].__str__()+'\n'
retstr += '}'
return retstr
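# A minimal usage sketch (added; names are illustrative, not from the original source):
# params = Parameters()
# params.add('scale', value=1.0, lb=0.0, ub=10.0, vary=True)
# params.dump('params.yaml') # round-trips via params.load('params.yaml')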
class Parameter:
"""
===================================================================================
===================================================================================
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: the parameters class (previous one) is a collection of this
parameter class indexed by the name of each variable
================================================================================
=================================================================================
"""
def __init__(self,
name=None,
vary=False,
value=0.0,
lb=-np.Inf,
ub=np.Inf):
self.name = name
self.vary = vary
self.value = value
self.lb = lb
self.ub = ub
def __str__(self):
retstr = '< Parameter \''+self.name+'\'; value : ' + \
str(self.value)+'; bounds : ['+str(self.lb)+',' + \
str(self.ub)+' ]; vary :'+str(self.vary)+' >'
return retstr
@property
def name(self):
return self._name
@name.setter
def name(self, name):
if(isinstance(name, str)):
self._name = name
@property
def value(self):
return self._value
@value.setter
def value(self, val):
self._value = val
@property
def min(self):
return self._min
@min.setter
def min(self, minval):
self._min = minval
@property
def max(self):
return self._max
@max.setter
def max(self, maxval):
self._max = maxval
@property
def vary(self):
return self._vary
@vary.setter
def vary(self, vary):
if(isinstance(vary, bool)):
self._vary = vary
class Spectrum:
"""
==================================================================================
==================================================================================
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 05/18/2020 SS 1.0 original
>> @DETAILS: spectrum class holds the a pair of x,y data, in this case, would be
2theta-intensity values
==================================================================================
==================================================================================
"""
def __init__(self, x=None, y=None, name=''):
if x is None:
self._x = np.linspace(10., 100., 500)
else:
self._x = x
if y is None:
self._y = np.log(self._x ** 2) - (self._x * 0.2) ** 2
else:
self._y = y
self.name = name
self.offset = 0
self._scaling = 1
self.smoothing = 0
self.bkg_Spectrum = None
@staticmethod
def from_file(filename, skip_rows=0):
try:
if filename.endswith('.chi'):
skip_rows = 4
data = np.loadtxt(filename, skiprows=skip_rows)
x = data.T[0]
y = data.T[1]
name = path.basename(filename).split('.')[:-1][0]
return Spectrum(x, y, name)
except ValueError:
print('Wrong data format for spectrum file! - ' + filename)
return -1
def save(self, filename, header=''):
data = np.dstack((self._x, self._y))
np.savetxt(filename, data[0], header=header)
def set_background(self, spectrum):
self.bkg_Spectrum = spectrum # the data property reads self.bkg_Spectrum, so keep the casing consistent
def reset_background(self):
self.bkg_Spectrum = None
def set_smoothing(self, amount):
self.smoothing = amount
def rebin(self, bin_size):
"""
Returns a new Spectrum which is a rebinned version of the current one.
"""
x, y = self.data
x_min = np.round(np.min(x) / bin_size) * bin_size
x_max = np.round(np.max(x) / bin_size) * bin_size
new_x = np.arange(x_min, x_max + 0.1 * bin_size, bin_size)
bins = np.hstack((x_min - bin_size * 0.5, new_x + bin_size * 0.5))
new_y = (np.histogram(x, bins, weights=y)
[0] / np.histogram(x, bins)[0])
return Spectrum(new_x, new_y)
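# (Added note: the rebinned y above is a weighted-histogram ratio, the sum of y
# per bin divided by the sample count per bin, i.e. the mean intensity per bin.)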
def dump_hdf5(self, file, name):
"""
>> @AUTHOR: <NAME>, Lawrence Livermore National Lab, <EMAIL>
>> @DATE: 01/15/2021 SS 1.0 original
>> @DETAILS: dump the class to a hdf5 file. the file argument could either be a
string or a h5.File instance. If it is a filename, then HDF5 file
is created, a Spectrum group is created and data is written out.
Else data written to Spectrum group in existing file object
>> @PARAMS file file name string or h5py.File object
name name ID of the spectrum e.g. experimental or simulated or background
"""
if(isinstance(file, str)):
fexist = path.isfile(file)
if(fexist):
fid = h5py.File(file, 'r+')
else:
fid = h5py.File(file, 'x')
elif(isinstance(file, h5py.File)):
fid = file
else:
raise RuntimeError(
'Parameters: dump_hdf5 Pass in a filename \
string or h5py.File object')
name_spectrum = 'Spectrum/'+name
if(name_spectrum in fid):
del(fid[name_spectrum])
gid = fid.create_group(name_spectrum)
tth, I = self.data
# make sure these arrays are not zero sized
if(tth.shape[0] > 0):
did = gid.create_dataset("tth", tth.shape, dtype=np.float64)
did.write_direct(tth.astype(np.float64))
if(I.shape[0] > 0):
did = gid.create_dataset("intensity", I.shape, dtype=np.float64)
did.write_direct(I.astype(np.float64))
@property
def data(self):
if self.bkg_Spectrum is not None:
# create background function
x_bkg, y_bkg = self.bkg_Spectrum.data
if not np.array_equal(x_bkg, self._x):
# the background will be interpolated
f_bkg = interp1d(x_bkg, y_bkg, kind='linear')
# find overlapping x and y values:
ind = np.where((self._x <= np.max(x_bkg)) &
(self._x >= np.min(x_bkg)))
x = self._x[ind]
y = self._y[ind]
if len(x) == 0:
""" if there is no overlapping between background
and Spectrum, raise an error """
raise BkgNotInRangeError(self.name)
y = y * self._scaling + self.offset - f_bkg(x)
else:
""" if Spectrum and bkg have the same
x basis we just delete y-y_bkg"""
x, y = self._x, self._y * \
self._scaling + self.offset - y_bkg
else:
x, y = self.original_data
if self.smoothing > 0:
y = gaussian_filter1d(y, self.smoothing)
return x, y
@data.setter
def data(self, data):
(x, y) = data
self._x = x
self._y = y
self.scaling = 1
self.offset = 0
@property
def original_data(self):
return self._x, self._y * self._scaling +\
self.offset
@property
def x(self):
return self._x
@x.setter
def x(self, new_value):
self._x = new_value
@property
def y(self):
return self._y
@y.setter
def y(self, new_y):
self._y = new_y
@property
def scaling(self):
return self._scaling
@scaling.setter
def scaling(self, value):
if value < 0:
self._scaling = 0
else:
self._scaling = value
def limit(self, x_min, x_max):
x, y = self.data
return Spectrum(x[np.where((x_min < x) & (x < x_max))],
y[
|
np.where((x_min < x) & (x < x_max))
|
numpy.where
|
import numpy as np
import warnings
from numdifftools import Jacobian
from numpy.linalg import inv
from pandas import DataFrame
from scipy import optimize
from scipy.linalg import block_diag
from scipy.special import expit
from scipy.stats import norm
from scipy.stats import kendalltau
__all__ = ['OrderedLogit', 'OrderedProbit']
class LinearOrdinalRegression():
"""
A general class for linear ordinal regression fitting. The cumulative distribution
for the probability of being classified into category p depends linearly on the regressors
through a link function Phi:
P(Y < p | X_i) = Phi(alpha_p - X_i.beta)
Parameters:
link: a link function that is increasing and bounded by 0 and 1
deriv_link: the derivative of the link function
significance: the significance of confidence levels reported in the fit summary
"""
def __init__(self, link, deriv_link, significance=0.95):
self.significance = significance
self.link = link
self.deriv_link = deriv_link
def fit(self, X, y, maxfun=100000, maxiter=100000, epsilon=10E-9):
"""
Fit a linear ordinal regression model to the input data by maximizing the
log likelihood function.
Parameters:
X: a pandas DataFrame or numpy array of numerical regressors
y: a column of ordinal-valued data
maxfun: the maximum number of function calls used by scipy.optimize()
maxiter: the maximum number of iterations used by scipy.optimize()
epsilon: the minimum difference between successive intercepts, alpha_{p+1} - alpha_p
Returns:
self, with alpha_, beta_, coef_, se_, p_values_ and score_ properties determined
"""
X_data, X_scale, X_mean, X_std = self._prepare_X(X)
y_data = self._prepare_y(y)
beta_guess = np.zeros(self.n_attributes)
gamma_guess = np.ones(self.n_classes - 1)
bounds = [(None, None)] * (self.n_attributes + 1) + [(epsilon, None)] * (self.n_classes - 2)
optimization = optimize.minimize(
self._log_likelihood,
np.append(beta_guess, gamma_guess),
jac=self._gradient,
args=(X_scale, y_data),
bounds=bounds,
method='L-BFGS-B',
options={'maxfun': maxfun, 'maxiter': maxiter}
)
if not optimization.success:
message = 'Likelihood maximization failed - ' + str(optimization.message, 'utf-8')
warnings.warn(message, RuntimeWarning)
self.beta_ = optimization.x[:self.n_attributes] / X_std
gamma = optimization.x[self.n_attributes:]
gamma[0] = gamma[0] + X_mean.dot(self.beta_)
self.alpha_ = np.cumsum(gamma)
self.se_ = self._compute_standard_errors(np.append(self.beta_, gamma), X_data, y_data)
self.p_values_ = self._compute_p_values()
self.score_ = self._compute_score(X_data, y_data)
return self
@property
def coef_(self):
return np.append(self.beta_, self.alpha_)
@property
def summary(self):
"""
Summary statistics describing the fit.
Returns:
a pandas DataFrame with columns coef, se(coef), p, lower, upper
"""
significance_std_normal = norm.ppf((1. + self.significance) / 2.)
df = self.attribute_names.set_index('attribute names')
df['beta'] = self.beta_
df['se(beta)'] = self.se_[:self.n_attributes]
df['p'] = self.p_values_[:self.n_attributes]
conf_interval = significance_std_normal * self.se_[:self.n_attributes]
df['lower %.2f' % self.significance] = self.beta_ - conf_interval
df['upper %.2f' % self.significance] = self.beta_ + conf_interval
return df
def print_summary(self):
"""
Print summary statistics describing the fit.
"""
def significance_code(p):
if p < 0.001:
return '***'
elif p < 0.01:
return '**'
elif p < 0.05:
return '*'
elif p < 0.1:
return '.'
else:
return ' '
df = self.summary
# Significance codes last
df[''] = [significance_code(p) for p in df['p']]
# Print information about data first
print('n={}'.format(self.N), end="\n")
print(df.to_string(float_format=lambda f: '{:4.4f}'.format(f)))
# Significance code explanation
print('---')
print("Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 ",
end='\n\n')
print("Somers' D = {:.3f}".format(self.score_))
return
def predict_linear_product(self, X):
"""
Predict the linear product score X.beta for a set of input variables
Parameters:
X: a pandas DataFrame or numpy array of inputs to predict, one row per input
Returns:
a numpy array with the predicted linear product score for each input
"""
if X.ndim == 1:
X = X[None, :]
return X.dot(self.beta_)[:, None]
def predict_probabilities(self, X):
"""
Predict the probability of input variables belonging to each ordinal class
Parameters:
X: a pandas DataFrame or numpy array of inputs to predict, one row per input
Returns:
a numpy array with n_classes columns listing the probability of belonging to each class
"""
bounded_alpha = self._bounded_alpha(self.alpha_)
z = bounded_alpha - self.predict_linear_product(X)
cumulative_dist = self.link(z)
return np.diff(cumulative_dist)
def predict_class(self, X):
"""
Predict the most likely class for a set of input variables
Parameters:
X: a pandas DataFrame or numpy array of inputs to predict, one row per input
Returns:
a numpy array with the predicted most likely class for each input
"""
probs = self.predict_probabilities(X)
raw_predictions = np.argmax(probs, axis=1) + 1
return np.vectorize(self._y_dict.get)(raw_predictions)
def _prepare_X(self, X):
X_data = np.asarray(X)
X_data = X_data[:, None] if len(X_data.shape) == 1 else X_data
self.N, self.n_attributes = X_data.shape
self.attribute_names = self._get_column_names(X)
X_std = X_data.std(0)
X_mean = X_data.mean(0)
trivial_X = X_std == 0
if any(trivial_X):
raise ValueError(
'The regressors {} have 0 variance.'.format(self.attribute_names[trivial_X].values)
)
return X_data, (X_data - X_mean) / X_std, X_mean, X_std
def _prepare_y(self, y):
y_data = np.asarray(y).astype(int) # np.int is removed in modern NumPy
y_values = np.sort(np.unique(y_data))
self.n_classes = len(y_values)
y_range = np.arange(1, self.n_classes + 1)
self._y_dict = dict(zip(y_range, y_values))
y_data = np.vectorize(dict(zip(y_values, y_range)).get)(y_data)
self._indicator_plus = np.array([y_data == i + 1 for i in range(self.n_classes - 1)]) * 1.0
self._indicator_minus = np.array([y_data - 1 == i + 1 for i in range(self.n_classes - 1)]) * 1.0
return y_data
def _get_column_names(self, X):
if isinstance(X, DataFrame):
column_names = X.columns.tolist()
else:
column_names = ['column_' + str(i+1) for i in range(self.n_attributes)]
return DataFrame(column_names, columns=['attribute names'])
def _log_likelihood(self, coefficients, X_data, y_data):
beta = coefficients[:self.n_attributes]
gamma = coefficients[self.n_attributes:]
bounded_alpha = self._bounded_alpha(np.cumsum(gamma))
z_plus = bounded_alpha[y_data] - X_data.dot(beta)
z_minus = bounded_alpha[y_data-1] - X_data.dot(beta)
return - 1.0 * np.sum(np.log(self.link(z_plus) - self.link(z_minus)))
def _gradient(self, coefficients, X_data, y_data):
beta = coefficients[:self.n_attributes]
gamma = coefficients[self.n_attributes:]
bounded_alpha = self._bounded_alpha(np.cumsum(gamma))
deriv_link_plus = self.deriv_link(bounded_alpha[y_data] - X_data.dot(beta))
deriv_link_minus = self.deriv_link(bounded_alpha[y_data-1] - X_data.dot(beta))
denominator = self.link(bounded_alpha[y_data] - X_data.dot(beta)) - self.link(bounded_alpha[y_data-1] - X_data.dot(beta))
#the only way the denominator can vanish is if the numerator also vanishes
#so we can safely overwrite any division by zero that arises numerically
denominator[denominator == 0] = 1
quotient_plus = deriv_link_plus / denominator
quotient_minus = deriv_link_minus / denominator
beta_gradient = (quotient_plus - quotient_minus).dot(X_data) # term differentiating w.r.t. beta
alpha_gradient = self._indicator_minus.dot(quotient_minus) - self._indicator_plus.dot(quotient_plus) # term w.r.t. the cut-points
return np.append(beta_gradient, alpha_gradient).dot(self._compute_basis_change())
def _compute_standard_errors(self, coefficients, X_data, y_data):
hessian_function = Jacobian(self._gradient, method='forward')
H = hessian_function(coefficients, X_data, y_data)
P = self._compute_basis_change()
return np.sqrt(np.diagonal(P.dot(
|
inv(H)
|
numpy.linalg.inv
|
import os
import sys
import matplotlib.pyplot as plt
import numpy as np
import pytest
from sklearn.cluster import KMeans
def test_representations_examples():
# Disable graphics for testing purposes
plt.show = lambda: None
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(here + "/../example")
import diagram_vectorizations_distances_kernels
return None
from gudhi.representations.vector_methods import Atol
from gudhi.representations.metrics import *
from gudhi.representations.kernel_methods import *
def _n_diags(n):
l = []
for _ in range(n):
a =
|
np.random.rand(50, 2)
|
numpy.random.rand
|
# To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Rayleigh-Ritz Method
#
# 
# %%
import numpy as np
from scipy import integrate
from scipy.misc import derivative
from scipy.linalg import eigh
import matplotlib.pyplot as plt
import json
pi = np.pi
# %%
# Beam parameters
L = 1.5 # Length [m]
rho = 7700 # Density [kg/m**3]
E = 2.1e11 # Young's modulus [Pa]
A1 = pi * 0.03 ** 2 / 4 # Area of section 1 [m**2]
A2 = pi * 0.06 ** 2 / 4 # Area of section 2 [m**2]
A3 = pi * 0.03 ** 2 / 4 # Area of section 3 [m**2]
I1 = (pi / 4) * (0.03 / 2) ** 4 # Area moment of inertia of section 1 [m**4]
I2 = (pi / 4) * (0.06 / 2) ** 4 # Area moment of inertia of section 2 [m**4]
I3 = (pi / 4) * (0.03 / 2) ** 4 # Area moment of inertia of section 3 [m**4]
m_eng = 20 # Gear mass [kg]
J_eng = 0.25 * m_eng * 0.1 ** 2 # Gear moment of inertia about Z axis [kg*m**2]
k_mola = 2 * 10e3 # Equivalent stiffness of the two springs [N/m]
# Base function definition d_j(x)
def eta(j):
if j == 1:
return 1.875 / L
if j > 1:
return (j - 0.5) * pi / L
def D(etaj):
etajL = etaj * L
return (
(np.cos(etajL) + np.cosh(etajL)) / (np.sin(etajL) - np.sinh(etajL))
).tolist()
def d(x, etaj):
etajx = etaj * x
D_ = D(etaj)
# It was necessary to reformulate to avoid numerical errors when working with very large and very small numbers
return (
np.sin(etajx)
+ D_ * np.cos(etajx)
- 0.5 * ((D_ - 1) * np.exp(-etajx) + (D_ + 1) * np.exp(etajx))
)
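# Added note: eta(j) above are the classic clamped-free (cantilever) beam
# wavenumbers, beta_1*L = 1.875 and beta_j*L ~ (j - 1/2)*pi for j > 1, so each
# d_j(x) is a cantilever mode shape used as an admissible Rayleigh-Ritz function.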
# %% [markdown]
# ## Computing [K] and [M]
# %%
n = np.array([4, 8, 12, 16]) # Array with the number of base functions
N = np.max(n)
K = np.zeros((N, N)) # Stiffness matrix
M = np.zeros((N, N)) # Mass matrix
# Beam segments for numerical integration
L1 = np.linspace(0, L / 3, 50000)
L2 = np.linspace(L / 3, 2 * L / 3, 50000)
L3 = np.linspace(2 * L / 3, L, 50000)
didj = lambda x, i, j: d(x, eta(i)) * d(x, eta(j))
diff2 = lambda x, i, j: derivative(d, x, n=2, dx=1e-6, args=(eta(i),)) * derivative(
d, x, n=2, dx=1e-6, args=(eta(j),)
)
for i in range(1, N + 1):
for j in range(i, N + 1):
M[i - 1, j - 1] = (
rho * A1 * integrate.simpson(didj(L1, i, j), L1)
+ rho * A2 * integrate.simpson(didj(L2, i, j), L2)
+ rho * A3 * integrate.simpson(didj(L3, i, j), L3)
+ m_eng * d(L / 3, eta(i)) * d(L / 3, eta(j))
+ J_eng
* derivative(d, L / 3, dx=1e-6, args=(eta(i),))
* derivative(d, L / 3, dx=1e-6, args=(eta(j),))
)
K[i - 1, j - 1] = (
E * I1 * integrate.simpson(diff2(L1, i, j), L1)
+ E * I2 * integrate.simpson(diff2(L2, i, j), L2)
+ E * I3 * integrate.simpson(diff2(L3, i, j), L3)
+ k_mola * d(2 * L / 3, eta(i)) * d(2 * L / 3, eta(j))
)
# Mirrorring the matrices, since they are symmetric
M = (M + M.T - np.diag(np.diagonal(M))).real
K = (K + K.T - np.diag(np.diagonal(K))).real
# %% [markdown]
# ## Solving the generalized eigenvalue problem for [K] e [M]
# %%
results = dict()
x = np.linspace(
0, L, int(L / 0.001 + 1)
) # Position where the rensponse will be calculated
# Computing the base functions at x
d_arr = np.zeros((N, x.size))
for j in range(1, N + 1):
d_arr[j - 1, :] = d(x, eta(j))
for p in n:
p = int(p)
# Eigenvalue problem for real, symmetric dense matrices [K] and [M].
# W is a 1D ndarray of eigenvalues and A is a 2D ndarray whose columns are the eigenvectors
W, A = eigh(a=K[:p, :p], b=M[:p, :p])
# Ordering eigenvalues and the eigenvectors matrix
idx = W.argsort()
W = W[idx].real
A = A[:, idx].real
# Normalizing the eigenvector matrix by the mass matrix, such that A.T @ M @ A = I
m_r = np.diagonal(A.T @ M[:p, :p] @ A)
m_r = np.reciprocal(np.sqrt(m_r))
for a in range(A.shape[1]):
A[:, a] *= m_r[a] # multiply every column by the scale factor
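# (Added note: scaling each column by 1/sqrt(m_r) mass-normalises the modes,
# so A.T @ M @ A = I and A.T @ K @ A becomes the diagonal of eigenvalues W.)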
dj = d_arr[:p, :] # slices the array up to the desired number of base functions
# Make the mode shapes have the same orientation by checking the covariance with the first computed case,
# so that results for different numbers of base functions can be compared
phi = dj.T @ A
try:
for k in range(p): # k-th mode
cov = np.cov(results[n[0]]["V"][:, k], phi[:, k])[0][1]
cov = cov / np.abs(cov) # -1 or 1
phi[:, k] *= cov
except KeyError: # the first computed case has no reference to compare against yet
pass
# Store in a dict
results[p] = dict()
results[p]["V"] = phi
results[p]["fn"] =
|
np.real(W ** 0.5 / (2 * np.pi))
|
numpy.real
|
"""
integration testing module for tunable coupler element
and line specific chain of signal generation.
"""
# System imports
import copy
import pickle
import pytest
import numpy as np
# Main C3 objects
from c3.c3objs import Quantity as Qty
from c3.parametermap import ParameterMap as PMap
from c3.experiment import Experiment as Exp
from c3.system.model import Model as Mdl
from c3.generator.generator import Generator as Gnr
# Building blocks
import c3.generator.devices as devices
import c3.signal.gates as gates
import c3.system.chip as chip
import c3.signal.pulse as pulse
# Libs and helpers
import c3.libraries.hamiltonians as hamiltonians
import c3.libraries.envelopes as envelopes
lindblad = False
dressed = True
q1_lvls = 3
q2_lvls = 3
tc_lvls = 3
freq_q1 = 6.189e9
freq_q2 = 5.089e9
freq_tc = 8.1e9
phi_0_tc = 10
fluxpoint = phi_0_tc * 0.23
d = 0.36
anhar_q1 = -286e6
anhar_q2 = -310e6
anhar_TC = -235e6
coupling_strength_q1tc = 142e6
coupling_strength_q2tc = 116e6
coupling_strength_q1q2 = 0 * 1e6
t1_q1 = 23e-6
t1_q2 = 70e-6
t1_tc = 15e-6
t2star_q1 = 27e-6
t2star_q2 = 50e-6
t2star_tc = 7e-6
init_temp = 0.06
v2hz = 1e9
t_final = 10e-9 # Time for single qubit gates
sim_res = 100e9
awg_res = 2.4e9
cphase_time = 100e-9 # Two qubit gate
flux_freq = 829 * 1e6
offset = 0 * 1e6
fluxamp = 0.1 * phi_0_tc
t_down = cphase_time - 5e-9
xy_angle = 0.3590456701578104
framechange_q1 = 0.725 * np.pi
framechange_q2 = 1.221 * np.pi
# ### MAKE MODEL
q1 = chip.Qubit(
name="Q1",
desc="Qubit 1",
freq=Qty(value=freq_q1, min_val=5.0e9, max_val=8.0e9, unit="Hz 2pi"),
anhar=Qty(value=anhar_q1, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
hilbert_dim=q1_lvls,
t1=Qty(value=t1_q1, min_val=5e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_q1, min_val=10e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
q2 = chip.Qubit(
name="Q2",
desc="Qubit 2",
freq=Qty(value=freq_q2, min_val=5.0e9, max_val=8.0e9, unit="Hz 2pi"),
anhar=Qty(value=anhar_q2, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
hilbert_dim=q2_lvls,
t1=Qty(value=t1_q2, min_val=5e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_q2, min_val=10e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
tc_at = chip.Transmon(
name="TC",
desc="Tunable Coupler",
freq=Qty(value=freq_tc, min_val=0.0e9, max_val=10.0e9, unit="Hz 2pi"),
phi=Qty(
value=fluxpoint, min_val=-5.0 * phi_0_tc, max_val=5.0 * phi_0_tc, unit="Wb"
),
phi_0=Qty(
value=phi_0_tc, min_val=phi_0_tc * 0.9, max_val=phi_0_tc * 1.1, unit="Wb"
),
d=Qty(value=d, min_val=d * 0.9, max_val=d * 1.1, unit=""),
hilbert_dim=tc_lvls,
anhar=Qty(value=anhar_TC, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
t1=Qty(value=t1_tc, min_val=1e-6, max_val=90e-6, unit="s"),
t2star=Qty(value=t2star_tc, min_val=1e-6, max_val=90e-6, unit="s"),
temp=Qty(value=init_temp, min_val=0.0, max_val=0.12, unit="K"),
)
q1tc = chip.Coupling(
name="Q1-TC",
desc="Coupling qubit 1 to tunable coupler",
connected=["Q1", "TC"],
strength=Qty(
value=coupling_strength_q1tc, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
q2tc = chip.Coupling(
name="Q2-TC",
desc="Coupling qubit 2 to t×unable coupler",
connected=["Q2", "TC"],
strength=Qty(
value=coupling_strength_q2tc, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
q1q2 = chip.Coupling(
name="Q1-Q2",
desc="Coupling qubit 1 to qubit 2",
connected=["Q1", "Q2"],
strength=Qty(
value=coupling_strength_q1q2, min_val=0 * 1e4, max_val=200e6, unit="Hz 2pi"
),
hamiltonian_func=hamiltonians.int_XX,
)
drive_q1 = chip.Drive(
name="Q1",
desc="Drive on Q1",
connected=["Q1"],
hamiltonian_func=hamiltonians.x_drive,
)
drive_q2 = chip.Drive(
name="Q2",
desc="Drive on Q2",
connected=["Q2"],
hamiltonian_func=hamiltonians.x_drive,
)
flux = chip.Drive(
name="TC",
desc="Flux drive/control on tunable couler",
connected=["TC"],
hamiltonian_func=hamiltonians.z_drive,
)
phys_components = [tc_at, q1, q2]
line_components = [flux, q1tc, q2tc, q1q2, drive_q1, drive_q2]
model = Mdl(phys_components, line_components, [])
model.set_lindbladian(lindblad)
model.set_dressed(dressed)
# ### MAKE GENERATOR
lo = devices.LO(name="lo", resolution=sim_res)
awg = devices.AWG(name="awg", resolution=awg_res)
dig_to_an = devices.DigitalToAnalog(name="dac", resolution=sim_res)
resp = devices.Response(
name="resp",
rise_time=Qty(value=0.3e-9, min_val=0.05e-9, max_val=0.6e-9, unit="s"),
resolution=sim_res,
)
mixer = devices.Mixer(name="mixer")
fluxbias = devices.FluxTuning(
name="fluxbias",
phi_0=Qty(
value=phi_0_tc, min_val=0.9 * phi_0_tc, max_val=1.1 * phi_0_tc, unit="Wb"
),
phi=Qty(
value=fluxpoint, min_val=-1.0 * phi_0_tc, max_val=1.0 * phi_0_tc, unit="Wb"
),
omega_0=Qty(
value=freq_tc, min_val=0.9 * freq_tc, max_val=1.1 * freq_tc, unit="Hz 2pi"
),
d=Qty(value=d, min_val=d * 0.9, max_val=d * 1.1, unit=""),
anhar=Qty(value=anhar_q1, min_val=-380e6, max_val=-120e6, unit="Hz 2pi"),
)
v_to_hz = devices.VoltsToHertz(
name="v2hz",
V_to_Hz=Qty(value=v2hz, min_val=0.9 * v2hz, max_val=1.1 * v2hz, unit="Hz 2pi/V"),
)
device_dict = {
dev.name: dev for dev in [lo, awg, mixer, dig_to_an, resp, v_to_hz, fluxbias]
}
generator = Gnr(
devices=device_dict,
chains={
"TC": ["lo", "awg", "dac", "resp", "mixer", "fluxbias"],
"Q1": ["lo", "awg", "dac", "resp", "mixer", "v2hz"],
"Q2": ["lo", "awg", "dac", "resp", "mixer", "v2hz"],
},
)
# ### MAKE GATESET
nodrive_env = pulse.Envelope(name="no_drive", params={}, shape=envelopes.no_drive)
carrier_parameters = {
"freq": Qty(value=freq_q1, min_val=0e9, max_val=10e9, unit="Hz 2pi"),
"framechange": Qty(value=0.0, min_val=-3 * np.pi, max_val=5 * np.pi, unit="rad"),
}
carr_q1 = pulse.Carrier(
name="carrier", desc="Frequency of the local oscillator", params=carrier_parameters
)
carr_q2 = copy.deepcopy(carr_q1)
carr_q2.params["freq"].set_value(freq_q2)
carr_tc = copy.deepcopy(carr_q1)
carr_tc.params["freq"].set_value(flux_freq)
flux_params = {
"amp": Qty(value=fluxamp, min_val=0.0, max_val=5, unit="V"),
"t_final": Qty(
value=cphase_time,
min_val=0.5 * cphase_time,
max_val=1.5 * cphase_time,
unit="s",
),
"t_up": Qty(
value=5 * 1e-9, min_val=0.0 * cphase_time, max_val=0.5 * cphase_time, unit="s"
),
"t_down": Qty(
value=t_down, min_val=0.5 * cphase_time, max_val=1.0 * cphase_time, unit="s"
),
"risefall": Qty(
value=5 * 1e-9, min_val=0.0 * cphase_time, max_val=1.0 * cphase_time, unit="s"
),
"freq_offset": Qty(
value=offset, min_val=-50 * 1e6, max_val=50 * 1e6, unit="Hz 2pi"
),
"xy_angle": Qty(
value=xy_angle, min_val=-0.5 * np.pi, max_val=2.5 * np.pi, unit="rad"
),
}
flux_env = pulse.Envelope(
name="flux",
desc="Flux bias for tunable coupler",
params=flux_params,
shape=envelopes.flattop,
)
CRZp = gates.Instruction(
name="Id:CRZp", t_start=0.0, t_end=cphase_time, channels=["Q1", "Q2", "TC"]
)
CRZp.add_component(flux_env, "TC")
CRZp.add_component(carr_tc, "TC")
CRZp.add_component(nodrive_env, "Q1")
CRZp.add_component(carr_q1, "Q1")
CRZp.comps["Q1"]["carrier"].params["framechange"].set_value(framechange_q1)
CRZp.add_component(nodrive_env, "Q2")
CRZp.add_component(carr_q2, "Q2")
CRZp.comps["Q2"]["carrier"].params["framechange"].set_value(framechange_q2)
# ### MAKE EXPERIMENT
parameter_map = PMap(instructions=[CRZp], model=model, generator=generator)
exp = Exp(pmap=parameter_map)
##### TESTING ######
with open("test/tunable_coupler_data.pickle", "rb") as filename:
data = pickle.load(filename)
@pytest.mark.integration
def test_coupler_frequency() -> None:
coupler_01 = np.abs(
np.abs(model.eigenframe[model.state_labels.index((0, 0, 0))])
- np.abs(model.eigenframe[model.state_labels.index((1, 0, 0))])
)
rel_diff = np.abs((coupler_01 - data["coupler_01"]) / data["coupler_01"])
assert rel_diff < 1e-12
@pytest.mark.integration
def test_coupler_anharmonicity() -> None:
coupler_12 = np.abs(
np.abs(model.eigenframe[model.state_labels.index((1, 0, 0))])
- np.abs(model.eigenframe[model.state_labels.index((2, 0, 0))])
)
rel_diff = np.abs((coupler_12 - data["coupler_12"]) / data["coupler_12"])
assert rel_diff < 1e-12
@pytest.mark.integration
def test_energy_levels() -> None:
model = parameter_map.model
parameter_map.set_parameters([0.0], [[["TC-phi"]]])
model.update_model()
labels = [
model.state_labels[indx]
for indx in np.argsort(np.abs(model.eigenframe) / 2 / np.pi / 1e9)
]
product_basis = []
dressed_basis = []
ordered_basis = []
transforms = []
steps = 101
min_ratio = -0.10
max_ratio = 0.7
flux_ratios = np.linspace(min_ratio, max_ratio, steps, endpoint=True)
for flux_ratio in flux_ratios:
flux_bias = flux_ratio * phi_0_tc
parameter_map.set_parameters(
[flux_bias, 0.0, 0.0, 0.0],
[
[["TC-phi"]],
[["Q1-TC-strength"]],
[["Q2-TC-strength"]],
[["Q1-Q2-strength"]],
],
)
model.update_model()
product_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in labels
]
)
parameter_map.set_parameters(
[coupling_strength_q1tc, coupling_strength_q2tc, coupling_strength_q1q2],
[[["Q1-TC-strength"]], [["Q2-TC-strength"]], [["Q1-Q2-strength"]]],
)
model.update_model()
ordered_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in labels
]
)
parameter_map.model.update_dressed(ordered=False)
dressed_basis.append(
[
model.eigenframe[model.state_labels.index(label)] / 2 / np.pi / 1e9
for label in model.state_labels
]
)
transforms.append(
np.array(
[
np.real(model.transform[model.state_labels.index(label)])
for label in labels
]
)
)
parameter_map.set_parameters([fluxpoint], [[["TC-phi"]]])
model.update_model()
dressed_basis = np.array(dressed_basis)
ordered_basis = np.array(ordered_basis)
product_basis = np.array(product_basis)
print((np.abs(product_basis - data["product_basis"]) < 1).all())
assert (np.abs(product_basis - data["product_basis"]) < 1).all()
assert (np.abs(ordered_basis - data["ordered_basis"]) < 1).all()
# Dressed basis might change at avoided crossings depending on how we
# decide to deal with it. Atm no state with largest probability is chosen.
assert (
|
np.abs(dressed_basis - data["dressed_basis"])
|
numpy.abs
|
import cv2
import torch
import kornia
import numpy as np
def four_point_to_homography(corners, deltas, crop=False):
"""
Args:
corners ():
deltas ():
crop (bool): If set to true, homography will aready contain cropping part.
"""
assert len(corners.shape) == 3, 'corners should be of size B, 4, 2, but got: {}'.format(corners.shape)
assert len(deltas.shape) == 3, 'deltas should be of size B, 4, 2, but got: {}'.format(deltas.shape)
# in order to apply transform and center crop,
# subtract points by top-left corner (corners[N, 0])
if 'torch' in str(type(corners)):
if crop:
corners = corners - corners[:, 0].view(-1, 1, 2)
corners_hat = corners + deltas
return kornia.get_perspective_transform(corners, corners_hat)
elif 'numpy' in str(type(corners)):
if crop:
corners = corners - corners[:, 0].reshape(-1, 1, 2)
corners_hat = corners + deltas
return cv2.getPerspectiveTransform(np.float32(corners), np.float32(corners_hat))
else:
assert False, 'Wrong type?'
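# A minimal usage sketch (added; shapes follow the asserts above, names are illustrative):
# corners = image_shape_to_corners(patch) # patch: B x C x H x W
# H = four_point_to_homography(corners, deltas) # deltas: B x 4 x 2, e.g. predicted by a network
# yielding one 3x3 homography per batch element (torch) or a single 3x3 matrix (numpy).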
def image_shape_to_corners(patch):
assert len(patch.shape) == 4, 'patch should be of size B, C, H, W'
batch_size = patch.shape[0]
image_width = patch.shape[-2]
image_height = patch.shape[-1]
if 'torch' in str(type(patch)):
corners = torch.tensor([[0, 0], [image_width, 0], [image_width, image_height], [0, image_height]],
device=patch.device, dtype=patch.dtype, requires_grad=False)
corners = corners.repeat(batch_size, 1, 1)
elif 'numpy' in str(type(patch)):
corners = np.float32([[0, 0], [image_width, 0], [image_width, image_height], [0, image_height]])
corners = np.tile(np.expand_dims(corners, axis=0), (batch_size, 1, 1))
else:
assert False, 'Wrong type?'
return corners
def warp_image(image, homography, target_h, target_w, inverse=True):
if 'torch' in str(type(homography)):
if inverse:
homography = torch.inverse(homography)
return kornia.warp_perspective(image, homography, tuple((target_h, target_w)))
elif 'numpy' in str(type(homography)):
if inverse:
homography = np.linalg.inv(homography)
return cv2.warpPerspective(image, homography, dsize=tuple((target_w, target_h)))
else:
assert False, 'Wrong type?'
def perspectiveTransform(points, homography):
"""
Transform point with given homography.
Args:
points (np.array of size Nx2) - 2D points to be transformed
homography (np.array of size 3x3) - homography matrix
Returns:
(np.array of size Nx2) - transformed 2D points
"""
# Asserts
assert len(points.shape) == 2 and points.shape[1] == 2, 'points arg should be of size Nx2, but has size: {}'. \
format(points.shape)
assert homography.shape == (3, 3), 'homography arg should be of size 3x3, but has size: {}'.format(homography.shape)
if 'torch' in str(type(homography)) and 'torch' in str(type(points)):
# if inverse:
# homography = torch.inverse(homography)
points = torch.nn.functional.pad(points, (0, 1), "constant", 1.)
points_transformed = homography @ (points.permute(1, 0))
points_transformed = points_transformed.permute(1, 0)
return points_transformed[:, :2] / points_transformed[:, 2:].repeat(1, 2)
elif 'numpy' in str(type(homography)) and 'numpy' in str(type(points)):
# if inverse:
# homography = np.linalg.inv(homography)
return cv2.perspectiveTransform(np.expand_dims(points, axis=0), homography).squeeze()
else:
assert False, 'Wrong or inconsistent types?'
def perspectiveTransformBatched(points, homography):
"""
Transform point with given homography.
Args:
points (np.array of size BxNx2) - 2D points to be transformed
homography (np.array of size Bx3x3) - homography matrix
Returns:
(np.array of size BxNx2) - transformed 2D points
"""
# Asserts
assert len(points.shape) == 3 and points.shape[2] == 2, 'points arg should be of size BxNx2, but has size: {}'. \
format(points.shape)
assert homography.shape[1:] == (3, 3), 'homography arg should be of size Bx3x3, but has size: {}'\
.format(homography.shape)
if 'torch' in str(type(homography)) and 'torch' in str(type(points)):
points = torch.nn.functional.pad(points, (0, 1), "constant", 1.)
points_transformed = homography @ (points.permute(0, 2, 1))
points_transformed = points_transformed.permute(0, 2, 1)
return points_transformed[:, :, :2] / points_transformed[:, :, 2:].repeat(1, 1, 2)
elif 'numpy' in str(type(homography)) and 'numpy' in str(type(points)):
assert False, 'Not implemented - I was too lazy, sorry!'
else:
assert False, 'Wrong or inconsistent types?'
def calc_reprojection_error(source_points, target_points, homography):
"""
Calculate reprojection error for a given homography.
Args:
source_points (np.array of size Nx2) - 2D points to be transformed
target_points (np.array of size Nx2) - target 2D points
homography (np.array of size 3x3) - homography matrix
Returns:
(float) - reprojection error
"""
# Asserts
assert len(source_points.shape) == 2 and source_points.shape[1] == 2, 'source_points arg should be of size Nx2, ' \
'but has size: {}'.format(source_points.shape)
assert len(target_points.shape) == 2 and target_points.shape[1] == 2, 'target_points arg should be of size Nx2, ' \
'but has size: {}'.format(target_points.shape)
assert homography.shape == (3, 3), 'homography arg should be of size 3x3, but has size: {}'.format(homography.shape)
if 'torch' in str(type(homography)) and 'torch' in str(type(source_points)) and 'torch' in str(type(target_points)):
transformed_points = perspectiveTransform(source_points, homography)
reprojection_error = torch.sum((transformed_points - target_points) ** 2)
return reprojection_error
if 'numpy' in str(type(homography)) and 'numpy' in str(type(source_points)) and 'numpy' in str(type(target_points)):
transformed_points = cv2.perspectiveTransform(
|
np.expand_dims(source_points, axis=0)
|
numpy.expand_dims
|
import numpy as np
from matplotlib import pyplot as plt
from pynput import keyboard, mouse
from time import time


def main():
    size = 15
    global key
    key = None  # register keypresses
    listener = keyboard.Listener(on_press=on_press)
    listener.start()
    last_mouse = [0, 0]
    posx, posy, rot = (1, np.random.randint(1, size - 1), 1)  # player position
    bg = np.linspace(0, 1, 150)  # background gradient
    mapc, maph, mapr, ex, ey = maze_generator(posx, posy, size)  # map, exit
    plt.figure(num='Pycaster 2.0')
    while True:  # main game loop
        start = time()
        rot, last_mouse = rotation(rot, last_mouse)
        plt.hlines(-0.5, 0, 60, colors='k', lw=165, alpha=np.sin((rot + np.pi/2)/2)**2/2)
        plt.hlines(0.5, 0, 60, colors='k', lw=165, alpha=np.sin((rot - np.pi/2)/2)**2/2)
        plt.scatter([30]*150, -bg, c=-bg, s=200000, marker='_', cmap='Greys')
        plt.scatter([30]*150, bg, c=bg, s=200000, marker='_', cmap='Blues')
        tx, ty, tc = ([], [], [])
        for i in range(60):  # vision loop: one ray per degree across a 60-degree FOV
            rot_i = rot + np.deg2rad(i - 30)
            x, y = (posx, posy)
            sin, cos = (0.04*np.sin(rot_i), 0.04*
|
np.cos(rot_i)
|
numpy.cos
|
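For the row above: the vision loop casts 60 rays over a 60-degree field of view and, in the elided remainder, marches each ray outward in 0.04-unit steps until it hits a wall. A hypothetical, self-contained sketch of that march; the map layout and the step rule here are illustrative assumptions, not the dataset's code.

import numpy as np

maph = np.zeros((15, 15), dtype=int)
maph[0, :] = maph[-1, :] = maph[:, 0] = maph[:, -1] = 1  # boundary walls (assumed map)
x, y, rot_i = 1.5, 1.5, np.deg2rad(30)
sin, cos = 0.04 * np.sin(rot_i), 0.04 * np.cos(rot_i)
while maph[int(x)][int(y)] == 0:                         # march until the ray enters a wall cell
    x, y = x + cos, y + sin
print(round(x, 2), round(y, 2))                          # first wall hit along this ray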
# -*- coding: utf-8 -*-
import glob
import os
from functools import partial
from multiprocessing import Pool
import numpy as np
import gzip
import pickle
import random
import nibabel as nib


def read_image(img_path):
    data = nib.load(img_path)
    hdr = data.header
    # convert to numpy (get_data() is deprecated in recent nibabel; get_fdata() is the modern equivalent)
    data = data.get_data()
    return data, hdr


def image_norm(img):
    # z-score normalisation computed over nonzero (foreground) voxels only
    pixels = img[img > 0]
    mean = pixels.mean()
    std = pixels.std()
    out = (img - mean) / std
    out[img == 0] = 0
    return out


def savenpy(id, filelist, pre_data_result, train_val_test, context_num):
    print('start processing %s \t %d/%d' % (
        filelist[id], id + 1,
        len(filelist),
    ))
    name = os.path.split(filelist[id])[1].split('.')[0]
    print(name)
    pre_data_result = os.path.join(pre_data_result, name)
    if not os.path.exists(pre_data_result):
        os.makedirs(pre_data_result)
    _img, hdr = read_image(filelist[id])
    # img = image_norm(_img)
    img = _img
    x_num, y_num, z_num = img.shape
    if context_num:
        # pad `context_num` extra slices on both ends of the z-axis so slice context exists at the volume's edges
        img = np.pad(img, [[0, 0], [0, 0], [context_num, context_num]], 'constant')
    if train_val_test == 'train' or train_val_test == 'val' or 'fold' in train_val_test:
        label, _ = read_image(filelist[id].replace(name.split('_')[0], name[0:2] + 'Segmentation'))
        if np.sum(label > 0):
            _, _, organ_zz = np.where(label > 0)
            min_organ_z =
|
np.min(organ_zz)
|
numpy.min
|
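For the row above: savenpy pads the volume with context_num extra z-slices and then recovers the organ's z-extent from the nonzero label voxels; the truncated line takes np.min(organ_zz), per the completion. A small, self-contained illustration with a toy volume (editorial sketch, not a dataset row):

import numpy as np

context_num = 2
img = np.zeros((4, 4, 3))
label = np.zeros((4, 4, 3))
label[1:3, 1:3, 1] = 1                       # the "organ" lives in z-slice 1
img = np.pad(img, [[0, 0], [0, 0], [context_num, context_num]], 'constant')
_, _, organ_zz = np.where(label > 0)
min_organ_z, max_organ_z = np.min(organ_zz), np.max(organ_zz)
print(img.shape, min_organ_z, max_organ_z)   # (4, 4, 7) 1 1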
import cv2
from glob import glob
import numpy as np
import os
from PIL import Image
import platform
import sys
import torch

sys.path.append(os.path.abspath("./"))
from model import FCRN_A

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
network = FCRN_A(input_filters=3, N=2).to(device)
network.train(False)
sys_os = platform.system()
if sys_os == 'Windows':
    model_path = (r"P:\Robert\objects_counting_dmap_experiments"
                  r"\batch56-fcrn-small-dataset-pixel-based-loss-redo-lr-0.0025"
                  r"\complete_nets\egg-fullsize-pt-presample-compare-2021-03-23"
                  "_FCRN_A_Yang-Lab-Dell3_2021-06-08 00-25-29.900685_300epochs.pth")
    img_folder = 'P:/Robert/objects_counting_dmap/egg_source/combined_robert_uli_temp'
elif sys_os == 'Linux':
    model_path = ('/media/Synology3/Robert/objects_counting_dmap_experiments/'
                  'batch56-fcrn-small-dataset-pixel-based-loss-redo-lr-0.0025/complete_nets/'
                  'egg-fullsize-pt-presample-compare-2021-03-23'
                  '_FCRN_A_Yang-Lab-Dell3_2021-06-08 00-25-29.900685_300epochs.pth')
    img_folder = '/media/Synology3/Robert/objects_counting_dmap/egg_source/combined_robert_uli_temp'
img_paths = glob(os.path.join(img_folder, '*.jpg'))
network = torch.nn.DataParallel(network)
network.load_state_dict(torch.load(model_path))
img_paths = [os.path.join(img_folder, '2020-12-02_img_0007_0_1_left_6A9RT.jpg')]
for pth in img_paths:
    for y_cropping in range(300):
        img_orig = Image.open(pth)
        half_crop = y_cropping / 2
        # split the crop between top and bottom; floor + ceil keeps the total removed at y_cropping
        img_orig = img_orig.crop((0, np.floor(half_crop), img_orig.width, img_orig.height - np.ceil(half_crop)))
        img = torch.from_numpy(
            (1 / 255) * np.expand_dims(np.moveaxis(
|
np.array(img_orig, dtype=np.float32)
|
numpy.array
|
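For the row above: the truncated expression converts a PIL image into a float NCHW tensor scaled to [0, 1], and the completion supplies the np.array(...) call. The moveaxis/expand_dims arguments are elided in the prompt; the values below (channel axis to the front, then a batch axis) are an assumption that matches the usual PyTorch input layout. A hypothetical, self-contained version (editorial sketch, not a dataset row):

import numpy as np
import torch
from PIL import Image

img_orig = Image.new('RGB', (48, 32))                     # stand-in for the cropped JPEG
arr = (1 / 255) * np.expand_dims(np.moveaxis(
    np.array(img_orig, dtype=np.float32), 2, 0), axis=0)  # HWC -> CHW, then add a batch dim
img = torch.from_numpy(arr)
print(img.shape)                                          # torch.Size([1, 3, 32, 48])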