r"""
TV-Regularized Sparse-View CT Reconstruction
============================================
This example demonstrates solution of a sparse-view CT reconstruction
problem with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 2D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 512 # phantom size
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure CT projection operator and generate synthetic measurements.
"""
n_projection = 45 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0  # L21 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of the horizontal and vertical
# finite differences the same shape, as required by the L21Norm, which is
# used here so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
g = λ * functional.L21Norm()
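# Illustrative sketch (added; not used by the example): written out directly,
# the regularization term g(C @ u) is the isotropic TV of u, i.e. the sum
# over pixels of the pixel-wise l2 norm of the stacked finite differences.
# This assumes the stacked difference axis is axis 0, matching the default
# l2_axis of L21Norm; the helper name `isotropic_tv` is introduced here for
# illustration only.
def isotropic_tv(u):
    d = C @ u  # stacked horizontal and vertical finite differences of u
    return λ * snp.sum(snp.sqrt(snp.sum(snp.abs(d) ** 2, axis=0)))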
f = loss.SquaredL2Loss(y=y, A=A)
x0 = snp.clip(A.fbp(y), 0, 1.0)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_reconstruction = snp.clip(solver.x, 0, 1.0)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
x0,
title="FBP Reconstruction: \nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x0), metric.mae(x_gt, x0)),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
x_reconstruction,
title="TV Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x_reconstruction), metric.mae(x_gt, x_reconstruction)),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_tv_admm.py | ct_astra_tv_admm.py |
r"""
TV-Regularized Sparse-View CT Reconstruction
============================================
This example demonstrates solution of a sparse-view CT reconstruction
problem with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 2D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 512 # phantom size
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure CT projection operator and generate synthetic measurements.
"""
n_projection = 45 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(x_gt.shape, 1, N, angles) # Radon transform operator
y = A @ x_gt # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = snp.clip(A.fbp(y), 0, 1.0)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
x_reconstruction = snp.clip(solver.x, 0, 1.0)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
x0,
title="FBP Reconstruction: \nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x0), metric.mae(x_gt, x0)),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
x_reconstruction,
title="TV Reconstruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(x_gt, x_reconstruction), metric.mae(x_gt, x_reconstruction)),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.895323 | 0.928959 |
r"""
Deconvolution Microscopy (All Channels)
=======================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM),
applied here with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import numpy as np
import jax
import ray
import scico.numpy as snp
from scico import functional, linop, loss, plot
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the purposes of
the example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
downsampling_rate = 2
y_list = []
y_pad_list = []
psf_list = []
for channel in range(3):
y, psf = epfl_deconv_data(channel, verbose=True) # get data
y = downsample_volume(y, downsampling_rate) # downsample
psf = downsample_volume(psf, downsampling_rate)
y -= y.min() # normalize y
y /= y.max()
psf /= psf.sum() # normalize psf
if channel == 0:
padding = [[0, p] for p in snp.array(psf.shape) - 1]
mask = snp.pad(snp.ones_like(y), padding)
y_pad = snp.pad(y, padding) # zero-padded version of y
y_list.append(y)
y_pad_list.append(y_pad)
psf_list.append(psf)
y = snp.stack(y_list, axis=-1)
yshape = y.shape
del y_list
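# Note (added for clarity): the measurements are zero-padded by psf.shape - 1
# along each axis so that circular convolution on the padded grid agrees with
# linear convolution on the original grid; the mask constructed above is used
# below (via a diagonal operator) to restrict the data fidelity term to the
# region that was actually observed.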
"""
Define problem and algorithm parameters.
"""
λ = 2e-6  # ℓ2,1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Initialize Ray, determine available computing resources, and put large
arrays in the object store.
"""
ray.init()
ngpu = 0
ar = ray.available_resources()
ncpu = max(int(ar["CPU"]) // 3, 1)
if "GPU" in ar:
ngpu = int(ar["GPU"]) // 3
print(f"Running on {ncpu} CPUs and {ngpu} GPUs per process")
y_pad_list = ray.put(y_pad_list)
psf_list = ray.put(psf_list)
mask_store = ray.put(mask)
"""
Define ray remote function for parallel solves.
"""
@ray.remote(num_cpus=ncpu, num_gpus=ngpu)
def deconvolve_channel(channel):
"""Deconvolve a single channel."""
y_pad = jax.device_put(ray.get(y_pad_list)[channel])
psf = jax.device_put(ray.get(psf_list)[channel])
mask = jax.device_put(ray.get(mask_store))
M = linop.Diagonal(mask)
C0 = linop.CircularConvolve(
h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5 # forward operator
)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True) # gradient operator
C2 = linop.Identity(mask.shape) # identity operator
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
if channel == 0:
print("Displaying solver status for channel 0")
display = True
else:
display = False
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": display, "period": 10, "overwrite": False},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
x_pad = solver.solve()
x = x_pad[: yshape[0], : yshape[1], : yshape[2]]
return (x, solver.itstat_object.history(transpose=True))
"""
Solve problems for all three channels in parallel and extract results.
"""
ray_return = ray.get([deconvolve_channel.remote(channel) for channel in range(3)])
x = snp.stack([t[0] for t in ray_return], axis=-1)
solve_stats = [t[1] for t in ray_return]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(18, 5))
plot.plot(
np.stack([s.Objective for s in solve_stats]).T,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.stack([s.Prml_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Primal Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[1],
)
plot.plot(
np.stack([s.Dual_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Dual Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_microscopy_allchn_tv_admm.py | deconv_microscopy_allchn_tv_admm.py |
r"""
Deconvolution Microscopy (All Channels)
=======================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM) to
solve an image deconvolution problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import numpy as np
import jax
import ray
import scico.numpy as snp
from scico import functional, linop, loss, plot
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the for purposes of
the example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
downsampling_rate = 2
y_list = []
y_pad_list = []
psf_list = []
for channel in range(3):
y, psf = epfl_deconv_data(channel, verbose=True) # get data
y = downsample_volume(y, downsampling_rate) # downsample
psf = downsample_volume(psf, downsampling_rate)
y -= y.min() # normalize y
y /= y.max()
psf /= psf.sum() # normalize psf
if channel == 0:
padding = [[0, p] for p in snp.array(psf.shape) - 1]
mask = snp.pad(snp.ones_like(y), padding)
y_pad = snp.pad(y, padding) # zero-padded version of y
y_list.append(y)
y_pad_list.append(y_pad)
psf_list.append(psf)
y = snp.stack(y_list, axis=-1)
yshape = y.shape
del y_list
"""
Define problem and algorithm parameters.
"""
λ = 2e-6 # ℓ1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Initialize ray, determine available computing resources, and put large arrays
in object store.
"""
ray.init()
ngpu = 0
ar = ray.available_resources()
ncpu = max(int(ar["CPU"]) // 3, 1)
if "GPU" in ar:
ngpu = int(ar["GPU"]) // 3
print(f"Running on {ncpu} CPUs and {ngpu} GPUs per process")
y_pad_list = ray.put(y_pad_list)
psf_list = ray.put(psf_list)
mask_store = ray.put(mask)
"""
Define ray remote function for parallel solves.
"""
@ray.remote(num_cpus=ncpu, num_gpus=ngpu)
def deconvolve_channel(channel):
"""Deconvolve a single channel."""
y_pad = jax.device_put(ray.get(y_pad_list)[channel])
psf = jax.device_put(ray.get(psf_list)[channel])
mask = jax.device_put(ray.get(mask_store))
M = linop.Diagonal(mask)
C0 = linop.CircularConvolve(
h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5 # forward operator
)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True) # gradient operator
C2 = linop.Identity(mask.shape) # identity operator
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
if channel == 0:
print("Displaying solver status for channel 0")
display = True
else:
display = False
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": display, "period": 10, "overwrite": False},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
x_pad = solver.solve()
x = x_pad[: yshape[0], : yshape[1], : yshape[2]]
return (x, solver.itstat_object.history(transpose=True))
"""
Solve problems for all three channels in parallel and extract results.
"""
ray_return = ray.get([deconvolve_channel.remote(channel) for channel in range(3)])
x = snp.stack([t[0] for t in ray_return], axis=-1)
solve_stats = [t[1] for t in ray_return]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(18, 5))
plot.plot(
np.stack([s.Objective for s in solve_stats]).T,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.stack([s.Prml_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Primal Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[1],
)
plot.plot(
np.stack([s.Dual_Rsdl for s in solve_stats]).T,
ptyp="semilogy",
title="Dual Residual",
xlbl="Iteration",
lgnd=("CY3", "DAPI", "FITC"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.931346 | 0.900573 |
r"""
Circulant Blur Image Deconvolution with TV Regularization
=========================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is a circular convolution operator, $\mathbf{y}$ is the blurred
image, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, CircularConvolveSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.CircularConvolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
"""
Set up an ADMM solver object.
"""
λ = 2e-2 # L21 norm regularization parameter
ρ = 5e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
f = loss.SquaredL2Loss(y=y, A=A)
# Penalty (regularization) parameters such as λ must be folded into the g_i
# functionals themselves, not passed to the solver as additional inputs.
g = λ * functional.L21Norm() # regularization functionals gi
C = linop.FiniteDifference(x_gt.shape, circular=True)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=CircularConvolveSolver(),
itstat_options={"display": True, "period": 10},
)
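# For intuition (explanatory note, added; not used by the example): because
# both A and C are circular convolutions, the ADMM x-update
#     (A^H A + ρ C^H C) x = A^H y + ρ C^H (z - u) ,
# where z and u denote the auxiliary and scaled dual variables, is
# diagonalized by the DFT, so it can be solved exactly with a few FFTs and an
# elementwise division; this is what CircularConvolveSolver exploits.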
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, y), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_circ_tv_admm.py | deconv_circ_tv_admm.py |
r"""
Circulant Blur Image Deconvolution with TV Regularization
=========================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is a circular convolution operator, $\mathbf{y}$ is the blurred
image, $C$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, CircularConvolveSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.CircularConvolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
"""
Set up an ADMM solver object.
"""
λ = 2e-2 # L21 norm regularization parameter
ρ = 5e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
f = loss.SquaredL2Loss(y=y, A=A)
# Penalty parameters must be accounted for in the gi functions, not as
# additional inputs.
g = λ * functional.L21Norm() # regularization functionals gi
C = linop.FiniteDifference(x_gt.shape, circular=True)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=CircularConvolveSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, y), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.932122 | 0.933613 |
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, metric, plot
from scico.linop import Diagonal
from scico.linop.radon_svmbir import SVMBIRSquaredL2Loss, TomographicProjector
from scico.optimize import PDHG, LinearizedADMM
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1 # deal with 0s
y = -np.log(noisy_counts / max_intensity)
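# Note (added for clarity): the detector counts follow a Poisson distribution
# with mean max_intensity * exp(-sino) (Beer-Lambert law), and the noisy
# sinogram is recovered as y = -log(counts / max_intensity). The variance of
# y is roughly inversely proportional to the counts, which is why
# transmission weights (proportional to the counts) are used in the weighted
# data fidelity term below.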
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Set up problem.
"""
y, x0, weights = jax.device_put([y, x_mrf, weights])
λ = 1e-1  # L21 norm regularization parameter
f = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(weights), scale=0.5)
g = λ * functional.L21Norm() # regularization functional
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
"""
Solve via ADMM.
"""
solve_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[2e1],
x0=x0,
maxiter=50,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-4, "maxiter": 10}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x_admm = solve_admm.solve()
hist_admm = solve_admm.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_admm):.2f} dB\n")
"""
Solve via Linearized ADMM.
"""
solver_ladmm = LinearizedADMM(
f=f,
g=g,
C=C,
mu=3e-2,
nu=2e-1,
x0=x0,
maxiter=50,
itstat_options={"display": True, "period": 10},
)
x_ladmm = solver_ladmm.solve()
hist_ladmm = solver_ladmm.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_ladmm):.2f} dB\n")
"""
Solve via PDHG.
"""
solver_pdhg = PDHG(
f=f,
g=g,
C=C,
tau=2e-2,
sigma=8e0,
x0=x0,
maxiter=50,
itstat_options={"display": True, "period": 10},
)
x_pdhg = solver_pdhg.solve()
hist_pdhg = solver_pdhg.itstat_object.history(transpose=True)
print(f"PSNR: {metric.psnr(x_gt, x_pdhg):.2f} dB\n")
"""
Show the recovered images.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 2, figsize=[10, 5])
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
fig.show()
fig, ax = plt.subplots(1, 3, figsize=[15, 5])
plot.imview(
img=x_admm,
title=f"TV ADMM (PSNR: {metric.psnr(x_gt, x_admm):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[0],
norm=norm,
)
plot.imview(
img=x_ladmm,
title=f"TV LinADMM (PSNR: {metric.psnr(x_gt, x_ladmm):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_pdhg,
title=f"TV PDHG (PSNR: {metric.psnr(x_gt, x_pdhg):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_admm.Objective, hist_ladmm.Objective, hist_pdhg.Objective)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack((hist_admm.Objective, hist_ladmm.Objective, hist_pdhg.Objective)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack((hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_svmbir_tv_multi.py | ct_svmbir_tv_multi.py |
r"""
Deconvolution Microscopy (Single Channel)
=========================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM),
applied here with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import scico.numpy as snp
from scico import functional, linop, loss, plot, util
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the purposes of
the example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
channel = 0
downsampling_rate = 2
y, psf = epfl_deconv_data(channel, verbose=True)
y = downsample_volume(y, downsampling_rate)
psf = downsample_volume(psf, downsampling_rate)
y -= y.min()
y /= y.max()
psf /= psf.sum()
"""
Pad data and create mask.
"""
padding = [[0, p] for p in snp.array(psf.shape) - 1]
y_pad = snp.pad(y, padding)
mask = snp.pad(snp.ones_like(y), padding)
"""
Define problem and algorithm parameters.
"""
λ = 2e-6  # ℓ2,1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Create operators.
"""
M = linop.Diagonal(mask)
C0 = linop.CircularConvolve(h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True)
C2 = linop.Identity(mask.shape)
"""
Create functionals.
"""
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
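# Note (added for clarity): with f=None, the ADMM solver below addresses the
# generic form argmin_x sum_i g_i(C_i x). Here C0 (circular convolution with
# the PSF) pairs with the masked data fidelity g0, C1 (finite differences)
# pairs with the L21 norm to give isotropic TV, and C2 (identity) pairs with
# the non-negativity indicator, so each term is handled via its proximal
# operator.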
"""
Set up ADMM solver object and solve problem.
"""
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
print("Solving on %s\n" % util.device_info())
solver.solve()
solve_stats = solver.itstat_object.history(transpose=True)
x_pad = solver.x
x = x_pad[: y.shape[0], : y.shape[1], : y.shape[2]]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
solve_stats.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((solve_stats.Prml_Rsdl, solve_stats.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_microscopy_tv_admm.py | deconv_microscopy_tv_admm.py |
r"""
Deconvolution Microscopy (Single Channel)
=========================================
This example partially replicates a [GlobalBioIm
example](https://biomedical-imaging-group.github.io/GlobalBioIm/examples.html)
using the [microscopy data](http://bigwww.epfl.ch/deconvolution/bio/)
provided by the EPFL Biomedical Imaging Group.
The deconvolution problem is solved using class
[admm.ADMM](../_autosummary/scico.optimize.rst#scico.optimize.ADMM) to
solve an image deconvolution problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| M (\mathbf{y} - A \mathbf{x})
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} +
\iota_{\mathrm{NN}}(\mathbf{x}) \;,$$
where $M$ is a mask operator, $A$ is circular convolution,
$\mathbf{y}$ is the blurred image, $C$ is a convolutional gradient
operator, $\iota_{\mathrm{NN}}$ is the indicator function of the
non-negativity constraint, and $\mathbf{x}$ is the desired image.
"""
import scico.numpy as snp
from scico import functional, linop, loss, plot, util
from scico.examples import downsample_volume, epfl_deconv_data, tile_volume_slices
from scico.optimize.admm import ADMM, CircularConvolveSolver
"""
Get and preprocess data. We downsample the data for the for purposes of
the example. Reducing the downsampling rate will make the example slower
and more memory-intensive. To run this example on a GPU it may be
necessary to set environment variables
`XLA_PYTHON_CLIENT_ALLOCATOR=platform` and
`XLA_PYTHON_CLIENT_PREALLOCATE=false`. If your GPU does not have enough
memory, you can try setting the environment variable
`JAX_PLATFORM_NAME=cpu` to run on CPU.
"""
channel = 0
downsampling_rate = 2
y, psf = epfl_deconv_data(channel, verbose=True)
y = downsample_volume(y, downsampling_rate)
psf = downsample_volume(psf, downsampling_rate)
y -= y.min()
y /= y.max()
psf /= psf.sum()
"""
Pad data and create mask.
"""
padding = [[0, p] for p in snp.array(psf.shape) - 1]
y_pad = snp.pad(y, padding)
mask = snp.pad(snp.ones_like(y), padding)
"""
Define problem and algorithm parameters.
"""
λ = 2e-6 # ℓ1 norm regularization parameter
ρ0 = 1e-3 # ADMM penalty parameter for first auxiliary variable
ρ1 = 1e-3 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e-3 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Create operators.
"""
M = linop.Diagonal(mask)
C0 = linop.CircularConvolve(h=psf, input_shape=mask.shape, h_center=snp.array(psf.shape) / 2 - 0.5)
C1 = linop.FiniteDifference(input_shape=mask.shape, circular=True)
C2 = linop.Identity(mask.shape)
"""
Create functionals.
"""
g0 = loss.SquaredL2Loss(y=y_pad, A=M) # loss function (forward model)
g1 = λ * functional.L21Norm() # TV penalty (when applied to gradient)
g2 = functional.NonNegativeIndicator() # non-negativity constraint
"""
Set up ADMM solver object and solve problem.
"""
solver = ADMM(
f=None,
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
x0=y_pad,
subproblem_solver=CircularConvolveSolver(),
)
print("Solving on %s\n" % util.device_info())
solver.solve()
solve_stats = solver.itstat_object.history(transpose=True)
x_pad = solver.x
x = x_pad[: y.shape[0], : y.shape[1], : y.shape[2]]
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(tile_volume_slices(y), title="Blurred measurements", fig=fig, ax=ax[0])
plot.imview(tile_volume_slices(x), title="Deconvolved image", fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
solve_stats.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((solve_stats.Prml_Rsdl, solve_stats.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.917261 | 0.936168 |
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up ADMM solver.
"""
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 20.0 / 255 # BM3D regularization strength
g = λ * functional.BM3D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
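# Note (added for clarity): this is a plug-and-play priors (PPP) formulation.
# functional.BM3D does not have an explicit objective value; its prox is
# defined as an application of the BM3D denoiser, so the ADMM z-update
# replaces the proximal step of an explicit regularizer with a denoising
# step.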
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_bm3d_admm.py | deconv_ppp_bm3d_admm.py |
r"""
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image and projector.
"""
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0  # L21 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of the finite differences along
# each axis the same shape, as required by the L21Norm, which is used here
# so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
input("\nWaiting for input to close figures and exit")
| scico | /scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_astra_3d_tv_admm.py | ct_astra_3d_tv_admm.py |
r"""
3D TV-Regularized Sparse-View CT Reconstruction
===============================================
This example demonstrates solution of a sparse-view, 3D CT
reconstruction problem with isotropic total variation (TV)
regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, $C$ is
a 3D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import functional, linop, loss, metric, plot
from scico.examples import create_tangle_phantom
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image and projector.
"""
Nx = 128
Ny = 256
Nz = 64
tangle = create_tangle_phantom(Nx, Ny, Nz)
tangle = jax.device_put(tangle)
n_projection = 10 # number of projections
angles = np.linspace(0, np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(
tangle.shape, [1.0, 1.0], [Nz, max(Nx, Ny)], angles
) # Radon transform operator
y = A @ tangle # sinogram
"""
Set up ADMM solver object.
"""
λ = 2e0 # L1 norm regularization parameter
ρ = 5e0 # ADMM penalty parameter
maxiter = 25 # number of ADMM iterations
cg_tol = 1e-4 # CG relative tolerance
cg_maxiter = 25 # maximum CG iterations per ADMM iteration
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm,
# which is used so that g(Cx) corresponds to isotropic TV.
C = linop.FiniteDifference(input_shape=tangle.shape, append=0)
g = λ * functional.L21Norm()
f = loss.SquaredL2Loss(y=y, A=A)
x0 = A.T(y)
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 5},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
solver.solve()
hist = solver.itstat_object.history(transpose=True)
tangle_recon = solver.x
print(
"TV Restruction\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon))
)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(7, 5))
plot.imview(tangle[32], title="Ground truth (central slice)", cbar=None, fig=fig, ax=ax[0])
plot.imview(
tangle_recon[32],
title="TV Reconstruction (central slice)\nSNR: %.2f (dB), MAE: %.3f"
% (metric.snr(tangle, tangle_recon), metric.mae(tangle, tangle_recon)),
fig=fig,
ax=ax[1],
)
divider = make_axes_locatable(ax[1])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[1].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
input("\nWaiting for input to close figures and exit")
| 0.920348 | 0.948775 |
r"""
Video Decomposition via Robust PCA
==================================
This example demonstrates video foreground/background separation via a
variant of the Robust PCA problem
$$\mathrm{argmin}_{\mathbf{x}_0, \mathbf{x}_1} \; (1/2) \| \mathbf{x}_0
+ \mathbf{x}_1 - \mathbf{y} \|_2^2 + \lambda_0 \| \mathbf{x}_0 \|_*
+ \lambda_1 \| \mathbf{x}_1 \|_1 \;,$$
where $\mathbf{x}_0$ and $\mathbf{x}_1$ are respectively low-rank and
sparse components, $\| \cdot \|_*$ denotes the nuclear norm, and
$\| \cdot \|_1$ denotes the $\ell_1$ norm.
Note: while video foreground/background separation is not an example of
the scientific and computational imaging problems that are the focus of
SCICO, it provides a convenient demonstration of Robust PCA, which does
have potential application in scientific imaging problems.
"""
import imageio
import scico.numpy as snp
from scico import functional, linop, loss, plot
from scico.examples import rgb2gray
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Load example video.
"""
reader = imageio.get_reader("imageio:newtonscradle.gif")
nfrm = reader.get_length()
frmlst = []
for i, frm in enumerate(reader):
frmlst.append(rgb2gray(frm[..., 0:3].astype(snp.float32) / 255.0))
vid = snp.stack(frmlst, axis=2)
"""
Construct matrix with each column consisting of a vectorised video frame.
"""
y = vid.reshape((-1, vid.shape[-1]))
"""
Define functional for Robust PCA problem.
"""
A = linop.Sum(axis=0, input_shape=(2,) + y.shape)
f = loss.SquaredL2Loss(y=y, A=A)
C0 = linop.Slice(idx=0, input_shape=(2,) + y.shape)
g0 = functional.NuclearNorm()
C1 = linop.Slice(idx=1, input_shape=(2,) + y.shape)
g1 = functional.L1Norm()
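# Note (added for clarity): the unknown is an array of shape (2,) + y.shape
# holding both components, so that A(x) = x[0] + x[1] in the data fidelity
# term, while C0(x) == x[0] and C1(x) == x[1] select the components to which
# the nuclear norm and the l1 norm are applied, respectively.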
"""
Set up an ADMM solver object.
"""
λ0 = 1e1 # nuclear norm regularization parameter
λ1 = 3e1 # l1 norm regularization parameter
ρ0 = 2e1 # ADMM penalty parameter
ρ1 = 2e1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[λ0 * g0, λ1 * g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
x0=A.adj(y),
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
"""
Reshape the low-rank component into the background video sequence and the
sparse component into the foreground video sequence.
"""
xlr = C0(x)
xsp = C1(x)
vbg = xlr.reshape(vid.shape)
vfg = xsp.reshape(vid.shape)
"""
Display original video frames and corresponding background and foreground frames.
"""
fig, ax = plot.subplots(nrows=4, ncols=3, figsize=(10, 10))
ax[0][0].set_title("Original")
ax[0][1].set_title("Background")
ax[0][2].set_title("Foreground")
for n, fn in enumerate(range(1, 9, 2)):
plot.imview(vid[..., fn], fig=fig, ax=ax[n][0])
plot.imview(vbg[..., fn], fig=fig, ax=ax[n][1])
plot.imview(vfg[..., fn], fig=fig, ax=ax[n][2])
ax[n][0].set_ylabel("Frame %d" % fn, labelpad=5, rotation=90, size="large")
fig.tight_layout()
fig.show()
input("\nWaiting for input to close figures and exit")
import numpy as np
import jax
from bm3d import bm3d_rgb
from colour_demosaicing import demosaicing_CFA_Bayer_Menon2007
import scico
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.data import kodim23
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Read a ground truth image.
"""
img = kodim23(asfloat=True)[160:416, 60:316]
img = jax.device_put(img) # convert to jax type, push to GPU
"""
Define demosaicing forward operator and its transpose.
"""
def Afn(x):
"""Map an RGB image to a single channel image with each pixel
representing a single colour according to the colour filter array.
"""
y = snp.zeros(x.shape[0:2])
y = y.at[1::2, 1::2].set(x[1::2, 1::2, 0])
y = y.at[0::2, 1::2].set(x[0::2, 1::2, 1])
y = y.at[1::2, 0::2].set(x[1::2, 0::2, 1])
y = y.at[0::2, 0::2].set(x[0::2, 0::2, 2])
return y
def ATfn(x):
"""Back project a single channel raw image to an RGB image with zeros
at the locations of undefined samples.
"""
y = snp.zeros(x.shape + (3,))
y = y.at[1::2, 1::2, 0].set(x[1::2, 1::2])
y = y.at[0::2, 1::2, 1].set(x[0::2, 1::2])
y = y.at[1::2, 0::2, 1].set(x[1::2, 0::2])
y = y.at[0::2, 0::2, 2].set(x[0::2, 0::2])
return y
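"""
As a quick numerical sanity check (illustrative only, not part of the
original example), the adjoint relation $\langle A u, v \rangle = \langle
u, A^T v \rangle$ can be verified on random inputs:
"""
u, _ = scico.random.randn(img.shape, seed=1)  # random RGB-shaped input
v, _ = scico.random.randn(img.shape[0:2], seed=2)  # random CFA-shaped input
# the two inner products should agree to within floating point error
print(snp.sum(Afn(u) * v), snp.sum(u * ATfn(v)))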
"""
Define a baseline demosaicing function based on the demosaicing
algorithm of :cite:`menon-2007-demosaicing` from package
[colour_demosaicing](https://github.com/colour-science/colour-demosaicing).
"""
def demosaic(cfaimg):
"""Apply baseline demosaicing."""
return demosaicing_CFA_Bayer_Menon2007(cfaimg, pattern="BGGR").astype(np.float32)
"""
Create a test image by color filter array sampling and adding Gaussian
white noise.
"""
s = Afn(img)
rgbshp = s.shape + (3,) # shape of reconstructed RGB image
σ = 2e-2 # noise standard deviation
noise, key = scico.random.randn(s.shape, seed=0)
sn = s + σ * noise
"""
Compute a baseline demosaicing solution.
"""
imgb = jax.device_put(bm3d_rgb(demosaic(sn), 3 * σ).astype(np.float32))
"""
Set up an ADMM solver object. Note the use of the baseline solution
as an initializer. We use BM3D :cite:`dabov-2008-image` as the
denoiser, using the [code](https://pypi.org/project/bm3d) released
with :cite:`makinen-2019-exact`.
"""
A = linop.LinearOperator(input_shape=rgbshp, output_shape=s.shape, eval_fn=Afn, adj_fn=ATfn)
f = loss.SquaredL2Loss(y=sn, A=A)
C = linop.Identity(input_shape=rgbshp)
g = 1.8e-1 * 6.1e-2 * functional.BM3D(is_rgb=True)  # scaled BM3D pseudo-functional setting the denoising strength
ρ = 1.8e-1 # ADMM penalty parameter
maxiter = 12 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=imgb,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show reference and demosaiced images.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=True, figsize=(21, 7))
plot.imview(img, title="Reference", fig=fig, ax=ax[0])
plot.imview(imgb, title="Baseline demoisac: %.2f (dB)" % metric.psnr(img, imgb), fig=fig, ax=ax[1])
plot.imview(x, title="PPP demoisac: %.2f (dB)" % metric.psnr(img, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from matplotlib.ticker import MaxNLocator
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D, NonNegativeIndicator
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import (
SVMBIRExtendedLoss,
SVMBIRSquaredL2Loss,
TomographicProjector,
)
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, endpoint=False, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1  # replace zero counts to avoid log(0)
y = -np.log(noisy_counts / max_intensity)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Push arrays to device.
"""
y, x0, weights = jax.device_put([y, x_mrf, weights])
"""
Set problem parameters and BM3D pseudo-functional.
"""
ρ = 10 # ADMM penalty parameter
σ = density * 0.26 # denoiser sigma
g0 = σ * ρ * BM3D()
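"""
Note (an illustrative aside based on the usual PPP interpretation): within
ADMM, evaluating the prox of g0 with penalty parameter ρ corresponds to
applying the BM3D denoiser with noise level σ, which is how the denoiser
acts as an implicit prior. For example, a single denoising step applied to
the MRF initializer:
"""
x0_denoised = g0.prox(x0, 1.0 / ρ)  # equivalent to BM3D denoising of x0 at level σ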
"""
Set up problem using `SVMBIRSquaredL2Loss` and `NonNegativeIndicator`.
"""
f_l2loss = SVMBIRSquaredL2Loss(
y=y, A=A, W=Diagonal(weights), scale=0.5, prox_kwargs={"maxiter": 5, "ctol": 0.0}
)
g1 = NonNegativeIndicator()
solver_l2loss = ADMM(
f=None,
g_list=[f_l2loss, g0, g1],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solver.
"""
print(f"Solving on {device_info()}\n")
x_l2loss = solver_l2loss.solve()
hist_l2loss = solver_l2loss.itstat_object.history(transpose=True)
"""
Set up problem using `SVMBIRExtendedLoss`, which incorporates the positivity
constraint, so a separate `NonNegativeIndicator` is not needed.
"""
f_extloss = SVMBIRExtendedLoss(
y=y,
A=A,
W=Diagonal(weights),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
solver_extloss = ADMM(
f=None,
g_list=[f_extloss, g0],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solver.
"""
print()
x_extloss = solver_extloss.solve()
hist_extloss = solver_extloss.itstat_object.history(transpose=True)
"""
Show the recovered images.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(2, 2, figsize=(15, 15))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0, 0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[0, 1],
norm=norm,
)
plot.imview(
img=x_l2loss,
title=f"SquaredL2Loss + non-negativity (PSNR: {metric.psnr(x_gt, x_l2loss):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1, 0],
norm=norm,
)
plot.imview(
img=x_extloss,
title=f"ExtendedLoss (PSNR: {metric.psnr(x_gt, x_extloss):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1, 1],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
plot.plot(
snp.vstack((hist_l2loss.Prml_Rsdl, hist_l2loss.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals (SquaredL2Loss + non-negativity)",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[0],
)
ax[0].set_ylim([5e-3, 1e0])
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
plot.plot(
snp.vstack((hist_extloss.Prml_Rsdl, hist_extloss.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals (ExtendedLoss)",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
ax[1].set_ylim([5e-3, 1e0])
ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
Convolutional Sparse Coding with Mask Decoupling (ADMM)
=======================================================
This example demonstrates the solution of a convolutional sparse coding
problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
B \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda \sum_k ( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where $\{\mathbf{h}_k\}$ is a set of filters comprising the dictionary,
$\{\mathbf{x}_k\}$ is a corresponding set of coefficient maps,
$\mathbf{y}$ is the signal to be represented, and $B$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
:cite:`almeida-2013-deconvolving`, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1} \; (1/2) \|
\mathbf{y} - B \mathbf{z}_0 \|_2^2 + \lambda \sum_k ( \| \mathbf{z}_{1,k}
\|_1 - \| \mathbf{z}_{1,k} \|_2 ) \\ \;\; \text{s.t.} \;\;
\mathbf{z}_0 = \sum_k \mathbf{h}_k \ast \mathbf{x}_k \qquad
\mathbf{z}_{1,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
"""
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
"""
N = 121 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
"""
Normalize dictionary filters and scale coefficient maps accordingly.
"""
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
"""
Convert numpy arrays to jax arrays.
"""
h = jax.device_put(h)
x0 = jax.device_put(x0)
"""
Set up required padding and corresponding crop operator.
"""
h_center = (h.shape[1] // 2, h.shape[2] // 2)
pad_width = ((0, 0), (h_center[0], h_center[0]), (h_center[1], h_center[1]))
x0p = snp.pad(x0, pad_width=pad_width)
B = Crop(pad_width[1:], input_shape=x0p.shape[1:])
"""
Set up sum-of-convolutions forward operator.
"""
C = CircularConvolve(h, input_shape=x0p.shape, ndims=2, h_center=h_center)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
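"""
Quick shape check (illustrative only, not part of the original example): the
composed forward operator maps the stack of padded coefficient maps to a
single padded image by summing the per-filter convolutions.
"""
print("coefficient maps:", x0p.shape, " -> image:", A(x0p).shape)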
"""
Construct test image from dictionary $\mathbf{h}$ and padded version of
coefficient maps $\mathbf{x}_0$.
"""
y = B(A(x0p))
"""
Set functional and solver parameters.
"""
λ = 1e0 # l1-l2 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameters
ρ1 = 3e0
maxiter = 200 # number of ADMM iterations
"""
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
:cite:`wohlberg-2021-psf`.
"""
f = ZeroFunctional()
g0 = SquaredL2Loss(y=y, A=B)
g1 = λ * L1MinusL2Norm()
C0 = A
C1 = Identity(input_shape=x0p.shape)
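"""
For illustration (not part of the original formulation), the l1 - l2 penalty
favors sparse solutions more strongly than the l1 norm alone: it vanishes on
1-sparse vectors and grows with the number of significant entries, e.g.
"""
v = snp.array([3.0, 0.0, 0.0])  # 1-sparse: ||v||_1 - ||v||_2 = 0
w = snp.array([1.0, 1.0, 1.0])  # dense: ||w||_1 - ||w||_2 = 3 - sqrt(3)
print(g1(v) / λ, g1(w) / λ)  # expected: 0.0 and approximately 1.268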
"""
Initialize ADMM solver.
"""
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[C0, C1],
rho_list=[ρ0, ρ1],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=G0BlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
"""
Show test image and reconstruction from recovered coefficient maps. Note
the absence of the wrap-around effects at the boundary that can be seen
in the corresponding images in the [related example](sparsecode_conv_admm.rst).
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(B(A(x1)), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
TV-Regularized 3D DiffuserCam Reconstruction
============================================
This example demonstrates reconstruction of a 3D DiffuserCam
:cite:`antipa-2018-diffusercam`
[dataset](https://github.com/Waller-Lab/DiffuserCam/tree/master/example_data).
The inverse problem can be written as
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
M \Big( \sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big) \Big\|_2^2 +
\lambda_0 \sum_k \| D \mathbf{x}_k \|_{2,1} +
\lambda_1 \sum_k \| \mathbf{x}_k \|_1 \;,$$
where the $\mathbf{h}_k$ are the components of the PSF stack, the
$\mathbf{x}_k$ are the corresponding components of the reconstructed
volume, $\mathbf{y}$ is the measured image, and $M$ is a cropping
operator that allows the boundary artifacts resulting from circular
convolution to be avoided. Following the mask decoupling approach
:cite:`almeida-2013-deconvolving`, the problem is posed in ADMM form
as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}_0, \mathbf{z}_1,
\mathbf{z}_2} \; \frac{1}{2} \| \mathbf{y} - M \mathbf{z}_0 \|_2^2 +
\lambda_0 \sum_k \| \mathbf{z}_{1,k} \|_{2,1} +
\lambda_1 \sum_k \| \mathbf{z}_{2,k}
\|_1 \\ \;\; \text{s.t.} \;\; \mathbf{z}_0 = \sum_k \mathbf{h}_k \ast
\mathbf{x}_k \qquad \mathbf{z}_{1,k} = D \mathbf{x}_k \qquad
\mathbf{z}_{2,k} = \mathbf{x}_k \;.$$
The most computationally expensive step in the ADMM algorithm is solved
using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import ucb_diffusercam_data
from scico.functional import L1Norm, L21Norm, ZeroFunctional
from scico.linop import CircularConvolve, Crop, FiniteDifference, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, G0BlockCircularConvolveSolver
from scico.util import device_info
"""
Load the DiffuserCam PSF stack and measured image. The computational cost
of the reconstruction is reduced slightly by removing parts of the PSF
stack that don't make a significant contribution to the reconstruction.
"""
y, psf = ucb_diffusercam_data()
psf = psf[..., 1:-7]
"""
To avoid boundary artifacts, the measured image is padded by half the PSF
width/height and then cropped within the data fidelity term. This padding
is implicit in that the reconstruction volume is computed at the padded
size, but the actual measured image is never explicitly padded since it is
used at the original (unpadded) size within the data fidelity term due to
the cropping operation. The PSF axis order is modified to put the stack
axis at index 0, as required by components of the ADMM solver to be used.
Finally, each PSF in the stack is individually normalized.
"""
half_psf = np.array(psf.shape[0:2]) // 2
pad_spec = ((half_psf[0],) * 2, (half_psf[1],) * 2)
y_pad_shape = tuple(np.array(y.shape) + np.array(pad_spec).sum(axis=1))
x_shape = (psf.shape[-1],) + y_pad_shape
psf = psf.transpose((2, 0, 1))
psf /= np.sqrt(np.sum(psf**2, axis=(1, 2), keepdims=True))
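"""
As a quick check of the geometry described above (illustrative only): the
reconstruction volume has one slice per PSF in the stack, each at the padded
measurement size.
"""
print("measured image:", y.shape, " padded image:", y_pad_shape, " volume:", x_shape)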
"""
Convert the image and PSF stack to JAX arrays with `float32` dtype since
JAX by default does not support double-precision floating point
arithmetic. This limited precision leads to relatively poor, but still
acceptable accuracy within the ADMM solver x-step. To experiment with the
effect of higher numerical precision, set the environment variable
`JAX_ENABLE_X64=True` and change `dtype` below to `np.float64`.
"""
dtype = np.float32
y = jax.device_put(y.astype(dtype))
psf = jax.device_put(psf.astype(dtype))
"""
Define problem and algorithm parameters.
"""
λ0 = 3e-3 # TV regularization parameter
λ1 = 1e-2 # ℓ1 norm regularization parameter
ρ0 = 1e0 # ADMM penalty parameter for first auxiliary variable
ρ1 = 5e0 # ADMM penalty parameter for second auxiliary variable
ρ2 = 1e1 # ADMM penalty parameter for third auxiliary variable
maxiter = 100 # number of ADMM iterations
"""
Create operators.
"""
C = CircularConvolve(psf, input_shape=x_shape, input_dtype=dtype, h_center=half_psf, ndims=2)
S = Sum(input_shape=x_shape, input_dtype=dtype, axis=0)
M = Crop(pad_spec, input_shape=y_pad_shape, input_dtype=dtype)
"""
Create functionals.
"""
g0 = SquaredL2Loss(y=y, A=M)
g1 = λ0 * L21Norm()
g2 = λ1 * L1Norm()
C0 = S @ C
C1 = FiniteDifference(input_shape=x_shape, input_dtype=dtype, axes=(-2, -1), circular=True)
C2 = Identity(input_shape=x_shape, input_dtype=dtype)
"""
Set up ADMM solver object and solve problem.
"""
solver = ADMM(
f=ZeroFunctional(),
g_list=[g0, g1, g2],
C_list=[C0, C1, C2],
rho_list=[ρ0, ρ1, ρ2],
alpha=1.4,
maxiter=maxiter,
nanstop=True,
subproblem_solver=G0BlockCircularConvolveSolver(ndims=2, check_solve=True),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the measured image and two sample PSFs from the PSF stack.
"""
plot.imview(y, cmap=plot.plt.cm.Blues, cbar=True, title="Measured Image")
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(14, 7))
plot.imview(psf[0], title="Nearest PSF", cmap=plot.plt.cm.Blues, fig=fig, ax=ax[0])
plot.imview(psf[-1], title="Furthest PSF", cmap=plot.plt.cm.Blues, fig=fig, ax=ax[1])
fig.show()
"""
Show the recovered volume with depth indicated by color.
"""
XCrop = Crop(((0, 0),) + pad_spec, input_shape=x_shape, input_dtype=dtype)  # crop padding from each slice
xm = np.array(XCrop(x[..., ::-1]))  # cropped volume with the last spatial axis reversed for display
xmr = xm.transpose((1, 2, 0))[..., np.newaxis] / xm.max()  # move depth to the last axis and normalize
cmap = plot.plt.cm.viridis_r
cmval = cmap(np.arange(0, xm.shape[0]).reshape(1, 1, -1) / (xm.shape[0] - 1))  # one colormap (RGBA) value per depth slice
xms = np.sum(cmval * xmr, axis=2)[..., 0:3]  # depth-weighted color composite (alpha channel dropped)
plot.imview(xms, cmap=cmap, cbar=True, title="Recovered Volume")
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
TV-Regularized Low-Dose CT Reconstruction
=========================================
This example demonstrates solution of a low-dose CT reconstruction problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_W^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where $A$ is the Radon transform, $\mathbf{y}$ is the sinogram, the norm
weighting $W$ is chosen so that the weighted norm is an approximation to
the Poisson negative log likelihood :cite:`sauer-1993-local`, $C$ is
a 2D finite difference operator, and $\mathbf{x}$ is the desired
image.
"""
import numpy as np
import jax
from xdesign import Soil, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.linop.radon_astra import TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 512 # phantom size
np.random.seed(0)
x_gt = discrete_phantom(Soil(porosity=0.80), size=384)
x_gt = np.ascontiguousarray(np.pad(x_gt, (64, 64)))
x_gt = np.clip(x_gt, 0, np.inf) # clip to positive values
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Configure CT projection operator and generate synthetic measurements.
"""
n_projection = 360 # number of projections
Io = 1e3 # source flux
𝛼 = 1e-2 # attenuation coefficient
angles = np.linspace(0, 2 * np.pi, n_projection) # evenly spaced projection angles
A = TomographicProjector(x_gt.shape, 1.0, N, angles) # Radon transform operator
y_c = A @ x_gt # sinogram
r"""
Add Poisson noise to projections according to
$$\mathrm{counts} \sim \mathrm{Poi}\left(I_0 \exp\left\{- \alpha A
\mathbf{x} \right\}\right)$$
$$\mathbf{y} = - \frac{1}{\alpha} \log\left(\mathrm{counts} /
I_0\right).$$
We use the NumPy random functionality so that the Poisson counts can be
generated using 64-bit numbers.
"""
counts = np.random.poisson(Io * snp.exp(-𝛼 * A @ x_gt))
counts = np.clip(counts, a_min=1, a_max=np.inf)  # replace any zero counts with 1 to avoid log(0)
y = -1 / 𝛼 * np.log(counts / Io)
y = jax.device_put(y) # convert back to float32
"""
Set up post processing. For this example, we clip all reconstructions
to the range of the ground truth.
"""
def postprocess(x):
return snp.clip(x, 0, snp.max(x_gt))
"""
Compute an FBP reconstruction as an initial guess.
"""
x0 = postprocess(A.fbp(y))
r"""
Set up and solve the un-weighted reconstruction problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + \lambda \| C \mathbf{x} \|_{2,1} \;.$$
"""
# Note that rho and lambda were selected via a parameter sweep (not
# shown here).
ρ = 2.5e3 # ADMM penalty parameter
lambda_unweighted = 3e2 # regularization strength
maxiter = 100 # number of ADMM iterations
cg_tol = 1e-5 # CG relative tolerance
cg_maxiter = 10 # maximum CG iterations per ADMM iteration
f = loss.SquaredL2Loss(y=y, A=A)
admm_unweighted = ADMM(
f=f,
g_list=[lambda_unweighted * functional.L21Norm()],
C_list=[linop.FiniteDifference(x_gt.shape, append=0)],
rho_list=[ρ],
x0=x0,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
admm_unweighted.solve()
x_unweighted = postprocess(admm_unweighted.x)
r"""
Set up and solve the weighted reconstruction problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_W^2 + \lambda \| C \mathbf{x} \|_{2,1} \;,$$
where
$$W = \mathrm{diag}\left\{ \mathrm{counts} / I_0 \right\} \;.$$
The data fidelity term in this formulation follows
:cite:`sauer-1993-local` (9) except for the scaling by $I_0$, which we
use to maintain balance between the data and regularization terms if
$I_0$ changes.
"""
lambda_weighted = 5e1
weights = jax.device_put(counts / Io)
f = loss.SquaredL2Loss(y=y, A=A, W=linop.Diagonal(weights))
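"""
Illustrative aside (not part of the original example): under the Poisson
noise model, the variance of each log-domain sinogram sample is roughly
inversely proportional to the detected counts, so weighting by counts / I_0
down-weights the noisiest, most attenuated measurements. The spread of the
weights indicates how strongly this reweighting acts:
"""
print("weight range: %.3e to %.3e" % (float(weights.min()), float(weights.max())))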
admm_weighted = ADMM(
f=f,
g_list=[lambda_weighted * functional.L21Norm()],
C_list=[linop.FiniteDifference(x_gt.shape, append=0)],
rho_list=[ρ],
maxiter=maxiter,
x0=x0,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}),
itstat_options={"display": True, "period": 10},
)
admm_weighted.solve()
x_weighted = postprocess(admm_weighted.x)
"""
Show recovered images.
"""
def plot_recon(x, title, ax):
"""Plot an image with title indicating error metrics."""
plot.imview(
x,
title=f"{title}\nSNR: {metric.snr(x_gt, x):.2f} (dB), MAE: {metric.mae(x_gt, x):.3f}",
fig=fig,
ax=ax,
)
fig, ax = plot.subplots(nrows=2, ncols=2, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0])
plot_recon(x0, "FBP Reconstruction", ax=ax[0, 1])
plot_recon(x_unweighted, "Unweighted TV Reconstruction", ax=ax[1, 0])
plot_recon(x_weighted, "Weighted TV Reconstruction", ax=ax[1, 1])
for ax_ in ax.ravel():
ax_.set_xlim(64, 448)
ax_.set_ylim(64, 448)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="arbitrary units"
)
fig.show()
input("\nWaiting for input to close figures and exit")
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D, NonNegativeIndicator
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import SVMBIRSquaredL2Loss, TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 10)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, 5)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projector and sinogram.
"""
num_angles = int(N / 2)
num_channels = N
angles = snp.linspace(0, snp.pi, num_angles, endpoint=False, dtype=snp.float32)
A = TomographicProjector(x_gt.shape, angles, num_channels)
sino = A @ x_gt
"""
Impose Poisson noise on sinogram. Higher max_intensity means less noise.
"""
max_intensity = 2000
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
noisy_counts[noisy_counts == 0] = 1  # replace zero counts to avoid log(0)
y = -np.log(noisy_counts / max_intensity)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights = svmbir.calc_weights(y, weight_type="transmission")
x_mrf = svmbir.recon(
np.array(y[:, np.newaxis]),
np.array(angles),
weights=weights[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
)[0]
"""
Set up an ADMM solver.
"""
y, x0, weights = jax.device_put([y, x_mrf, weights])
ρ = 15 # ADMM penalty parameter
σ = density * 0.18 # denoiser sigma
f = SVMBIRSquaredL2Loss(y=y, A=A, W=Diagonal(weights), scale=0.5)
g0 = σ * ρ * BM3D()
g1 = NonNegativeIndicator()
solver = ADMM(
f=f,
g_list=[g0, g1],
C_list=[Identity(x_mrf.shape), Identity(x_mrf.shape)],
rho_list=[ρ, ρ],
x0=x0,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-4, "maxiter": 100}),
itstat_options={"display": True, "period": 1},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x_bm3d = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 3, figsize=[15, 5])
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf,
title=f"MRF (PSNR: {metric.psnr(x_gt, x_mrf):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_bm3d,
title=f"BM3D (PSNR: {metric.psnr(x_gt, x_bm3d):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
r"""
Fan-Beam CT Reconstruction with PPP (BM3D) and SVMBIR (ADMM with Prox Solver)
==============================================================================
This example compares fan-beam and parallel-beam CT reconstruction using the
Plug-and-Play Priors framework with BM3D as the denoiser and the SVMBIR
tomographic projector, with the data fidelity handled via the prox of an
extended SVMBIR loss.
"""
import numpy as np
import jax
import matplotlib.pyplot as plt
import svmbir
from matplotlib.ticker import MaxNLocator
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import metric, plot
from scico.functional import BM3D
from scico.linop import Diagonal, Identity
from scico.linop.radon_svmbir import SVMBIRExtendedLoss, TomographicProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Generate a ground truth image.
"""
N = 256 # image size
density = 0.025 # attenuation density of the image
np.random.seed(1234)
pad_len = 5
x_gt = discrete_phantom(Foam(size_range=[0.05, 0.02], gap=0.02, porosity=0.3), size=N - 2 * pad_len)
x_gt = x_gt / np.max(x_gt) * density
x_gt = np.pad(x_gt, pad_len)
x_gt[x_gt < 0] = 0
"""
Generate tomographic projectors for fan-beam and parallel-beam geometries, and
a fan-beam sinogram. For fan beam, use view angles spanning 2π since, unlike
parallel beam, views at 0 and π are not equivalent.
"""
num_angles = int(N / 2)
num_channels = N
# Use angles in the range [0, 2*pi] for fan beam
angles = snp.linspace(0, 2 * snp.pi, num_angles, endpoint=False, dtype=snp.float32)
dist_source_detector = 1500.0
magnification = 1.2
A_fan = TomographicProjector(
x_gt.shape,
angles,
num_channels,
geometry="fan-curved",
dist_source_detector=dist_source_detector,
magnification=magnification,
)
A_parallel = TomographicProjector(
x_gt.shape,
angles,
num_channels,
geometry="parallel",
)
sino_fan = A_fan @ x_gt
"""
Impose Poisson noise on sinograms. Higher max_intensity means less noise.
"""
def add_poisson_noise(sino, max_intensity):
expected_counts = max_intensity * np.exp(-sino)
noisy_counts = np.random.poisson(expected_counts).astype(np.float32)
    noisy_counts[noisy_counts == 0] = 1  # replace zero counts to avoid log(0)
y = -np.log(noisy_counts / max_intensity)
return y
y_fan = add_poisson_noise(sino_fan, max_intensity=500)
"""
Reconstruct using default prior of SVMBIR :cite:`svmbir-2020`.
"""
weights_fan = svmbir.calc_weights(y_fan, weight_type="transmission")
x_mrf_fan = svmbir.recon(
np.array(y_fan[:, np.newaxis]),
np.array(angles),
weights=weights_fan[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
stop_threshold=0.0,
geometry="fan-curved",
dist_source_detector=dist_source_detector,
magnification=magnification,
delta_channel=1.0,
delta_pixel=1.0 / magnification,
)[0]
x_mrf_parallel = svmbir.recon(
np.array(y_fan[:, np.newaxis]),
np.array(angles),
weights=weights_fan[:, np.newaxis],
num_rows=N,
num_cols=N,
positivity=True,
verbose=0,
stop_threshold=0.0,
geometry="parallel",
)[0]
"""
Push arrays to device.
"""
y_fan, x0_fan, weights_fan = jax.device_put([y_fan, x_mrf_fan, weights_fan])
x0_parallel = jax.device_put(x_mrf_parallel)
"""
Set problem parameters and BM3D pseudo-functional.
"""
ρ = 10 # ADMM penalty parameter
σ = density * 0.6 # denoiser sigma
g0 = σ * ρ * BM3D()
"""
Set up problem using `SVMBIRExtendedLoss`.
"""
f_extloss_fan = SVMBIRExtendedLoss(
y=y_fan,
A=A_fan,
W=Diagonal(weights_fan),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
f_extloss_parallel = SVMBIRExtendedLoss(
y=y_fan,
A=A_parallel,
W=Diagonal(weights_fan),
scale=0.5,
positivity=True,
prox_kwargs={"maxiter": 5, "ctol": 0.0},
)
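# Note: with f=None below, the data fidelity terms are placed in g_list, so
# ADMM handles them through their prox operators (presumably computed by
# SVMBIR's own iterative solver, configured via prox_kwargs above) instead of
# treating the loss as the f term solved with CG.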
solver_extloss_fan = ADMM(
f=None,
g_list=[f_extloss_fan, g0],
C_list=[Identity(x_mrf_fan.shape), Identity(x_mrf_fan.shape)],
rho_list=[ρ, ρ],
x0=x0_fan,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
solver_extloss_parallel = ADMM(
f=None,
g_list=[f_extloss_parallel, g0],
C_list=[Identity(x_mrf_parallel.shape), Identity(x_mrf_parallel.shape)],
rho_list=[ρ, ρ],
x0=x0_parallel,
maxiter=20,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the ADMM solvers.
"""
print(f"Solving on {device_info()}\n")
x_extloss_fan = solver_extloss_fan.solve()
hist_extloss_fan = solver_extloss_fan.itstat_object.history(transpose=True)
print()
x_extloss_parallel = solver_extloss_parallel.solve()
hist_extloss_parallel = solver_extloss_parallel.itstat_object.history(transpose=True)
"""
Show the recovered images. The parallel-beam reconstruction is poor because
parallel-beam geometry is a poor approximation of the fan-beam geometry used
to generate the sinogram.
"""
norm = plot.matplotlib.colors.Normalize(vmin=-0.1 * density, vmax=1.2 * density)
fig, ax = plt.subplots(1, 3, figsize=(20, 7))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf_parallel,
title=f"Parallel-beam MRF (PSNR: {metric.psnr(x_gt, x_mrf_parallel):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_extloss_parallel,
title=f"Parallel-beam Extended Loss (PSNR: {metric.psnr(x_gt, x_extloss_parallel):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
fig, ax = plt.subplots(1, 3, figsize=(20, 7))
plot.imview(img=x_gt, title="Ground Truth Image", cbar=True, fig=fig, ax=ax[0], norm=norm)
plot.imview(
img=x_mrf_fan,
title=f"Fan-beam MRF (PSNR: {metric.psnr(x_gt, x_mrf_fan):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[1],
norm=norm,
)
plot.imview(
img=x_extloss_fan,
title=f"Fan-beam Extended Loss (PSNR: {metric.psnr(x_gt, x_extloss_fan):.2f} dB)",
cbar=True,
fig=fig,
ax=ax[2],
norm=norm,
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plt.subplots(1, 2, figsize=(15, 6))
plot.plot(
snp.vstack((hist_extloss_parallel.Prml_Rsdl, hist_extloss_parallel.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals for parallel-beam reconstruction",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[0],
)
ax[0].set_ylim([5e-3, 1e0])
ax[0].xaxis.set_major_locator(MaxNLocator(integer=True))
plot.plot(
snp.vstack((hist_extloss_fan.Prml_Rsdl, hist_extloss_fan.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals for fan-beam reconstruction",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
ax[1].set_ylim([5e-3, 1e0])
ax[1].xaxis.set_major_locator(MaxNLocator(integer=True))
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
Convolutional Sparse Coding (ADMM)
==================================
This example demonstrates the solution of a simple convolutional sparse
coding problem
$$\mathrm{argmin}_{\mathbf{x}} \; \frac{1}{2} \Big\| \mathbf{y} -
\sum_k \mathbf{h}_k \ast \mathbf{x}_k \Big\|_2^2 + \lambda \sum_k
( \| \mathbf{x}_k \|_1 - \| \mathbf{x}_k \|_2 ) \;,$$
where $\{\mathbf{h}_k\}$ is a set of filters comprising the dictionary,
$\{\mathbf{x}_k\}$ is a corresponding set of coefficient maps, and
$\mathbf{y}$ is the signal to be represented. The problem is solved via
an ADMM algorithm using the frequency-domain approach proposed in
:cite:`wohlberg-2014-efficient`.
"""
import numpy as np
import jax
import scico.numpy as snp
from scico import plot
from scico.examples import create_conv_sparse_phantom
from scico.functional import L1MinusL2Norm
from scico.linop import CircularConvolve, Identity, Sum
from scico.loss import SquaredL2Loss
from scico.optimize.admm import ADMM, FBlockCircularConvolveSolver
from scico.util import device_info
"""
Set problem size and create random convolutional dictionary (a set of
filters) and a corresponding sparse random set of coefficient maps.
"""
N = 128 # image size
Nnz = 128 # number of non-zeros in coefficient maps
h, x0 = create_conv_sparse_phantom(N, Nnz)
"""
Normalize dictionary filters and scale coefficient maps accordingly.
"""
hnorm = np.sqrt(np.sum(h**2, axis=(1, 2), keepdims=True))
h /= hnorm
x0 *= hnorm
"""
Convert numpy arrays to jax arrays.
"""
h = jax.device_put(h)
x0 = jax.device_put(x0)
"""
Set up sum-of-convolutions forward operator.
"""
C = CircularConvolve(h, input_shape=x0.shape, ndims=2)
S = Sum(input_shape=C.output_shape, axis=0)
A = S @ C
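# Quick consistency check (a sketch, assuming C applies each filter to its
# corresponding coefficient map along the leading axis): A(x) is the
# sum-of-convolutions appearing in the problem statement above.
assert snp.allclose(snp.sum(C(x0), axis=0), A(x0))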
"""
Construct test image from dictionary $\mathbf{h}$ and coefficient maps
$\mathbf{x}_0$.
"""
y = A(x0)
"""
Set functional and solver parameters.
"""
λ = 1e0 # l1-l2 norm regularization parameter
ρ = 2e0 # ADMM penalty parameter
maxiter = 200 # number of ADMM iterations
"""
Define loss function and regularization. Note the use of the
$\ell_1 - \ell_2$ norm, which has been found to provide slightly better
performance than the $\ell_1$ norm in this type of problem
:cite:`wohlberg-2021-psf`.
"""
f = SquaredL2Loss(y=y, A=A)
g0 = λ * L1MinusL2Norm()
C0 = Identity(input_shape=x0.shape)
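# Tiny illustration of the regularizer (hypothetical values, unrelated to the
# problem above): the l1-l2 norm of a vector is its l1 norm minus its l2 norm.
v_demo = snp.array([3.0, -4.0])
print(L1MinusL2Norm()(v_demo))  # |3| + |-4| - sqrt(3**2 + 4**2) = 2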
"""
Initialize ADMM solver.
"""
solver = ADMM(
f=f,
g_list=[g0],
C_list=[C0],
rho_list=[ρ],
alpha=1.8,
maxiter=maxiter,
subproblem_solver=FBlockCircularConvolveSolver(check_solve=True),
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x1 = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=2, ncols=3, figsize=(12, 8.6))
plot.imview(x0[0], title="Coef. map 0", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 0])
ax[0, 0].set_ylabel("Ground truth")
plot.imview(x0[1], title="Coef. map 1", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 1])
plot.imview(x0[2], title="Coef. map 2", cmap=plot.cm.Blues, fig=fig, ax=ax[0, 2])
plot.imview(x1[0], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 0])
ax[1, 0].set_ylabel("Recovered")
plot.imview(x1[1], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 1])
plot.imview(x1[2], cmap=plot.cm.Blues, fig=fig, ax=ax[1, 2])
fig.tight_layout()
fig.show()
"""
Show test image and reconstruction from recovered coefficient maps.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 6))
plot.imview(y, title="Test image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[0])
plot.imview(A(x1), title="Reconstructed image", cmap=plot.cm.gist_heat_r, fig=fig, ax=ax[1])
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
Total Variation Denoising (ADMM)
================================
This example compares denoising via isotropic and anisotropic total
variation (TV) regularization :cite:`rudin-1992-nonlinear`
:cite:`goldstein-2009-split`. It solves the denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) \;,$$
where $R$ is either the isotropic or anisotropic TV regularizer.
In SCICO, switching between these two regularizers is a one-line
change: replacing an
[L1Norm](../_autosummary/scico.functional.rst#scico.functional.L1Norm)
with a
[L21Norm](../_autosummary/scico.functional.rst#scico.functional.L21Norm).
Note that the isotropic version exhibits fewer block-like artifacts on
edges that are not vertical or horizontal.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
x_gt = x_gt / x_gt.max()
"""
Add noise to create a noisy test image.
"""
σ = 0.75 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Denoise with isotropic total variation.
"""
λ_iso = 1.4e0
f = loss.SquaredL2Loss(y=y)
g_iso = λ_iso * functional.L21Norm()
# The append=0 option makes the results of horizontal and vertical finite
# differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
solver = ADMM(
f=f,
g_list=[g_iso],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
solver.solve()
x_iso = solver.x
print()
"""
Denoise with anisotropic total variation for comparison.
"""
# Tune the weight to give the same data fidelity as the isotropic case.
λ_aniso = 1.2e0
g_aniso = λ_aniso * functional.L1Norm()
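# Toy illustration of the two penalties (assuming L21Norm takes the l2 norm
# over the leading axis, matching the stacking produced by FiniteDifference):
# the anisotropic penalty sums absolute differences, while the isotropic
# penalty sums pointwise gradient magnitudes.
d_demo = snp.array([[[3.0, 0.0]], [[4.0, 1.0]]])  # a stacked pair of difference "images"
print(functional.L1Norm()(d_demo))   # |3| + |0| + |4| + |1| = 8
print(functional.L21Norm()(d_demo))  # sqrt(3**2 + 4**2) + sqrt(0**2 + 1**2) = 6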
solver = ADMM(
f=f,
g_list=[g_aniso],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=100,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 20}),
itstat_options={"display": True, "period": 10},
)
solver.solve()
x_aniso = solver.x
print()
"""
Compute and print the data fidelity.
"""
for x, name in zip((x_iso, x_aniso), ("Isotropic", "Anisotropic")):
df = f(x)
print(f"Data fidelity for {name} TV was {df:.2e}")
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison")
fig.show()
# zoomed version
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
ax[0, 0].set_xlim(N // 4, N // 4 + N // 2)
ax[0, 0].set_ylim(N // 4, N // 4 + N // 2)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison (zoomed)")
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
Training of DnCNN for Denoising
===============================
This example demonstrates the training and application of the DnCNN model
from :cite:`zhang-2017-dncnn` to denoise images that have been corrupted
with additive Gaussian noise.
"""
import os
from time import time
import numpy as np
import jax
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scico import flax as sflax
from scico import metric, plot
from scico.flax.examples import load_image_data
"""
Prepare parallel processing. Set an arbitrary processor count (only
applies if GPU is not available).
"""
os.environ["XLA_FLAGS"] = "--xla_force_host_platform_device_count=8"
platform = jax.lib.xla_bridge.get_backend().platform
print("Platform: ", platform)
"""
Read data from cache or generate if not available.
"""
size = 40 # patch size
train_nimg = 400 # number of training images
test_nimg = 16 # number of testing images
nimg = train_nimg + test_nimg
gray = True # use gray scale images
data_mode = "dn" # Denoising problem
noise_level = 0.1 # Standard deviation of noise
noise_range = False # Use fixed noise level
stride = 23 # Stride to sample multiple patches from each image
train_ds, test_ds = load_image_data(
train_nimg,
test_nimg,
size,
gray,
data_mode,
verbose=True,
noise_level=noise_level,
noise_range=noise_range,
stride=stride,
)
"""
Define configuration dictionary for model and training loop.
Parameters have been selected for demonstration purposes and relatively
short training. The depth of the model has been reduced to 6, instead of
the 17 of the original model. The suggested settings can be found in the
original paper.
"""
# model configuration
model_conf = {
"depth": 6,
"num_filters": 64,
}
# training configuration
train_conf: sflax.ConfigDict = {
"seed": 0,
"opt_type": "ADAM",
"batch_size": 128,
"num_epochs": 50,
"base_learning_rate": 1e-3,
"warmup_epochs": 0,
"log_every_steps": 5000,
"log": True,
}
"""
Construct DnCNN model.
"""
channels = train_ds["image"].shape[-1]
model = sflax.DnCNNNet(
depth=model_conf["depth"],
channels=channels,
num_filters=model_conf["num_filters"],
)
"""
Run training loop.
"""
workdir = os.path.join(os.path.expanduser("~"), ".cache", "scico", "examples", "dncnn_out")
train_conf["workdir"] = workdir
print(f"{'JAX process: '}{jax.process_index()}{' / '}{jax.process_count()}")
print(f"{'JAX local devices: '}{jax.local_devices()}")
trainer = sflax.BasicFlaxTrainer(
train_conf,
model,
train_ds,
test_ds,
)
start_time = time()
modvar, stats_object = trainer.train()
time_train = time() - start_time
"""
Evaluate on testing data.
"""
test_patches = 720
start_time = time()
fmap = sflax.FlaxMap(model, modvar)
output = fmap(test_ds["image"][:test_patches])
time_eval = time() - start_time
output = np.clip(output, a_min=0, a_max=1.0)
"""
Compare trained model in terms of reconstruction time and data fidelity.
"""
snr_eval = metric.snr(test_ds["label"][:test_patches], output)
psnr_eval = metric.psnr(test_ds["label"][:test_patches], output)
print(
f"{'DnCNNNet training':18s}{'epochs:':2s}{train_conf['num_epochs']:>5d}"
f"{'':21s}{'time[s]:':10s}{time_train:>7.2f}"
)
print(
f"{'DnCNNNet testing':18s}{'SNR:':5s}{snr_eval:>5.2f}{' dB'}{'':3s}"
f"{'PSNR:':6s}{psnr_eval:>5.2f}{' dB'}{'':3s}{'time[s]:':10s}{time_eval:>7.2f}"
)
"""
Plot comparison. Note that the patches are small, so the plots may show
fragments that are difficult to identify.
"""
np.random.seed(123)
indx = np.random.randint(0, high=test_patches)
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(test_ds["label"][indx, ..., 0], title="Ground truth", cbar=None, fig=fig, ax=ax[0])
plot.imview(
test_ds["image"][indx, ..., 0],
title="Noisy: \nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], test_ds["image"][indx, ..., 0]),
),
cbar=None,
fig=fig,
ax=ax[1],
)
plot.imview(
output[indx, ..., 0],
title="DnCNNNet Reconstruction\nSNR: %.2f (dB), PSNR: %.2f"
% (
metric.snr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
metric.psnr(test_ds["label"][indx, ..., 0], output[indx, ..., 0]),
),
fig=fig,
ax=ax[2],
)
divider = make_axes_locatable(ax[2])
cax = divider.append_axes("right", size="5%", pad=0.2)
fig.colorbar(ax[2].get_images()[0], cax=cax, label="arbitrary units")
fig.show()
"""
Plot convergence statistics. Statistics are only generated if a training
cycle was actually run (i.e. if the results were not simply read from a
final-epoch checkpoint).
"""
if stats_object is not None:
hist = stats_object.history(transpose=True)
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
np.vstack((hist.Train_Loss, hist.Eval_Loss)).T,
x=hist.Epoch,
ptyp="semilogy",
title="Loss function",
xlbl="Epoch",
ylbl="Loss value",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[0],
)
plot.plot(
np.vstack((hist.Train_SNR, hist.Eval_SNR)).T,
x=hist.Epoch,
title="Metric",
xlbl="Epoch",
ylbl="SNR (dB)",
lgnd=("Train", "Test"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
r"""
Image Deconvolution with PPP (DnCNN) Prior (Proximal ADMM Solver)
==================================================================
This example demonstrates deconvolution of a blurred, noisy image using the
Plug-and-Play Priors framework with DnCNN as the denoiser and a proximal
ADMM solver.
"""
import numpy as np
import jax
from xdesign import Foam, discrete_phantom
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.optimize import ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 512 # image size
x_gt = discrete_phantom(Foam(size_range=[0.075, 0.0025], gap=1e-3, porosity=1), size=N)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up the forward operator $A$ and a test image consisting of a blurred
version of the ground truth with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - A \mathbf{x}
\|_2^2 + R(\mathbf{x}) \;$$
where $R(\cdot)$ is a pseudo-functional having the DnCNN denoiser as its
proximal operator. A slightly unusual variable splitting is used,
including setting the $f$ functional to the $R(\cdot)$ term and the $g$
functional to the data fidelity term to allow the use of proximal ADMM,
which avoids the need for conjugate gradient sub-iterations in the solver
steps.
"""
f = functional.DnCNN(variant="17M")
g = loss.SquaredL2Loss(y=y)
"""
Set up proximal ADMM solver.
"""
ρ = 0.2 # ADMM penalty parameter
maxiter = 10 # number of proximal ADMM iterations
mu, nu = ProximalADMM.estimate_parameters(A)
solver = ProximalADMM(
f=f,
g=g,
A=A,
rho=ρ,
mu=mu,
nu=nu,
x0=A.T @ y,
maxiter=maxiter,
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
# Trim the convolution boundary so that yc has the same shape as x_gt when
# computing the PSNR of the blurred, noisy image.
yc = snp.clip(y[nc:-nc, nc:-nc], 0, 1)
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, x), fig=fig, ax=ax[2])
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
r"""
Comparison of Optimization Algorithms for Total Variation Denoising
===================================================================
This example compares the performance of alternating direction method of
multipliers (ADMM), linearized ADMM, proximal ADMM, and primal–dual
hybrid gradient (PDHG) in solving the isotropic total variation (TV)
denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) \;,$$
where $R$ is the isotropic TV: the sum of the norms of the gradient
vectors at each point in the image $\mathbf{x}$.
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, plot
from scico.optimize import PDHG, LinearizedADMM, ProximalADMM
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Add noise to create a noisy test image.
"""
σ = 1.0 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Construct operators and functionals and set regularization parameter.
"""
# The append=0 option makes the results of horizontal and vertical
# finite differences the same shape, which is required for the L21Norm.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
f = loss.SquaredL2Loss(y=y)
λ = 1e0
g = λ * functional.L21Norm()
"""
The first step of the first-run solver is much slower than the
following steps, presumably due to just-in-time compilation of
relevant operators in first use. The code below performs a preliminary
solver step, the result of which is discarded, to reduce this bias in
the timing results. The precise cause of the remaining differences in
time required to compute the first step of each algorithm is unknown,
but it is worth noting that this difference becomes negligible when
just-in-time compilation is disabled (e.g. via the JAX_DISABLE_JIT
environment variable).
"""
solver_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=1,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"maxiter": 1}),
)
solver_admm.solve(); # fmt: skip
# trailing semi-colon suppresses output in notebook
"""
Solve via ADMM with a maximum of 2 CG iterations.
"""
solver_admm = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[1e1],
x0=y,
maxiter=200,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"maxiter": 2}),
itstat_options={"display": True, "period": 10},
)
print(f"Solving on {device_info()}\n")
print("ADMM solver")
solver_admm.solve()
hist_admm = solver_admm.itstat_object.history(transpose=True)
"""
Solve via Linearized ADMM.
"""
solver_ladmm = LinearizedADMM(
f=f,
g=g,
C=C,
mu=1e-2,
nu=1e-1,
x0=y,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nLinearized ADMM solver")
solver_ladmm.solve()
hist_ladmm = solver_ladmm.itstat_object.history(transpose=True)
"""
Solve via Proximal ADMM.
"""
mu, nu = ProximalADMM.estimate_parameters(C)
solver_padmm = ProximalADMM(
f=f,
g=g,
A=C,
rho=1e0,
mu=mu,
nu=nu,
x0=y,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nProximal ADMM solver")
solver_padmm.solve()
hist_padmm = solver_padmm.itstat_object.history(transpose=True)
"""
Solve via PDHG.
"""
tau, sigma = PDHG.estimate_parameters(C, factor=1.5)
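# Background note (not a statement about the internals of estimate_parameters):
# PDHG convergence analyses typically require tau * sigma * ||C||^2 <= 1, so
# the step sizes returned here are presumably chosen to respect a bound of
# this form.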
solver_pdhg = PDHG(
f=f,
g=g,
C=C,
tau=tau,
sigma=sigma,
maxiter=200,
itstat_options={"display": True, "period": 10},
)
print("\nPDHG solver")
solver_pdhg.solve()
hist_pdhg = solver_pdhg.itstat_object.history(transpose=True)
"""
Plot results. It is worth noting that:
1. PDHG outperforms ADMM both with respect to iterations and time.
2. Proximal ADMM has similar performance to PDHG with respect to iterations,
but is slightly inferior with respect to time.
3. ADMM greatly outperforms Linearized ADMM with respect to iterations.
4. ADMM slightly outperforms Linearized ADMM with respect to time. This is
possible because the ADMM $\mathbf{x}$-update can be solved relatively
cheaply, with only 2 CG iterations. If more CG iterations were required,
the time comparison would be favorable to Linearized ADMM.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack(
(hist_admm.Objective, hist_ladmm.Objective, hist_padmm.Objective, hist_pdhg.Objective)
).T,
ptyp="semilogy",
title="Objective function",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack(
(hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_padmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)
).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack(
(hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_padmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)
).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Iteration",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
fig, ax = plot.subplots(nrows=1, ncols=3, sharex=True, sharey=False, figsize=(27, 6))
plot.plot(
snp.vstack(
(hist_admm.Objective, hist_ladmm.Objective, hist_padmm.Objective, hist_pdhg.Objective)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Objective function",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack(
(hist_admm.Prml_Rsdl, hist_ladmm.Prml_Rsdl, hist_padmm.Prml_Rsdl, hist_pdhg.Prml_Rsdl)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Primal residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[1],
)
plot.plot(
snp.vstack(
(hist_admm.Dual_Rsdl, hist_ladmm.Dual_Rsdl, hist_padmm.Dual_Rsdl, hist_pdhg.Dual_Rsdl)
).T,
snp.vstack((hist_admm.Time, hist_ladmm.Time, hist_padmm.Time, hist_pdhg.Time)).T,
ptyp="semilogy",
title="Dual residual",
xlbl="Time (s)",
lgnd=("ADMM", "LinADMM", "ProxADMM", "PDHG"),
fig=fig,
ax=ax[2],
)
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_tv_multi.py
r"""
Parameter Tuning for TV-Regularized Abel Inversion
==================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune
parameters for the companion [example script](ct_abel_tv_admm.rst). The
`ray.tune` class API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
"""
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot
from scico.examples import create_circular_phantom
from scico.linop.abel import AbelProjector
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import tune
"""
Create a ground truth image.
"""
N = 256 # image size
x_gt = create_circular_phantom((N, N), [0.4 * N, 0.2 * N, 0.1 * N], [1, 0, 0.5])
"""
Set up the forward operator and create a test measurement.
"""
A = AbelProjector(x_gt.shape)
y = A @ x_gt
np.random.seed(12345)
y = y + np.random.normal(size=y.shape).astype(np.float32)
"""
Compute inverse Abel transform solution for use as initial solution.
"""
x_inv = A.inverse(y)
x0 = snp.clip(x_inv, 0.0, 1.0)
"""
Define performance evaluation class.
"""
class Trainable(tune.Trainable):
"""Parameter evaluation class."""
def setup(self, config, x_gt, x0, y):
"""This method initializes a new parameter evaluation object. It
is called once when a new parameter evaluation object is created.
The `config` parameter is a dict of specific parameters for
evaluation of a single parameter set (a pair of parameters in
this case). The remaining parameters are objects that are passed
to the evaluation function via the ray object store.
"""
# Put main arrays on jax device.
self.x_gt, self.x0, self.y = jax.device_put([x_gt, x0, y])
# Set up problem to be solved.
self.A = AbelProjector(self.x_gt.shape)
self.f = loss.SquaredL2Loss(y=self.y, A=self.A)
self.C = linop.FiniteDifference(input_shape=self.x_gt.shape)
self.reset_config(config)
def reset_config(self, config):
"""This method is only required when `scico.ray.tune.Tuner` is
initialized with `reuse_actors` set to ``True`` (the default). In
this case, a set of parameter evaluation processes and
corresponding objects are created once (including initialization
via a call to the `setup` method), and this method is called when
switching to evaluation of a different parameter configuration.
If `reuse_actors` is set to ``False``, then a new process and
object are created for each parameter configuration, and this
method is not used.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Set up parameter-dependent functional.
g = λ * functional.L1Norm()
# Define solver.
cg_tol = 1e-4
cg_maxiter = 25
self.solver = ADMM(
f=self.f,
g_list=[g],
C_list=[self.C],
rho_list=[ρ],
x0=self.x0,
maxiter=10,
subproblem_solver=LinearSubproblemSolver(
cg_kwargs={"tol": cg_tol, "maxiter": cg_maxiter}
),
)
return True
def step(self):
"""This method is called for each step in the evaluation of a
single parameter configuration. The maximum number of times it
can be called is controlled by the `num_iterations` parameter
in the initialization of a `scico.ray.tune.Tuner` object.
"""
# Perform 10 solver steps for every ray.tune step
x_tv = snp.clip(self.solver.solve(), 0.0, 1.0)
return {"psnr": float(metric.psnr(self.x_gt, x_tv))}
"""
Define parameter search space and resources per trial.
"""
config = {"lambda": tune.loguniform(1e0, 1e2), "rho": tune.loguniform(1e1, 1e3)}
resources = {"gpu": 0, "cpu": 1} # gpus per trial, cpus per trial
"""
Run parameter search.
"""
tuner = tune.Tuner(
tune.with_parameters(Trainable, x_gt=x_gt, x0=x0, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
num_iterations=10, # perform at most 10 steps for each parameter evaluation
)
results = tuner.fit()
"""
Display best parameters and corresponding performance.
"""
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
"""
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
"""
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
"""
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs. The best point in the parameter space is indicated
in red.
"""
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 20.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
best_config["lambda"],
best_config["rho"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 20 dB omitted)")
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/ct_abel_tv_admm_tune.py
r"""
Parameter Tuning for Image Deconvolution with TV Regularization (ADMM Solver)
=============================================================================
This example demonstrates the use of
[scico.ray.tune](../_autosummary/scico.ray.tune.rst) to tune parameters
for the companion [example script](deconv_tv_admm.rst). The `ray.tune`
function API is used in this example.
This script is hard-coded to run on CPU only to avoid the large number of
warnings that are emitted when GPU resources are requested but not available,
and due to the difficulty of suppressing these warnings in a way that does
not force use of the CPU only. To enable GPU usage, comment out the
`os.environ` statements near the beginning of the script, and change the
value of the "gpu" entry in the `resources` dict from 0 to 1. Note that
two environment variables are set to suppress the warnings because
`JAX_PLATFORMS` was intended to replace `JAX_PLATFORM_NAME` but this change
has yet to be correctly implemented
(see [google/jax#6805](https://github.com/google/jax/issues/6805) and
[google/jax#10272](https://github.com/google/jax/pull/10272)).
"""
# isort: off
import os
os.environ["JAX_PLATFORM_NAME"] = "cpu"
os.environ["JAX_PLATFORMS"] = "cpu"
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.ray import report, tune
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = scico.random.randn(Ax.shape, seed=0)
y = Ax + σ * noise
"""
Define performance evaluation function.
"""
def eval_params(config, x_gt, psf, y):
"""Parameter evaluation function. The `config` parameter is a
dict of specific parameters for evaluation of a single parameter
set (a pair of parameters in this case). The remaining parameters
are objects that are passed to the evaluation function via the
ray object store.
"""
# Extract solver parameters from config dict.
λ, ρ = config["lambda"], config["rho"]
# Put main arrays on jax device.
x_gt, psf, y = jax.device_put([x_gt, psf, y])
# Set up problem to be solved.
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
f = loss.SquaredL2Loss(y=y, A=A)
g = λ * functional.L21Norm()
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
# Define solver.
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.adj(y),
maxiter=10,
subproblem_solver=LinearSubproblemSolver(),
)
# Perform 50 iterations, reporting performance to ray.tune every 10 iterations.
for step in range(5):
x_admm = solver.solve()
report({"psnr": float(metric.psnr(x_gt, x_admm))})
"""
Define parameter search space and resources per trial.
"""
config = {"lambda": tune.loguniform(1e-3, 1e-1), "rho": tune.loguniform(1e-2, 1e0)}
resources = {"cpu": 4, "gpu": 0} # cpus per trial, gpus per trial
"""
Run parameter search.
"""
tuner = tune.Tuner(
tune.with_parameters(eval_params, x_gt=x_gt, psf=psf, y=y),
param_space=config,
resources=resources,
metric="psnr",
mode="max",
num_samples=100, # perform 100 parameter evaluations
)
results = tuner.fit()
"""
Display best parameters and corresponding performance.
"""
best_result = results.get_best_result()
best_config = best_result.config
print(f"Best PSNR: {best_result.metrics['psnr']:.2f} dB")
print("Best config: " + ", ".join([f"{k}: {v:.2e}" for k, v in best_config.items()]))
"""
Plot parameter values visited during parameter search. Marker sizes are
proportional to number of iterations run at each parameter pair. The best
point in the parameter space is indicated in red.
"""
fig = plot.figure(figsize=(8, 8))
trials = results.get_dataframe()
for t in trials.iloc:
n = t["training_iteration"]
plot.plot(
t["config/lambda"],
t["config/rho"],
ptyp="loglog",
lw=0,
ms=(0.5 + 1.5 * n),
marker="o",
mfc="blue",
mec="blue",
fig=fig,
)
plot.plot(
    best_config["rho"],
    best_config["lambda"],
ptyp="loglog",
title="Parameter search sampling locations\n(marker size proportional to number of iterations)",
xlbl=r"$\rho$",
ylbl=r"$\lambda$",
lw=0,
ms=5.0,
marker="o",
mfc="red",
mec="red",
fig=fig,
)
ax = fig.axes[0]
ax.set_xlim([config["rho"].lower, config["rho"].upper])
ax.set_ylim([config["lambda"].lower, config["lambda"].upper])
fig.show()
"""
Plot parameter values visited during parameter search and corresponding
reconstruction PSNRs. The best point in the parameter space is indicated
in red.
"""
𝜌 = [t["config/rho"] for t in trials.iloc]
𝜆 = [t["config/lambda"] for t in trials.iloc]
psnr = [t["psnr"] for t in trials.iloc]
minpsnr = min(max(psnr), 18.0)
𝜌, 𝜆, psnr = zip(*filter(lambda x: x[2] >= minpsnr, zip(𝜌, 𝜆, psnr)))
fig, ax = plot.subplots(figsize=(10, 8))
sc = ax.scatter(𝜌, 𝜆, c=psnr, cmap=plot.cm.plasma_r)
fig.colorbar(sc)
plot.plot(
    best_config["rho"],
    best_config["lambda"],
ptyp="loglog",
lw=0,
ms=12.0,
marker="2",
mfc="red",
mec="red",
fig=fig,
ax=ax,
)
ax.set_xscale("log")
ax.set_yscale("log")
ax.set_xlabel(r"$\rho$")
ax.set_ylabel(r"$\lambda$")
ax.set_title("PSNR at each sample location\n(values below 18 dB omitted)")
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_admm_tune.py
r"""
Image Deconvolution with TV Regularization (Proximal ADMM Solver)
=================================================================
This example demonstrates the solution of an image deconvolution problem
with isotropic total variation (TV) regularization
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is a convolution operator, $\mathbf{y}$ is the blurred image,
$D$ is a 2D finite difference operator, and $\mathbf{x}$ is the
deconvolved image.
In this example the problem is solved via proximal ADMM, while standard
ADMM is used in a [companion example](deconv_tv_admm.rst).
"""
import jax
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, metric, plot
from scico.optimize import ProximalADMM
from scico.util import device_info
"""
Create a ground truth image.
"""
phantom = SiemensStar(32)
N = 256 # image size
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
"""
Set up the forward operator and create a test signal consisting of a
blurred signal with additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n)) / (n * n)
C = linop.Convolve(h=psf, input_shape=x_gt.shape)
Cx = C(x_gt) # blurred image
noise, key = scico.random.randn(Cx.shape, seed=0)
y = Cx + σ * noise
r"""
Set up the problem to be solved. We want to minimize the functional
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - C \mathbf{x}
\|_2^2 + \lambda \| D \mathbf{x} \|_{2,1} \;,$$
where $C$ is the convolution operator and $D$ is a finite difference
operator. This problem can be expressed as
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; (1/2) \| \mathbf{y} -
\mathbf{z}_0 \|_2^2 + \lambda \| \mathbf{z}_1 \|_{2,1} \;\;
\text{such that} \;\; \mathbf{z}_0 = C \mathbf{x} \;\; \text{and} \;\;
\mathbf{z}_1 = D \mathbf{x} \;,$$
which can be written in the form of a standard ADMM problem
$$\mathrm{argmin}_{\mathbf{x}, \mathbf{z}} \; f(\mathbf{x}) + g(\mathbf{z})
\;\; \text{such that} \;\; A \mathbf{x} + B \mathbf{z} = \mathbf{c}$$
with
$$f = 0 \quad g = g_0 + g_1$$
$$g_0(\mathbf{z}_0) = (1/2) \| \mathbf{y} - \mathbf{z}_0 \|_2^2 \quad
g_1(\mathbf{z}_1) = \lambda \| \mathbf{z}_1 \|_{2,1}$$
$$A = \left( \begin{array}{c} C \\ D \end{array} \right) \quad
B = \left( \begin{array}{cc} -I & 0 \\ 0 & -I \end{array} \right) \quad
\mathbf{c} = \left( \begin{array}{c} 0 \\ 0 \end{array} \right) \;.$$
This is a more complex splitting than that used in the
[companion example](deconv_tv_admm.rst), but it allows the use of a
proximal ADMM solver in a way that avoids the need for the conjugate
gradient sub-iterations used by the ADMM solver in the
[companion example](deconv_tv_admm.rst).
"""
f = functional.ZeroFunctional()
g0 = loss.SquaredL2Loss(y=y)
λ = 2.0e-2 # L1 norm regularization parameter
g1 = λ * functional.L21Norm()
g = functional.SeparableFunctional((g0, g1))
D = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
A = linop.VerticalStack((C, D))
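r"""
As a quick sanity check on the splitting (an editorial addition, not part
of the original example), the shapes of the stacked operator confirm that
it maps the image domain to a block domain holding the outputs of $C$ and
$D$.
"""
# Output shape is a tuple of block shapes: one block for C x, one for D x.
print("A input shape: ", A.input_shape)
print("A output shape:", A.output_shape)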
"""
Set up a proximal ADMM solver object.
"""
ρ = 1.0e-1 # ADMM penalty parameter
maxiter = 50 # number of ADMM iterations
mu, nu = ProximalADMM.estimate_parameters(D)
solver = ProximalADMM(
f=f,
g=g,
A=A,
B=None,
rho=ρ,
mu=mu,
nu=nu,
x0=C.adj(y),
maxiter=maxiter,
itstat_options={"display": True, "period": 10},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
hist = solver.itstat_object.history(transpose=True)
"""
Show the recovered image.
"""
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc]
plot.imview(y, title="Blurred, noisy image: %.2f (dB)" % metric.psnr(x_gt, yc), fig=fig, ax=ax[1])
plot.imview(
solver.x, title="Deconvolved image: %.2f (dB)" % metric.psnr(x_gt, solver.x), fig=fig, ax=ax[2]
)
fig.show()
"""
Plot convergence statistics.
"""
fig, ax = plot.subplots(nrows=1, ncols=2, figsize=(12, 5))
plot.plot(
hist.Objective,
title="Objective function",
xlbl="Iteration",
ylbl="Functional value",
fig=fig,
ax=ax[0],
)
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
fig=fig,
ax=ax[1],
)
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_tv_padmm.py
r"""
Total Variation Denoising with Constraint (APGM)
================================================
This example demonstrates the solution of the isotropic total variation
(TV) denoising problem
$$\mathrm{argmin}_{\mathbf{x}} \; (1/2) \| \mathbf{y} - \mathbf{x}
\|_2^2 + \lambda R(\mathbf{x}) + \iota_C(\mathbf{x}) \;,$$
where $R$ is a TV regularizer, $\iota_C(\cdot)$ is the indicator function
of constraint set $C$, and $C = \{ \mathbf{x} \, | \, x_i \in [0, 1] \}$,
i.e. the set of vectors with components constrained to be in the interval
$[0, 1]$. The problem is solved separately with $R$ taken as isotropic
and anisotropic TV regularization.
The solution via APGM is based on the approach in :cite:`beck-2009-tv`,
which involves constructing a dual for the constrained denoising problem.
The APGM solution minimizes the resulting dual. In this case, switching
between the two regularizers corresponds to switching between two
different projectors.
"""
from typing import Callable, Optional, Union
import jax
import jax.numpy as jnp
from xdesign import SiemensStar, discrete_phantom
import scico.numpy as snp
import scico.random
from scico import functional, linop, loss, operator, plot
from scico.numpy import Array, BlockArray
from scico.numpy.util import ensure_on_device
from scico.optimize.pgm import AcceleratedPGM, RobustLineSearchStepSize
from scico.util import device_info
"""
Create a ground truth image.
"""
N = 256 # image size
phantom = SiemensStar(16)
x_gt = snp.pad(discrete_phantom(phantom, N - 16), 8)
x_gt = jax.device_put(x_gt) # convert to jax type, push to GPU
x_gt = x_gt / x_gt.max()
"""
Add noise to create a noisy test image.
"""
σ = 0.75 # noise standard deviation
noise, key = scico.random.randn(x_gt.shape, seed=0)
y = x_gt + σ * noise
"""
Define finite difference operator and adjoint.
"""
# The append=0 option appends 0 to the input along the axis
# prior to performing the difference to make the results of
# horizontal and vertical finite differences the same shape.
C = linop.FiniteDifference(input_shape=x_gt.shape, append=0)
A = C.adj
"""
Define a zero array as initial estimate.
"""
x0 = jnp.zeros(C(y).shape)
"""
Define the dual of the total variation denoising problem.
"""
class DualTVLoss(loss.Loss):
def __init__(
self,
y: Union[Array, BlockArray],
A: Optional[Union[Callable, operator.Operator]] = None,
lmbda: float = 0.5,
):
y = ensure_on_device(y)
self.functional = functional.SquaredL2Norm()
super().__init__(y=y, A=A, scale=1.0)
self.lmbda = lmbda
def __call__(self, x: Union[Array, BlockArray]) -> float:
xint = self.y - self.lmbda * self.A(x)
return -1.0 * self.functional(xint - jnp.clip(xint, 0.0, 1.0)) + self.functional(xint)
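r"""
Reading directly from the class above (this note is an editorial addition):
writing $\mathbf{v} = \mathbf{y} - \lambda C^T \mathbf{p}$, where $C$ is
the finite difference operator and $\mathbf{p}$ the dual variable, the loss
evaluates
$$h(\mathbf{p}) = \| \mathbf{v} \|_2^2 - \| \mathbf{v} -
\mathrm{clip}(\mathbf{v}, 0, 1) \|_2^2 \;,$$
which is the dual objective minimized by APGM below.
"""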
"""
Denoise with isotropic total variation. Define projector for isotropic
total variation.
"""
# Evaluation of functional set to zero.
class IsoProjector(functional.Functional):
has_eval = True
has_prox = True
def __call__(self, x: Union[Array, BlockArray]) -> float:
return 0.0
def prox(self, v: Array, lam: float, **kwargs) -> Array:
norm_v_ptp = jnp.sqrt(jnp.sum(jnp.abs(v) ** 2, axis=0))
x_out = v / jnp.maximum(jnp.ones(v.shape), norm_v_ptp)
out1 = v[0, :, -1] / jnp.maximum(jnp.ones(v[0, :, -1].shape), jnp.abs(v[0, :, -1]))
x_out = x_out.at[0, :, -1].set(out1)
out2 = v[1, -1, :] / jnp.maximum(jnp.ones(v[1, -1, :].shape), jnp.abs(v[1, -1, :]))
x_out = x_out.at[1, -1, :].set(out2)
return x_out
"""
Use RobustLineSearchStepSize object and set up AcceleratedPGM solver
object. Run the solver.
"""
reg_weight_iso = 1.4e0
f_iso = DualTVLoss(y=y, A=A, lmbda=reg_weight_iso)
g_iso = IsoProjector()
solver_iso = AcceleratedPGM(
f=f_iso,
g=g_iso,
L0=16.0 * f_iso.lmbda**2,
x0=x0,
maxiter=100,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
# Run the solver.
print(f"Solving on {device_info()}\n")
x = solver_iso.solve()
hist_iso = solver_iso.itstat_object.history(transpose=True)
# Project to constraint set.
x_iso = jnp.clip(y - f_iso.lmbda * f_iso.A(x), 0.0, 1.0)
"""
Denoise with anisotropic total variation for comparison. Define
projector for anisotropic total variation.
"""
# Evaluation of functional set to zero.
class AnisoProjector(functional.Functional):
has_eval = True
has_prox = True
def __call__(self, x: Union[Array, BlockArray]) -> float:
return 0.0
def prox(self, v: Array, lam: float, **kwargs) -> Array:
return v / jnp.maximum(jnp.ones(v.shape), jnp.abs(v))
"""
Use RobustLineSearchStepSize object and set up AcceleratedPGM solver
object. The weight was tuned to give the same data fidelity as the
isotropic case. Run the solver.
"""
reg_weight_aniso = 1.2e0
f = DualTVLoss(y=y, A=A, lmbda=reg_weight_aniso)
g = AnisoProjector()
solver = AcceleratedPGM(
f=f,
g=g,
L0=16.0 * f.lmbda**2,
x0=x0,
maxiter=100,
itstat_options={"display": True, "period": 10},
step_size=RobustLineSearchStepSize(),
)
# Run the solver.
print()
x = solver.solve()
# Project to constraint set.
x_aniso = jnp.clip(y - f.lmbda * f.A(x), 0.0, 1.0)
"""
Compute the data fidelity.
"""
df = hist_iso.Objective[-1]
print(f"\nData fidelity for isotropic TV was {df:.2e}")
hist = solver.itstat_object.history(transpose=True)
df = hist.Objective[-1]
print(f"Data fidelity for anisotropic TV was {df:.2e}")
"""
Plot results.
"""
plt_args = dict(norm=plot.matplotlib.colors.Normalize(vmin=0, vmax=1.5))
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison")
fig.show()
# zoomed version
fig, ax = plot.subplots(nrows=2, ncols=2, sharex=True, sharey=True, figsize=(11, 10))
plot.imview(x_gt, title="Ground truth", fig=fig, ax=ax[0, 0], **plt_args)
plot.imview(y, title="Noisy version", fig=fig, ax=ax[0, 1], **plt_args)
plot.imview(x_iso, title="Isotropic TV denoising", fig=fig, ax=ax[1, 0], **plt_args)
plot.imview(x_aniso, title="Anisotropic TV denoising", fig=fig, ax=ax[1, 1], **plt_args)
ax[0, 0].set_xlim(N // 4, N // 4 + N // 2)
ax[0, 0].set_ylim(N // 4, N // 4 + N // 2)
fig.subplots_adjust(left=0.1, right=0.99, top=0.95, bottom=0.05, wspace=0.2, hspace=0.01)
fig.colorbar(
ax[0, 0].get_images()[0], ax=ax, location="right", shrink=0.9, pad=0.05, label="Arbitrary Units"
)
fig.suptitle("Denoising comparison (zoomed)")
fig.show()
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/denoise_tv_pgm.py
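r"""
3D Image Deconvolution with BM4D Regularization (PPP/ADMM)
===========================================================
This title and summary are editorial additions (the original entry has no
module docstring): the script below solves a 3D deconvolution problem in
the plug-and-play priors (PPP) framework, using ADMM with the BM4D
denoiser as the prior.
"""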
import numpy as np
import jax
import scico.numpy as snp
from scico import functional, linop, loss, metric, plot, random
from scico.examples import create_3d_foam_phantom, downsample_volume, tile_volume_slices
from scico.optimize.admm import ADMM, LinearSubproblemSolver
from scico.util import device_info
"""
Create a ground truth image.
"""
np.random.seed(1234)
N = 128 # phantom size
Nx, Ny, Nz = N, N, N // 4
upsamp = 2
x_gt_hires = create_3d_foam_phantom((upsamp * Nz, upsamp * Ny, upsamp * Nx), N_sphere=100)
x_gt = downsample_volume(x_gt_hires, upsamp)
x_gt = jax.device_put(x_gt) # convert to jax array, push to GPU
"""
Set up forward operator and test signal consisting of blurred signal with
additive Gaussian noise.
"""
n = 5 # convolution kernel size
σ = 20.0 / 255 # noise level
psf = snp.ones((n, n, n)) / (n**3)
A = linop.Convolve(h=psf, input_shape=x_gt.shape)
Ax = A(x_gt) # blurred image
noise, key = random.randn(Ax.shape)
y = Ax + σ * noise
"""
Set up ADMM solver.
"""
f = loss.SquaredL2Loss(y=y, A=A)
C = linop.Identity(x_gt.shape)
λ = 40.0 / 255 # BM4D regularization strength
g = λ * functional.BM4D()
ρ = 1.0 # ADMM penalty parameter
maxiter = 10 # number of ADMM iterations
solver = ADMM(
f=f,
g_list=[g],
C_list=[C],
rho_list=[ρ],
x0=A.T @ y,
maxiter=maxiter,
subproblem_solver=LinearSubproblemSolver(cg_kwargs={"tol": 1e-3, "maxiter": 100}),
itstat_options={"display": True},
)
"""
Run the solver.
"""
print(f"Solving on {device_info()}\n")
x = solver.solve()
x = snp.clip(x, 0, 1)
hist = solver.itstat_object.history(transpose=True)
"""
Show slices of the recovered 3D volume.
"""
show_id = Nz // 2
fig, ax = plot.subplots(nrows=1, ncols=3, figsize=(15, 5))
plot.imview(tile_volume_slices(x_gt), title="Ground truth", fig=fig, ax=ax[0])
nc = n // 2
yc = y[nc:-nc, nc:-nc, nc:-nc]
yc = snp.clip(yc, 0, 1)
plot.imview(
tile_volume_slices(yc),
title="Slices of blurred, noisy volume: %.2f (dB)" % metric.psnr(x_gt, yc),
fig=fig,
ax=ax[1],
)
plot.imview(
tile_volume_slices(x),
title="Slices of deconvolved volume: %.2f (dB)" % metric.psnr(x_gt, x),
fig=fig,
ax=ax[2],
)
fig.show()
"""
Plot convergence statistics.
"""
plot.plot(
snp.vstack((hist.Prml_Rsdl, hist.Dual_Rsdl)).T,
ptyp="semilogy",
title="Residuals",
xlbl="Iteration",
lgnd=("Primal", "Dual"),
)
input("\nWaiting for input to close figures and exit")
/scico-0.0.4.tar.gz/scico-0.0.4/examples/scripts/deconv_ppp_bm4d_admm.py
MIT License
Copyright (c) 2021 Malte Vogl (ModelSEN project)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
scicom
|
/scicom-0.1.2-py3-none-any.whl/scicom-0.1.2.dist-info/LICENSE.md
|
LICENSE.md
|
| 0.807574 | 0.210543 |
MIT License
Copyright (c) [2021] [Thomas Bury]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
scicomap
|
/scicomap-0.4.1.tar.gz/scicomap-0.4.1/LICENSE.md
|
LICENSE.md
|
| 0.826642 | 0.352898 |
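"""Post-generation hooks for the SciCookie cookiecutter template.
Rendered by cookiecutter after project creation: removes the files for
options that were not selected (docs engine, code of conduct, governance,
roadmap, conda, containers, CLI, build system) and initializes the git
repository.
"""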
import os
import shutil
import subprocess
from pathlib import Path
PROJECT_DIRECTORY = Path(os.path.abspath(os.path.curdir)).resolve()
UNUSED_DOCS_DIRS = [
PROJECT_DIRECTORY / 'docs-mkdocs',
PROJECT_DIRECTORY / 'docs-sphinx',
PROJECT_DIRECTORY / 'docs-jupyter-book'
]
DOCUMENTATION_ENGINE = "{{ cookiecutter.documentation_engine }}"
DOCS_SPEC_DIR = UNUSED_DOCS_DIRS.pop(
UNUSED_DOCS_DIRS.index(
PROJECT_DIRECTORY / f'docs-{DOCUMENTATION_ENGINE}'
)
)
USE_SRC_LAYOUT = {{ cookiecutter.project_layout == "src" }}
if USE_SRC_LAYOUT:
PACKAGE_PATH = PROJECT_DIRECTORY / "src" / "{{ cookiecutter.package_slug}}"
else:
PACKAGE_PATH = PROJECT_DIRECTORY / "{{ cookiecutter.package_slug}}"
USE_BLACK = {{ cookiecutter.use_black == "yes" }}
USE_BLUE = {{ cookiecutter.use_blue == "yes" }}
USE_BANDIT = {{ cookiecutter.use_bandit == "yes" }}
USE_CONTAINERS = {{ cookiecutter.use_containers in ['Docker', 'Podman'] }}
USE_CLI = {{ cookiecutter.command_line_interface != "No command-line interface" }}
USE_CONDA = {{ cookiecutter.use_conda == "yes" }}
{% if cookiecutter.code_of_conduct == "contributor-covenant" -%}
COC_PATH = PROJECT_DIRECTORY / 'coc' / 'CONTRIBUTOR_COVENANT.md'
{%- elif cookiecutter.code_of_conduct == "citizen-code-of-conduct" -%}
COC_PATH = PROJECT_DIRECTORY / 'coc' / 'CITIZEN.md'
{% else %}
COC_PATH = None
{%- endif %}
{% if cookiecutter.governance_document == "numpy-governance" -%}
GOVERNANCE_PATH = PROJECT_DIRECTORY / 'governance' / 'numpy_governance.md'
{% elif cookiecutter.governance_document == "sciml-governance" -%}
GOVERNANCE_PATH = PROJECT_DIRECTORY / 'governance' / 'sciml_governance.md'
{% else -%}
GOVERNANCE_PATH = None
{%- endif %}
{% if cookiecutter.roadmap_document == "pytorch-ignite-roadmap" -%}
ROADMAP_PATH = PROJECT_DIRECTORY / 'roadmap' / 'ignite_roadmap.md'
{%- else %}
ROADMAP_PATH = None
{%- endif %}
{% if cookiecutter.build_system == "poetry" -%}
BUILD_SYSTEM = "poetry"
{% elif cookiecutter.build_system == "flit" -%}
BUILD_SYSTEM = "flit"
{% elif cookiecutter.build_system == "mesonpy" -%}
BUILD_SYSTEM = "mesonpy"
{% elif cookiecutter.build_system == "setuptools" -%}
BUILD_SYSTEM = "setuptools"
{% elif cookiecutter.build_system == "pdm" -%}
BUILD_SYSTEM = "pdm"
{%- else %}
BUILD_SYSTEM = None
{%- endif %}
def remove_dirs(dirs: list):
    for dir_path in dirs:
        shutil.rmtree(dir_path)
def remove_dir(dir_path):
"""Remove a directory located at PROJECT_DIRECTORY/dir_path"""
shutil.rmtree(PROJECT_DIRECTORY/dir_path)
def remove_project_file(filepath: str):
os.remove(PROJECT_DIRECTORY / filepath)
def remove_package_file(filepath: str):
os.remove(PACKAGE_PATH / filepath)
def move_selected_doc_dir():
docs_target_dir = PROJECT_DIRECTORY / "docs"
for file_name in os.listdir(DOCS_SPEC_DIR):
shutil.move(DOCS_SPEC_DIR / file_name, docs_target_dir)
if DOCUMENTATION_ENGINE == "sphinx":
remove_project_file(Path("docs") / "index.md")
remove_project_file(Path("docs/api") / "references.md")
shutil.rmtree(DOCS_SPEC_DIR)
def clean_up_docs():
remove_dirs(UNUSED_DOCS_DIRS)
move_selected_doc_dir()
def clean_up_project_layout():
if USE_SRC_LAYOUT:
if not os.path.exists("src"):
os.mkdir("src")
shutil.move('{{cookiecutter.package_slug}}', 'src')
def clean_up_code_of_conduct():
if COC_PATH:
shutil.move(
COC_PATH,
PROJECT_DIRECTORY / 'CODE_OF_CONDUCT.md'
)
remove_dir("coc")
def clean_up_conda():
if not USE_CONDA:
remove_dir("conda")
def clean_up_governance():
if GOVERNANCE_PATH:
shutil.move(
GOVERNANCE_PATH,
PROJECT_DIRECTORY / 'governance.md'
)
remove_dir("governance")
def clean_up_roadmap():
if ROADMAP_PATH:
shutil.move(
ROADMAP_PATH,
PROJECT_DIRECTORY / 'roadmap.md'
)
remove_dir("roadmap")
def clean_up_containers():
if not USE_CONTAINERS:
remove_dir("containers")
def clean_up_cli():
if not USE_CLI:
remove_package_file("__main__.py")
def clean_up_build_system():
build_system_dir = PROJECT_DIRECTORY / "build-system"
if BUILD_SYSTEM == "poetry":
shutil.move(
build_system_dir / "poetry-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
elif BUILD_SYSTEM == "flit":
shutil.move(
build_system_dir / "flit-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
elif BUILD_SYSTEM == "mesonpy":
shutil.move(
build_system_dir / "mesonpy-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
shutil.move(
build_system_dir / "meson.build",
PROJECT_DIRECTORY / 'meson.build'
)
elif BUILD_SYSTEM == "setuptools":
shutil.move(
build_system_dir / "setuptools-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
elif BUILD_SYSTEM == "pdm":
shutil.move(
build_system_dir / "pdm-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
else:
shutil.move(
build_system_dir / "base-pyproject.toml",
PROJECT_DIRECTORY / 'pyproject.toml'
)
remove_dir("build-system")
def http2ssh(url):
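    # e.g. https://github.com/org/repo.git -> git@github.com:org/repo.git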
url = url.replace("https://", "git@")
return url.replace("/", ":", 1)
def validation():
if USE_BLUE and USE_BLACK:
raise Exception(
"The libs Blue and Black were selected, but you need to choose "
"just one of them."
)
def prepare_git():
subprocess.call(["git", "init"])
git_https_origin = http2ssh("{{cookiecutter.git_https_origin}}")
git_https_upstream = http2ssh("{{cookiecutter.git_https_upstream}}")
git_main_branch = http2ssh("{{cookiecutter.git_main_branch}}")
git_new_branch = "add-initial-structure"
if git_https_origin != "":
subprocess.call(["git", "remote", "add", "origin", git_https_origin])
subprocess.call(["git", "fetch", "--all"])
if git_https_upstream != "":
subprocess.call(
["git", "remote", "add", "upstream", git_https_upstream]
)
subprocess.call(["git", "checkout", f"upstream/{git_main_branch}"])
subprocess.call(["git", "fetch", "--all"])
subprocess.call(
["git", "config", "user.name", "{{cookiecutter.author_full_name}}"]
)
subprocess.call(
["git", "config", "user.email", "{{cookiecutter.author_email}}"]
)
subprocess.call(["git", "checkout", "-b", git_new_branch])
subprocess.call(["git", "add", "."])
subprocess.call(["git", "commit", "-m", "Initial commit", "--no-verify"])
print("=" * 80)
print("NOTE: Run `git rebase -i upstream/{{ cookiecutter.git_main_branch }}`")
print("=" * 80)
def post_gen():
validation()
# keep this one first, because it changes the package folder
clean_up_project_layout()
clean_up_cli()
clean_up_code_of_conduct()
clean_up_conda()
clean_up_containers()
clean_up_docs()
clean_up_governance()
clean_up_roadmap()
clean_up_build_system()
# keep it at the end, because it will create a new git commit
prepare_git()
if __name__ == "__main__":
post_gen()
|
scicookie
|
/hooks/post_gen_project.py
|
post_gen_project.py
|
| 0.188697 | 0.092155 |
import os
import sys
import logging
from shutil import copyfile
from distutils.dir_util import copy_tree
import click
import click_log
log = logging.getLogger()
click_log.basic_config(log)
class ConfUpdate(object):
def __init__(self, repo_dir, safe_mode):
log.debug('init : %s, safe : %s' % (repo_dir, safe_mode))
self.safe_mode = safe_mode
if os.path.isdir(repo_dir):
log.debug('repo_dir good')
self.repo_dir = repo_dir
else:
message = ('this is bad, quitting : bad --> (%s)\n\n'
'and you should feel bad' % repo_dir)
log.critical(message)
sys.exit(2)
repo_name = os.path.basename(os.path.normpath(repo_dir))
log.debug('repo_name : %s' % repo_name)
self.solr_repo_name = repo_name
self.solr_conf_dir = self.conf_path_detect()
self.fgs_index_dir = ('/usr/local/fedora/tomcat/webapps/'
'fedoragsearch/WEB-INF/classes/fgsconfigFinal/'
'index/FgsIndex/')
def conf_path_detect(self):
log.debug('conf_path_detect')
product = None
c1 = '/usr/local/fedora/solr/conf'
c2 = '/usr/local/fedora/solr/collection1/conf'
if os.path.isdir(c1):
# product = '/usr/local/fedora/solr'
product = c1
elif os.path.isdir(c2):
# product = '/usr/local/fedora/solr/collection1'
product = c2
        else:
            log.critical('not locating solr config dir')
            sys.exit(3)
log.debug('conf location : %s' % product)
return product
def copy_folder(self, src, dst):
log.debug('copy_folder\n src: %s\n dst: %s' % (src, dst))
p = os.path.join(self.repo_dir, src)
if not self.safe_mode:
copy_tree(p, dst)
def copy_file(self, src, dst):
log.debug('copy_file\n src: %s\n dst: %s' % (src, dst))
p = os.path.join(self.repo_dir, src)
if not self.safe_mode:
copyfile(p, dst)
def execute(self):
log.debug('execute')
self.copy_folder('conf', self.solr_conf_dir)
transform_dest = os.path.join(
self.fgs_index_dir,
'islandora_transforms'
)
self.copy_folder('islandora_transforms', transform_dest)
self.copy_file(
'foxmlToSolr.xslt',
os.path.join(self.fgs_index_dir, 'foxmlToSolr.xslt')
)
self.copy_file(
'index.properties',
os.path.join(self.fgs_index_dir, 'index.properties')
)
@click.command()
@click.option(
'--repo',
'repo_dir',
required=True,
)
@click.option(
'--safe',
is_flag=True,
default=False,
help="dry run, don't copy files"
)
@click_log.simple_verbosity_option(log)
def main(repo_dir, safe):
log.debug('main')
conf = ConfUpdate(repo_dir, safe)
conf.execute()
if __name__ == '__main__':
main()
|
scid
|
/scid-0.3.tar.gz/scid-0.3/scid.py
|
scid.py
|
| 0.135032 | 0.053576 |
# scida

scida is an out-of-the-box analysis tool for large scientific datasets. It primarily supports the astrophysics community, focusing on cosmological and galaxy formation simulations using particles or unstructured meshes, as well as large observational datasets.
This tool uses dask, allowing analysis to scale up from your personal computer to HPC resources and the cloud.
## Features
- Unified, high-level interface to load and analyze large datasets from a variety of sources.
- Parallel, task-based data processing with dask arrays.
- Physical unit support via pint.
- Easily extensible architecture.
## Requirements
- Python >= 3.9
## Documentation
The documentation can be found [here](https://cbyrohl.github.io/scida/).
## Install
```
pip install scida
```
## First Steps
After installing scida, follow the [tutorial](https://cbyrohl.github.io/scida/tutorial/).
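As a quick orientation before the tutorial, the snippet below sketches a typical workflow, assuming the `load` entry point described in the scida documentation; the snapshot path and particle field names are placeholders rather than shipped data.
```
import scida
# Lazily open a simulation snapshot (the path is a placeholder).
ds = scida.load("path/to/snapshot")
# Fields are dask arrays; nothing is computed until .compute() is called.
masses = ds.data["PartType0"]["Masses"]
print(masses.sum().compute())
```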
## License
Distributed under the terms of the [MIT license](LICENSE),
_scida_ is free and open source software.
## Issues
If you encounter any problems,
please [file an issue](https://github.com/cbyrohl/scida/issues/new) along with a detailed description.
## Contributors
In alphabetical order:
- @ayromlou
- @cbyrohl
- @dnelson86
## Acknowledgements
The project structure was adapted from [Wolt](https://github.com/woltapp/wolt-python-package-cookiecutter) and [Hypermodern Python](https://github.com/cjolowicz/cookiecutter-hypermodern-python) cookiecutter templates.
|
scida
|
/scida-0.2.4.tar.gz/scida-0.2.4/README.md
|
README.md
|
pip install scida
| 0.710528 | 0.909907 |
<div align="center">
<h1>scidantic</h1>
<p>
<em>
An extension of <a href="https://github.com/pydantic/pydantic">pydantic</a> providing types for NumPy-like arrays and much more.
</em>
</p>
<a href="https://github.com/gabrielmbmb/scidantic/actions/workflows/test.yaml">
    <img src="https://github.com/gabrielmbmb/scidantic/actions/workflows/test.yaml/badge.svg" alt="Test Workflow">
</a>
<a href="https://pypi.org/project/scidantic">
<img src="https://img.shields.io/pypi/v/scidantic" alt="Python package version">
</a>
<a href="https://pypi.org/project/scidantic">
<img src="https://img.shields.io/pypi/pyversions/scidantic" alt="Supported Python versions">
</a>
</div>
## Installation
```shell
pip install scidantic
```
|
scidantic
|
/scidantic-0.0.1.tar.gz/scidantic-0.0.1/README.md
|
README.md
|
pip install scidantic
| 0.647241 | 0.656462 |
import logging
import copy
import dpath.util
from scidash_api.exceptions import ScidashClientException
from scidash_api.validator import ScidashClientDataValidator
logger = logging.getLogger(__name__)
class ScidashClientMapper(object):
"""ScidashClientMapper
util class for converting raw data from Sciunit to data acceptable in
Scidash
"""
# Expected output format
OUTPUT_SCHEME = {
'score_class': {
'class_name': None,
'url': None
},
'model_instance': {
'model_class': {
'class_name': None,
'url': '',
'capabilities': []
},
'backend': None,
'hash_id': None,
'attributes': {},
'name': None,
'run_params': {},
'url': None
},
'prediction': None,
'raw': None,
'score': None,
'hash_id': None,
'sort_key': None,
'score_type': None,
'summary': None,
'test_instance': {
'description': None,
'test_suites': [],
'hash_id': None,
'test_class': {
'class_name': None,
'url': None
},
'observation': {
'mean': None,
'std': None,
'url': None
},
'verbose': None
}
}
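    # Each pair maps a destination path in OUTPUT_SCHEME to the source path
    # in the raw sciunit score dict, both given as dpath-style addresses.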
KEYS_MAPPING = [
(
'score_class/class_name',
'_class/name'
),
(
'score_class/url',
'_class/url'
),
(
'model_instance/model_class/class_name',
'model/_class/name'
),
(
'model_instance/model_class/url',
'model/_class/url'
),
(
'model_instance/name',
'model/name'
),
(
'model_instance/url',
'model/url'
),
(
'prediction',
'prediction'
),
(
'raw',
'raw'
),
(
'score',
'score'
),
(
'score_type',
'score_type'
),
(
'summary',
'summary'
),
(
'test_instance/description',
'test/description'
),
(
'test_instance/test_class/class_name',
'test/name'
),
(
'test_instance/test_class/url',
'test/_class/url'
),
(
'test_instance/observation',
'test/observation'
),
(
'test_instance/verbose',
'test/verbose'
),
]
OPTIONAL_KEYS_MAPPING = [
(
'model_instance/backend',
'model/backend'
),
(
'model_instance/attrs',
'model/attrs'
)
]
def __init__(self):
self.errors = []
self.validator = ScidashClientDataValidator()
def convert(self, raw_data=None, strict=False):
"""convert
main method for converting
        :param raw_data: dict with data from sciunit
        :returns: dict
"""
if raw_data is None:
return raw_data
if not self.validator.validate_score(raw_data) and strict:
raise ScidashClientException('CLIENT -> INVALID DATA: '
'{}'.format(self.validator.get_errors()))
elif not self.validator.validate_score(raw_data):
logger.error('CLIENT -> INVALID DATA: '
'{}'.format(self.validator.get_errors()))
self.errors.append(self.validator.get_errors())
return None
result = copy.deepcopy(self.OUTPUT_SCHEME)
for item, address in self.KEYS_MAPPING:
dpath.util.set(result, item, dpath.util.get(raw_data, address))
for item, address in self.OPTIONAL_KEYS_MAPPING:
try:
dpath.util.set(result, item, dpath.util.get(raw_data, address))
except KeyError:
logger.info("Optional value {} is not found".format(item))
for capability in dpath.util.get(raw_data, 'model/capabilities'):
result.get('model_instance').get('model_class') \
.get('capabilities').append({
'class_name': capability
})
try:
for test_suite in dpath.util.get(raw_data, 'test/test_suites'):
result.get('test_instance').get('test_suites').append({
'name': test_suite.get('name'),
'hash': test_suite.get('hash')
})
except KeyError:
pass
model_instance_hash_id = '{}_{}'.format(
raw_data.get('model').get('hash'),
raw_data.get('model').get('_id')
)
test_instance_hash_id = '{}_{}'.format(
raw_data.get('test').get('hash'),
raw_data.get('test').get('_id')
)
score_instance_hash_id = '{}_{}'.format(
raw_data.get('hash'),
raw_data.get('_id')
)
sort_key = raw_data.get('norm_score') if not raw_data.get('sort_key',
False) else \
raw_data.get('sort_key')
run_params = raw_data.get('model').get('run_params', False)
if run_params:
for key in run_params:
run_params.update({
key: str(run_params.get(key))
})
result.get('model_instance').update({'hash_id':
model_instance_hash_id})
result.get('test_instance').update({'hash_id':
test_instance_hash_id})
result.update({'hash_id':
score_instance_hash_id})
result.update({
'sort_key': sort_key
})
if run_params:
result.get('model_instance').update({
'run_params': run_params
})
if type(result.get('score')) is bool:
result['score'] = float(result.get('score'))
return result
|
scidash-api
|
/scidash-api-1.3.0.tar.gz/scidash-api-1.3.0/scidash_api/mapper.py
|
mapper.py
|
| 0.460532 | 0.080755 |
from __future__ import unicode_literals, print_function
import json
import logging
from platform import platform, system
import requests
import six
from scidash_api import settings
from scidash_api.mapper import ScidashClientMapper
from scidash_api import exceptions
from scidash_api import helper
logger = logging.getLogger(__name__)
class ScidashClient(object):
"""Base client class for all actions with Scidash API"""
def __init__(self, config=None, build_info=None, hostname=None):
"""__init__
:param config:
:param build_info:
:param hostname:
"""
self.token = None
self.config = settings.CONFIG
self.data = {}
self.errors = []
if build_info is None:
self.build_info = "{}/{}".format(platform(), system())
else:
self.build_info = build_info
self.hostname = hostname
self.mapper = ScidashClientMapper()
if config is not None:
self.config.update(config)
self.test_config()
def test_config(self):
"""
        Check whether the configuration is valid
        :returns: void
        :raises: ScidashClientWrongConfigException
        """
        if self.config.get('base_url')[-1] == '/':
raise exceptions.ScidashClientWrongConfigException('Remove last '
'slash '
'from base_url')
def get_headers(self):
"""
        Shortcut for getting the headers used for uploading
"""
return {
'Authorization': 'JWT {}'.format(self.token)
}
def login(self, username, password):
"""
Getting API token from Scidash
:param username:
:param password:
"""
credentials = {
"username": username,
"password": password
}
auth_url = self.config.get('auth_url')
base_url = self.config.get('base_url')
r = requests.post('{}{}'.format(base_url, auth_url), data=credentials)
try:
self.token = r.json().get('token')
except Exception as e:
raise exceptions.ScidashClientException('Authentication'
' Failed: {}'.format(e))
if self.token is None:
raise exceptions.ScidashClientException('Authentication Failed: '
'{}'.format(r.json()))
return self
def set_data(self, data=None):
"""
Sets data for uploading
:param data:
:returns: self
"""
if isinstance(data, six.string_types):
data = json.loads(data)
elif not isinstance(data, dict):
data = json.loads(data.json(add_props=True, string=True))
self.data = self.mapper.convert(data)
if self.data is not None:
self.data.get('test_instance').update({
"build_info": self.build_info,
"hostname": self.hostname
})
else:
self.errors = self.errors + self.mapper.errors
return self
def upload_test_score(self, data=None):
"""
Main method for uploading
:returns: urllib3 requests object
"""
if data is not None:
self.set_data(data)
if self.data is None:
return False
files = {
'file': (self.config.get('file_name'), json.dumps(self.data))
}
headers = self.get_headers()
upload_url = \
self.config.get('upload_url') \
.format(filename=self.config.get('file_name'))
base_url = self.config.get('base_url')
r = requests.put('{}{}'.format(base_url, upload_url), headers=headers,
files=files)
if r.status_code == 400 or r.status_code == 500:
self.errors.append(r.text)
if r.status_code == 400:
logger.error('SERVER -> INVALID DATA: '
'{}'.format(self.errors))
if r.status_code == 500:
logger.error('SERVER -> SERVER ERROR: '
'{}'.format(self.errors))
return r
def upload_score(self, data=None):
helper.deprecated(method_name="upload_score()",
will_be_removed="2.0.0", replacement="upload_test_score()")
return self.upload_test_score(data)
def upload_suite_score(self, suite, score_matrix):
"""upload_suite
uploading score matrix with suite information
:param suite:
:param score_matrix:
:returns: urllib3 requests object list
"""
if isinstance(suite, six.string_types):
suite = json.loads(suite)
elif not isinstance(suite, dict):
suite = json.loads(suite.json(add_props=True, string=True))
if isinstance(score_matrix, six.string_types):
score_matrix = json.loads(score_matrix)
elif not isinstance(score_matrix, dict):
score_matrix = json.loads(score_matrix.json(add_props=True,
string=True))
hash_list = []
for test in suite.get('tests'):
hash_list.append(test.get('hash'))
responses = []
raw_score_list = score_matrix.get('scores')
flat_score_list = [score for score_list in raw_score_list for score in
score_list]
for score in flat_score_list:
if score.get('test').get('hash') in hash_list:
if 'test_suites' not in score.get('test'):
score.get('test').update({
'test_suites': []
})
score.get('test').get('test_suites').append(suite)
responses.append(self.upload_test_score(data=score))
return responses
def upload_suite(self, suite, score_matrix):
helper.deprecated(method_name="upload_suite()",
will_be_removed="2.0.0", replacement="upload_suite_score()")
return self.upload_suite_score(suite, score_matrix)
|
scidash-api
|
/scidash-api-1.3.0.tar.gz/scidash-api-1.3.0/scidash_api/client.py
|
client.py
|
| 0.615088 | 0.100879 |
import math
import numbers
from cerberus import Validator
from scidash_api.exceptions import ScidashClientValidatorException
class ValidatorExtended(Validator):
def _validate_isnan(self, isnan, field, value):
"""
        Check whether the value is NaN
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
if not isinstance(value, numbers.Number):
return
if not isnan and math.isnan(value):
self._error(field, "Value can't be NaN")
class ScidashClientDataValidator():
errors = None
# Validation schema for raw score
SCORE_SCHEMA = {
'_class': {
'type': 'dict',
'schema': {
'url': {
'type': 'string',
'required': True
},
'name': {
'type': 'string',
'required': True
}
}
},
'model': {
'type': 'dict',
'schema': {
'_class': {
'type': 'dict',
'schema': {
'name': {
'type': 'string'
},
'url': {
'type': 'string',
'required': True
}
}
},
'attrs': {
'type': 'dict',
'required': False
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'capabilities': {
'type': 'list',
'required': True,
'schema': {
'type': 'string'
}
},
'name': {
'type': 'string',
'required': True
},
'run_params': {
'type': 'dict',
'required': False
},
'url': {
'type': 'string',
'required': True
}
}
},
'observation': {
'type': 'dict',
'required': True
},
'prediction': {
'type': ['number', 'dict'],
'required': True,
'isnan': False
},
'raw': {
'type': 'string',
'required': True
},
'score': {
'type':['number', 'boolean'],
'isnan': False,
'required': True
},
'score_type': {
'type': 'string'
},
'sort_key': {
'type': 'number',
'isnan': False,
'required': False
},
'norm_score': {
'type': 'number',
'isnan': False,
'required': False
},
'summary': {
'type': 'string',
'required': True
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'test': {
'type': 'dict',
'schema': {
'_class': {
'type': 'dict',
'schema': {
'name': {
'type': 'string',
'required': True
},
'url': {
'type': 'string',
'required': True
}
},
'required': True
},
'description': {
'type': 'string',
'nullable': True,
'required': True
},
'hash': {
'type': 'string',
'required': True
},
'_id': {
'type': 'number',
'required': True
},
'name': {
'type': 'string',
'required': True
},
'observation': {
'type': 'dict',
'required': True
},
'verbose': {
'type': 'number',
'isnan': False,
'required': True
}
}
}
}
def validate_score(self, raw_data):
"""
        Checks whether the raw score data is valid and can be processed
        :param raw_data: raw data dictionary
:returns: boolean
"""
validator = ValidatorExtended(self.SCORE_SCHEMA)
validator.allow_unknown = True
valid = validator.validate(raw_data)
if not valid:
self.errors = validator.errors
if not raw_data.get('sort_key', False):
if not raw_data.get('norm_score', False):
raise ScidashClientValidatorException("sort_key or norm_score"
"not found")
return valid
def get_errors(self):
"""
Returns errors from last validation procedure, if any
"""
return self.errors
def validate_suite(self, raw_data):
raise NotImplementedError("Not implemented yet")
|
scidash-api
|
/scidash-api-1.3.0.tar.gz/scidash-api-1.3.0/scidash_api/validator.py
|
validator.py
|
| 0.564098 | 0.182881 |
# Sci-dat: Download Annotate TCGA
[](https://codecov.io/github/ArianeMora/scidat?branch=master)
[](https://pypi.org/project/scidat/)
A package developed to enable the download and annotation of TCGA data from `https://portal.gdc.cancer.gov/`
## Docs
https://arianemora.github.io/scidat/
## Install
```
pip install scidat
```
## Use
### API
The API combines the functions in Download and Annotation. It removes some of the ability to set specific directories etc but makes it easier to perform the functions.
See example notebook for how we get the following from the TCGA site:
```
1. manifest_file
2. gdc_client
3. clinical_file
4. sample_file
```
```
api = API(manifest_file, gdc_client, clinical_file, sample_file, requires_lst=None, clin_cols=None,
max_cnt=100, sciutil=None, split_manifest_dir='.', download_dir='.', meta_dir='.', sep='_')
```
Step 1. Download manifest data
```
# Downloads every file using default parameters in the manifest file
api.download_data_from_manifest()
# This will also unzip and copy the files all into one directory
```
Step 2. Annotation
```
# Builds the annotation information
api.build_annotation()
```
Step 3. Download mutation data
```
# Downloads all the mutation data for all the cases in the clinical_file
api.download_mutation_data()
```
Step 4. Generate RNAseq dataframe
```
# Generates the RNA dataframe from the downloaded folder
api.build_rna_df()
```
Step 5. Get cases that have any mutations or specific mutations
```
# Returns a list of cases that have mutations (either in any gene if gene_list = None or in specific genes)
list_of_cases = api.get_cases_with_mutations(gene_list=None, id_type='symbol')
# Get genes with a small deletion
filter_col = 'ssm.consequence.0.transcript.gene.symbol'
genes = api.get_mutation_values_on_filter(filter_col, ['Small deletion'], 'ssm.mutation_subtype')
# Get genes with a specific genomic change: ssm.genomic_dna_change
filter_col = 'case_id'
cases = api.get_mutation_values_on_filter(filter_col, ['chr13:g.45340134A>G'], 'ssm.genomic_dna_change')
```
Step 6. Get cases with specific metadata information
Metadata list:
```
submitter_id
project_id
age_at_index
gender
race
vital_status
tumor_stage
normal_samples
tumor_samples
case_files
tumor_stage_num
example: {'gender': ['female'], 'tumor_stage_num': [1, 2]}
```
The method can be `any`, i.e. a case satisfies any of the conditions, or `all`, i.e. a case has to satisfy all of the conditions in the meta_dict.
```
# Returns cases that have the chosen metadata information e.g. gender, race, tumour_stage_num
cases_list = api.get_cases_with_meta(meta: dict, method="all")
```
Step 7. Get genes with mutations
```
# Returns a list of genes with mutations for specific cases
list_of_genes = api.get_genes_with_mutations(case_ids=None, id_type='symbol')
```
Step 8. Get values from the dataframe
```
# Returns the values, columns, dataframe of a subset of the RNAseq dataframe
values, columns, dataframe = get_values_from_df(df: pd.DataFrame, gene_id_column: str, case_ids=None, gene_ids=None,
column_name_includes=None, column_name_method="all")
```
### Download
```
# Downloads data using a manifest file
download = Download(manifest_file, split_manifest_dir, download_dir, gdc_client, max_cnt=100)
download.download()
```
```
# Downloads data from API to complement data from manifest file
# example datatype = mutation (this is the only one implemented for now)
download.download_data_using_api(case_ids: list, data_type: str)
```
### Annotate
**Generate annotation using clinical information from TCGA**
```
annotator = Annotate(output_dir: str, clinical_file: str, sample_file: str, manifest_file: str, file_types: list,
sep='_', clin_cols=None)
# Generate the annotate dataframe
annotator.build_annotation()
# Save the dataframe to a csv file
annotator.save_annotation(output_directory: str, filename: str)
# Save the clinical information to a csv file
annotator.save_annotated_clinical_df(output_directory: str, filename: str)
```
**Download mutation data for the cases of interest**
Note we first need to download the data using the `download_data_using_api` from above.
```
annotator.build_mutation_df(mutation_dir)
# Get that dataframe
mutation_df = annotator.get_mutation_df()
# Save the mutation dataframe to a csv
annotator.save_mutation_df(output_directory: str, filename: str)
```
|
scidat
|
/scidat-1.0.6.tar.gz/scidat-1.0.6/README.md
|
README.md
|
| 0.740456 | 0.971402 |
SciDB-Bridge: Python Library to access externally stored SciDB data
===================================================================
.. image:: https://img.shields.io/badge/SciDB-22.5-blue.svg
:target: https://paradigm4.atlassian.net/wiki/spaces/scidb/pages/2828833854/22.5+Release+Notes
.. image:: https://img.shields.io/badge/arrow-11.0.0-blue.svg
:target: https://arrow.apache.org/release/11.0.0.html
Requirements
------------
- Python ``3.5.x``, ``3.6.x``, ``3.7.x``, ``3.8.x``, ``3.9.x``, or ``3.10.x``
- SciDB ``19.11`` or newer
- SciDB-Py ``19.11.4`` or newer
- Apache PyArrow ``5.0.0`` up to ``11.0.0``
- Boto3 ``1.14.12`` for Amazon Simple Storage Service (S3) support
Installation
------------
Install latest release::
pip install scidb-bridge
Install development version from GitHub::
pip install git+http://github.com/paradigm4/bridge.git#subdirectory=py_pkg
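A minimal sketch of inspecting a single chunk written by the bridge, using
only PyArrow; it assumes the default ``arrow`` chunk format with ``lz4``
compression, and the chunk path is a placeholder rather than a real array.
.. code:: python
    import pyarrow
    # Open one lz4-compressed Arrow IPC stream written by the bridge.
    with pyarrow.input_stream('/path/to/array/chunks/c_0_0',
                              compression='lz4') as strm:
        table = pyarrow.RecordBatchStreamReader(strm).read_all()
    print(table.schema)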
Contributing
------------
Check code style before committing code
.. code:: bash
pip install pycodestyle
pycodestyle py_pkg
For Visual Studio Code see `Linting Python in Visual Studio Code <https://code.visualstudio.com/docs/python/linting>`_
|
scidb-bridge
|
/scidb-bridge-19.11.6.tar.gz/scidb-bridge-19.11.6/README.rst
|
README.rst
|
| 0.79542 | 0.340842 |
import boto3
import os
import pyarrow
import pyarrow.parquet
import urllib.parse
import shutil
class Driver:
default_format = 'arrow'
default_compression = 'lz4'
index_format = 'arrow'
index_compression = 'lz4'
_s3_client = None
_s3_resource = None
@staticmethod
def s3_client():
if Driver._s3_client is None:
Driver._s3_client = boto3.client('s3')
return Driver._s3_client
@staticmethod
def s3_resource():
if Driver._s3_resource is None:
Driver._s3_resource = boto3.resource('s3')
return Driver._s3_resource
@staticmethod
def list(url):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:] + '/'
pages = Driver.s3_client().get_paginator(
'list_objects_v2').paginate(Bucket=bucket, Prefix=key)
for page in pages:
if 'Contents' in page.keys():
for obj in page['Contents']:
yield 's3://{}/{}'.format(bucket, obj['Key'])
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
for fn in os.listdir(path):
if os.path.isfile(os.path.join(path, fn)):
yield 'file://' + os.path.join(path, fn)
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def init_array(url):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
pass
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
os.makedirs(path, exist_ok=True)
os.mkdir(os.path.join(path, 'index'))
os.mkdir(os.path.join(path, 'chunks'))
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def read_metadata(url):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:] + '/metadata'
obj = Driver.s3_client().get_object(Bucket=bucket, Key=key)
return obj['Body'].read().decode('utf-8')
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path, 'metadata')
return open(path).read()
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def write_metadata(url, metadata):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:] + '/metadata'
Driver.s3_client().put_object(Body=metadata,
Bucket=bucket,
Key=key)
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path, 'metadata')
with open(path, 'w') as f:
f.write(metadata)
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def read_table(url,
format=default_format,
compression=default_compression):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:]
obj = Driver.s3_client().get_object(Bucket=bucket, Key=key)
buf = pyarrow.py_buffer(obj['Body'].read())
if format == 'arrow':
strm = pyarrow.input_stream(buf,
compression=compression)
return pyarrow.RecordBatchStreamReader(strm).read_all()
elif format == 'parquet':
return pyarrow.parquet.read_table(buf)
else:
raise Exception('Format {} not supported'.format(format))
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
if format == 'arrow':
strm = pyarrow.input_stream(path, compression=compression)
return pyarrow.RecordBatchStreamReader(strm).read_all()
            elif format == 'parquet':
return pyarrow.parquet.read_table(path)
else:
raise Exception('Format {} not supported'.format(format))
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def write_table(table,
url,
schema,
format=default_format,
compression=default_compression):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:]
buf = pyarrow.BufferOutputStream()
if format == 'arrow':
stream = pyarrow.output_stream(buf, compression=compression)
writer = pyarrow.RecordBatchStreamWriter(stream, schema)
writer.write_table(table)
writer.close()
stream.close()
elif format == 'parquet':
pyarrow.parquet.write_table(
table, buf, compression=compression)
else:
raise Exception('Format {} not supported'.format(format))
Driver.s3_client().put_object(Body=buf.getvalue().to_pybytes(),
Bucket=bucket,
Key=key)
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
if format == 'arrow':
stream = pyarrow.output_stream(path, compression=compression)
writer = pyarrow.RecordBatchStreamWriter(stream, schema)
writer.write_table(table)
writer.close()
stream.close()
elif format == 'parquet':
pyarrow.parquet.write_table(
table, path, compression=compression)
else:
raise Exception('Format {} not supported'.format(format))
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def delete_all(url):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:]
Driver.s3_resource().Bucket(
bucket).objects.filter(Prefix=key).delete()
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
for fn in os.listdir(path):
tfn = os.path.join(path, fn)
if os.path.isdir(tfn):
shutil.rmtree(tfn)
else:
os.unlink(tfn)
else:
raise Exception('URL {} not supported'.format(url))
@staticmethod
def delete(url):
parts = urllib.parse.urlparse(url)
# S3
if parts.scheme == 's3':
bucket = parts.netloc
key = parts.path[1:]
Driver.s3_client().delete_object(Bucket=bucket, Key=key)
# File System
elif parts.scheme == 'file':
path = os.path.join(parts.netloc, parts.path)
os.unlink(path)
else:
raise Exception('URL {} not supported'.format(url))
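A minimal, self-contained sketch of using the ``Driver`` class above with a
local ``file://`` URL (the path is made up; ``arrow`` format and ``lz4``
compression are the class defaults):
import pandas
import pyarrow
from scidbbridge.driver import Driver
url = 'file:///tmp/driver_example'
Driver.init_array(url)                      # creates the index/ and chunks/ directories
Driver.write_metadata(url, 'version\t1\n')  # metadata is stored as a tab-separated string
table = pyarrow.Table.from_pandas(
    pandas.DataFrame({'x': [0, 1, 2], 'v': [1.5, 2.5, 3.5]}),
    preserve_index=False)
Driver.write_table(table, url + '/chunks/c_0', table.schema)
print(Driver.read_metadata(url))
print(Driver.read_table(url + '/chunks/c_0').to_pandas())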
scidb-bridge | /scidb-bridge-19.11.6.tar.gz/scidb-bridge-19.11.6/scidbbridge/driver.py
import boto3
import itertools
import os
import os.path
import pandas
import pyarrow
import scidbpy
from .driver import Driver
from .coord import coord2delta, delta2coord
__version__ = '19.11.6'
type_map_pyarrow = dict(
[(t.__str__(), t) for t in (pyarrow.binary(),
pyarrow.bool_(),
pyarrow.int16(),
pyarrow.int32(),
pyarrow.int64(),
pyarrow.int8(),
pyarrow.string(),
pyarrow.uint16(),
pyarrow.uint32(),
pyarrow.uint64(),
pyarrow.uint8())] +
[('char', pyarrow.string()),
('datetime', pyarrow.timestamp('s')),
('double', pyarrow.float64()),
('float', pyarrow.float32())])
class Array(object):
"""Wrapper for SciDB array stored externally
Constructor parameters:
:param string url: URL of the SciDB array. Supported schemas are
``s3://`` and ``file://``.
:param string schema: SciDB array schema for creating a new
array. Can be specified as ``string`` or ``scidbpy.Schema``
"""
def __init__(self,
url,
schema=None,
format='arrow',
compression='lz4',
namespace='public',
index_split=100000):
self.url = url
if schema is None:
self._metadata = None
self._schema = None
else: # Create new array
if type(schema) is scidbpy.Schema:
self._schema = schema
else:
self._schema = scidbpy.Schema.fromstring(schema)
self._metadata = {
'attribute': 'ALL',
'format': format,
'version': '1',
'schema': self._schema.__str__(),
'compression': None if compression == 'none' else compression,
'index_split': index_split,
'namespace': namespace
}
Driver.init_array(url)
Driver.write_metadata(
url,
Array.metadata_to_string(self._metadata.copy()))
def __iter__(self):
return (i for i in (self.url, ))
    def __eq__(self, other):
        return tuple(self) == tuple(other)
def __repr__(self):
return ('{}(url={!r})').format(type(self).__name__, *self)
def __str__(self):
return self.url
@property
def metadata(self):
if self._metadata is None:
self._metadata = Array.metadata_from_string(
Driver.read_metadata(self.url))
return self._metadata
@property
def schema(self):
if self._schema is None:
self._schema = scidbpy.Schema.fromstring(
self.metadata['schema'])
return self._schema
def delete(self):
        # Delete the metadata file first; deleting large arrays could take some time
Driver.delete('{}/metadata'.format(self.url))
Driver.delete_all(self.url)
def read_index(self):
# Read index as Arrow Table
tables = []
for index_url in Driver.list('{}/index'.format(self.url)):
tables.append(
Driver.read_table(index_url,
Driver.index_format,
Driver.index_compression))
if len(tables):
table = pyarrow.concat_tables(tables)
# Convert Arrow Table index to Pandas DataFrame
index = table.to_pandas(split_blocks=True, self_destruct=True)
# https://arrow.apache.org/docs/python/pandas.html#reducing-
# memory-use-i
del table
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
return index
return pandas.DataFrame()
def build_index(self):
dims = self.schema.dims
index = pandas.DataFrame.from_records(
map(lambda x: Array.url_to_coords(x, dims),
Driver.list('{}/chunks'.format(self.url))),
columns=[d.name for d in dims])
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
return index
def write_index(self, index, split_size=None):
# Check for a DataFrame
if not isinstance(index, pandas.DataFrame):
raise Exception("Value provided as argument " +
"is not a Pandas DataFrame")
        # Check that index columns match array dimensions
dim_names = [d.name for d in self.schema.dims]
if len(index.columns) != len(dim_names):
raise Exception(
("Index columns count {} does not match " +
"array dimensions count {}").format(len(index.columns),
len(dim_names)))
if not (index.columns == dim_names).all():
raise Exception(
("Index columns {} does not match " +
"array dimensions {}").format(index.columns, dim_names))
# Check for coordinates outside chunk boundaries
for dim in self.schema.dims:
vals = index[dim.name]
if any(vals < dim.low_value):
raise Exception("Index values smaller than " +
"lower bound on dimension " + dim.name)
if dim.high_value != '*' and any(vals > dim.high_value):
raise Exception("Index values bigger than " +
"upper bound on dimension " + dim.name)
if (dim.chunk_length != '*'
and any((vals - dim.low_value) % dim.chunk_length != 0)):
raise Exception("Index values misaligned " +
"with chunk size on dimension " + dim.name)
# Check for duplicates
if index.duplicated().any():
raise Exception("Duplicate entries")
index.sort_values(by=list(index.columns),
inplace=True,
ignore_index=True)
if split_size is None:
split_size = int(self.metadata['index_split'])
index_schema = pyarrow.schema(
[(d.name, pyarrow.int64(), False) for d in self.schema.dims])
chunk_size = split_size // len(index.columns)
# Remove existing index
Driver.delete_all('{}/index'.format(self.url))
# Write new index
i = 0
for offset in range(0, len(index), chunk_size):
table = pyarrow.Table.from_pandas(
index.iloc[offset:offset + chunk_size], index_schema)
Driver.write_table(table,
'{}/index/{}'.format(self.url, i),
index_schema,
Driver.index_format,
Driver.index_compression)
i += 1
def get_chunk(self, *argv):
return Chunk(self, *argv)
@staticmethod
def metadata_from_string(input):
res = dict(ln.split('\t') for ln in input.strip().split('\n'))
try:
if res['compression'] == 'none':
res['compression'] = None
except KeyError:
pass
return res
@staticmethod
def metadata_to_string(input):
if input['compression'] is None:
input['compression'] = 'none'
return '\n'.join('{}\t{}'.format(k, v)
for (k, v) in input.items()) + '\n'
@staticmethod
def coords_to_url_suffix(coords, dims):
parts = ['c']
for (coord, dim) in zip(coords, dims):
if (coord < dim.low_value or
dim.high_value != '*' and coord > dim.high_value):
raise Exception(
('Coordinate value, {}, is outside of dimension range, '
'[{}:{}]').format(
coord, dim.low_value, dim.high_value))
part = coord - dim.low_value
if part % dim.chunk_length != 0:
raise Exception(
('Coordinate value, {}, is not a multiple of ' +
'chunk size, {}').format(
coord, dim.chunk_length))
part = part // dim.chunk_length
parts.append(part)
return '_'.join(map(str, parts))
@staticmethod
def url_to_coords(url, dims):
part = url[url.rindex('/') + 1:]
return tuple(
map(lambda x: int(x[0]) * x[1].chunk_length + x[1].low_value,
zip(part.split('_')[1:], dims)))
class Chunk(object):
"""Wrapper for SciDB array chunk stored externally"""
    def __init__(self, array, *argv):
        self.array = array
        if (len(argv) == 1 and
                type(argv[0]) is pandas.core.series.Series):
            argv = tuple(argv[0])
        # Store the coordinates only after a possible Series argument has
        # been expanded, so self.coords is always a plain tuple
        self.coords = argv
        dims = self.array.schema.dims
if len(argv) != len(dims):
raise Exception(
('Number of arguments, {}, does not match the number of ' +
                 'dimensions, {}. Please specify one start coordinate for ' +
'each dimension.').format(len(argv),
len(self.array.schema.dims)))
part = Array.coords_to_url_suffix(self.coords, dims)
self.url = '{}/chunks/{}'.format(self.array.url, part)
self._table = None
def __iter__(self):
return (i for i in (self.array, self.url))
def __eq__(self, other):
return tuple(self) == tuple(other)
def __repr__(self):
return ('{}(array={!r}, url={!r})').format(
type(self).__name__, *self)
def __str__(self):
return self.url
@property
def table(self):
if self._table is None:
self._table = Driver.read_table(
self.url,
format=self.array.metadata['format'],
compression=self.array.metadata['compression'])
return self._table
def to_pandas(self):
return delta2coord(
self.table.to_pandas(), self.array.schema, self.coords)
def from_pandas(self, pd):
# Check for a DataFrame
if not isinstance(pd, pandas.DataFrame):
raise Exception("Value provided as argument " +
"is not a Pandas DataFrame")
# Check for empty DataFrame
if pd.empty:
raise Exception("Pandas DataFrame is empty. " +
"Nothing to do.")
# Check that columns match array schema
dims = [d.name for d in self.array.schema.dims]
columns = [a.name for a in self.array.schema.atts] + dims
if len(pd.columns) != len(columns):
raise Exception(
("Argument columns count {} do not match " +
"array attributes plus dimensions count {}").format(
len(pd.columns), len(columns)))
if sorted(list(pd.columns)) != sorted(columns):
raise Exception(
("Argument columns {} does not match " +
"array schema {}").format(pd.columns, columns))
# Use schema order
pd = pd[columns]
# Sort by dimensions
pd = pd.sort_values(by=dims, ignore_index=True)
# Check for duplicates
if pd.duplicated(subset=dims).any():
raise Exception("Duplicate coordinates")
# Check for coordinates outside chunk boundaries
for (coord, dim) in zip(self.coords, self.array.schema.dims):
vals = pd[dim.name]
if (vals.iloc[0] < coord or
vals.iloc[-1] >= coord + dim.chunk_length):
raise Exception("Coordinates outside chunk boundaries")
# Build schema
schema = pyarrow.schema(
[(a.name, type_map_pyarrow[a.type_name], not a.not_null)
for a in self.array.schema.atts] +
[('@delta', pyarrow.int64(), False)])
pd['@delta'] = coord2delta(pd, self.array.schema.dims, self.coords)
self._table = pyarrow.Table.from_pandas(pd, schema)
self._table = self._table.replace_schema_metadata()
def save(self):
Driver.write_table(self._table,
self.url,
self._table.schema,
self.array.metadata['format'],
self.array.metadata['compression'])
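An illustrative sketch of creating a new externally stored array with the
``Array`` and ``Chunk`` classes above (the ``file://`` URL and the schema
string are made up; the schema uses the ``low:high:overlap:chunk`` dimension
syntax accepted by ``scidbpy.Schema.fromstring``):
import pandas
import scidbbridge
# Create the array and write its metadata file
array = scidbbridge.Array(
    'file:///tmp/bridge_new_array',
    '<v:double> [i=0:99:0:10; j=0:99:0:10]')
# Fill the chunk whose lower-left corner is at coordinates (0, 0)
chunk = array.get_chunk(0, 0)
chunk.from_pandas(pandas.DataFrame({'i': [0, 1, 2],
                                    'j': [0, 0, 0],
                                    'v': [1.5, 2.5, 3.5]}))
chunk.save()
# Rebuild and persist the chunk index
array.write_index(array.build_index())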
scidb-bridge | /scidb-bridge-19.11.6.tar.gz/scidb-bridge-19.11.6/scidbbridge/__init__.py
SciDB-Py: Python Interface to SciDB
===================================
.. image:: https://img.shields.io/badge/SciDB-22.5-blue.svg
:target: https://paradigm4.atlassian.net/wiki/spaces/scidb/pages/2828833854/22.5+Release+Notes
.. image:: https://img.shields.io/badge/arrow-11.0.0-blue.svg
:target: https://arrow.apache.org/release/11.0.0.html
.. image:: https://github.com/Paradigm4/SciDB-Py/actions/workflows/test-ee.yml/badge.svg
:target: https://github.com/Paradigm4/SciDB-Py/actions/workflows/test-ee.yml
Version Information
-------------------
The major and minor version numbers of SciDB-Py track the major and
minor version of SciDB they are compatible with. For example SciDB-Py
``16.9.1``, ``16.9.2`` or ``16.9.10`` are all compatible with SciDB
``16.9.x``.
During SciDB ``16.9``, Shim (HTTP service for SciDB) transitioned from
query authentication to session authentication. SciDB-Py has been
updated to be compatible with the new Shim. Below is the compatibility
matrix between SciDB-Py and Shim:
=========== =================================
SciDB-Py    Shim
=========== =================================
``16.9.1``  query authentication (old Shim)
``16.9.2``  query authentication (old Shim)
``16.9.10`` session authentication (new Shim)
=========== =================================
From ``16.9.10`` onwards only Shim with session authentication is
supported.
Since SciDB-Py Release **16.9.1** (released in `September 2017`) the
library has been rewritten entirely from scratch. **16.9.1** and newer
versions are **not compatible** with the previous versions of the
library. The documentation for the previous versions is available at
`SciDB-Py documentation (legacy)
<http://scidb-py.readthedocs.io/en/v16.9-legacy/>`_. GitHub pull
requests are still accepted for the previous versions, but the code is
not actively maintained.
Requirements
------------
SciDB ``19.11`` or newer with Shim
Python ``3.5.x``, ``3.6.x``, ``3.7.x``, ``3.8.x``, ``3.9.x``, or ``3.10.x``
Required Python packages::
backports.weakref
enum34
numpy
pandas (see version requirements in setup.py)
pyarrow (see version requirements in setup.py)
requests
six
CentOS 6 and Red Hat Enterprise Linux 6
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
CentOS ``6`` and Red Hat Enterprise Linux ``6`` come with Python
``2.6``. SciDB-Py requires Python ``2.7`` or newer (see above). The
default Python cannot be upgraded on these operating systems. Instead
a different Python version can be installed in parallel using
`Software Collections <https://www.softwarecollections.org/en/>`_. For
example, `here
<https://www.softwarecollections.org/en/scls/rhscl/python27/>`_ are
the instructions to install Python ``2.7`` using Software Collections.
Installation
------------
Install latest release::
pip install scidb-py
Install development version from GitHub::
pip install git+http://github.com/paradigm4/scidb-py.git
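Quick Start
-----------
A minimal, illustrative connection example (the Shim URL is made up; see the
documentation below for the full API)::
    from scidbpy import connect
    db = connect('http://localhost:8080')
    # Run an AFL query and fetch the result as a Pandas DataFrame
    print(db.iquery("list('arrays')", fetch=True))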
Documentation
-------------
See `SciDB-Py Documentation <http://paradigm4.github.io/SciDB-Py/>`_.
scidb-py | /scidb-py-19.11.6.tar.gz/scidb-py-19.11.6/README.rst
SciDB-Strm: Python Library for SciDB Streaming
==============================================
.. image:: https://img.shields.io/badge/SciDB-22.5-blue.svg
:target: https://paradigm4.atlassian.net/wiki/spaces/scidb/pages/2828833854/22.5+Release+Notes
.. image:: https://img.shields.io/badge/arrow-11.0.0-blue.svg
:target: https://arrow.apache.org/release/11.0.0.html
.. image:: https://github.com/Paradigm4/stream/actions/workflows/test.yml/badge.svg
:target: https://github.com/Paradigm4/stream/actions/workflows/test.yml
Requirements
------------
SciDB ``19.11`` or newer.
Apache Arrow ``5.0.0`` to ``11.0.0``.
Python ``3.6.x``, ``3.7.x``, ``3.8.x``, ``3.9.x``, or ``3.10.x``
Required Python packages::
dill
pandas
pyarrow
Installation
------------
Install latest release::
pip install scidb-strm
Install development version from GitHub::
pip install git+http://github.com/paradigm4/stream.git#subdirectory=py_pkg
The Python library needs to be installed on the SciDB server. The
library needs to be installed on the client as well, if Python code is
to be sent from the client to the server.
SciDB-Strm Python API and Examples
----------------------------------
Once installed the *SciDB-Strm* Python library can be imported with
``import scidbstrm``. The library provides a high and low-level access
to the SciDB ``stream`` operator as well as the ability to send Python
code to the SciDB server.
High-level access is provided by the function ``map``:
``map(map_fun, finalize_fun=None)``
Read SciDB chunks. For each chunk, call ``map_fun`` and stream its
result back to SciDB. If ``finalize_fun`` is provided, call it after
all the chunks have been processed.
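For example, a worker script passed to the ``stream`` operator could look
like this (the column name ``count`` is made up for illustration)::
    import scidbstrm
    def double_count(df):
        # df holds one SciDB chunk as a Pandas DataFrame
        df['count'] = df['count'] * 2
        return df
    scidbstrm.map(double_count)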
See `0-iquery.txt <examples/0-iquery.txt>`_ for a succinct example
using the ``map`` function.
See `1-map-finalize.py <examples/1-map-finalize.py>`_ for an example
using the ``map`` function. The Python script has to be copied onto
the SciDB instance.
Python code can be sent to the SciDB server for execution using
the ``pack_func`` and ``read_func`` functions:
``pack_func(func)``
Serialize Python function for use as ``upload_data`` in ``input`` or
``load`` operators.
``read_func()``
Read and de-serialize function from SciDB.
See `2-pack-func.py <examples/2-pack-func.py>`_ for an example of
using the ``pack_func`` and ``read_func`` functions.
Low-level access is provided by the ``read`` and ``write`` functions:
``read()``
Read a data chunk from SciDB. Returns a Pandas DataFrame or None.
``write(df=None)``
Write a data chunk to SciDB.
See `3-read-write.py <examples/3-read-write.py>`_ for an example using
the ``read`` and ``write`` functions. The Python script has to be
copied onto the SciDB instance.
A convenience invocation of the Python interpreter is provided in
``python_map`` variable and it is set to::
python -uc "import scidbstrm; scidbstrm.map(scidbstrm.read_func())"
Finally, see `4-machine-learning.py <examples/4-machine-learning.py>`_
for a more complex example of going through the steps of using
machine learning (preprocessing, training, and prediction).
Debugging Python Code
---------------------
When debugging Python code executed as part of the ``stream`` operator
*do not* use the ``print`` function. The ``stream`` operator
communicates with the Python process using ``stdout``. The ``print``
function writes output to ``stdout``. So, using the ``print`` function
would interfere with the inter-process communication.
Instead, use the ``debug`` function provided by the library. The
function formats the arguments as strings and prints them out
separated by spaces. For example::
debug("Value of i is", 10)
Alternatively, output can be written directly to ``stderr`` using the
``write`` function. For example::
import sys
x = [1, 2, 3]
sys.stderr.write("{}\n".format(x))
The output is written in the ``scidb-stderr.log`` files of each
instance, for example::
/opt/scidb/18.1/DB-scidb/0/0/scidb-stderr.log
/opt/scidb/18.1/DB-scidb/0/1/scidb-stderr.log
These paths correspond to SciDB ``18.1`` installed in the default
location and configured with one server and two instances.
ImportError: No module named
----------------------------
When trying to de-serialize a Python function uploaded to SciDB using
``pack_func``, one might encounter::
ImportError: No module named ...
This error is because ``dill``, the Python serialization library,
links the function to the module in which it is defined. This can be
resolved in two ways:
1. Make the named module available on all the SciDB instances
2. If the module is small, the recursive ``dill`` mode can be
used. Replace::
foo_pack = scidbstrm.pack_func(foo)
with::
foo_pack = numpy.array([dill.dumps(foo, 0, recurse=True)])
scidb-strm | /scidb-strm-19.11.4.tar.gz/scidb-strm-19.11.4/README.rst
import dill
import struct
import sys
import pyarrow
# Workaround for NumPy bug #10338
# https://github.com/numpy/numpy/issues/10338
try:
import numpy
except KeyError:
import os
os.environ.setdefault('PATH', '')
import numpy
__version__ = '19.11.4'
python_map = ("'" +
'python{major}.{minor} -uc '.format(
major=sys.version_info.major,
minor=sys.version_info.minor) +
'"import scidbstrm; scidbstrm.map(scidbstrm.read_func())"' +
"'")
# Python 2 and 3 compatibility fix for reading/writing binary data
# to/from STDIN/STDOUT
if hasattr(sys.stdout, 'buffer'):
# Python 3
stdin = sys.stdin.buffer
stdout = sys.stdout.buffer
else:
# Python 2
stdin = sys.stdin
stdout = sys.stdout
def read():
"""Read a data chunk from SciDB. Returns a Pandas DataFrame or None.
"""
sz = struct.unpack('<Q', stdin.read(8))[0]
if sz:
stream = pyarrow.ipc.open_stream(stdin)
df = stream.read_pandas()
return df
else: # Last Chunk
return None
def write(df=None):
"""Write a data chunk to SciDB.
"""
if df is None:
stdout.write(struct.pack('<Q', 0))
return
buf = pyarrow.BufferOutputStream()
table = pyarrow.Table.from_pandas(df)
table = table.replace_schema_metadata() # Remove metadata
writer = pyarrow.RecordBatchStreamWriter(buf, table.schema)
writer.write_table(table)
writer.close()
byt = buf.getvalue().to_pybytes()
sz = len(byt)
stdout.write(struct.pack('<Q', sz))
stdout.write(byt)
def pack_func(func):
"""Serialize function to upload to SciDB. The result can be used as
`upload_data` in `input` or `load` operators.
"""
return numpy.array(
[dill.dumps(func, 0)] # Serialize streaming function
)
def read_func():
"""Read and de-serialize function from SciDB.
"""
func = dill.loads(read().iloc[0, 0])
write() # SciDB expects a message back
return func
def map(map_fun, finalize_fun=None):
"""Read SciDB chunks. For each chunk, call `map_fun` and stream its
result back to SciDB. If `finalize_fun` is provided, call it after
all the chunks have been processed.
"""
while True:
# Read DataFrame
df = read()
if df is None:
# End of stream
break
# Write DataFrame
write(map_fun(df))
# Write final DataFrame (if any)
if finalize_fun is None:
write()
else:
write(finalize_fun())
def debug(*args):
"""Print debug message to scidb-stderr.log file"""
sys.stderr.write(' '.join('{}'.format(i) for i in args) + '\n')
sys.stderr.flush()
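As an illustration, a complete worker script built on the functions above can
be as small as the sketch below (it is meant to be launched by the SciDB
``stream`` operator rather than run directly; the ``rows`` column name is
made up):
import pandas
import scidbstrm
def per_chunk_count(df):
    # Emit one row per input chunk with the number of records it contained
    return pandas.DataFrame({'rows': [len(df)]})
scidbstrm.map(per_chunk_count)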
scidb-strm | /scidb-strm-19.11.4.tar.gz/scidb-strm-19.11.4/scidbstrm/__init__.py
scidb4py — SciDB for Python
===========================
Pure python SciDB client library.
This library aims to provide access to a SciDB server through the native network protocol based on protobuf. It is still in
the early stages of development, so do not expect complete feature support or stable operation :)
Any feedback and patches are welcome: https://github.com/artyom-smirnov/scidb4py/
Runtime dependencies
--------------------
* python >= 2.7 or pypy >= 1.8 (python 3 not supported yet)
* python-protobuf >= 2.4
* bitstring
Build dependencies
------------------
* protobuf-compiler >= 2.4
Installation
------------
::
sudo pip install scidb4py
or
::
sudo python setup.py install
Examples
--------
Iterating through array item-by-item
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
from scidb4py import Connection
conn = Connection('localhost', 1239)
conn.open()
array = conn.execute("select * from array(<a:int32>[x=0:3,2,0], '[0,1,2,3]')")
for pos, val in array:
print '%d - %d' % (pos['x'], val['a'])
conn.close()
Iterating through array chunk-by-chunk, item-by-item
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
from scidb4py import Connection
conn = Connection('localhost', 1239)
conn.open()
array = conn.execute("select * from array(<a:int32 null>[x=0:2,3,0, y=0:2,3,0], '[[1,2,3][4,5,6][7,8,9]]')")
while not array.end:
while not array.chunk_end:
print '%s - %s' % (array.get_coordinates(), array.get_item("a"))
array.next_item()
array.next_chunk()
conn.close()
scidb4py | /scidb4py-0.0.6.tar.gz/scidb4py-0.0.6/README.rst
import sys
import os
from loguru import logger
import click
import asyncio
from asyncio.subprocess import PIPE, STDOUT
from typing import List
def setup_logger():
logger.remove()
logger.add(
sys.stdout,
enqueue=True,
level="DEBUG",
format="<green>{time:HH:mm:ss zz}</green> | <cyan>{process}</cyan> | <level>{message}</level>",
)
async def _command_loop(cmd: List[str], cwd: str = None, timeout: int = 600) -> int:
if not cwd:
cwd = os.getcwd()
# start child process
try:
assert (
cmd is not None and len(cmd) > 0
), "Trying to run a command loop without commands"
process = None
returncode = -1
process = await asyncio.create_subprocess_exec(
cmd[0], *cmd[1:], cwd=cwd, stdout=PIPE, stderr=STDOUT
)
assert process is not None, "Something went wrong creating the process"
logger.info(f"Process for {cmd} started pid:{process.pid}")
        while True:  # type: ignore
            try:
                # Use wait_for so a stalled process actually triggers the timeout
                line = await asyncio.wait_for(
                    process.stdout.readline(), timeout)  # type: ignore
            except asyncio.TimeoutError:
                logger.error(
                    f"Process has timed out while reading line, timeout set to {timeout}"
                )
                raise
if not line:
logger.info("Output for process has finished")
break
try:
line = line.decode()
except AttributeError:
# Probably already a string
pass
logger.info(line.strip())
logger.info("Waiting for process to finish")
await process.wait()
returncode = process.returncode
logger.info(f"Process finish with return code {returncode}")
except Exception as exc:
error_str = f"Something went wrong with running the command {cmd}, killing the process. Error: {exc}"
logger.error(error_str)
if process:
process.kill()
return returncode
def run_command_with_output(cmd: List[str], cwd: str = None, timeout: int = 600) -> int:
"""Runs a command line tool printing the stdout as it runs, NOTE: This requires python 3.8
Arguments:
cmd {List[str]} -- [List of arguments to run on the commandline]
Keyword Arguments:
cwd {str} -- [Current working directory]
timeout {int} -- [Max time between stdout lines to judge if the program is stuck] (default: {600})
Returns:
int -- [0 if successful, -1 if there's an error]
"""
loop = asyncio.get_event_loop()
    if loop.is_closed():
loop = asyncio.new_event_loop()
try:
returncode = loop.run_until_complete(_command_loop(cmd, cwd, timeout))
finally:
loop.close()
logger.info(f"Command: {cmd} has completed with the return code: {returncode}")
return returncode
class command:
def __init__(self, name=None, cls=click.Command, **attrs):
self.name = name
self.cls = cls
self.attrs = attrs
def __call__(self, method):
def __command__(this):
def wrapper(*args, **kwargs):
return method(this, *args, **kwargs)
if hasattr(method, "__options__"):
options = method.__options__
return self.cls(self.name, callback=wrapper, params=options, **self.attrs)
method.__command__ = __command__
return method
class option:
def __init__(self, *param_decls, **attrs):
self.param_decls = param_decls
self.attrs = attrs
def __call__(self, method):
if not hasattr(method, "__options__"):
method.__options__ = []
method.__options__.append(
click.Option(param_decls=self.param_decls, **self.attrs)
)
return method
class Cli:
def __new__(cls, *args, **kwargs):
self = super(Cli, cls).__new__(cls, *args, **kwargs)
self._cli = click.Group()
# Wrap instance options
self.__option_callbacks__ = set()
for attr_name in dir(cls):
attr = getattr(cls, attr_name)
if hasattr(attr, "__options__") and not hasattr(attr, "__command__"):
self._cli.params.extend(attr.__options__)
self.__option_callbacks__.add(attr)
# Wrap commands
for attr_name in dir(cls):
attr = getattr(cls, attr_name)
if hasattr(attr, "__command__"):
command = attr.__command__(self)
# command.params.extend(_options)
self._cli.add_command(command)
return self
def run(self):
"""Run the CLI application."""
self()
def __call__(self):
"""Run the CLI application."""
self._cli()
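A hypothetical example of wiring a small CLI with the ``command``, ``option``,
and ``Cli`` helpers above (the command name and options are made up):
class MyTool(Cli):
    @command(name="run-job")
    @option("--input-dir", required=True, help="Directory with input files")
    @option("--dry-run", is_flag=True)
    def run_job(self, input_dir, dry_run):
        setup_logger()
        logger.info(f"Running job on {input_dir} (dry_run={dry_run})")
        if not dry_run:
            return run_command_with_output(["ls", "-l", input_dir])
if __name__ == "__main__":
    MyTool().run()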
scidra-module-utils | /scidra_module_utils-0.2.1-py3-none-any.whl/scidra/module_utils/utils.py
import os
import json
import abc
import shutil
from zipfile import ZipFile
from click import Path as ClickPath, UsageError
from clint.textui import progress
from typing import Dict, List
from pathlib import Path
import pprint
import requests
from loguru import logger
from .utils import option, command, Cli, setup_logger as default_setup_logger
from .models import FileRef, Output
class BaseModule(abc.ABC, Cli):
OUTPUT_FILENAME: str = os.getenv("OUTPUT_FILENAME", "outputs.json")
CHUNK_SIZE: int = 2391975
@abc.abstractmethod
def run_job_logic(self, parameters: dict, files: Dict[str, FileRef]) -> Output:
"""
        This is the custom implementation of what will become an interface. It does the necessary setup
        to execute the existing module code. This method should represent 90% or more of the custom code
        required to create a module from pre-existing logic.
        Arguments:
            parameters {dict} -- [job parameters decoded from JSON]
            files {Dict[str, FileRef]} -- [downloaded input files, keyed by file ref id]
"""
pass
@classmethod
def setup_logger(cls):
default_setup_logger()
def create_artifacts(
self, output: Output, artifact_path: str = "./", zip: bool = False
):
logger.info("Creating job artifacts")
if artifact_path != "./":
Path(artifact_path).mkdir(parents=True, exist_ok=True)
outfile_path = os.path.join(artifact_path, self.OUTPUT_FILENAME)
with open(outfile_path, "w") as outfile:
outfile.write(output.output_json + "\n")
logger.info(f"Output JSON saved to {outfile_path}")
if output.files is not None:
to_zip = []
logger.info(f"Ensuring output files are in correct folder: {artifact_path}")
for _file in output.files:
target = Path(os.path.join(artifact_path, f"{_file.name}"))
if not target.exists() and _file.path is not None:
logger.info(f"Moving {_file.path} to {target}")
shutil.move(_file.path, target)
to_zip.append({"path": str(target), "name": f"{_file.name}"})
if zip:
zip_path = os.path.join(artifact_path, "files.zip")
logger.info(f"Creating output files zip: {zip_path}")
with ZipFile(zip_path, "w") as zipObj:
for zf in to_zip:
zipObj.write(zf["path"], zf["name"])
logger.info(f"Added {zf['name']} to {zip_path}")
def download_files(
self, file_refs: List[dict], files_path: str = "./"
) -> Dict[str, FileRef]:
output_file_refs = {}
for _fr in file_refs:
file_ref = FileRef(**_fr)
            if file_ref.url is None:
                raise ValueError(
                    f"File Ref {file_ref.name} has no url to download the file"
                )
r = requests.get(file_ref.url, stream=True) # type: ignore
target_path = Path(os.path.join(files_path, f"{file_ref.name}"))
target_path.parent.mkdir(parents=True, exist_ok=True)
with open(target_path, "wb") as _file:
length = r.headers.get("content-length")
total_length = None
if length is not None:
total_length = int(length)
logger.info(
f"Downloading {file_ref.name} Size: {length} to {target_path}"
)
if total_length is not None:
for ch in progress.bar(
r.iter_content(chunk_size=self.CHUNK_SIZE),
expected_size=(total_length / 1024) + 1,
):
if ch:
_file.write(ch)
else:
for ch in r.iter_content(chunk_size=self.CHUNK_SIZE):
_file.write(ch)
file_ref.path = str(target_path)
output_file_refs[file_ref.id] = file_ref
return output_file_refs
@command("run-job")
@option(
"params_path",
"--params-path",
default=None,
envvar="PARAMS_PATH",
type=ClickPath(exists=True),
)
@option(
"params_json", "--params-json", default=None, envvar="PARAMS_JSON", type=str
)
@option(
"file_refs_json",
"--files-json",
default=None,
envvar="FILE_REFS_JSON",
type=str,
)
@option(
"file_refs_path",
"--files-path",
default=None,
envvar="FILE_REFS_PATH",
type=ClickPath(exists=True),
)
@option(
"input_path",
"--input",
default="input",
envvar="FILES_IN_PATH",
type=ClickPath(),
)
@option(
"output_path",
"--output",
default="output",
envvar="OUTPUT_PATH",
type=ClickPath(),
)
@option("--zip", is_flag=True)
def run_job(
self,
params_path,
params_json,
file_refs_json,
file_refs_path,
input_path,
output_path,
zip,
):
self.setup_logger()
if params_json:
parameters = json.loads(params_json)
elif params_path:
with open(params_path) as json_file:
parameters = json.load(json_file)
else:
err_str = "One of either --params-json or --params-path is required"
logger.error(err_str)
raise UsageError(err_str)
logger.info(f"--- Using Parameters --- \n {pprint.pformat(parameters)}")
file_refs = None
if file_refs_json:
file_refs = json.loads(file_refs_json)
elif file_refs_path:
with open(file_refs_path) as json_file:
file_refs = json.load(json_file)
# Download inputs
if file_refs is not None:
file_refs = self.download_files(file_refs, input_path)
if file_refs is not None:
logger.info("--- Using Files ---")
for fr in file_refs.keys():
logger.info(f"{fr} - Path: {file_refs[fr].path}")
else:
logger.info("--- No Input Files ---")
output = self.run_job_logic(parameters, file_refs)
# Package up outputs
self.create_artifacts(output, output_path, zip)
scidra-module-utils | /scidra_module_utils-0.2.1-py3-none-any.whl/scidra/module_utils/base_module.py | base_module.py
# sci-Epi2Gene
[](https://codecov.io/github/ArianeMora/sciepi2gene?branch=master)
[](https://pypi.org/project/scie2g/)
[](https://zenodo.org/badge/latestdoi/316410924)
[Link to docs](https://arianemora.github.io/sciepi2gene/)
## Warning!!
If you have non-standard chromosomes, please remove them; they will make the program extremely slow.
Another warning: if you have duplicates (i.e. multiple features with the same start and end), it will also be extremely slow!
Sci-epi2gene maps events annotated to a genome location to nearby genes - i.e. peaks from histone modification
ChIP-seq experiments stored as bed data, or DNA methylation data in csv format (e.g. output from DMRseq, methylKit or methylSig).
The user provides a SORTED gene annotation file with start, end, and direction for each gene (we recommend using
[sci-biomart](https://github.com/ArianeMora/scibiomart); see the examples for details).
The user then selects how to annotate, i.e. whether a feature is in the promoter region or overlaps the gene body. Finally,
the parameters for overlap on each side are chosen.
It is available under the [GNU General Public License (Version 3) ](https://www.gnu.org/licenses/gpl-3.0.en.html).
This package is a wrapper that allows various epigenetic data types to be annotated to genes. [Examples are in the docs](https://arianemora.github.io/sciepi2gene/)
I also wanted to have different upper flanking and lower flanking distances that took into account the directionality of the strand
and also an easy output csv file that can be filtered and used in downstream analyses. This is why I keep all features
that fall within the annotation region of a gene (example below):
The overlapping methods are as follows:
1) overlaps: does ANY part of the peak/feature overlap the gene body plus some buffer before the TSS and some buffer on the non-TSS side?
2) promoter: does ANY part of the peak/feature overlap the TSS of the gene, taking into account buffers on either side of the TSS? (See the sketch below.)
<img src="_static/example_overlaps.png" width="600"/>
As you can see from the above IGV screenshot, the input peaks are in purple, and the green ones are the output
peaks as annotated to genes. The function *convert_to_bed* converts the output csv to bed files for viewing. This example
shows that a peak/feature can be annotated to multiple genes. Peaks/features outside of the regions of genes (e.g.
the first peak) are dropped from the output.
We show this example in the notebook (see examples folder), where we use [IGV](https://github.com/igvteam/igv-jupyter#igvjs-jupyter-extension)
to view the tracks (see image below).
<img src="_static/igv_jupyter.png" width="600"/>
Lastly, there are sometimes differences between annotations (i.e. the TSS in your annotation in IGV may differ from the
annotation you input to sciepi2gene); naturally, how your genes/features are annotated depends on the input file, so if you see differences check this first!
Please post questions and issues related to sci-epi2gene on the [Issues](https://github.com/ArianeMora/sciepi2gene/issues) section of the GitHub repository.
scie2g | /scie2g-1.0.3.tar.gz/scie2g-1.0.3/README.md | README.md
[](https://badge.fury.io/py/sciebo-rds-cli)
Status: not for production, yet
# Sciebo RDS CLI
This is a helper tool to install sciebo RDS on your ownCloud instances. It supports SSH and kubectl.
## Usage
You need python3 (>= 3.10) and pip to use this tool.
```bash
pip install sciebo-rds-cli
sciebords --help
```
If you prefer to work from the source code:
```bash
git clone https://github.com/Heiss/Sciebo-RDS-Install.git && cd Sciebo-RDS-Install
pip install -r requirements.txt
chmod +x sciebo_rds_install/main.py
sciebo_rds_install/main.py --help
```
If you have poetry installed, you can use it too. Then the installation will not clutter your local Python environment, because poetry uses its own virtualenv.
```bash
git clone https://github.com/Heiss/Sciebo-RDS-Install.git && cd Sciebo-RDS-Install
poetry install
poetry shell
sciebords --help
```
The application will look for a `values.yaml`, which is needed for the helm-supported sciebo RDS installation process, so you only have to maintain a single YAML file. Just append the content of `config.yaml.example` to your `values.yaml`. Alternatively, you can keep the configuration for this tool in a separate `config.yaml` and pass it with the `--config` flag. For the available configuration options, take a look at `config.yaml.example`; it documents everything you can configure for this app. Also take a look at the `--help` output, because it shows what the tool can do for you.
## Developer installation
This project uses [poetry](https://python-poetry.org/docs/#installation) for dependencies. Install it with the methods described in the official poetry documentation.
Then you need to install the developer environment.
```bash
poetry install --with dev
poetry shell
```
After this you can run the application in this environment.
```bash
sciebords --help
```
If you add or update the dependencies, you have to generate a new requirements.txt for easier user installations.
```bash
poetry export -f requirements.txt --output requirements.txt
```
sciebo-rds-cli | /sciebo_rds_cli-0.1.6.tar.gz/sciebo_rds_cli-0.1.6/README.md | README.md
import click
import paramiko
import kubernetes
from secrets import choice
import yaml
import string
import os
import requests
from pathlib import Path
def random(N=64):
return "".join(
[
choice(string.ascii_lowercase + string.ascii_uppercase + string.digits)
for _ in range(N)
]
)
def get_commands():
commands = [
# "{owncloud_path}occ market:install oauth2",
# "{owncloud_path}occ market:install rds",
"{owncloud_path}occ app:enable oauth2",
"{owncloud_path}occ app:enable rds",
"{owncloud_path}occ oauth2:add-client {oauthname} {client_id} {client_secret} {rds_domain}",
"{owncloud_path}occ rds:set-oauthname {oauthname}",
"{owncloud_path}occ rds:set-url {rds_domain}",
"{owncloud_path}occ rds:create-keys",
]
return commands
def execute_ssh(ssh, cmd):
_, stdout, stderr = ssh.exec_command(cmd)
    err = stderr.read().decode()  # exec_command returns byte streams
if err != "":
click.echo(f"Error in ssh command: {err}", err=True)
exit(1)
return stdout
def execute_kubectl(k8s, cmd):
k8s.write_stdin(cmd + "\n")
err = k8s.read_stderr()
if err != "":
click.echo(f"Error in kubectl command: {err}")
exit(1)
return k8s.read_stdout(timeout=3)
def execute_helm(values_file, install=False, dry_run=False):
if install and not dry_run:
click.echo("Preparing helm for sciebo RDS.")
click.echo("Remove installed sciebo rds from k8s if it is already there.")
os.system("helm uninstall sciebo-rds")
click.echo("Remove sciebo RDS from helm repo list.")
os.system("helm repo remove sciebo-rds")
click.echo("Add sciebo RDS in helm repo list again.")
os.system(
"helm repo add sciebo-rds https://www.research-data-services.org/charts/stable"
)
click.echo("Update helm repo list.")
os.system("helm repo up sciebo-rds")
click.echo("Finish preparation.")
click.echo("Installing sciebo RDS via helm.")
cmd = f"helm upgrade -i sciebo-rds sciebo-rds/all --values {values_file}"
if dry_run:
cmd += " --dry-run"
error_code = os.system(cmd)
if error_code > 0:
click.echo("There was an error while installing sciebo RDS via helm.")
else:
click.echo(
"Sciebo RDS is installed now via helm. Check it out via `kubectl get pods`."
)
def execute(
channel, fun, commands, owncloud_host_hostname_command, owncloud_host_config_command
):
for cmd in commands:
click.echo(f"Running command: {cmd}")
fun(channel, cmd)
# via php hostname
owncloud_url = fun(channel, owncloud_host_hostname_command)
# via overwrites from config
for overwrite in fun(channel, owncloud_host_config_command):
# remove comma, because we look at php dict parts
overwrite = overwrite.replace(",", "", 1)
# separate key and value
_, _, val = str(overwrite).partition(":")
owncloud_url = val
return owncloud_url
@click.group()
def cli():
pass
values_file_path = "values.yaml"
config_file_path = "config.yaml"
cert_file_path = "create_certs.sh"
@click.command()
@click.option(
"--self-signed-cert",
"-s",
"self_signed",
is_flag=True,
default=False,
help=f"Creates the script {cert_file_path} for self-signed certificates. Not recommended for production use, but handy for testing.",
)
@click.option(
"--force",
"-f",
"overwrite_values",
is_flag=True,
default=False,
help=f"Overwrites the {values_file_path}, if it already exists. Otherwise exits with statuscode greater then 0.",
)
@click.option(
"--one-file",
"-c",
"single_file",
is_flag=True,
default=False,
help=f"Writes down the needed config stuff in {values_file_path}. Otherwise it creates a separate file {config_file_path}.",
)
def init(self_signed, overwrite_values, single_file):
"""
Initialize needed files for sciebo RDS. Places the files in the current folder.
"""
if self_signed:
click.echo("Self-signed script selected.")
if not os.path.isfile(cert_file_path) or overwrite_values:
if overwrite_values:
click.echo(f"WARN: Overwrites {cert_file_path} if it exists.")
cnt = requests.get(
"https://raw.githubusercontent.com/Sciebo-RDS/Sciebo-RDS/release/getting-started/create_certs.sh.example"
).text
with open(cert_file_path, "w") as f:
f.write(cnt)
click.echo(f"{cert_file_path} created.")
else:
click.echo(
f"{cert_file_path} already in place. Delete it or use -f to overwrite it.",
err=True,
)
cnt = requests.get(
"https://raw.githubusercontent.com/Sciebo-RDS/Sciebo-RDS/release/getting-started/values.yaml.example"
).text
cfg = requests.get(
"https://raw.githubusercontent.com/Sciebo-RDS/Sciebo-RDS-CLI/develop/config.yaml.example"
).text
if (not os.path.isfile(config_file_path) or overwrite_values) and not single_file:
if overwrite_values:
click.echo(f"WARN: Overwrites {config_file_path} if it exists.")
with open(config_file_path, "w") as f:
f.write(cfg)
click.echo(f"{config_file_path} created.")
if not os.path.isfile(values_file_path) or overwrite_values:
if overwrite_values:
click.echo(f"WARN: Overwrites {values_file_path} if it exists.")
if single_file:
click.echo(
f"WARN: Places {config_file_path} content at the top of {values_file_path}."
)
cnt = cfg + "\n\n\n" + cnt
with open(values_file_path, "w") as f:
f.write(cnt)
click.echo(f"{values_file_path} created.")
else:
click.echo(
f"{values_file_path} already in place. Delete it or use -f to overwrite it.",
err=True,
)
if not single_file:
click.echo(
f"Adjust the {values_file_path} and {config_file_path} to your needs with your favourite editor, before you `install` sciebo RDS."
)
else:
click.echo(
f"Adjust the {values_file_path} to your needs with your favourite editor, before you `install` sciebo RDS."
)
@click.command()
@click.option(
"--one-file",
"-c",
"single_file",
is_flag=True,
default=False,
help=f"Writes down the needed config stuff in {values_file_path}. Otherwise it creates a separate file {config_file_path}.",
)
@click.option(
"--helm-sciebords-name",
"-n",
"helm_name",
default="sciebords",
help="Use the given name for helm install process. Defaults to 'sciebords'.",
)
def checks(single_file, helm_name):
"""
Runs several checks if all requirements for sciebo RDS are fulfilled.
"""
error_found = False
if not os.path.isfile(values_file_path):
click.echo(f"values.yaml is not in place: {values_file_path}", err=True)
error_found = True
return
if not single_file and not os.path.isfile(config_file_path):
click.echo(f"config.yaml is not in place: {config_file_path}", err=True)
error_found = True
return
if os.system("kubectl version") > 0:
click.echo("kubectl not found", err=True)
error_found = True
if os.system("helm version") > 0:
click.echo("helm not found", err=True)
error_found = True
if (
os.path.isfile(values_file_path)
and os.system(
f"helm upgrade -i {helm_name} sciebo-rds/all --values {values_file_path} --dry-run"
)
> 0
):
click.echo(f"{values_file_path} not valid. Helm founds error.", err=True)
error_found = True
if not error_found:
click.echo("Everything is fine. You should be good to install sciebo RDS.")
@click.command()
@click.option(
"--dry-run",
"dry_run",
is_flag=True,
default=False,
help="Execute install without any changes. WARNING: It connects to the ownCloud instances and your k8s cluster via SSH and Kubectl to get some informations. Nevertheless it does not change anything.",
)
@click.argument(
"values_file",
default=Path(f"{os.getcwd()}/values.yaml"),
type=click.Path(exists=True),
)
def upgrade(dry_run, values_file):
"""
A wrapper method for convenience to upgrade the sciebo RDS instance with helm. Use this command if you changed something in your values.yaml
"""
execute_helm(values_file, install=False, dry_run=dry_run)
@click.command()
def commands():
"""
Shows all commands, which will be executed to configure the owncloud instances properly.
"""
data = {
"client_id": "${CLIENT_ID}",
"client_secret": "${CLIENT_SECRET}",
"oauthname": "${OAUTHNAME}",
"rds_domain": "${RDS_DOMAIN}",
"owncloud_path": "${OWNCLOUD_PATH}",
}
click.echo(
"""Conditions:
$CLIENT_ID and $CLIENT_SECRET have a length of 64 characters (no special characters like [/\.,] allowed).
$OWNCLOUD_PATH is empty "" (occ can be found through $PATH) or set to a folder with trailing slash / e.g. /var/www/owncloud/
$OAUTHNAME is not in use for oauth2 already.
$RDS_DOMAIN points to the sciebo-rds installation root domain.
Remember that you also need the domainname of the owncloud instance to configure the values.yaml, which will be automatically guessed by this script.
ownCloud needs php-gmp for oauth2 plugin. Install it on your own.
"""
)
click.echo("Commands: ")
for cmd in get_commands():
click.echo(cmd.format(**data))
@click.command()
@click.option(
"--only-kubeconfig",
"-k",
"force_kubectl",
is_flag=True,
default=False,
help="Ignore servers object in config.yaml and use the user kubeconfig for a single pod configuration.",
)
@click.option(
"-h",
"--helm-install",
"helm_install",
is_flag=True,
default=False,
help="A convenient parameter. It runs all needed helm commands to install sciebo-rds in your current kubectl context after configuration. Helm upgrades should not use this parameter. Please use `sciebords upgrade` for this.",
)
@click.option(
"-c",
"--config",
"file",
type=click.Path(exists=True),
is_flag=False,
flag_value=Path(f"{os.getcwd()}/config.yaml"),
help="The given path will be used as config.yaml file. If not given, it will use the values.yaml per default as a single-file-configuration otherwise.",
)
@click.argument(
"values_file",
default=Path(f"{os.getcwd()}/values.yaml"),
type=click.Path(exists=True),
)
@click.option(
"--dry-run",
"dry_run",
is_flag=True,
default=False,
help="Execute install without any changes. WARNING: It connects to the ownCloud instances and your k8s cluster via SSH and Kubectl to get some informations. Nevertheless it does not change anything.",
)
def install(force_kubectl, helm_install, values_file, file, dry_run):
"""
    Use the interfaces defined in the given VALUES_FILE to get all needed information from the ownCloud installations and prepare the VALUES_FILE to install sciebo RDS.
VALUES_FILE defaults to ./values.yaml. Take a look at --config to specify a different file for interface configuration.
Primarily it sets up all needed plugins in ownCloud, gets everything in place and writes down the domains object in the values.yaml file, which will be used later to install sciebo RDS.
"""
    config_file = file  # path given via --config; when None, values.yaml doubles as the config (single-file mode)
values = None
config = None
try:
with open(values_file, "r") as f:
try:
values = yaml.safe_load(f)
except yaml.YAMLError as exc:
click.echo(f"Error in values.yaml: {exc}", err=True)
exit(1)
except OSError as exc:
click.echo(f"Missing file: {values_file}", err=True)
exit(1)
if config_file is None:
config = values
else:
try:
with open(config_file, "r") as f:
try:
config = yaml.safe_load(f)
except yaml.YAMLError as exc:
click.echo(f"Error in config.yaml: {exc}", err=True)
exit(1)
except OSError as exc:
click.echo(f"Missing file: {config_file}", err=True)
exit(1)
owncloud_path_global = config.get("owncloud_path", "")
if force_kubectl:
try:
config["servers"] = [{"selector": config["k8sselector"]}]
except KeyError as exc:
click.echo(
"Missing `k8sselector` field in config. --only-kubeconfig needs this field.",
err=True,
)
exit(1)
click.echo("use kubeconfig only")
servers = config.get("servers", [])
if len(servers) == 0:
click.echo("No servers were found.")
exit(1)
for val in servers:
key_filename = val.get("private_key")
if key_filename is not None:
key_filename = key_filename.replace("{$HOME}", os.environ["HOME"])
client_id, client_secret = (random(), random())
oauthname = config.get("oauthname", "sciebo-rds")
rds_domain = config["rds"]
owncloud_path = val.get("owncloud_path", owncloud_path_global)
if owncloud_path != "" and not str(owncloud_path).endswith("/"):
owncloud_path += "/"
data = {
"client_id": client_id,
"client_secret": client_secret,
"oauthname": oauthname,
"rds_domain": rds_domain,
"owncloud_path": owncloud_path,
}
commands = [cmd.format(**data) for cmd in get_commands()]
owncloud_host_hostname_command = 'php -r "echo gethostname();"'
owncloud_host_config_command = (
f'{owncloud_path}occ config:list | grep "overwritehost\|overwrite.cli.url"'
)
owncloud_url = ""
if "address" in val:
ssh = paramiko.client.SSHClient()
ssh.load_system_host_keys()
ssh.connect(
val["address"],
username=val.get("user"),
password=val.get("password"),
key_filename=key_filename,
)
if dry_run:
click.echo(
"SSH can connect to ownCloud server: {}".format(val["address"])
)
continue
owncloud_url = execute(
ssh,
execute_ssh,
commands,
owncloud_host_hostname_command,
owncloud_host_config_command,
)
ssh.close()
elif "namespace" in val:
context = val.get("context", config.get("k8scontext"))
selector = val.get("selector", config.get("k8sselector"))
containername = val.get("containername", config.get("k8scontainername"))
kubernetes.config.load_kube_config(context=context)
namespace = val.get(
"namespace",
config.get(
"k8snamespace",
kubernetes.config.list_kube_config_contexts()[1]["context"][
"namespace"
],
),
)
api = kubernetes.client.CoreV1Api()
pods = api.list_namespaced_pod(
namespace=namespace,
label_selector=selector,
field_selector="status.phase=Running",
)
k8s = None
for pod in pods.items:
k8s = kubernetes.stream.stream(
api.connect_get_namespaced_pod_exec,
pod.metadata.name,
namespace,
container=containername,
command="/bin/bash",
stderr=True,
stdin=True,
stdout=True,
tty=False,
_preload_content=False,
)
                if k8s.is_open():
                    break  # use the first pod we can successfully attach to
if k8s is None or not k8s.is_open():
click.echo(f"No connection via kubectl possible: {val}")
exit(1)
click.echo(
f"kubectl initialized: Connected to pod {pod.metadata.name}, container {containername} in namespace {namespace}"
)
if dry_run:
click.echo(
"kubectl can connect to ownCloud label: {}, container: {}".format(
selector, containername
)
)
continue
owncloud_url = execute(
k8s,
execute_kubectl,
commands,
owncloud_host_hostname_command,
owncloud_host_config_command,
)
k8s.close()
else:
click.echo(
f"Skipped: Server was not valid to work with: {val}\nIt needs to be an object with `address` for ssh or `namespace` for kubectl"
)
continue
if not owncloud_url:
click.echo(
f"owncloud domain cannot be found automatically for {val}. Enter the correct domain without protocol. If port needed, add it too.\nExample: sciebords.uni-muenster.de, localhost:8000"
)
value = ""
while not value:
value = input(f"Address: ")
if value:
owncloud_url = value
else:
exit(1)
domain = {
"name": val["name"],
"ADDRESS": owncloud_url,
"OAUTH_CLIENT_ID": client_id,
"OAUTH_CLIENT_SECRET": client_secret,
}
values["global"]["domains"].append(domain)
if not dry_run:
with open(values_file, "w") as yaml_file:
yaml.dump(values, yaml_file, default_flow_style=False)
if helm_install:
execute_helm(values_file, install=True, dry_run=dry_run)
cli.add_command(commands, "get-commands")
cli.add_command(init, "init")
cli.add_command(install, "install")
cli.add_command(checks, "checks")
cli.add_command(upgrade, "upgrade")
if __name__ == "__main__":
cli()
sciebo-rds-cli | /sciebo_rds_cli-0.1.6.tar.gz/sciebo_rds_cli-0.1.6/sciebo_rds_cli/main.py | main.py
# scieconlib
This is a machine learning toolkit for game theory and econometrics analysis.
## Dev environment setup
In your virtual environment, run
```shell
python3 -m pip install -r requirements.txt
```
To build the project, run
```shell
make clean && make start
```
## Basic usage
### Installation
```shell
python3 -m pip install scieconlib
```
### Example
```python
import scieconlib.gametheory.multi_armed_bandit as bandit
import scieconlib
print('version: ', scieconlib.__version__)
# create actions
action_1 = bandit.Action.from_array([1, 2, 3, 4, 5])
action_2 = bandit.Action.from_array([2, 4, 5, 4, 8])
action_3 = bandit.Action.from_array([0, 1, 2, 1, 3])
# create agent and add actions
agent = bandit.Agent()
agent.add_action(action_1, verbose=1)
agent.add_action(action_2, verbose=1)
agent.add_action(action_3, verbose=1)
# setup the model
model = bandit.Model(
agent=agent,
agent_num=10,
epsilon=0.1,
epochs=500
)
# train the model
model.train()
# draw the result
model.history()
```
scieconlib | /scieconlib-0.0.5.tar.gz/scieconlib-0.0.5/README.md | README.md
# Clea
This project is an XML front matter metadata reader for documents
that *almost* follows the [SciELO Publishing Schema],
extracting and sanitizing the values regarding the affiliations.
## Installation
One can install Clea with either:
```
pip install scielo-clea # Minimal
pip install scielo-clea[cli] # Clea with CLI (recommended)
pip install scielo-clea[server] # Clea with the testing/example server
pip install scielo-clea[all] # Clea with both CLI and the server
```
Actually, all of these commands install everything;
only the dependencies differ.
The first is an installation with minimal requirements,
intended for use within Python, as an imported package.
## Running the command line interface
The CLI is a way to use Clea as an article XML to JSONL converter
(one JSON output line for each XML input):
```
clea -o output.jsonl article1.xml article2.xml article3.xml
```
The same can be done with ``python -m clea`` instead of ``clea``.
The output is the standard output stream.
See ``clea --help`` for more information.
## Running the testing server
You can run the development server using the flask CLI.
For example, for listening at 8080 from every host:
```
FLASK_APP=clea.server flask run -h 0.0.0.0 -p 8080
```
In a production server with 4 worker processes for handling requests,
you can, for example:
- Install gunicorn (it's not a dependency)
- Run `gunicorn -b 0.0.0.0:8080 -w 4 clea.server:app`
## Clea as a library
A simple example to see all the extracted data is:
```python
from clea import Article
from pprint import pprint
art = Article("some_file.xml")
pprint(art.data_full)
```
That's a dictionary of lists with all the "raw" extracted data.
The keys of that dictionary can be directly accessed,
so one can avoid extracting everything from the XML
by getting just the specific items/attributes
(e.g. `art["journal_meta"][0].data_full`
or `art.journal_meta[0].data_full`
instead of `art.data_full["journal_meta"][0]`).
These items/attributes are always lists, for example:
* `art["aff"]`: List of `clea.core.Branch` instances
* `art["sub_article"]`: List of `clea.core.SubArticle` instances
* `art["contrib"][0]["contrib_name"]`: List of strings
Where the `art["contrib"][0]` is a `Branch` instance,
and all such instances behave in the same way
(there's no nested branches).
That can be seen as another way to navigate in the former dictionary,
the last example should return the same list one would get with
`art.data_full["contrib"][0]["contrib_name"]`,
but without extracting everything else
that appears in the `art.data_full` dictionary.
More simple stuff that can be done:
```python
len(art.aff) # Number of <aff> entries
len(art.sub_article) # Number of <sub-article>
art.contrib[0].data_full # Data from the first contributor as a dict
# Something like {"type": ["translation"], "lang": ["en"]},
# the content from <sub-article> attributes
art["sub_article"][0]["article"][0].data_full
# A string with the article title, accessing just the desired content
art["article_meta"][0]["article_title"][0]
```
All `SubArticle`, `Article` and `Branch` instances
have the `data_full` property and the `get` method,
the latter being internally used for item/attribute getting.
Their behavior is:
* `Branch.get` always returns a list of strings
* `Article.get("sub_article")` returns a list of `SubArticle`
* `Article.get(...)` returns a list of `Branch`
* `SubArticle` behaves like `Article`
The extracted information is not exhaustive!
Its result should not be seen as a replacement for the raw XML.
One of the goals of this library was
to help create tabular data from a given XML
with as many rows as required
to have a matching pair of `<aff>` and `<contrib>` in each row.
These are the `Article` methods/properties that do that matching:
* `art.aff_contrib_inner_gen()`
* `art.aff_contrib_full_gen()`
* `art.aff_contrib_inner`
* `art.aff_contrib_full`
* `art.aff_contrib_inner_indices`
* `art.aff_contrib_full_indices`
The most useful ones are probably the last two,
which return a list of pairs (tuples) of indices (ints),
so one can use a `(ai, ci)` result
to access the `(art.aff[ai], art.contrib[ci])` pair,
unless the index is `-1` (not found).
The ones with the `_gen` suffix are generator functions
that yield tuples with two `Branch` entries (or `None`),
whereas the ones without a suffix return a list of merged dictionaries
in an almost tabular format (a dictionary of lists of strings).
Each such list of strings should usually have at most one entry
for these specific elements, but that's not always the case,
so one should be careful when using the `data` property.
The `inner` and `full` in the names
refer to `INNER JOIN` and `FULL OUTER JOIN` from SQL,
meaning that the unmatched elements
(all `<aff>` and `<contrib>` nodes without a cross-reference)
are discarded in the former strategy,
whereas they're forcefully paired with `None` in the latter.
To print all the extracted data from an XML,
including the indices of the matching `<aff>` and `<contrib>` pairs
in the `FULL OUTER JOIN` sense,
similar to the testing server response:
```python
pprint({
    **art.data_full,
    "aff_contrib_pairs": art.aff_contrib_full_indices,
})
```
[SciELO Publishing Schema]: http://docs.scielo.org/projects/scielo-publishing-schema
scielo-clea | /scielo-clea-0.4.4.tar.gz/scielo-clea-0.4.4/README.md | README.md
from .misc import get_lev
def aff_contrib_inner_gen(article):
"""Generator of matching <aff> and <contrib> of an article
as pairs of Branch instances,
using a strategy based on SQL's INNER JOIN."""
affs_ids = [get_lev(aff.node, "id") for aff in article.aff]
contrib_rids = [[get_lev(xref, "rid")
for xref in contrib.get_field_nodes("xref_aff")]
for contrib in article.contrib]
for aff_id, aff in zip(affs_ids, article.aff):
for rid_list, contrib in zip(contrib_rids, article.contrib):
for rid in rid_list:
if rid == aff_id:
yield aff, contrib
def aff_contrib_full_gen(article):
"""Generator of matching <aff> and <contrib> of an article
as pairs of Branch instances,
using a strategy based on SQL's FULL OUTER JOIN."""
affs_ids = [get_lev(aff.node, "id") for aff in article.aff]
contrib_rids = [[get_lev(xref, "rid")
for xref in contrib.get_field_nodes("xref_aff")]
for contrib in article.contrib]
contrib_missing = set(range(len(article.contrib)))
for aff_id, aff in zip(affs_ids, article.aff):
amiss = True
for cidx, (rid_list, contrib) in enumerate(zip(contrib_rids,
article.contrib)):
for rid in rid_list:
if rid == aff_id:
yield aff, contrib
amiss = False
contrib_missing.discard(cidx)
if amiss:
yield aff, None
for cidx in sorted(contrib_missing):
yield None, article.contrib[cidx]
def aff_contrib_inner(article):
"""Inner join list of matching <aff> and <contrib> entries."""
return [{**aff.data_full, **contrib.data_full}
for aff, contrib in aff_contrib_inner_gen(article)]
def aff_contrib_full(article):
"""Full outer join list of matching <aff> and <contrib> entries."""
return [{**(aff.data_full if aff else {}),
**(contrib.data_full if contrib else {}),
} for aff, contrib in aff_contrib_full_gen(article)]
def aff_contrib_inner_indices(article):
"""List of ``(ia, ic)`` tuples of indices for all matching
``(article["aff"][ia], article["contrib"][ic])`` pairs,
using a strategy based on SQL's INNER JOIN.
"""
affs = [None] + article["aff"]
contribs = [None] + article["contrib"]
return [(affs.index(aff) - 1, contribs.index(contrib) - 1)
for aff, contrib in aff_contrib_inner_gen(article)]
def aff_contrib_full_indices(article):
"""List of ``(ia, ic)`` tuples of indices for all matching
``(article["aff"][ia], article["contrib"][ic])`` pairs,
using a strategy based on SQL's FULL OUTER JOIN.
"""
affs = [None] + article["aff"]
contribs = [None] + article["contrib"]
return [(affs.index(aff) - 1, contribs.index(contrib) - 1)
for aff, contrib in aff_contrib_full_gen(article)]
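# A minimal usage sketch, assuming ``article`` is a ``clea.core.Article``
# built from some XML file (e.g. ``Article("some_file.xml")``):
#
#     for aff, contrib in aff_contrib_full_gen(article):
#         print(aff.data_full if aff else None,
#               contrib.data_full if contrib else None)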
scielo-clea | /scielo-clea-0.4.4.tar.gz/scielo-clea-0.4.4/clea/join.py | join.py
from contextlib import contextmanager
import html
from lxml import etree
from unidecode import unidecode
import numpy as np
import regex
from .cache import CachedMethod, CachedProperty
from . import join
from .misc import get_lev
from .regexes import TAG_PATH_REGEXES, SUB_ARTICLE_NAME, get_branch_dicts
_PARSER = etree.XMLParser(recover=True)
_DOCTYPE = '<!DOCTYPE article PUBLIC "" "http://">\n' # Force Entity objects
class InvalidInput(Exception):
pass
def etree_tag_path_gen(root, start=""):
"""Extract the tag path."""
start += "/" + root.tag
yield start, root
for node in root.iterchildren(tag=etree.Element):
yield from etree_tag_path_gen(node, start)
def etree_path_gen(branch, path=""):
"""Extract the branch path."""
path += "/" + branch.tag
for k, v in sorted(branch.items()):
path += f"@{xml_attr_cleanup(k)}={xml_attr_cleanup(v)}"
yield path, branch
for node in branch.iterchildren(tag=etree.Element):
yield from etree_path_gen(node, path)
def xml_attr_cleanup(name):
"""Clean the given XML attribute name/value.
This just removes what's required in order to build a branch path.
"""
return regex.sub("[/@]", "%", unidecode(name))
def node_getattr(node, attr=""):
"""Item getter from an Element node of an ElementTree.
    Returns the decoded inner text string from the node,
unless an attribute name is given.
"""
if node is None:
return ""
if attr:
return get_lev(node, attr)
full_text = etree.tostring(node,
encoding=str,
method="text",
with_tail=False,
)
return regex.sub(r"\s+", " ", full_text).strip()
@contextmanager
def open_or_bypass(fileobj_or_filename, mode="r"):
if isinstance(fileobj_or_filename, str):
with open(fileobj_or_filename, mode) as result:
yield result
else:
yield fileobj_or_filename
def replace_html_entity_by_text(entity):
value = html.unescape(entity.text) + (entity.tail or "")
previous = entity.getprevious()
parent = entity.getparent()
parent.remove(entity)
if previous is not None:
if previous.tail is None:
previous.tail = value
else:
previous.tail += value
else:
if parent.text is None:
parent.text = value
else:
parent.text += value
class Article(object):
"""Article abstraction from its XML file."""
def __init__(self, xml_file, raise_on_invalid=True):
with open_or_bypass(xml_file) as fobj:
raw_data = fobj.read()
if isinstance(raw_data, bytes):
raw_data = raw_data.decode("utf-8")
try: # Remove <?xml> and <!DOCTYPE> headers
document = regex.search("<[^?!](?:.|\n)*$", raw_data,
flags=regex.MULTILINE).group()
except AttributeError:
document = raw_data
self.root = etree.fromstring(_DOCTYPE + document, parser=_PARSER)
if self.root is None:
if raise_on_invalid:
raise InvalidInput("Not an XML file")
self.root = etree.Element("article")
# There should be no entity at all,
        # but if there are any (legacy), they are the HTML5 ones
for entity in self.root.iterdescendants(tag=etree.Entity):
replace_html_entity_by_text(entity)
@CachedProperty
def tag_paths_pairs(self):
return list(etree_tag_path_gen(self.root))
@CachedMethod
def get(self, tag_name):
tag_regex = TAG_PATH_REGEXES[tag_name]
if tag_name == SUB_ARTICLE_NAME:
return [SubArticle(parent=self, root=el, tag_name=tag_name)
for path, el in self.tag_paths_pairs
if tag_regex.search(path)]
return [Branch(article=self, node=el, tag_name=tag_name)
for path, el in self.tag_paths_pairs
if tag_regex.search(path)]
@CachedProperty
def data_full(self):
return {tag_name: [branch.data_full for branch in self.get(tag_name)]
for tag_name in TAG_PATH_REGEXES}
__getitem__ = __getattr__ = lambda self, name: self.get(name)
aff_contrib_inner_gen = join.aff_contrib_inner_gen
aff_contrib_full_gen = join.aff_contrib_full_gen
aff_contrib_inner = CachedProperty(join.aff_contrib_inner)
aff_contrib_full = CachedProperty(join.aff_contrib_full)
aff_contrib_inner_indices = CachedProperty(join.aff_contrib_inner_indices)
aff_contrib_full_indices = CachedProperty(join.aff_contrib_full_indices)
class SubArticle(Article):
def __init__(self, parent, root, tag_name):
self.parent = parent # Should be the <article> (main XML root)
self.root = root # The <sub-article> element
self.tag_name = tag_name
class Branch(object):
def __init__(self, article, node, tag_name):
self.article = article
self.node = node # Branch "root" element
self.tag_name = tag_name
self.field_regexes, self.field_attrs = get_branch_dicts(tag_name)
@CachedProperty
def paths_pairs(self):
return list(etree_path_gen(self.node))
@CachedProperty
def _paths_nodes_pair(self):
return tuple(zip(*self.paths_pairs))
@CachedProperty
def paths(self):
return self._paths_nodes_pair[0]
@CachedProperty
def nodes(self):
return self._paths_nodes_pair[1]
@CachedProperty
def paths_str(self):
return "\n".join(self.paths)
@CachedProperty
def ends(self):
return np.cumsum([len(p) + 1 for p in self.paths]) # Add \n
@CachedProperty
def data_full(self):
return {key: self.get(key) for key in self.field_regexes}
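    # get_field_nodes() maps each regex match offset within paths_str back to
    # the node whose path line contains that offset, using the cumulative line
    # end positions stored in "ends".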
@CachedMethod
def get_field_nodes(self, field):
field_regex = self.field_regexes[field]
matches = field_regex.finditer(self.paths_str)
return [self.nodes[np.where(self.ends > m.start())[0][0]]
for m in matches]
@CachedMethod
def get(self, field):
attr = self.field_attrs[field]
nodes = self.get_field_nodes(field)
return [node_getattr(node, attr) for node in nodes]
__getitem__ = __getattr__ = lambda self, name: self.get(name)
scielo-clea | /scielo-clea-0.4.4.tar.gz/scielo-clea-0.4.4/clea/core.py | core.py
from django.utils.translation import ugettext as _
from django.db import models
# tell South how to introspect this field
try:
from south.modelsinspector import add_introspection_rules
except ImportError:
pass
else:
add_introspection_rules([],
["^scielo_extensions\.modelfields\.CountryField"])
COUNTRIES = [
('AD', _('Andorra')),
('AE', _('United Arab Emirates')),
('AF', _('Afghanistan')),
('AG', _('Antigua & Barbuda')),
('AI', _('Anguilla')),
('AL', _('Albania')),
('AM', _('Armenia')),
('AN', _('Netherlands Antilles')),
('AO', _('Angola')),
('AQ', _('Antarctica')),
('AR', _('Argentina')),
('AS', _('American Samoa')),
('AT', _('Austria')),
('AU', _('Australia')),
('AW', _('Aruba')),
('AZ', _('Azerbaijan')),
('BA', _('Bosnia and Herzegovina')),
('BB', _('Barbados')),
('BD', _('Bangladesh')),
('BE', _('Belgium')),
('BF', _('Burkina Faso')),
('BG', _('Bulgaria')),
('BH', _('Bahrain')),
('BI', _('Burundi')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BN', _('Brunei Darussalam')),
('BO', _('Bolivia')),
('BR', _('Brazil')),
('BS', _('Bahama')),
('BT', _('Bhutan')),
('BV', _('Bouvet Island')),
('BW', _('Botswana')),
('BY', _('Belarus')),
('BZ', _('Belize')),
('CA', _('Canada')),
('CC', _('Cocos (Keeling) Islands')),
('CF', _('Central African Republic')),
('CG', _('Congo')),
('CH', _('Switzerland')),
('CI', _('Ivory Coast')),
    ('CK', _('Cook Islands')),
('CL', _('Chile')),
('CM', _('Cameroon')),
('CN', _('China')),
('CO', _('Colombia')),
('CR', _('Costa Rica')),
('CU', _('Cuba')),
('CV', _('Cape Verde')),
('CX', _('Christmas Island')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DE', _('Germany')),
('DJ', _('Djibouti')),
('DK', _('Denmark')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('DZ', _('Algeria')),
('EC', _('Ecuador')),
('EE', _('Estonia')),
('EG', _('Egypt')),
('EH', _('Western Sahara')),
('ER', _('Eritrea')),
('ES', _('Spain')),
('ET', _('Ethiopia')),
('FI', _('Finland')),
('FJ', _('Fiji')),
('FK', _('Falkland Islands (Malvinas)')),
('FM', _('Micronesia')),
('FO', _('Faroe Islands')),
('FR', _('France')),
('FX', _('France, Metropolitan')),
('GA', _('Gabon')),
('GB', _('United Kingdom (Great Britain)')),
('GD', _('Grenada')),
('GE', _('Georgia')),
('GF', _('French Guiana')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GL', _('Greenland')),
('GM', _('Gambia')),
('GN', _('Guinea')),
('GP', _('Guadeloupe')),
('GQ', _('Equatorial Guinea')),
('GR', _('Greece')),
('GS', _('South Georgia and the South Sandwich Islands')),
('GT', _('Guatemala')),
('GU', _('Guam')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HK', _('Hong Kong')),
('HM', _('Heard & McDonald Islands')),
('HN', _('Honduras')),
('HR', _('Croatia')),
('HT', _('Haiti')),
('HU', _('Hungary')),
('ID', _('Indonesia')),
('IE', _('Ireland')),
('IL', _('Israel')),
('IN', _('India')),
('IO', _('British Indian Ocean Territory')),
('IQ', _('Iraq')),
('IR', _('Islamic Republic of Iran')),
('IS', _('Iceland')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JO', _('Jordan')),
('JP', _('Japan')),
('KE', _('Kenya')),
('KG', _('Kyrgyzstan')),
('KH', _('Cambodia')),
('KI', _('Kiribati')),
('KM', _('Comoros')),
('KN', _('St. Kitts and Nevis')),
('KP', _('Korea, Democratic People\'s Republic of')),
('KR', _('Korea, Republic of')),
('KW', _('Kuwait')),
('KY', _('Cayman Islands')),
('KZ', _('Kazakhstan')),
('LA', _('Lao People\'s Democratic Republic')),
('LB', _('Lebanon')),
('LC', _('Saint Lucia')),
('LI', _('Liechtenstein')),
('LK', _('Sri Lanka')),
('LR', _('Liberia')),
('LS', _('Lesotho')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('LV', _('Latvia')),
('LY', _('Libyan Arab Jamahiriya')),
('MA', _('Morocco')),
('MC', _('Monaco')),
('MD', _('Moldova, Republic of')),
('MG', _('Madagascar')),
('MH', _('Marshall Islands')),
('ML', _('Mali')),
('MN', _('Mongolia')),
('MM', _('Myanmar')),
('MO', _('Macau')),
('MP', _('Northern Mariana Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
    ('MS', _('Montserrat')),
('MT', _('Malta')),
('MU', _('Mauritius')),
('MV', _('Maldives')),
('MW', _('Malawi')),
('MX', _('Mexico')),
('MY', _('Malaysia')),
('MZ', _('Mozambique')),
('NA', _('Namibia')),
('NC', _('New Caledonia')),
('NE', _('Niger')),
('NF', _('Norfolk Island')),
('NG', _('Nigeria')),
('NI', _('Nicaragua')),
('NL', _('Netherlands')),
('NO', _('Norway')),
('NP', _('Nepal')),
('NR', _('Nauru')),
('NU', _('Niue')),
('NZ', _('New Zealand')),
('OM', _('Oman')),
('PA', _('Panama')),
('PE', _('Peru')),
('PF', _('French Polynesia')),
('PG', _('Papua New Guinea')),
('PH', _('Philippines')),
('PK', _('Pakistan')),
('PL', _('Poland')),
('PM', _('St. Pierre & Miquelon')),
('PN', _('Pitcairn')),
('PR', _('Puerto Rico')),
('PT', _('Portugal')),
('PW', _('Palau')),
('PY', _('Paraguay')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('SA', _('Saudi Arabia')),
('SB', _('Solomon Islands')),
('SC', _('Seychelles')),
('SD', _('Sudan')),
('SE', _('Sweden')),
('SG', _('Singapore')),
('SH', _('St. Helena')),
('SI', _('Slovenia')),
('SJ', _('Svalbard & Jan Mayen Islands')),
('SK', _('Slovakia')),
('SL', _('Sierra Leone')),
('SM', _('San Marino')),
('SN', _('Senegal')),
('SO', _('Somalia')),
('SR', _('Suriname')),
('ST', _('Sao Tome & Principe')),
('SV', _('El Salvador')),
('SY', _('Syrian Arab Republic')),
('SZ', _('Swaziland')),
('TC', _('Turks & Caicos Islands')),
('TD', _('Chad')),
('TF', _('French Southern Territories')),
('TG', _('Togo')),
('TH', _('Thailand')),
('TJ', _('Tajikistan')),
('TK', _('Tokelau')),
('TM', _('Turkmenistan')),
('TN', _('Tunisia')),
('TO', _('Tonga')),
('TP', _('East Timor')),
('TR', _('Turkey')),
('TT', _('Trinidad & Tobago')),
('TV', _('Tuvalu')),
('TW', _('Taiwan, Province of China')),
('TZ', _('Tanzania, United Republic of')),
('UA', _('Ukraine')),
('UG', _('Uganda')),
('UM', _('United States Minor Outlying Islands')),
('US', _('United States of America')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VA', _('Vatican City State (Holy See)')),
('VC', _('St. Vincent & the Grenadines')),
('VE', _('Venezuela')),
('VG', _('British Virgin Islands')),
('VI', _('United States Virgin Islands')),
('VN', _('Viet Nam')),
('VU', _('Vanuatu')),
('WF', _('Wallis & Futuna Islands')),
('WS', _('Samoa')),
('YE', _('Yemen')),
('YT', _('Mayotte')),
('YU', _('Yugoslavia')),
('ZA', _('South Africa')),
('ZM', _('Zambia')),
('ZR', _('Zaire')),
('ZW', _('Zimbabwe')),
('ZZ', _('Unknown or unspecified country')),
]
class CountryField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs.setdefault('max_length', 2)
kwargs.setdefault('choices', COUNTRIES)
super(CountryField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "CharField"
scielo-django-extensions | /scielo-django-extensions-0.4.tar.gz/scielo-django-extensions-0.4/scielo_extensions/modelfields.py | modelfields.py
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext as __
from django.conf import settings
from django import template
register = template.Library()
GLOSSARY_URL = settings.DOCUMENTATION_BASE_URL +'/glossary.html#'
def easy_tag(func):
"""
Deals with the repetitive parts of parsing template tags
"""
def inner(parser, token):
try:
return func(*token.split_contents())
except TypeError:
raise template.TemplateSyntaxError('Bad arguments for tag "%s"' % token.split_contents()[0])
inner.__name__ = func.__name__
    inner.__doc__ = func.__doc__
return inner
def full_path(context, **params):
url_path = ''
url_get = context['request'].GET.copy()
if 'PATH_INFO' in context['request'].META:
url_path = context['request'].META['PATH_INFO']
for key, value in params.items():
url_get[key] = value
if len(url_get):
url_path += "?%s" % "&".join(("%s=%s" % (key, value) for key, value in url_get.items() if value))
return url_path.encode('utf8')
class NamedPagination(template.Node):
def __init__(self, letters, selected):
self.letters = template.Variable(letters)
self.selected = template.Variable(selected)
def render(self, context):
letters = self.letters.resolve(context)
selected = self.selected.resolve(context)
html_snippet = '''<div class="pagination" style="margin:0;padding-top:8px;text-align:center;">
<ul><li><a href="?" style="line-height: 20px;padding: 0 5px;">''' + str(__('All')) + '''</a></li>'''
for letter in letters:
if letter != selected:
html_snippet += '''
<li><a href="{0}" style="line-height: 20px;padding: 0 5px;">{1}</a></li>'''\
.format(full_path(context, letter=letter),letter.encode('utf8'))
else:
html_snippet += '''
<li class="active"><a href="{0}" style="line-height: 20px;padding: 0 5px;">{1}</a></li>'''\
.format(full_path(context, letter=letter),letter.encode('utf8'))
html_snippet += '''
</ul></div>'''
return html_snippet
@register.tag()
@easy_tag
def named_pagination(_tag_name, *params):
return NamedPagination(*params)
class Pagination(template.Node):
def __init__(self, object_record):
self.object_record = template.Variable(object_record)
def render(self, context):
object_record = self.object_record.resolve(context)
if not object_record.paginator:
# the paginator is empty
return ''
if object_record.paginator.count > settings.PAGINATION__ITEMS_PER_PAGE:
class_li_previous = 'disabled' if not object_record.has_previous() else ''
class_li_next = 'disabled' if not object_record.has_next() else ''
html_pages = []
for page in object_record.paginator.page_range:
class_li_page = 'active' if object_record.number == page else ''
html_pages.append(u'<li class="{0}"><a href="{1}">{2}</a></li>'.format(class_li_page, full_path(context, page=page), page))
html_snippet = u'''
<div class="pagination">
<ul>
<li class="prev {0}"><a href="{1}">← {2}</a></li>
{3}
<li class="next {4}"><a href="{5}">{6} →</a></li>
</ul>
</div>
'''.format(
class_li_previous,
full_path(context, page=object_record.previous_page_number()),
_('Previous'),
''.join(html_pages),
class_li_next,
full_path(context, page=object_record.next_page_number()),
_('Next')
)
return html_snippet
else:
return ''
@register.tag()
@easy_tag
def pagination(_tag_name, params):
return Pagination(params)
class SimplePagination(template.Node):
def __init__(self, object_record):
self.object_record = template.Variable(object_record)
def render(self, context):
object_record = self.object_record.resolve(context)
if not object_record.paginator:
# the paginator is empty
return ''
if object_record.paginator.count > settings.PAGINATION__ITEMS_PER_PAGE:
class_li_previous = 'disabled' if not object_record.has_previous() else ''
class_li_next = 'disabled' if not object_record.has_next() else ''
html_snippet = u'''
<span style=""><b>{0}-{1}</b> {2} <b>{3}</b></span>
<span class="pagination"><ul>
<li class="prev {4}">
<a href="{5}">←</a></li>
<li class="next {6}">
<a href="{7}">→</a></li>
</ul></span>
'''.format(object_record.start_index(),
object_record.end_index(), _('of'),
object_record.paginator.count,
class_li_previous,
full_path(context, page=object_record.previous_page_number()),
class_li_next,
full_path(context, page=object_record.next_page_number()))
return html_snippet
else: return ''
@register.tag()
@easy_tag
def simple_pagination(_tag_name, params):
return SimplePagination(params)
class FieldHelpText(template.Node):
def __init__(self, field_name, help_text, glossary_refname):
self.field_name = template.Variable(field_name)
self.help_text = template.Variable(help_text)
self.glossary_refname = glossary_refname
def render(self, context):
field_name = self.field_name.resolve(context)
help_text = self.help_text.resolve(context)
glossary_refname = self.glossary_refname
for value in ['field_name', 'help_text', 'glossary_refname']:
if len(locals().get(value)) < 1:
return ''
html_snippet = u'''
<a class="help-text"
target="_blank"
rel="popover"
data-original-title="{0} {1}"
data-content="{2}"
href="{3}{4}">
<i class="icon-question-sign"> </i>
</a>
'''.format(_('Help on:'),
field_name,
help_text,
GLOSSARY_URL,
glossary_refname).strip()
return html_snippet
@register.tag()
@easy_tag
def field_help(_tag_name, *params):
"""
Renders the help for a given field.
    Usage: {% field_help field_name help_text glossary_refname %}
"""
return FieldHelpText(*params)
scielo-django-extensions | /scielo-django-extensions-0.4.tar.gz/scielo-django-extensions-0.4/scielo_extensions/templatetags/scielo_common.py | scielo_common.py
Paperboy
========
Utility for sending SciELO data from local site servers to the processing
server, and also from a collection's servers to the SciELO Network processing
server. The utility allows sending databases for processing, as well as images,
PDFs, translations and XML files.

PaperBoy Dockerized?
====================

`PaperBoy Dockerizado <https://github.com/rondinelisaad/paperboy/blob/master/paperboy-dockerizado.md>`_

How to install
==============

Linux
-----

::

    pip install scielo-paperboy

Windows
-------

1. Install the following dependencies:

   * paramiko 1.16.0 or later
   * pycrypto 2.6.1 or later

2. Install Paperboy::

    pip install scielo-paperboy

How to use
==========

With a configuration file
-------------------------

Create a configuration file using the config.ini-TEMPLATE template.

config.ini::

    [app:main]
    source_dir=/var/www/scielo
    serial_source_dir=/var/www/scielo
    cisis_dir=/var/www/scielo/proc/cisis
    scilista=/var/www/scielo/serial/scilista.lst
    destiny_dir=/var/www/scielo
    server=localhost
    port=21
    user=anonymous
    password=anonymous

Create an environment variable pointing to the configuration file.

Linux::

    export PAPERBOY_SETTINGS_FILE=config.ini

Windows::

    set PAPERBOY_SETTINGS_FILE=config.ini

Available utilities:

* paperboy_delivery_to_server

  Sends data from the local site to the processing server.

* paperboy_delivery_to_scielo

  Sends data from a SciELO collection to the processing of the whole SciELO
  Network.

For help::

    paperboy_delivery_to_server --help
    paperboy_delivery_to_scielo --help

To enable the database compatibility module in the
**paperboy_delivery_to_server** utility, use the ``-m`` option. The
compatibility module converts the databases so that they are compatible with
the target operating system. It should be used when the goal is to send
databases from Windows to Linux, or the other way around.

::

    paperboy_delivery_to_server -m

Without a configuration file
----------------------------

Run::

    paperboy_delivery_to_scielo --help
    paperboy_delivery_to_server --help
scielo-paperboy | /scielo_paperboy-0.12.7.tar.gz/scielo_paperboy-0.12.7/README.rst | README.rst
import argparse
import logging
import logging.config
import os
import subprocess
from paperboy.utils import settings
from paperboy.communicator import SFTP, FTP
logger = logging.getLogger(__name__)
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'ERROR'
},
'paperboy': {
'handlers': ['console'],
'level': 'INFO'
}
}
}
def _config_logging(logging_level='INFO'):
LOGGING['loggers']['paperboy']['level'] = logging_level
logging.config.dictConfig(LOGGING)
def make_iso(mst_input, iso_output, cisis_dir=None, fltr=None, proc=None):
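    # Export an ISIS master file (mst_input, path without extension) to an ISO
    # file (iso_output) using the CISIS "mx" utility; fltr and proc are extra
    # mx arguments (e.g. a record filter such as "TP=H"). Returns True on
    # success and False otherwise.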
logger.info(u'Making iso for %s', mst_input)
status = '1' # erro de acordo com stdout do CISIS
command = [remove_last_slash(cisis_dir) + u'/mx' if cisis_dir else u'mx']
command.append(mst_input)
if fltr:
command.append(u'btell=0')
command.append(fltr)
if proc:
command.append(proc)
command.append(u'iso=%s' % (iso_output))
command.append(u'-all')
command.append(u'now')
logger.debug(u'Running: %s', u' '.join(command))
try:
status = subprocess.call(command)
except OSError:
logger.error(u'Error while running mx, check if the command is available on the syspath, or the CISIS path was correctly indicated in the config file')
if str(status) == '0':
logger.debug(u'ISO %s creation done for %s', iso_output, mst_input)
return True
if str(status) == '1':
logger.error(u'ISO creation did not work for %s', mst_input)
return False
return False
def make_section_catalog_report(source_dir, cisis_dir):
logger.info(u'Making report static_section_catalog.txt')
command = u"""mkdir -p %s/bases/reports; %s/mx %s/bases/issue/issue btell=0 "pft=if p(v49) then (v35[1],v65[1]*0.4,s(f(val(s(v36[1]*4.3))+10000,2,0))*1.4,'|',v49^l,'|',v49^c,'|',v49^t,/) fi" lw=0 -all now > %s/bases/reports/static_section_catalog.txt""" % (
source_dir,
cisis_dir,
source_dir,
source_dir,
)
logger.debug(u'Running: %s', command)
try:
status = subprocess.Popen(command, shell=True)
status.wait()
except OSError:
logger.error(u'Error while creating report, static_section_catalog.txt was not updated')
logger.debug(u'Report static_section_catalog.txt done')
def make_static_file_report(source_dir, report):
extension_name = 'htm' if report == 'translation' else report
report_name = 'html' if report == 'translation' else report
logger.info(u'Making report static_%s_files.txt', report_name)
command = u'mkdir -p %s/bases/%s; mkdir -p %s/bases/reports; cd %s/bases/%s; find . -name "*.%s*" > %s/bases/reports/static_%s_files.txt' %(
source_dir,
report,
source_dir,
source_dir,
report,
extension_name,
source_dir,
report_name
)
logger.debug(u'Running: %s', command)
try:
status = subprocess.Popen(command, shell=True)
status.wait()
except OSError:
logger.error(u'Error while creating report, static_%s_files.txt was not updated', report_name)
logger.debug(u'Report static_%s_files.txt done', report_name)
def remove_last_slash(path):
path = path.replace('\\', '/')
try:
return path[:-1] if path[-1] == '/' else path
except IndexError:
return path
class Delivery(object):
def __init__(self, source_type, cisis_dir, source_dir, destiny_dir, server,
server_type, port, user, password, original_dataset):
self.source_type = source_type
self.cisis_dir = remove_last_slash(cisis_dir)
self.source_dir = remove_last_slash(source_dir)
self.destiny_dir = remove_last_slash(destiny_dir)
self.original_dataset = bool(original_dataset)
if str(server_type) == 'sftp':
self.client = SFTP(server, int(port), user, password)
elif str(server_type) == 'ftp':
self.client = FTP(server, int(port), user, password)
else:
raise TypeError(u'server_type must be ftp or sftp')
def _local_remove(self, path):
logger.info(u'Removing temporary file (%s)', path)
try:
os.remove(path)
            logger.debug(u'Temporary file has been removed (%s)', path)
except OSError as e:
logger.error(
u'Fail while removing temporary file (%s): %s',
path,
e.strerror
)
def send_full_isos(self):
"""
This method will prepare and send article, issue, and title iso files to
SciELO.
Those files are used to produce bibliometric and site usage indicators.
This method will use the mst, xrf files available in bases-work directory
"""
# Making title ISO
make_iso(
self.source_dir + u'/bases-work/title/title',
self.source_dir + u'/bases-work/title/title_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/title/title_full.iso',
self.destiny_dir + u'/title_full.iso'
)
# Making issue ISO
make_iso(
self.source_dir + u'/bases-work/issue/issue',
self.source_dir + u'/bases-work/issue/issue_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/issue/issue_full.iso',
self.destiny_dir + u'/issue_full.iso'
)
# Making article ISO
make_iso(
self.source_dir + u'/bases-work/artigo/artigo',
self.source_dir + u'/bases-work/artigo/artigo_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/artigo/artigo_full.iso',
self.destiny_dir + u'/artigo_full.iso'
)
def send_isos(self):
"""
This method will prepare and send article, issue, issues, title and bib4cit
iso files to SciELO.
Those files are used to produce bibliometric and site usage indicators.
This method will use the mst, xrf files available in bases directory
"""
# Making title ISO
make_iso(
self.source_dir + u'/bases/title/title',
self.source_dir + u'/bases/title/title.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases/title/title.iso',
self.destiny_dir + u'/title.iso'
)
# Making issue ISO
make_iso(
self.source_dir + u'/bases/issue/issue',
self.source_dir + u'/bases/issue/issue.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases/issue/issue.iso',
self.destiny_dir + u'/issue.iso'
)
# Making issues ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/issue/issues.iso',
self.cisis_dir,
u'TP=I'
)
self.client.put(
self.source_dir + u'/bases/issue/issues.iso',
self.destiny_dir + u'/issues.iso'
)
# Making article ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/artigo/artigo.iso',
self.cisis_dir,
u'TP=H',
u'''"proc='d91<91 0>',ref(mfn-1,v91),'</91>'"'''
)
self.client.put(
self.source_dir + u'/bases/artigo/artigo.iso',
self.destiny_dir + u'/artigo.iso'
)
# Making bib4cit ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/artigo/bib4cit.iso',
self.cisis_dir,
u'TP=C'
)
self.client.put(
self.source_dir + u'/bases/artigo/bib4cit.iso',
self.destiny_dir + u'/bib4cit.iso'
)
def send_static_reports(self):
"""
        This method will prepare and send static reports to the SciELO FTP.
The static reports are:
static_pdf_files.txt
List of PDF files available in the server side file system.
static_html_files.txt
List of HTML files available in the server side file system.
static_xml_files.txt
List of XML files available in the server side file system.
static_section_catalog.txt
List of the journals sections extracted from the issue database.
Those files are used to improve the metadata quality and completeness of
the Article Meta API.
"""
make_static_file_report(self.source_dir, u'pdf')
self.client.put(
self.source_dir + u'/bases/reports/static_pdf_files.txt',
self.destiny_dir + u'/static_pdf_files.txt'
)
make_static_file_report(self.source_dir, u'translation')
self.client.put(
self.source_dir + u'/bases/reports/static_html_files.txt',
self.destiny_dir + u'/static_html_files.txt'
)
make_static_file_report(self.source_dir, u'xml')
self.client.put(
self.source_dir + u'/bases/reports/static_xml_files.txt',
self.destiny_dir + u'/static_xml_files.txt'
)
make_section_catalog_report(self.source_dir, self.cisis_dir)
self.client.put(
self.source_dir + u'/bases/reports/static_section_catalog.txt',
self.destiny_dir + u'/static_section_catalog.txt'
)
def run(self, source_type=None):
source_type = source_type if source_type else self.source_type
sender = self.send_full_isos if self.original_dataset is True else self.send_isos
if source_type == u'isos':
sender()
elif source_type == u'reports':
self.send_static_reports()
else:
sender()
self.send_static_reports()
def main():
setts = settings.get(u'app:main', {})
parser = argparse.ArgumentParser(
description=u'Tools to send ISO databases to SciELO Network processing'
)
parser.add_argument(
u'--source_type',
u'-t',
choices=['isos', 'reports'],
        help=u'Type of data that will be sent to the server'
)
parser.add_argument(
u'--cisis_dir',
u'-r',
default=setts.get(u'cisis_dir', u''),
        help=u'absolute path to the directory where the ISIS utilities are installed. It is not necessary to inform it when the utilities are in the system path.'
)
parser.add_argument(
u'--original_dataset',
u'-o',
action="store_true",
help=u'Send the original dataset [title, issue, artigo] without bib4cit, all the content is available at artigo field 706=c 706=h 706=i 706=o.'
)
parser.add_argument(
u'--source_dir',
u'-s',
default=setts.get(u'source_dir', u'.'),
        help=u'absolute path where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--destiny_dir',
u'-d',
default=setts.get(u'destiny_dir', u'.'),
        help=u'absolute path (server side) where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--server',
u'-f',
default=setts.get(u'server', u'localhost'),
help=u'FTP or SFTP Server'
)
parser.add_argument(
u'--server_type',
u'-e',
default=setts.get(u'server_type', u'sftp'),
choices=['ftp', 'sftp']
)
parser.add_argument(
u'--port',
u'-x',
default=setts.get(u'port', u'22'),
help=u'usually 22 for SFTP connection or 21 for FTP connection'
)
parser.add_argument(
u'--user',
u'-u',
default=setts.get(u'user', u'anonymous'),
help=u'FTP or SFTP username'
)
parser.add_argument(
u'--password',
u'-p',
default=setts.get(u'password', u'anonymous'),
help=u'FTP or SFTP password'
)
parser.add_argument(
u'--logging_level',
u'-l',
default=u'DEBUG',
choices=[u'DEBUG', u'INFO', u'WARNING', u'ERROR', u'CRITICAL'],
help=u'Log level'
)
args = parser.parse_args()
_config_logging(args.logging_level)
delivery = Delivery(
args.source_type,
args.cisis_dir,
args.source_dir,
args.destiny_dir,
args.server,
args.server_type,
args.port,
args.user,
args.password,
args.original_dataset
)
delivery.run()
|
scielo-paperboy
|
/scielo_paperboy-0.12.7.tar.gz/scielo_paperboy-0.12.7/paperboy/send_to_scielo.py
|
send_to_scielo.py
|
import argparse
import logging
import logging.config
import os
import subprocess
from paperboy.utils import settings
from paperboy.communicator import SFTP, FTP
logger = logging.getLogger(__name__)
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'ERROR'
},
'paperboy': {
'handlers': ['console'],
'level': 'INFO'
}
}
}
def _config_logging(logging_level='INFO'):
LOGGING['loggers']['paperboy']['level'] = logging_level
logging.config.dictConfig(LOGGING)
def make_iso(mst_input, iso_output, cisis_dir=None, fltr=None, proc=None):
logger.info(u'Making iso for %s', mst_input)
    status = '1'  # error, per the CISIS stdout convention
command = [remove_last_slash(cisis_dir) + u'/mx' if cisis_dir else u'mx']
command.append(mst_input)
if fltr:
command.append(u'btell=0')
command.append(fltr)
if proc:
command.append(proc)
command.append(u'iso=%s' % (iso_output))
command.append(u'-all')
command.append(u'now')
logger.debug(u'Running: %s', u' '.join(command))
try:
status = subprocess.call(command)
except OSError:
logger.error(u'Error while running mx, check if the command is available on the syspath, or the CISIS path was correctly indicated in the config file')
if str(status) == '0':
logger.debug(u'ISO %s creation done for %s', iso_output, mst_input)
return True
if str(status) == '1':
logger.error(u'ISO creation did not work for %s', mst_input)
return False
return False
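# Illustrative sketch (not executed): for a hypothetical call such as
#
#   make_iso(u'/var/www/scielo/bases/artigo/artigo',
#            u'/var/www/scielo/bases/artigo/artigo.iso',
#            cisis_dir=u'/usr/local/cisis', fltr=u'TP=H')
#
# the command list handed to subprocess.call would be roughly:
#
#   ['/usr/local/cisis/mx', '/var/www/scielo/bases/artigo/artigo',
#    'btell=0', 'TP=H', 'iso=/var/www/scielo/bases/artigo/artigo.iso',
#    '-all', 'now']
#
# All paths above are placeholders; only the mx argument layout is taken from
# the function body above.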
def make_section_catalog_report(source_dir, cisis_dir):
logger.info(u'Making report static_section_catalog.txt')
command = u"""mkdir -p %s/bases/reports; %s/mx %s/bases/issue/issue btell=0 "pft=if p(v49) then (v35[1],v65[1]*0.4,s(f(val(s(v36[1]*4.3))+10000,2,0))*1.4,'|',v49^l,'|',v49^c,'|',v49^t,/) fi" lw=0 -all now > %s/bases/reports/static_section_catalog.txt""" % (
source_dir,
cisis_dir,
source_dir,
source_dir,
)
logger.debug(u'Running: %s', command)
try:
status = subprocess.Popen(command, shell=True)
status.wait()
except OSError:
logger.error(u'Error while creating report, static_section_catalog.txt was not updated')
logger.debug(u'Report static_section_catalog.txt done')
def make_static_file_report(source_dir, report):
extension_name = 'htm' if report == 'translation' else report
report_name = 'html' if report == 'translation' else report
logger.info(u'Making report static_%s_files.txt', report_name)
command = u'mkdir -p %s/bases/%s; mkdir -p %s/bases/reports; cd %s/bases/%s; find . -name "*.%s*" > %s/bases/reports/static_%s_files.txt' %(
source_dir,
report,
source_dir,
source_dir,
report,
extension_name,
source_dir,
report_name
)
logger.debug(u'Running: %s', command)
try:
status = subprocess.Popen(command, shell=True)
status.wait()
except OSError:
logger.error(u'Error while creating report, static_%s_files.txt was not updated', report_name)
logger.debug(u'Report static_%s_files.txt done', report_name)
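# Illustrative sketch (not executed): make_static_file_report(source_dir, u'pdf')
# runs, via the shell, something equivalent to:
#
#   mkdir -p <source_dir>/bases/pdf; mkdir -p <source_dir>/bases/reports; \
#   cd <source_dir>/bases/pdf; \
#   find . -name "*.pdf*" > <source_dir>/bases/reports/static_pdf_files.txt
#
# For report == u'translation' the directory is bases/translation, the file
# pattern is "*.htm*" and the output file is static_html_files.txt.
# <source_dir> is a placeholder for the configured source directory.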
def remove_last_slash(path):
path = path.replace('\\', '/')
try:
return path[:-1] if path[-1] == '/' else path
except IndexError:
return path
class Delivery(object):
def __init__(self, source_type, cisis_dir, source_dir, destiny_dir, server,
server_type, port, user, password, original_dataset):
self.source_type = source_type
self.cisis_dir = remove_last_slash(cisis_dir)
self.source_dir = remove_last_slash(source_dir)
self.destiny_dir = remove_last_slash(destiny_dir)
self.original_dataset = bool(original_dataset)
if str(server_type) == 'sftp':
self.client = SFTP(server, int(port), user, password)
elif str(server_type) == 'ftp':
self.client = FTP(server, int(port), user, password)
else:
raise TypeError(u'server_type must be ftp or sftp')
def _local_remove(self, path):
logger.info(u'Removing temporary file (%s)', path)
try:
os.remove(path)
            logger.debug(u'Temporary file has been removed (%s)', path)
except OSError as e:
logger.error(
u'Fail while removing temporary file (%s): %s',
path,
e.strerror
)
def send_full_isos(self):
"""
This method will prepare and send article, issue, and title iso files to
SciELO.
Those files are used to produce bibliometric and site usage indicators.
This method will use the mst, xrf files available in bases-work directory
"""
# Making title ISO
make_iso(
self.source_dir + u'/bases-work/title/title',
self.source_dir + u'/bases-work/title/title_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/title/title_full.iso',
self.destiny_dir + u'/title_full.iso'
)
# Making issue ISO
make_iso(
self.source_dir + u'/bases-work/issue/issue',
self.source_dir + u'/bases-work/issue/issue_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/issue/issue_full.iso',
self.destiny_dir + u'/issue_full.iso'
)
# Making article ISO
make_iso(
self.source_dir + u'/bases-work/artigo/artigo',
self.source_dir + u'/bases-work/artigo/artigo_full.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases-work/artigo/artigo_full.iso',
self.destiny_dir + u'/artigo_full.iso'
)
def send_isos(self):
"""
This method will prepare and send article, issue, issues, title and bib4cit
iso files to SciELO.
Those files are used to produce bibliometric and site usage indicators.
This method will use the mst, xrf files available in bases directory
"""
# Making title ISO
make_iso(
self.source_dir + u'/bases/title/title',
self.source_dir + u'/bases/title/title.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases/title/title.iso',
self.destiny_dir + u'/title.iso'
)
# Making issue ISO
make_iso(
self.source_dir + u'/bases/issue/issue',
self.source_dir + u'/bases/issue/issue.iso',
self.cisis_dir
)
self.client.put(
self.source_dir + u'/bases/issue/issue.iso',
self.destiny_dir + u'/issue.iso'
)
# Making issues ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/issue/issues.iso',
self.cisis_dir,
u'TP=I'
)
self.client.put(
self.source_dir + u'/bases/issue/issues.iso',
self.destiny_dir + u'/issues.iso'
)
# Making article ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/artigo/artigo.iso',
self.cisis_dir,
u'TP=H',
u'''"proc='d91<91 0>',ref(mfn-1,v91),'</91>'"'''
)
self.client.put(
self.source_dir + u'/bases/artigo/artigo.iso',
self.destiny_dir + u'/artigo.iso'
)
# Making bib4cit ISO
make_iso(
self.source_dir + u'/bases/artigo/artigo',
self.source_dir + u'/bases/artigo/bib4cit.iso',
self.cisis_dir,
u'TP=C'
)
self.client.put(
self.source_dir + u'/bases/artigo/bib4cit.iso',
self.destiny_dir + u'/bib4cit.iso'
)
def send_static_reports(self):
"""
        This method will prepare and send static reports to the SciELO FTP.
The static reports are:
static_pdf_files.txt
List of PDF files available in the server side file system.
static_html_files.txt
List of HTML files available in the server side file system.
static_xml_files.txt
List of XML files available in the server side file system.
static_section_catalog.txt
List of the journals sections extracted from the issue database.
Those files are used to improve the metadata quality and completeness of
the Article Meta API.
"""
make_static_file_report(self.source_dir, u'pdf')
self.client.put(
self.source_dir + u'/bases/reports/static_pdf_files.txt',
self.destiny_dir + u'/static_pdf_files.txt'
)
make_static_file_report(self.source_dir, u'translation')
self.client.put(
self.source_dir + u'/bases/reports/static_html_files.txt',
self.destiny_dir + u'/static_html_files.txt'
)
make_static_file_report(self.source_dir, u'xml')
self.client.put(
self.source_dir + u'/bases/reports/static_xml_files.txt',
self.destiny_dir + u'/static_xml_files.txt'
)
make_section_catalog_report(self.source_dir, self.cisis_dir)
self.client.put(
self.source_dir + u'/bases/reports/static_section_catalog.txt',
self.destiny_dir + u'/static_section_catalog.txt'
)
def run(self, source_type=None):
source_type = source_type if source_type else self.source_type
sender = self.send_full_isos if self.original_dataset is True else self.send_isos
if source_type == u'isos':
sender()
elif source_type == u'reports':
self.send_static_reports()
else:
sender()
self.send_static_reports()
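# Illustrative sketch (not executed): programmatic use of Delivery to push only
# the ISO databases to a hypothetical SFTP server. Every value below is a
# placeholder; in normal use main() builds this object from argparse/settings.
#
#   delivery = Delivery(
#       source_type=u'isos',
#       cisis_dir=u'/usr/local/cisis',
#       source_dir=u'/var/www/scielo',
#       destiny_dir=u'/home/scielo/inbox',
#       server=u'sftp.example.org',
#       server_type=u'sftp',
#       port=u'22',
#       user=u'paperboy',
#       password=u'secret',
#       original_dataset=False
#   )
#   delivery.run()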
def main():
setts = settings.get(u'app:main', {})
parser = argparse.ArgumentParser(
description=u'Tools to send ISO databases to SciELO Network processing'
)
parser.add_argument(
u'--source_type',
u'-t',
choices=['isos', 'reports'],
        help=u'Type of data that will be sent to the server'
)
parser.add_argument(
u'--cisis_dir',
u'-r',
default=setts.get(u'cisis_dir', u''),
        help=u'Absolute path to the directory where the ISIS utilities are installed. It is not necessary to inform it when the utilities are on the syspath.'
)
parser.add_argument(
u'--original_dataset',
u'-o',
action="store_true",
        help=u'Send the original dataset [title, issue, artigo] without bib4cit; all the content is available in the artigo fields 706=c, 706=h, 706=i and 706=o.'
)
parser.add_argument(
u'--source_dir',
u'-s',
default=setts.get(u'source_dir', u'.'),
        help=u'Absolute path where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--destiny_dir',
u'-d',
default=setts.get(u'destiny_dir', u'.'),
        help=u'Absolute path (on the server side) where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--server',
u'-f',
default=setts.get(u'server', u'localhost'),
help=u'FTP or SFTP Server'
)
parser.add_argument(
u'--server_type',
u'-e',
default=setts.get(u'server_type', u'sftp'),
choices=['ftp', 'sftp']
)
parser.add_argument(
u'--port',
u'-x',
default=setts.get(u'port', u'22'),
help=u'usually 22 for SFTP connection or 21 for FTP connection'
)
parser.add_argument(
u'--user',
u'-u',
default=setts.get(u'user', u'anonymous'),
help=u'FTP or SFTP username'
)
parser.add_argument(
u'--password',
u'-p',
default=setts.get(u'password', u'anonymous'),
help=u'FTP or SFTP password'
)
parser.add_argument(
u'--logging_level',
u'-l',
default=u'DEBUG',
choices=[u'DEBUG', u'INFO', u'WARNING', u'ERROR', u'CRITICAL'],
help=u'Log level'
)
args = parser.parse_args()
_config_logging(args.logging_level)
delivery = Delivery(
args.source_type,
args.cisis_dir,
args.source_dir,
args.destiny_dir,
args.server,
args.server_type,
args.port,
args.user,
args.password,
args.original_dataset
)
delivery.run()
| 0.199386 | 0.091626 |
import logging
import paramiko
from paramiko.client import SSHClient
from paramiko import ssh_exception
from ftplib import FTP as FTPLIB
import ftplib
logger = logging.getLogger(__name__)
class Communicator(object):
def __init__(self, host, port, user, password):
self.host = host
self.port = port
self.user = user
self.password = password
self._active_client = None
class FTP(Communicator):
ftp_client = None
@property
def client(self):
self.ftp_client = FTPLIB(self.host)
try:
self.ftp_client.login(user=self.user, passwd=self.password)
except ftplib.error_perm:
            logger.error(u'Fail while connecting through FTP. Check your credentials.')
else:
return self.ftp_client
def exists_dir(self, path):
logger.info(u'Checking if directory already exists (%s)', path)
try:
self.client.nlst(str(path))
logger.debug(u'Directory already exists (%s)', path)
return True
except ftplib.error_perm:
            logger.debug(u'Directory does not exist (%s)', path)
return False
def mkdir(self, path):
logger.info(u'Creating directory (%s)', path)
try:
self.client.mkd(path)
            logger.debug(u'Directory has been created (%s)', path)
except ftplib.error_perm as e:
if not self.exists_dir(path):
logger.error(
u'Fail while creating directory (%s): %s',
path,
e.message
)
    def chdir(self, path):
        logger.info(u'Changing to directory (%s)', path)
        try:
            # ftplib exposes cwd() rather than chdir()
            self.client.cwd(path)
        except ftplib.all_errors as e:
            logger.error(
                u'Fail while accessing directory (%s): %s',
                path,
                e
            )
            raise(e)
def put(self, from_fl, to_fl, binary=True):
logger.info(
u'Copying file from (%s) to (%s)',
from_fl,
to_fl
)
read_type = u'rb'
if not binary:
read_type = u'r'
try:
command = u'STOR %s' % to_fl
if binary:
self.client.storbinary(
command.encode('utf-8'), open(from_fl, read_type)
)
else:
self.client.storlines(
command.encode('utf-8'), open(from_fl, read_type)
)
except IOError:
logger.error(u'File not found (%s)', from_fl)
        logger.debug(u'File has been copied (%s)', to_fl)
class SFTP(Communicator):
ssh_client = None
@property
def client(self):
if self.ssh_client and self.ssh_client.get_transport().is_active():
return self._active_client
self._active_client = self._client()
return self._active_client
def _client(self):
logger.info(
            u'Connecting through SSH to the server (%s:%s)',
self.host,
self.port
)
try:
self.ssh_client = SSHClient()
self.ssh_client.set_missing_host_key_policy(
paramiko.AutoAddPolicy()
)
self.ssh_client.connect(
self.host,
username=self.user,
password=self.password,
compress=True
)
except ssh_exception.AuthenticationException:
logger.error(
                u'Fail while connecting through SSH. Check your credentials.')
return None
except ssh_exception.NoValidConnectionsError:
logger.error(u'Fail while connecting through SSH. Check your credentials or the server availability.')
return None
else:
return self.ssh_client.open_sftp()
def mkdir(self, path):
logger.info(u'Creating directory (%s)', path)
try:
self.client.mkdir(path)
            logger.debug(u'Directory has been created (%s)', path)
except IOError as e:
try:
self.client.stat(path)
logger.warning(u'Directory already exists (%s)', path)
except IOError as e:
logger.error(
u'Fail while creating directory (%s): %s',
path,
e.strerror
)
raise(e)
def chdir(self, path):
logger.info(u'Changing to directory (%s)', path)
try:
self.client.chdir(path)
except IOError as e:
logger.error(
u'Fail while accessing directory (%s): %s',
path,
e.strerror
)
raise(e)
def put(self, from_fl, to_fl):
logger.info(
u'Copying file from (%s) to (%s)',
from_fl,
to_fl
)
try:
self.client.put(from_fl, to_fl)
            logger.debug(u'File has been copied (%s)', to_fl)
except OSError as e:
logger.error(
u'Fail while copying file (%s), file not found',
to_fl
)
except IOError as e:
logger.error(
u'Fail while copying file (%s): %s',
to_fl,
e.strerror
)
|
scielo-paperboy
|
/scielo_paperboy-0.12.7.tar.gz/scielo_paperboy-0.12.7/paperboy/communicator.py
|
communicator.py
|
import logging
import paramiko
from paramiko.client import SSHClient
from paramiko import ssh_exception
from ftplib import FTP as FTPLIB
import ftplib
logger = logging.getLogger(__name__)
class Communicator(object):
def __init__(self, host, port, user, password):
self.host = host
self.port = port
self.user = user
self.password = password
self._active_client = None
class FTP(Communicator):
ftp_client = None
@property
def client(self):
self.ftp_client = FTPLIB(self.host)
try:
self.ftp_client.login(user=self.user, passwd=self.password)
except ftplib.error_perm:
            logger.error(u'Fail while connecting through FTP. Check your credentials.')
else:
return self.ftp_client
def exists_dir(self, path):
logger.info(u'Checking if directory already exists (%s)', path)
try:
self.client.nlst(str(path))
logger.debug(u'Directory already exists (%s)', path)
return True
except ftplib.error_perm:
            logger.debug(u'Directory does not exist (%s)', path)
return False
def mkdir(self, path):
logger.info(u'Creating directory (%s)', path)
try:
self.client.mkd(path)
            logger.debug(u'Directory has been created (%s)', path)
except ftplib.error_perm as e:
if not self.exists_dir(path):
logger.error(
u'Fail while creating directory (%s): %s',
path,
e.message
)
    def chdir(self, path):
        logger.info(u'Changing to directory (%s)', path)
        try:
            # ftplib exposes cwd() rather than chdir()
            self.client.cwd(path)
        except ftplib.all_errors as e:
            logger.error(
                u'Fail while accessing directory (%s): %s',
                path,
                e
            )
            raise(e)
def put(self, from_fl, to_fl, binary=True):
logger.info(
u'Copying file from (%s) to (%s)',
from_fl,
to_fl
)
read_type = u'rb'
if not binary:
read_type = u'r'
try:
command = u'STOR %s' % to_fl
if binary:
self.client.storbinary(
command.encode('utf-8'), open(from_fl, read_type)
)
else:
self.client.storlines(
command.encode('utf-8'), open(from_fl, read_type)
)
except IOError:
logger.error(u'File not found (%s)', from_fl)
        logger.debug(u'File has been copied (%s)', to_fl)
class SFTP(Communicator):
ssh_client = None
@property
def client(self):
if self.ssh_client and self.ssh_client.get_transport().is_active():
return self._active_client
self._active_client = self._client()
return self._active_client
def _client(self):
logger.info(
            u'Connecting through SSH to the server (%s:%s)',
self.host,
self.port
)
try:
self.ssh_client = SSHClient()
self.ssh_client.set_missing_host_key_policy(
paramiko.AutoAddPolicy()
)
self.ssh_client.connect(
self.host,
username=self.user,
password=self.password,
compress=True
)
except ssh_exception.AuthenticationException:
logger.error(
                u'Fail while connecting through SSH. Check your credentials.')
return None
except ssh_exception.NoValidConnectionsError:
logger.error(u'Fail while connecting through SSH. Check your credentials or the server availability.')
return None
else:
return self.ssh_client.open_sftp()
def mkdir(self, path):
logger.info(u'Creating directory (%s)', path)
try:
self.client.mkdir(path)
            logger.debug(u'Directory has been created (%s)', path)
except IOError as e:
try:
self.client.stat(path)
logger.warning(u'Directory already exists (%s)', path)
except IOError as e:
logger.error(
u'Fail while creating directory (%s): %s',
path,
e.strerror
)
raise(e)
def chdir(self, path):
logger.info(u'Changing to directory (%s)', path)
try:
self.client.chdir(path)
except IOError as e:
logger.error(
u'Fail while accessing directory (%s): %s',
path,
e.strerror
)
raise(e)
def put(self, from_fl, to_fl):
logger.info(
u'Copying file from (%s) to (%s)',
from_fl,
to_fl
)
try:
self.client.put(from_fl, to_fl)
            logger.debug(u'File has been copied (%s)', to_fl)
except OSError as e:
logger.error(
u'Fail while copying file (%s), file not found',
to_fl
)
except IOError as e:
logger.error(
u'Fail while copying file (%s): %s',
to_fl,
e.strerror
)
| 0.387111 | 0.063715 |
import os
import weakref
import logging
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
logger = logging.getLogger(__name__)
class SingletonMixin(object):
"""
Adds a singleton behaviour to an existing class.
weakrefs are used in order to keep a low memory footprint.
As a result, args and kwargs passed to classes initializers
    must be of weakly referenceable types.
"""
_instances = weakref.WeakValueDictionary()
def __new__(cls, *args, **kwargs):
key = (cls, args, tuple(kwargs.items()))
if key in cls._instances:
return cls._instances[key]
try:
new_instance = super(type(cls), cls).__new__(cls, *args, **kwargs)
except TypeError:
new_instance = super(type(cls), cls).__new__(cls, **kwargs)
cls._instances[key] = new_instance
return new_instance
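def _singleton_sketch():
    """Illustrative sketch only; never called by paperboy.

    Shows the intended behaviour of SingletonMixin: two instantiations with the
    same arguments share one object, because the weak-value cache is keyed on
    (class, args, kwargs). The class name and values below are hypothetical.
    """
    class Greeter(SingletonMixin):
        def __init__(self, name):
            self.name = name

    a = Greeter('world')
    b = Greeter('world')
    # Expected to be True while a strong reference to the instance is alive.
    return a is b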
class Configuration(SingletonMixin):
"""
Acts as a proxy to the ConfigParser module
"""
def __init__(self, fp, parser_dep=ConfigParser):
self.conf = parser_dep()
try:
self.conf.read_file(fp)
except AttributeError:
self.conf.readfp(fp)
@classmethod
def from_env(cls):
try:
filepath = os.environ['PAPERBOY_SETTINGS_FILE']
except KeyError:
logger.warning('missing env variable PAPERBOY_SETTINGS_FILE, no presets available')
return {}
return cls.from_file(filepath)
@classmethod
def from_file(cls, filepath):
"""
Returns an instance of Configuration
``filepath`` is a text string.
"""
try:
fp = open(filepath, 'r')
except IOError:
logger.warning('file defined on PAPERBOY_SETTINGS_FILE environment variable not found (%s), no presets available', filepath)
return {}
return cls(fp)
def __getattr__(self, attr):
return getattr(self.conf, attr)
def items(self):
"""Settings as key-value pair.
"""
return [(section, dict(self.conf.items(section, raw=True))) for \
section in [section for section in self.conf.sections()]]
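# Illustrative sketch (not executed): given a settings file such as
#
#   [app:main]
#   source_dir = /var/www/scielo
#   server = localhost
#
# Configuration.from_file('/path/to/paperboy.ini').items() returns
#
#   [('app:main', {'source_dir': '/var/www/scielo', 'server': 'localhost'})]
#
# which is the shape the module-level ``settings`` dict below is built from.
# The file name, section keys and values are placeholders.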
config = Configuration.from_env()
settings = dict(config.items())
|
scielo-paperboy
|
/scielo_paperboy-0.12.7.tar.gz/scielo_paperboy-0.12.7/paperboy/utils.py
|
utils.py
|
import os
import weakref
import logging
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
logger = logging.getLogger(__name__)
class SingletonMixin(object):
"""
Adds a singleton behaviour to an existing class.
weakrefs are used in order to keep a low memory footprint.
As a result, args and kwargs passed to classes initializers
    must be of weakly referenceable types.
"""
_instances = weakref.WeakValueDictionary()
def __new__(cls, *args, **kwargs):
key = (cls, args, tuple(kwargs.items()))
if key in cls._instances:
return cls._instances[key]
try:
new_instance = super(type(cls), cls).__new__(cls, *args, **kwargs)
except TypeError:
new_instance = super(type(cls), cls).__new__(cls, **kwargs)
cls._instances[key] = new_instance
return new_instance
class Configuration(SingletonMixin):
"""
Acts as a proxy to the ConfigParser module
"""
def __init__(self, fp, parser_dep=ConfigParser):
self.conf = parser_dep()
try:
self.conf.read_file(fp)
except AttributeError:
self.conf.readfp(fp)
@classmethod
def from_env(cls):
try:
filepath = os.environ['PAPERBOY_SETTINGS_FILE']
except KeyError:
logger.warning('missing env variable PAPERBOY_SETTINGS_FILE, no presets available')
return {}
return cls.from_file(filepath)
@classmethod
def from_file(cls, filepath):
"""
Returns an instance of Configuration
``filepath`` is a text string.
"""
try:
fp = open(filepath, 'r')
except IOError:
logger.warning('file defined on PAPERBOY_SETTINGS_FILE environment variable not found (%s), no presets available', filepath)
return {}
return cls(fp)
def __getattr__(self, attr):
return getattr(self.conf, attr)
def items(self):
"""Settings as key-value pair.
"""
return [(section, dict(self.conf.items(section, raw=True))) for \
section in [section for section in self.conf.sections()]]
config = Configuration.from_env()
settings = dict(config.items())
| 0.560854 | 0.146575 |
import argparse
import logging
import logging.config
import os
import subprocess
from paperboy.utils import settings
from paperboy.communicator import SFTP, FTP
logger = logging.getLogger(__name__)
ALLOWED_ITENS = ['serial', 'pdfs', 'images', 'translations']
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'ERROR'
},
'paperboy': {
'handlers': ['console'],
'level': 'INFO'
}
}
}
def _config_logging(logging_level='INFO'):
LOGGING['loggers']['paperboy']['level'] = logging_level
logging.config.dictConfig(LOGGING)
def master_conversor(mst_input, mst_output, cisis_dir=None):
logger.debug(u'Running database conversion for %s', mst_input)
    status = '1'  # error, per the CISIS stdout convention
command = remove_last_slash(cisis_dir) + '/crunchmf' if cisis_dir else 'crunchmf'
logger.debug(u'Running: %s %s %s', command, mst_input, mst_output)
try:
status = subprocess.call([command, mst_input, mst_output])
except OSError:
logger.error(u'Error while running crunchmf, check if the command is available on the syspath, or the CISIS path was correctly indicated in the config file')
if str(status) == '0':
logger.debug(u'Conversion done for %s', mst_input)
return True
if str(status) == '1':
        logger.error(u'Conversion did not work for %s', mst_input)
return False
return False
def parse_scilista(scilista):
logger.info(u'Loading scilista (%s)', scilista)
lista = []
try:
f = open(scilista, 'r')
except IOError:
logger.error(
u'Fail while loading scilista, file not found (%s)',
scilista
)
else:
with f:
count = 0
for line in f:
line = line.strip()
count += 1
splited_line = [i.strip() for i in line.split(' ')]
if len(splited_line) > 3 or len(splited_line) < 2:
logger.warning(
u'Wrong value in the file (%s) line (%d): %s',
scilista,
count,
line
)
continue
                if len(splited_line) == 3:  # third token may flag the issue for deletion
if splited_line[2].lower() == 'del':
lista.append((splited_line[0], splited_line[1], True))
else:
lista.append((splited_line[0], splited_line[1], False))
                if len(splited_line) == 2:  # regular issue entry
lista.append((splited_line[0], splited_line[1], False))
logger.info(u'scilista loaded (%s)', scilista)
return lista
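# Illustrative sketch (not executed): for a scilista.lst containing, e.g.:
#
#   rsap v12n3
#   rsap v12n4 del
#
# parse_scilista returns
#
#   [(u'rsap', u'v12n3', False), (u'rsap', u'v12n4', True)]
#
# where the third element marks issues flagged for deletion; lines with fewer
# than two or more than three tokens are logged and skipped. The acronym and
# issue labels above are placeholders.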
def remove_last_slash(path):
path = path.replace('\\', '/')
try:
return path[:-1] if path[-1] == '/' else path
except IndexError:
return path
class Delivery(object):
def __init__(self, source_type, cisis_dir, scilista, source_dir, destiny_dir,
compatibility_mode, server, server_type, port, user, password, serial_source_dir=None):
self._scilista = parse_scilista(scilista)
self.scilista = scilista
self.cisis_dir = remove_last_slash(cisis_dir)
self.source_type = source_type
self.source_dir = remove_last_slash(source_dir)
self.serial_source_dir = remove_last_slash(serial_source_dir) if serial_source_dir else self.source_dir
self.destiny_dir = remove_last_slash(destiny_dir)
self.compatibility_mode = compatibility_mode
if str(server_type) == 'sftp':
self.client = SFTP(server, int(port), user, password)
elif str(server_type) == 'ftp':
self.client = FTP(server, int(port), user, password)
else:
raise TypeError(u'server_type must be ftp or sftp')
def _local_remove(self, path):
logger.info(u'Removing temporary file (%s)', path)
try:
os.remove(path)
            logger.debug(u'Temporary file has been removed (%s)', path)
except OSError as e:
logger.error(
u'Fail while removing temporary file (%s): %s',
path,
e.strerror
)
def transfer_data_general(self, base_path):
base_path = base_path.replace(u'\\', u'/')
        # Create the directory structure given in base_path inside destiny_dir
path = u''
for item in base_path.split(u'/'):
path += u'/' + item
self.client.mkdir(self.destiny_dir + path)
        # Recursively copy all content under source_dir + base_path
tree = os.walk(self.source_dir + u'/' + base_path)
for item in tree:
root = item[0].replace(u'\\', u'/')
current = root.replace(self.source_dir+u'/', '')
dirs = item[1]
files = item[2]
for fl in files:
from_fl = root + u'/' + fl
to_fl = self.destiny_dir + u'/' + current + u'/' + fl
self.client.put(from_fl, to_fl)
for directory in dirs:
self.client.mkdir(self.destiny_dir + u'/' + current + u'/' + directory)
def transfer_data_databases(self, base_path):
"""
        base_path: directory inside the source path that will be transferred,
        e.g.: serial/rsap or img/revistas/rsap
        compatibility_mode: converts the original MST and XRF files to the
        opposite operating system of the source data,
        e.g.: if the source data is on a Windows machine the files are converted
        to Linux-compatible files; if it is on a Linux machine they are converted
        to Windows-compatible files. The default is False.
"""
base_path = base_path.replace(u'\\', u'/')
allowed_extensions = [u'mst', u'xrf']
        # Create the directory structure given in base_path inside destiny_dir
path = u''
for item in base_path.split(u'/'):
path += u'/' + item
self.client.mkdir(self.destiny_dir + path)
        # Recursively copy all content under serial_source_dir + base_path
tree = os.walk(self.serial_source_dir + u'/' + base_path)
converted = set()
for item in tree:
root = item[0].replace(u'\\', u'/')
current = root.replace(self.serial_source_dir + u'/', u'')
dirs = item[1]
files = item[2]
for fl in files:
if not fl[-3:].lower() in allowed_extensions:
continue
from_fl = root + u'/' + fl
from_fl_name = from_fl[:-4]
converted_fl = from_fl_name + u'_converted'
to_fl = self.destiny_dir + u'/' + current + u'/' + fl
if not self.compatibility_mode:
self.client.put(from_fl, to_fl)
continue
if from_fl_name in converted:
continue
converted.add(from_fl_name)
convertion_status = master_conversor(
from_fl_name,
converted_fl,
cisis_dir=self.cisis_dir
)
if not convertion_status:
continue
if convertion_status:
from_fl = converted_fl
to_fl = to_fl[:-4]
for extension in allowed_extensions:
self.client.put(from_fl + u'.' + extension, to_fl + u'.' + extension)
self._local_remove(from_fl + u'.' + extension)
for directory in dirs:
self.client.mkdir(self.destiny_dir + u'/' + current + u'/' + directory)
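    # Illustrative sketch (not executed): transfer_data_databases(u'serial/rsap/v12n3/base')
    # mirrors that directory under destiny_dir and, when compatibility_mode is set,
    # converts each .mst/.xrf pair with crunchmf before uploading it, removing the
    # temporary *_converted files afterwards. The path above is a placeholder built
    # from a journal acronym and issue label taken from the scilista.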
def run_serial(self):
self.client.mkdir(self.destiny_dir + u'/serial')
logger.info(u'Copying scilista.lst file')
self.client.put(self.scilista, self.destiny_dir + u'/serial/scilista.lst')
logger.info(u'Copying issue database')
self.transfer_data_databases(u'serial/issue')
logger.info(u'Copying title database')
self.transfer_data_databases(u'serial/title')
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying databases from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_databases(u'serial/%s/%s/base' % (
journal_acronym, issue_label)
)
def run_pdfs(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying pdf\'s from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/pdf/%s/%s' % (
journal_acronym, issue_label)
)
def run_translations(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying translations from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/translation/%s/%s' % (
journal_acronym, issue_label)
)
def run_xmls(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying xmls from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/xml/%s/%s' % (
journal_acronym, issue_label)
)
def run_images(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying images from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'htdocs/img/revistas/%s/%s' % (
journal_acronym, issue_label)
)
def run(self, source_type=None):
source_type = source_type if source_type else self.source_type
if source_type == u'pdfs':
self.run_pdfs()
elif source_type == u'images':
self.run_images()
elif source_type == u'translations':
self.run_translations()
elif source_type == u'databases':
self.run_serial()
elif source_type == u'xmls':
self.run_xmls()
else:
self.run_serial()
self.run_images()
self.run_pdfs()
self.run_translations()
self.run_xmls()
def main():
setts = settings.get('app:main', {})
parser = argparse.ArgumentParser(
description=u'Tools to send images, PDF\'s, translations and databases from the local SciELO sites to the stage and production servers'
)
parser.add_argument(
u'--source_type',
u'-t',
choices=[u'pdfs', u'images', u'translations', u'xmls', u'databases'],
        help=u'Type of data that will be sent to the server'
)
parser.add_argument(
u'--cisis_dir',
u'-r',
default=setts.get(u'cisis_dir', u''),
        help=u'Absolute path to the directory where the ISIS utilities are installed. It is not necessary to inform it when the utilities are on the syspath.'
)
parser.add_argument(
u'--scilista',
u'-i',
default=setts.get(u'scilista', u'./serial/scilista.lst'),
help=u'absolute path to the scilista.lst file'
)
parser.add_argument(
u'--source_dir',
u'-s',
default=setts.get(u'source_dir', u'.'),
        help=u'Absolute path where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--serial_source_dir',
u'-b',
default=setts.get(u'serial_source_dir', ''),
help=u'absolute path where the SciELO site was installed. this directory must contain the serial directory'
)
parser.add_argument(
u'--destiny_dir',
u'-d',
default=setts.get(u'destiny_dir', u'.'),
        help=u'Absolute path (on the server side) where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--compatibility_mode',
u'-m',
action=u'store_true',
help=u'Activate the compatibility mode between operating systems. It is necessary to have the CISIS configured in the syspath or in the configuration file'
)
parser.add_argument(
u'--server',
u'-f',
default=setts.get(u'server', u'localhost'),
help=u'FTP or SFTP'
)
parser.add_argument(
u'--server_type',
u'-e',
default=setts.get(u'server_type', u'sftp'),
choices=['ftp', 'sftp']
)
parser.add_argument(
u'--port',
u'-x',
default=setts.get(u'port', u'22'),
help=u'usually 22 for SFTP connection or 21 for FTP connection'
)
parser.add_argument(
u'--user',
u'-u',
default=setts.get(u'user', u'anonymous'),
help=u'FTP or SFTP username'
)
parser.add_argument(
u'--password',
u'-p',
default=setts.get(u'password', u'anonymous'),
help=u'FTP or SFTP password'
)
parser.add_argument(
u'--logging_level',
u'-l',
default=u'DEBUG',
choices=[u'DEBUG', u'INFO', u'WARNING', u'ERROR', u'CRITICAL'],
help=u'Log level'
)
args = parser.parse_args()
_config_logging(args.logging_level)
delivery = Delivery(
args.source_type,
args.cisis_dir,
args.scilista,
args.source_dir,
args.destiny_dir,
args.compatibility_mode,
args.server,
args.server_type,
args.port,
args.user,
args.password,
args.serial_source_dir
)
delivery.run()
|
scielo-paperboy
|
/scielo_paperboy-0.12.7.tar.gz/scielo_paperboy-0.12.7/paperboy/send_to_server.py
|
send_to_server.py
|
import argparse
import logging
import logging.config
import os
import subprocess
from paperboy.utils import settings
from paperboy.communicator import SFTP, FTP
logger = logging.getLogger(__name__)
ALLOWED_ITENS = ['serial', 'pdfs', 'images', 'translations']
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
}
},
'handlers': {
'console': {
'level': 'NOTSET',
'class': 'logging.StreamHandler',
'formatter': 'simple',
}
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'ERROR'
},
'paperboy': {
'handlers': ['console'],
'level': 'INFO'
}
}
}
def _config_logging(logging_level='INFO'):
LOGGING['loggers']['paperboy']['level'] = logging_level
logging.config.dictConfig(LOGGING)
def master_conversor(mst_input, mst_output, cisis_dir=None):
logger.debug(u'Running database conversion for %s', mst_input)
    status = '1'  # error, per the CISIS stdout convention
command = remove_last_slash(cisis_dir) + '/crunchmf' if cisis_dir else 'crunchmf'
logger.debug(u'Running: %s %s %s', command, mst_input, mst_output)
try:
status = subprocess.call([command, mst_input, mst_output])
except OSError:
logger.error(u'Error while running crunchmf, check if the command is available on the syspath, or the CISIS path was correctly indicated in the config file')
if str(status) == '0':
logger.debug(u'Conversion done for %s', mst_input)
return True
if str(status) == '1':
        logger.error(u'Conversion did not work for %s', mst_input)
return False
return False
def parse_scilista(scilista):
logger.info(u'Loading scilista (%s)', scilista)
lista = []
try:
f = open(scilista, 'r')
except IOError:
logger.error(
u'Fail while loading scilista, file not found (%s)',
scilista
)
else:
with f:
count = 0
for line in f:
line = line.strip()
count += 1
splited_line = [i.strip() for i in line.split(' ')]
if len(splited_line) > 3 or len(splited_line) < 2:
logger.warning(
u'Wrong value in the file (%s) line (%d): %s',
scilista,
count,
line
)
continue
                if len(splited_line) == 3:  # third token may flag the issue for deletion
if splited_line[2].lower() == 'del':
lista.append((splited_line[0], splited_line[1], True))
else:
lista.append((splited_line[0], splited_line[1], False))
                if len(splited_line) == 2:  # regular issue entry
lista.append((splited_line[0], splited_line[1], False))
logger.info(u'scilista loaded (%s)', scilista)
return lista
def remove_last_slash(path):
path = path.replace('\\', '/')
try:
return path[:-1] if path[-1] == '/' else path
except IndexError:
return path
class Delivery(object):
def __init__(self, source_type, cisis_dir, scilista, source_dir, destiny_dir,
compatibility_mode, server, server_type, port, user, password, serial_source_dir=None):
self._scilista = parse_scilista(scilista)
self.scilista = scilista
self.cisis_dir = remove_last_slash(cisis_dir)
self.source_type = source_type
self.source_dir = remove_last_slash(source_dir)
self.serial_source_dir = remove_last_slash(serial_source_dir) if serial_source_dir else self.source_dir
self.destiny_dir = remove_last_slash(destiny_dir)
self.compatibility_mode = compatibility_mode
if str(server_type) == 'sftp':
self.client = SFTP(server, int(port), user, password)
elif str(server_type) == 'ftp':
self.client = FTP(server, int(port), user, password)
else:
raise TypeError(u'server_type must be ftp or sftp')
def _local_remove(self, path):
logger.info(u'Removing temporary file (%s)', path)
try:
os.remove(path)
            logger.debug(u'Temporary file has been removed (%s)', path)
except OSError as e:
logger.error(
u'Fail while removing temporary file (%s): %s',
path,
e.strerror
)
def transfer_data_general(self, base_path):
base_path = base_path.replace(u'\\', u'/')
        # Create the directory structure given in base_path inside destiny_dir
path = u''
for item in base_path.split(u'/'):
path += u'/' + item
self.client.mkdir(self.destiny_dir + path)
        # Recursively copy all content under source_dir + base_path
tree = os.walk(self.source_dir + u'/' + base_path)
for item in tree:
root = item[0].replace(u'\\', u'/')
current = root.replace(self.source_dir+u'/', '')
dirs = item[1]
files = item[2]
for fl in files:
from_fl = root + u'/' + fl
to_fl = self.destiny_dir + u'/' + current + u'/' + fl
self.client.put(from_fl, to_fl)
for directory in dirs:
self.client.mkdir(self.destiny_dir + u'/' + current + u'/' + directory)
def transfer_data_databases(self, base_path):
"""
        base_path: directory inside the source path that will be transferred,
        e.g.: serial/rsap or img/revistas/rsap
        compatibility_mode: converts the original MST and XRF files to the
        opposite operating system of the source data,
        e.g.: if the source data is on a Windows machine the files are converted
        to Linux-compatible files; if it is on a Linux machine they are converted
        to Windows-compatible files. The default is False.
"""
base_path = base_path.replace(u'\\', u'/')
allowed_extensions = [u'mst', u'xrf']
        # Create the directory structure given in base_path inside destiny_dir
path = u''
for item in base_path.split(u'/'):
path += u'/' + item
self.client.mkdir(self.destiny_dir + path)
        # Recursively copy all content under serial_source_dir + base_path
tree = os.walk(self.serial_source_dir + u'/' + base_path)
converted = set()
for item in tree:
root = item[0].replace(u'\\', u'/')
current = root.replace(self.serial_source_dir + u'/', u'')
dirs = item[1]
files = item[2]
for fl in files:
if not fl[-3:].lower() in allowed_extensions:
continue
from_fl = root + u'/' + fl
from_fl_name = from_fl[:-4]
converted_fl = from_fl_name + u'_converted'
to_fl = self.destiny_dir + u'/' + current + u'/' + fl
if not self.compatibility_mode:
self.client.put(from_fl, to_fl)
continue
if from_fl_name in converted:
continue
converted.add(from_fl_name)
convertion_status = master_conversor(
from_fl_name,
converted_fl,
cisis_dir=self.cisis_dir
)
if not convertion_status:
continue
if convertion_status:
from_fl = converted_fl
to_fl = to_fl[:-4]
for extension in allowed_extensions:
self.client.put(from_fl + u'.' + extension, to_fl + u'.' + extension)
self._local_remove(from_fl + u'.' + extension)
for directory in dirs:
self.client.mkdir(self.destiny_dir + u'/' + current + u'/' + directory)
def run_serial(self):
self.client.mkdir(self.destiny_dir + u'/serial')
logger.info(u'Copying scilista.lst file')
self.client.put(self.scilista, self.destiny_dir + u'/serial/scilista.lst')
logger.info(u'Copying issue database')
self.transfer_data_databases(u'serial/issue')
logger.info(u'Copying title database')
self.transfer_data_databases(u'serial/title')
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying databases from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_databases(u'serial/%s/%s/base' % (
journal_acronym, issue_label)
)
def run_pdfs(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying pdf\'s from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/pdf/%s/%s' % (
journal_acronym, issue_label)
)
def run_translations(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying translations from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/translation/%s/%s' % (
journal_acronym, issue_label)
)
def run_xmls(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying xmls from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'bases/xml/%s/%s' % (
journal_acronym, issue_label)
)
def run_images(self):
for item in self._scilista:
journal_acronym = item[0]
issue_label = item[1]
            # skipping scilista items flagged for deletion, e.g.: rsap v12n3 del
if item[2]:
continue
logger.info(
u'Copying images from %s %s',
journal_acronym,
issue_label
)
self.transfer_data_general(u'htdocs/img/revistas/%s/%s' % (
journal_acronym, issue_label)
)
def run(self, source_type=None):
source_type = source_type if source_type else self.source_type
if source_type == u'pdfs':
self.run_pdfs()
elif source_type == u'images':
self.run_images()
elif source_type == u'translations':
self.run_translations()
elif source_type == u'databases':
self.run_serial()
elif source_type == u'xmls':
self.run_xmls()
else:
self.run_serial()
self.run_images()
self.run_pdfs()
self.run_translations()
self.run_xmls()
def main():
setts = settings.get('app:main', {})
parser = argparse.ArgumentParser(
description=u'Tools to send images, PDF\'s, translations and databases from the local SciELO sites to the stage and production servers'
)
parser.add_argument(
u'--source_type',
u'-t',
choices=[u'pdfs', u'images', u'translations', u'xmls', u'databases'],
        help=u'Type of data that will be sent to the server'
)
parser.add_argument(
u'--cisis_dir',
u'-r',
default=setts.get(u'cisis_dir', u''),
        help=u'Absolute path to the directory where the ISIS utilities are installed. It is not necessary to inform it when the utilities are on the syspath.'
)
parser.add_argument(
u'--scilista',
u'-i',
default=setts.get(u'scilista', u'./serial/scilista.lst'),
help=u'absolute path to the scilista.lst file'
)
parser.add_argument(
u'--source_dir',
u'-s',
default=setts.get(u'source_dir', u'.'),
        help=u'Absolute path where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--serial_source_dir',
u'-b',
default=setts.get(u'serial_source_dir', ''),
help=u'absolute path where the SciELO site was installed. this directory must contain the serial directory'
)
parser.add_argument(
u'--destiny_dir',
u'-d',
default=setts.get(u'destiny_dir', u'.'),
        help=u'Absolute path (on the server side) where the SciELO site is installed. This directory must contain the directories bases, htdocs, proc and serial'
)
parser.add_argument(
u'--compatibility_mode',
u'-m',
action=u'store_true',
help=u'Activate the compatibility mode between operating systems. It is necessary to have the CISIS configured in the syspath or in the configuration file'
)
parser.add_argument(
u'--server',
u'-f',
default=setts.get(u'server', u'localhost'),
help=u'FTP or SFTP'
)
parser.add_argument(
u'--server_type',
u'-e',
default=setts.get(u'server_type', u'sftp'),
choices=['ftp', 'sftp']
)
parser.add_argument(
u'--port',
u'-x',
default=setts.get(u'port', u'22'),
help=u'usually 22 for SFTP connection or 21 for FTP connection'
)
parser.add_argument(
u'--user',
u'-u',
default=setts.get(u'user', u'anonymous'),
help=u'FTP or SFTP username'
)
parser.add_argument(
u'--password',
u'-p',
default=setts.get(u'password', u'anonymous'),
help=u'FTP or SFTP password'
)
parser.add_argument(
u'--logging_level',
u'-l',
default=u'DEBUG',
choices=[u'DEBUG', u'INFO', u'WARNING', u'ERROR', u'CRITICAL'],
help=u'Log level'
)
args = parser.parse_args()
_config_logging(args.logging_level)
delivery = Delivery(
args.source_type,
args.cisis_dir,
args.scilista,
args.source_dir,
args.destiny_dir,
args.compatibility_mode,
args.server,
args.server_type,
args.port,
args.user,
args.password,
args.serial_source_dir
)
delivery.run()
| 0.155848 | 0.124266 |
import json
import re
from accessstats.client import ThriftClient
REGEX_ISSN = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX]$")
REGEX_ISSUE = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}$")
REGEX_ARTICLE = re.compile("^S[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}[0-9]{5}$")
def _code_type(code):
if not code:
return None
if REGEX_ISSN.match(code):
return 'issn'
if REGEX_ISSUE.match(code):
return 'issue'
if REGEX_ARTICLE.match(code):
return 'pid'
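# Illustrative sketch (not executed); the codes below are made-up but match the
# regular expressions above:
#
#   _code_type('0034-8910') -> 'issn' (journal ISSN)
#   _code_type('0034-891020170051') -> 'issue' (issue PID)
#   _code_type('S0034-89102017005000123') -> 'pid' (article PID)
#
# Anything else (or an empty value) yields None.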
def _compute_downloads_per_year(query_result):
result = []
for item in query_result['aggregations']['access_year']['buckets']:
result.append(
(item['key'], int(item['access_total']['value']))
)
return result
def downloads_per_year(collection, code, raw=False):
"""
    This method retrieves the total number of downloads per year.
arguments
collection: SciELO 3 letters Acronym
code: (Journal ISSN, Issue PID, Article PID)
return
[
("2017", "20101"),
("2016", "11201"),
("2015", "12311"),
...
]
"""
tc = ThriftClient()
body = {"query": {"filtered": {}}}
fltr = {}
query = {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
}
]
}
}
}
aggs = {
"aggs": {
"access_year": {
"terms": {
"field": "access_year",
"size": 0,
"order": {
"_term": "asc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
}
}
}
}
}
body['query']['filtered'].update(fltr)
body['query']['filtered'].update(query)
body.update(aggs)
code_type = _code_type(code)
if code_type:
query["query"]["bool"]["must"].append({
"match": {
code_type: code
}
})
query_parameters = [
('size', '0')
]
query_result = tc.search(json.dumps(body), query_parameters)
return query_result if raw is True else _compute_downloads_per_year(query_result)
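# Illustrative sketch (not executed): assuming a reachable access-stats Thrift
# backend, a call such as
#
#   downloads_per_year('scl', '0034-8910')
#
# returns year/total pairs in the shape
#
#   [(u'2015', 12311), (u'2016', 11201), (u'2017', 20101)]
#
# The collection acronym, ISSN and the numbers above are placeholders.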
|
scielo_accessstatsapi
|
/scielo_accessstatsapi-1.1.0.tar.gz/scielo_accessstatsapi-1.1.0/accessstats/queries.py
|
queries.py
|
import json
import re
from accessstats.client import ThriftClient
REGEX_ISSN = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX]$")
REGEX_ISSUE = re.compile("^[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}$")
REGEX_ARTICLE = re.compile("^S[0-9]{4}-[0-9]{3}[0-9xX][0-2][0-9]{3}[0-9]{4}[0-9]{5}$")
def _code_type(code):
if not code:
return None
if REGEX_ISSN.match(code):
return 'issn'
if REGEX_ISSUE.match(code):
return 'issue'
if REGEX_ARTICLE.match(code):
return 'pid'
def _compute_downloads_per_year(query_result):
result = []
for item in query_result['aggregations']['access_year']['buckets']:
result.append(
(item['key'], int(item['access_total']['value']))
)
return result
def downloads_per_year(collection, code, raw=False):
"""
    This method retrieves the total number of downloads per year.
arguments
collection: SciELO 3 letters Acronym
code: (Journal ISSN, Issue PID, Article PID)
return
[
("2017", "20101"),
("2016", "11201"),
("2015", "12311"),
...
]
"""
tc = ThriftClient()
body = {"query": {"filtered": {}}}
fltr = {}
query = {
"query": {
"bool": {
"must": [
{
"match": {
"collection": collection
}
}
]
}
}
}
aggs = {
"aggs": {
"access_year": {
"terms": {
"field": "access_year",
"size": 0,
"order": {
"_term": "asc"
}
},
"aggs": {
"access_total": {
"sum": {
"field": "access_total"
}
}
}
}
}
}
body['query']['filtered'].update(fltr)
body['query']['filtered'].update(query)
body.update(aggs)
code_type = _code_type(code)
if code_type:
query["query"]["bool"]["must"].append({
"match": {
code_type: code
}
})
query_parameters = [
('size', '0')
]
query_result = tc.search(json.dumps(body), query_parameters)
return query_result if raw is True else _compute_downloads_per_year(query_result)
| 0.400984 | 0.208642 |
import os
import thriftpy
import json
import logging
import time
# URLJOIN Python 3 and 2 import compatibilities
try:
from urllib.parse import urljoin
except:
from urlparse import urljoin
import requests
from thriftpy.rpc import make_client
logger = logging.getLogger(__name__)
class CitedByExceptions(Exception):
pass
class ServerError(CitedByExceptions):
pass
class ThriftClient(object):
ACCESSSTATS_THRIFT = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/thrift/access_stats.thrift')
def __init__(self, domain=None):
"""
        Thrift client for the SciELO Access Stats service.
"""
self.domain = domain or 'ratchet.scielo.org:11660'
self._set_address()
def _set_address(self):
address = self.domain.split(':')
self._address = address[0]
try:
self._port = int(address[1])
except:
self._port = 11660
@property
def client(self):
client = make_client(
self.ACCESSSTATS_THRIFT.AccessStats,
self._address,
self._port
)
return client
def document(self, code, collection=None):
result = self.client.document(code=code, collection=collection)
try:
return json.loads(result)
except:
return None
def search(self, dsl, params):
"""
Free queries to ES index.
dsl (string): with DSL query
params (list): [(key, value), (key, value)]
where key is a query parameter, and value is the value required for
parameter, ex: [('size', '0'), ('search_type', 'count')]
"""
query_parameters = []
for key, value in params:
query_parameters.append(
self.ACCESSSTATS_THRIFT.kwargs(str(key), str(value))
)
try:
result = self.client.search(dsl, query_parameters)
except self.ACCESSSTATS_THRIFT.ServerError:
            raise ServerError('you may be trying to run a bad DSL query')
try:
return json.loads(result)
except:
return None
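# Illustrative sketch (not executed): search() takes a JSON-encoded DSL query
# plus a list of (key, value) query-string parameters, e.g.:
#
#   client = ThriftClient()  # defaults to ratchet.scielo.org:11660
#   client.search(json.dumps({"query": {"match_all": {}}}), [('size', '0')])
#
# A malformed DSL body makes the backend raise, which surfaces here as
# ServerError; the query above is only a placeholder.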
|
scielo_accessstatsapi
|
/scielo_accessstatsapi-1.1.0.tar.gz/scielo_accessstatsapi-1.1.0/accessstats/client.py
|
client.py
|
import os
import thriftpy
import json
import logging
import time
# URLJOIN Python 3 and 2 import compatibilities
try:
from urllib.parse import urljoin
except:
from urlparse import urljoin
import requests
from thriftpy.rpc import make_client
logger = logging.getLogger(__name__)
class CitedByExceptions(Exception):
pass
class ServerError(CitedByExceptions):
pass
class ThriftClient(object):
ACCESSSTATS_THRIFT = thriftpy.load(
os.path.join(os.path.dirname(__file__))+'/thrift/access_stats.thrift')
def __init__(self, domain=None):
"""
        Thrift client for the SciELO Access Stats service.
"""
self.domain = domain or 'ratchet.scielo.org:11660'
self._set_address()
def _set_address(self):
address = self.domain.split(':')
self._address = address[0]
try:
self._port = int(address[1])
except:
self._port = 11660
@property
def client(self):
client = make_client(
self.ACCESSSTATS_THRIFT.AccessStats,
self._address,
self._port
)
return client
def document(self, code, collection=None):
result = self.client.document(code=code, collection=collection)
try:
return json.loads(result)
except:
return None
def search(self, dsl, params):
"""
Free queries to ES index.
dsl (string): with DSL query
params (list): [(key, value), (key, value)]
where key is a query parameter, and value is the value required for
parameter, ex: [('size', '0'), ('search_type', 'count')]
"""
query_parameters = []
for key, value in params:
query_parameters.append(
self.ACCESSSTATS_THRIFT.kwargs(str(key), str(value))
)
try:
result = self.client.search(dsl, query_parameters)
except self.ACCESSSTATS_THRIFT.ServerError:
            raise ServerError('you may be trying to run a bad DSL query')
try:
return json.loads(result)
except:
return None
| 0.483161 | 0.066904 |
scieloapi.py
============
Thin wrapper around the SciELO Manager RESTful API.
[](https://travis-ci.org/scieloorg/scieloapi.py)
Usage example:
import scieloapi
client = scieloapi.Client('some.user', 'some.api_key')
for journal in client.journals.all():
print journal['id'], journal['title']
How to install
--------------
You can install it via `pip`, directly from the github repo:
pip install -e git+git://github.com/scieloorg/scieloapi.py.git#egg=scieloapi
Or from PyPi (more stable):
pip install scieloapi
Basics
------
When a `Client` instance is initialized, the process automatically introspects the API server
in order to make available only the endpoints that are part of the specified API version. The API
version may be passed as the keyword argument `version` when creating the `Client` instance. If
omitted, the highest version is used.
>>> client = scieloapi.Client('some.user', 'some.api_key', api_uri='http://manager.scielo.org/api/', version='v1')
Listing available endpoints:
>>> client.endpoints
[u'pressreleases', u'users', u'sections', u'sponsors', u'collections', u'changes', u'apressreleases', u'uselicenses', u'journals', u'issues']
>>>
Listing all items of an endpoint:
>>> for journal in client.journals.all(): print journal['title']
...
Acta Médica Costarricense
Acta Pediátrica Costarricense
Actualidades Investigativas en Educación
Adolescencia y Salud
Agronomía Costarricense
Agronomía Mesoamericana
Annali dell'Istituto Superiore di Sanità
Arquivos em Odontologia
Brazilian Journal of Oral Sciences
Bulletin of the World Health Organization
Cadernos de Saúde Pública
>>>
Listing items matching some params:
>>> for journal in client.journals.filter(collection='saude-publica'): print journal['title']
...
Annali dell'Istituto Superiore di Sanità
Bulletin of the World Health Organization
Cadernos de Saúde Pública
Ciência & Saúde Coletiva
Gaceta Sanitaria
MEDICC Review
Revista Brasileira de Epidemiologia
Revista Cubana de Salud Pública
Revista de Salud Pública
>>>
Getting a specific item:
>>> journal = client.journals.get(62)
>>> journal['title']
u'Acta M\xe9dica Costarricense'
>>>
Use license
-----------
This project is licensed under FreeBSD 2-clause. See `LICENSE` for more details.
|
scieloapi
|
/scieloapi-0.5.tar.gz/scieloapi-0.5/README.md
|
README.md
|
scieloapi.py
============
Thin wrapper around the SciELO Manager RESTful API.
[](https://travis-ci.org/scieloorg/scieloapi.py)
Usage example:
import scieloapi
client = scieloapi.Client('some.user', 'some.api_key')
for journal in client.journals.all():
print journal['id'], journal['title']
How to install
--------------
You can install it via `pip`, directly from the github repo:
pip install -e git+git://github.com/scieloorg/scieloapi.py.git#egg=scieloapi
Or from PyPi (more stable):
pip install scieloapi
Basics
------
When a `Client` instance is initialized, the process automatically introspects the API server
in order to make available only the endpoints that are part of the specified API version. The API
version may be passed as the keyword argument `version` when creating the `Client` instance. If
omitted, the highest version is used.
>>> client = scieloapi.Client('some.user', 'some.api_key', api_uri='http://manager.scielo.org/api/', version='v1')
Listing available endpoints:
>>> client.endpoints
[u'pressreleases', u'users', u'sections', u'sponsors', u'collections', u'changes', u'apressreleases', u'uselicenses', u'journals', u'issues']
>>>
Listing all items of an endpoint:
>>> for journal in client.journals.all(): print journal['title']
...
Acta Médica Costarricense
Acta Pediátrica Costarricense
Actualidades Investigativas en Educación
Adolescencia y Salud
Agronomía Costarricense
Agronomía Mesoamericana
Annali dell'Istituto Superiore di Sanità
Arquivos em Odontologia
Brazilian Journal of Oral Sciences
Bulletin of the World Health Organization
Cadernos de Saúde Pública
>>>
Listing items matching some params:
>>> for journal in client.journals.filter(collection='saude-publica'): print journal['title']
...
Annali dell'Istituto Superiore di Sanità
Bulletin of the World Health Organization
Cadernos de Saúde Pública
Ciência & Saúde Coletiva
Gaceta Sanitaria
MEDICC Review
Revista Brasileira de Epidemiologia
Revista Cubana de Salud Pública
Revista de Salud Pública
>>>
Getting a specific item:
>>> journal = client.journals.get(62)
>>> journal['title']
u'Acta M\xe9dica Costarricense'
>>>
Use license
-----------
This project is licensed under FreeBSD 2-clause. See `LICENSE` for more details.
| 0.782413 | 0.380011 |
History
=======
0.5 (2014-02-10)
----------------
* Added `tox.ini` to help with porting to Python 3.3.
* Support for https (without verifying the CA).
* Added `Content-Type: application/json` HTTP header to all post requests.
* Added a do-nothing logger handler by default.
0.4 (2013-08-30)
----------------
* Params are sorted by key before the GET request is dispatched. This minor
change aims to improve server-side caching capabilities.
* Minor changes to the API of the function `httpbroker.get`. It now accepts an `auth` kwarg
  to handle server-side authentication.
* Minor changes to `scieloapi.Connector`:
* A custom http broker can be passed as `http_broker` kwarg during init.
  * HTTP methods are created dynamically during initialization, with user credentials bound
    to them. The api_key is no longer maintained by the instance.
* `Client.fetch_relations` now accepts the param `only` to specify a subset of relations to fetch.
* Now the User-Agent is set to `scieloapi/:version`.
* The module `scieloapi.scieloapi` was renamed to `scieloapi.core` to make things clearer.
* Added POST method capabilities on endpoints.
* Added the exception `exceptions.MethodNotAllowed` to represent 405 status code.
0.3 (2013-08-02)
----------------
* Added more unit tests (now at 73% code coverage).
* Minor adjustments to the `setup.py` installation script.
* New exceptions to represent http status codes.
* Better documentation at `http://docs.scielo.org/projects/scieloapipy/`.
0.2 (2013-07-26)
----------------
* Slumber dependency was removed. The module `scieloapi.httpbroker` was created
to deal with http requests and responses.
* Better test reports now using Nosetests + coverage.
* Added method `Client.fetch_relations` to fetch all first-level relations of
a document and replace the value by the full document.
|
scieloapi
|
/scieloapi-0.5.tar.gz/scieloapi-0.5/HISTORY.md
|
HISTORY.md
|
| 0.788217 | 0.251383 |
# Science Concierge
A Python repository for content-based recommendation
based on Latent Semantic Analysis (LSA) topic distance and the Rocchio algorithm.
Science Concierge is a backend algorithm for Scholarfy
[www.scholarfy.net](http://www.scholarfy.net/),
an automatic scheduler for conferences.
See the full article on [PLOS ONE](http://journals.plos.org/plosone/article?id=10.1371%2Fjournal.pone.0158423), [Arxiv](http://arxiv.org/abs/1604.01070), or the full TeX manuscript and
presentation [here](https://github.com/titipata/science_concierge_manuscript). You can also see
the scaled version of Scholarfy, covering 14.3M articles from Pubmed,
at [pubmed.scholarfy.net](http://pubmed.scholarfy.net/).
## Usage
First, clone the repository.
```bash
$ git clone https://github.com/titipata/science_concierge
```
Install dependencies using `pip`,
```bash
$ pip install -r requirements.txt
```
Install the library using `setup.py`,
```bash
$ python setup.py develop install
```
## Download example data
We provide example `csv` files from the Pubmed Open Access Subset that you can download and
play with (we parsed them using [pubmed_parser](https://github.com/titipata/pubmed_parser)).
Each file contains `pmc`, `pmid`, `title`, `abstract`, and `publication_year` as column names.
Use `download` function to download example data,
```python
import science_concierge
science_concierge.download(['pubmed_oa_2015.csv', 'pubmed_oa_2016.csv'])
```
We provide `pubmed_oa_{year}.csv` for `{year} = 2007, ..., 2016` (**note**: 2007 contains
all publications from before 2008). Alternatively, use `awscli` to download:
```bash
$ aws s3 cp s3://science-of-science-bucket/science_concierge/data/ . --recursive
```
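If you download several yearly files, they can be combined before fitting. This is a minimal
sketch using plain pandas (the `data/` location and the two example years are assumptions):
```python
import pandas as pd

# assumes the yearly files were downloaded into the data/ folder as above
years = [2015, 2016]
frames = [pd.read_csv('data/pubmed_oa_{}.csv'.format(y), encoding='utf-8') for y in years]
df = pd.concat(frames, ignore_index=True)
docs = list(df.abstract)
titles = list(df.title)
```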
## Example usage of Science Concierge
You can build a quick recommendation model by importing the `ScienceConcierge` class,
then using the `fit` method to fit a list of documents. Then use `recommend` to recommend
documents based on liked or disliked documents.
```python
import pandas as pd
from science_concierge import ScienceConcierge
df = pd.read_csv('data/pubmed_oa_2016.csv', encoding='utf-8')
docs = list(df.abstract) # provide list of abstracts
titles = list(df.title) # titles
# select weighting from 'count', 'tfidf', or 'entropy'
recommend_model = ScienceConcierge(stemming=True, ngram_range=(1,1),
weighting='entropy', norm=None,
n_components=200, n_recommend=200,
verbose=True)
recommend_model.fit(docs) # input list of documents or abstracts
index = recommend_model.recommend(likes=[10000], dislikes=[]) # input list of like/dislike index (here we like title[10000])
docs_recommend = [titles[i] for i in index[0:10]] # recommended documents
```
## Vectorizer available
We also provide add-on vectorizer classes, including `LogEntropyVectorizer` and
`BM25Vectorizer`, for calculating document-term weightings from an input
list of documents. Here is an example usage.
```python
from science_concierge import LogEntropyVectorizer
l_model = LogEntropyVectorizer(norm=None, ngram_range=(1,2),
stop_words='english', min_df=1, max_df=0.8)
X = l_model.fit_transform(docs) # where docs is list of documents
```
If we already have a sparse document-term matrix,
we can use the `fit_document_matrix` method directly.
```python
recommend_model = ScienceConcierge(n_components=200, n_recommend=200)
recommend_model.fit_document_matrix(X)
index = recommend_model.recommend(likes=[10000], dislikes=[])
```
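The `BM25Vectorizer` mentioned above can be used in the same way. This is a hedged sketch that
assumes its constructor mirrors `LogEntropyVectorizer`; check the source for the exact parameters:
```python
from science_concierge import BM25Vectorizer

# assumed to expose the same scikit-learn style vectorizer interface as LogEntropyVectorizer
bm25_model = BM25Vectorizer(ngram_range=(1, 2), stop_words='english',
                            min_df=1, max_df=0.8)
X_bm25 = bm25_model.fit_transform(docs)
recommend_model.fit_document_matrix(X_bm25)
index = recommend_model.recommend(likes=[10000], dislikes=[])
```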
## Dependencies
- [numpy](http://www.numpy.org/)
- [pandas](http://pandas.pydata.org/)
- [unidecode](https://pypi.python.org/pypi/Unidecode)
- [nltk](http://www.nltk.org/) with whitespace tokenizer and Porter stemmer, <br>
use `science_concierge.download_nltk()` to download the required corpora (there is a stemmer bug in `nltk==3.2.2`)
- [scikit-learn](http://scikit-learn.org/)
- [cachetools](http://pythonhosted.org/cachetools/)
- [joblib](http://pythonhosted.org/joblib/)
## Members
- [Titipat Achakulvisut](http://titipata.github.io)
- [Daniel Acuna](http://www.scienceofscience.org)
- [Tulakan Ruangrong](http://github.com/bluenex)
- [Konrad Kording](http://koerding.com/)
## License
[](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode)
Copyright (c) 2015 Titipat Achakulvisut, Daniel E. Acuna, Tulakan Ruangrong, Konrad Kording
|
science-concierge
|
/science_concierge-0.1.tar.gz/science_concierge-0.1/README.md
|
README.md
|
| 0.535584 | 0.933915 |
import logging
import sys
import re
import numpy as np
import string
from six import string_types
from unidecode import unidecode
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import WhitespaceTokenizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import normalize
from .vectorizer import LogEntropyVectorizer
from .recommend import build_nearest_neighbors, get_rocchio_topic
logger = logging.getLogger('scienceconcierge')
logger.addHandler(logging.StreamHandler())
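# module-level helpers shared by ScienceConcierge.preprocess: a Porter stemmer,
# a whitespace tokenizer and a regex matching punctuation characters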
stemmer = PorterStemmer()
w_tokenizer = WhitespaceTokenizer()
punct_re = re.compile('[{}]'.format(re.escape(string.punctuation)))
def set_log_level(verbose):
"""Convenience function for setting the log level.
Parameters
----------
verbose : bool, str, int, or None
The verbosity of messages to print. If a str, it can be either DEBUG,
INFO, WARNING, ERROR, or CRITICAL. Note that these are for
convenience and are equivalent to passing in logging.DEBUG, etc.
For bool, True is the same as 'INFO', False is the same as 'WARNING'.
"""
if isinstance(verbose, bool):
if verbose is True:
verbose = 'INFO'
else:
verbose = 'WARNING'
if isinstance(verbose, str):
verbose = verbose.upper()
logging_types = dict(DEBUG=logging.DEBUG, INFO=logging.INFO,
WARNING=logging.WARNING, ERROR=logging.ERROR,
CRITICAL=logging.CRITICAL)
if verbose not in logging_types:
raise ValueError('verbose must be of a valid type')
verbose = logging_types[verbose]
logger.setLevel(verbose)
class ScienceConcierge:
"""Science Concierge
Recommendation class using Latent Semantic Analysis on list of abstracts
    The processing workflow is as follows
    - Word tokenization and stemming (optional)
    - Create a tf-idf matrix, unigram or bigram recommended
    - Latent Semantic Analysis (LSA), i.e. reduce the dimensionality
      using truncated SVD
- Nearest neighbor assignment for recommendation
Parameters
----------
* parameters for preprocessing
    stemming: boolean, if True it will apply the Porter stemmer as a preprocessor
        to the documents, default: True
    parallel: boolean, if True multiprocessing will be used to apply preprocessing
        to the abstract text, default: True
    * parameters for term frequency weighting scheme
weighting: str, options from ['count', 'tfidf', 'entropy']
    min_df: int or float [0.0, 1.0], ignore terms that appear less than min_df or have
        weight less than min_df, default: 3
    max_df: int or float [0.0, 1.0], ignore terms that appear more than max_df or have
        weight greater than max_df, default: 0.8
ngram_range: tuple, parameter for tfidf transformation
(1, 1) for unigram, (1, 2) for bigram, default (1, 2) i.e. bigram
norm: 'l2', 'l1' or None, default: 'l2'
* parameters for dimensionality reduction
algorithm: str, 'arpack' or 'randomized', default 'arpack'
n_components: int, number of components of reduced dimension vector in LSA,
default=200
n_iter: int, iteration for LSA
* For recommendation
w_like: weight term for liked documents (called alpha in literature)
    w_dislike: weight term for disliked documents
    n_recommend: total number of documents to be recommended, if None it will be
        set to the total number of documents
TO DO
-----
- update nearest neighbor model so that it allows larger scale of documents
- print logging output for preprocessing step
"""
def __init__(self, stemming=True, parallel=True,
weighting='tfidf', strip_accents='unicode',
norm='l2', lowercase=True,
min_df=3, max_df=0.8, ngram_range=(1,2),
algorithm='arpack',
n_components=200, n_iter=150,
n_recommend=None, save=False,
verbose=False):
self.docs = None
self.docs_preprocess = None
self.stemming = stemming
self.parallel = parallel
self.weighting = weighting
self.strip_accents = strip_accents
self.min_df = min_df
self.max_df = max_df
self.ngram_range = ngram_range
self.analyzer = 'word'
self.token_pattern = r'\w{1,}'
self.stop_words = 'english'
self.lowercase = lowercase
self.norm = norm
self.n_components = int(n_components)
self.n_iter = int(n_iter)
self.algorithm = algorithm
self.vectors = None
self.nbrs_model = None # holder for nearest neighbor model
self.n_recommend = n_recommend
        self.save = save
set_log_level(verbose)
def preprocess(self, text):
"""
Apply Porter stemmer to input string
Parameters
----------
text: str, input string
Returns
-------
text_preprocess: str, output stemming string
"""
if isinstance(text, (type(None), float)):
text_preprocess = ''
else:
text = unidecode(text).lower()
text = punct_re.sub(' ', text) # remove punctuation
if self.stemming:
text_preprocess = [stemmer.stem(token) for token in w_tokenizer.tokenize(text)]
else:
text_preprocess = w_tokenizer.tokenize(text)
text_preprocess = ' '.join(text_preprocess)
return text_preprocess
def preprocess_docs(self, docs):
"""
Preprocess string or list of strings
"""
if isinstance(docs, string_types):
docs = [docs]
if self.stemming is True:
if not self.parallel:
logger.info('preprocess %i documents without multiprocessing' % len(docs))
docs_preprocess = list(map(self.preprocess, docs))
else:
if sys.version_info[0] == 3:
from multiprocessing import Pool
pool = Pool()
n_processes = pool._processes
docs_preprocess = pool.map(self.preprocess, docs)
logger.info('preprocess %i documents with %i workers' % (len(docs), n_processes))
else:
logger.info('using simple map for preprocessing abstracts')
docs_preprocess = list(map(self.preprocess, docs))
else:
            logger.info('no preprocess function applied')
docs_preprocess = docs
return docs_preprocess
def fit_document_matrix(self, X):
"""
Reduce dimension of sparse matrix X
using Latent Semantic Analysis and
        build nearest neighbor model
Parameters
----------
        X: sparse csr matrix, sparse term frequency matrix or
            other weighting matrix computed from the documents
"""
n_components = self.n_components
n_iter = self.n_iter
algorithm = self.algorithm
lsa_model = TruncatedSVD(n_components=n_components,
n_iter=n_iter,
algorithm=algorithm)
# reduce dimension using Latent Semantic Analysis
vectors = lsa_model.fit_transform(X)
self.vectors = vectors
# build nearest neighbor model
nbrs_model = build_nearest_neighbors(vectors, n_recommend=self.n_recommend)
self.nbrs_model = nbrs_model
return self
def fit(self, docs):
"""
Create recommendation vectors and nearest neighbor model
from list of documents
Parameters
----------
docs: list of string, list of documents' text or abstracts from papers or
publications or posters
"""
# parameters from class
weighting = self.weighting
strip_accents = self.strip_accents
token_pattern = self.token_pattern
lowercase = self.lowercase
min_df = self.min_df
max_df = self.max_df
norm = self.norm
ngram_range = self.ngram_range
analyzer = self.analyzer
stop_words = self.stop_words
# preprocess text
docs_preprocess = self.preprocess_docs(docs)
self.docs = docs
if self.save:
self.docs_preprocess = docs_preprocess
# weighting documents
if self.weighting == 'count':
model = CountVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase,
strip_accents=strip_accents, analyzer=analyzer,
token_pattern=token_pattern, ngram_range=ngram_range,
stop_words=stop_words)
elif self.weighting == 'tfidf':
model = TfidfVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase, norm=norm,
strip_accents=strip_accents, analyzer=analyzer,
token_pattern=token_pattern, ngram_range=ngram_range,
use_idf=True, smooth_idf=True, sublinear_tf=True,
stop_words=stop_words)
elif self.weighting == 'entropy':
model = LogEntropyVectorizer(min_df=min_df, max_df=max_df,
lowercase=lowercase, norm=norm,
token_pattern=token_pattern,
ngram_range=ngram_range, analyzer=analyzer,
smooth_idf=False,
stop_words=stop_words)
else:
logger.error('choose one weighting scheme from count, tfidf or entropy')
# text transformation and latent-semantic-analysis
logger.info('apply %s weighting to documents' % self.weighting)
X = model.fit_transform(docs_preprocess)
# fit documents matrix from sparse matrix
logger.info('perform Latent Semantic Analysis with %i components' % self.n_components)
self.fit_document_matrix(X)
return self
def recommend(self, likes=list(), dislikes=list(), w_like=1.8, w_dislike=0.2):
"""
Apply Rocchio algorithm and nearest neighbor to
recommend related documents:
x_pref = w_like * mean(x_likes) - w_dislike * mean(x_dislikes)
see article on how to cross-validate parameters. Use recommend
after fit method
Parameters
----------
likes: list, list of index of liked documents
dislikes: list, list of index of disliked documents
w_like: float, weight for liked documents, default 1.8 (from cross-validation)
w_dislike: float, weight for disliked documents, default 0.2
(we got 0.0 from cross-validation)
Returns
-------
recommend_index: 1d array, array of recommended index from documents
"""
self.w_like = w_like
self.w_dislike = w_dislike
# compute preference vector
topic_pref = get_rocchio_topic(self.vectors, likes, dislikes, w_like, w_dislike)
# nearest neighbor to suggest related abstract with close topic
_, recommend_index = self.nbrs_model.kneighbors(topic_pref)
return recommend_index.flatten()
|
science-concierge
|
/science_concierge-0.1.tar.gz/science_concierge-0.1/science_concierge/science_concierge.py
|
science_concierge.py
|
| 0.649245 | 0.298453 |



# Science data structure
This library makes it straightforward to create a tree folder structure for large data-sets. For now it supports numpy arrays only, but there are plans to support pandas, csv, tab-separated and excel soon.
The idea behind the library is to make a data-set browsable with a normal file browser. The components can be rearranged using Python, the terminal or a simple file browser.
## Install
Install through pip
```
pip install science-data-structure
```
Manual installation
```
python setup.py install
```
## Command line tools
This library is bundled with command line tools to create a system-wide author:
```bash
science_data_structure global create author "<name>"
```
or
```bash
science_data_structure global create author
```
and you will be prompted for the name of the author. You only have to run the above commands a single time; the data is stored in a configuration file (the location depends on your OS). From the command line you can create a dataset:
```bash
science_data_structure create dataset "<name>" "<description>"
```
The author you have created for your system is added to this dataset. Go into the folder of the dataset and execute:
```bash
science_data_structure list author
```
to view all the authors in this dataset. Alternatively you can list the entire meta file
```bash
science_data_structure list meta
```
## Examples
### Simple data-set
In this simple example a data-set is created with a single branch `parabola`. In this branch two "leaves" are added: `x` and `y`. At the end of the example the data-set is written to disk.
Before we can create a dataset we need to create a meta file containing an author; you can do this with the command line example mentioned earlier.
```python
import science_data_structure.structures as structures
from pathlib import Path
import numpy
# initialize the empty data-set
dataset = structures.StructuredDataSet.create_dataset(Path("./."),
"test_set")
# add data to the data-set
data_set["parabola"]["x"] = numpy.linspace(-2, 2, 100)
data_set["parabola"]["y"] = data_set["parabola"]["x"].data ** 2
# write the data to disk
dataset.write()
```
### Branch overriding
What happens when a branch or a leaf is overwritten with another leaf or branch? This example extends the previous one:
```python
data_set["parabola"]["x"] = None
```
In this case the variable `x` stored in the branch `parabola` will be deleted upon the first write.
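Continuing the example, the deletion only takes effect when the data-set is written again; a minimal sketch:
```python
dataset["parabola"]["x"] = None  # mark the leaf for removal
dataset.write()                  # the leaf is removed from disk during this write
```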
|
science-data-structure
|
/science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/README.md
|
README.md
|
| 0.535584 | 0.972908 |
import json
from typing import Dict, List
from pathlib import Path
import abc
class JSONObject:
def to_json(self) -> str:
return json.dumps(self, default=lambda o: o.__dict__(),
sort_keys=True,
indent=4)
class Author(JSONObject):
def __init__(self,
author_id: int,
name: str) -> None:
self._author_id = author_id
self._name = name
def __dict__(self):
return {
"id": self._author_id,
"name": self._name
}
def __str__(self) -> str:
return "{:d} {:s}".format(self._author_id,
self._name)
@staticmethod
def from_json(json_data: Dict) -> Dict[int, "Author"]:
content = list(json_data.items())
return dict(map(lambda item: (int(item[0]), Author(item[1]["id"], item[1]["name"])), content))
class Meta(JSONObject):
def __init__(self,
path: Path) -> None:
self._path = path
self._authors = {} # type: Dict[int, Author]
def __dict__(self) -> Dict:
result = {
"path": str(self._path),
"authors": self._authors
}
return result
def write(self) -> None:
output_line = self.to_json()
self._path.write_text(output_line)
@staticmethod
def read(path: Path) -> "Meta":
with path.open("r") as content:
text = content.read()
return Meta.from_json(path, json.loads(text))
@staticmethod
def from_json(path: Path, json_data: Dict) -> "Meta":
meta = Meta(path)
meta.authors = Author.from_json(json_data["authors"])
return meta
@property
def authors(self) -> Dict[int, Author]:
return self._authors
@authors.setter
def authors(self,
authors: Dict[int, Author]) -> None:
self._authors = authors
def __str__(self) -> None:
line = "{:s} authors = ".format(str(self._path))
line += str(self._authors)
return line
def remove(self) -> None:
self._path.unlink()
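# Minimal usage sketch (assumption: this module is run directly): write a Meta file
# with a single Author and read it back.
if __name__ == "__main__":
    meta = Meta(Path("meta.json"))
    meta.authors = {1: Author(1, "Jane Doe")}
    meta.write()
    print(Meta.read(Path("meta.json")))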
|
science-data-structure
|
/science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/descriptions.py
|
descriptions.py
|
| 0.637031 | 0.168309 |
from pathlib import Path
from typing import List
from author import Author
from core import JSONObject
from logger import LogEntry
import uuid
import json
from typing import Dict
import abc
from datetime import datetime
class NodeProperty(JSONObject):
@abc.abstractproperty
def name(self):
raise NotImplementedError("Must override the property name")
class Meta(JSONObject):
id_counter = 0
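    # class-level counter used to hand out branch ids; create_top_level_meta and
    # create_meta increment it each time a new Meta is created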
    def __init__(self,
                 path: Path,
                 dataset_id: int,
                 branch_id: int,
                 description: str = "",
                 authors: List[Author] = None,
                 log: Dict[int, LogEntry] = None,
                 additional_properties: Dict[str, NodeProperty] = None):
        # avoid shared mutable default arguments between Meta instances
        self._path = path
        self._dataset_id = dataset_id
        self._branch_id = branch_id
        self._description = description
        self._authors = authors if authors is not None else []
        self._log = log if log is not None else {}
        self._additional_properties = additional_properties if additional_properties is not None else {}
def write(self):
self.path.write_text(self.to_json())
def __str__(self):
line = "meta information \n"
line += "dataset id \t {:d} \n".format(self._dataset_id)
line += "branch id \t {:d} \n".format(self._branch_id)
line += "description \t {:s} \n".format(self._description)
line += "\n"
line += "authors: \n"
for author in self.authors:
line += "{:s} \n \n".format(str(author))
line += "\n"
for name in self._additional_properties.keys():
line += "{:s}\n".format(str(self._additional_properties[name]))
return line
def __dict__(self):
base_dict = {
"dataset_id": self._dataset_id,
"branch_id": self._branch_id,
"authors": self._authors,
"description": self._description,
"log": self._log
}
for property_name in self._additional_properties.keys():
base_dict[property_name] = self._additional_properties[property_name].__dict__()
return base_dict
@property
def path(self):
return self._path
@path.setter
def path(self, path):
self._path = path
@property
def dataset_id(self):
return self._dataset_id
@property
def branch_id(self):
return self._branch_id
@property
def authors(self):
return self._authors
@property
def description(self):
return self._description
@description.setter
def description(self, description: str):
self._description = description
@staticmethod
def create_top_level_meta(path: Path,
author: Author,
description: str = ""):
# create a uuid for the dataset
dataset_id = uuid.uuid4().int
branch_id = 0
Meta.id_counter += 1
meta = Meta(path,
dataset_id,
branch_id,
description,
[author])
return meta
@staticmethod
def create_meta(top_level_meta: "Meta",
path):
dataset_id = top_level_meta.dataset_id
branch_id = Meta.id_counter
Meta.id_counter += 1
meta = Meta(path / ".meta.json", dataset_id, branch_id)
return meta
@staticmethod
def from_json(path: Path) -> "Meta":
text = path.read_text()
json_data = json.loads(text)
authors = list(map(lambda author_content: Author.from_dict(author_content), json_data["authors"]))
return Meta(path, int(json_data["dataset_id"]),
int(json_data["branch_id"]),
json_data["description"], authors)
def add_property(self, node_property: NodeProperty):
self._additional_properties[node_property.name] = node_property
def __getitem__(self, name: str) -> NodeProperty:
return self._additional_properties[name]
def add_log_entry(self, log_entry):
self._log[log_entry.log_id] = log_entry
class FileProperty(NodeProperty):
def __init__(self):
# properties
self._size = None # type: int
self._n_childs = None # type: int
@property
def size(self) -> int:
return self._size
@size.setter
def size(self, size):
self._size = size
@property
def n_childs(self) -> int:
return self._n_childs
@n_childs.setter
def n_childs(self, n_childs):
self._n_childs = n_childs
@staticmethod
def from_dict(content: Dict) -> "FileProperty":
file_property = FileProperty()
file_property.size = int(content["size"])
file_property.n_childs = int(content["n_childs"])
return file_property
def __dict__(self):
return {
"size": self._size,
"n_childs": self._n_childs
}
@property
def name(self) -> str:
return "file_properties"
|
science-data-structure
|
/science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/meta.py
|
meta.py
|
| 0.786623 | 0.158597 |
import abc
from typing import Dict, List
from pathlib import Path
import os
from meta import Meta
from config import ConfigManager
import logger as logger
from author import Author
class Node:
def __init__(self,
parent: "Node",
meta: Meta,
name: str):
self._parent = parent
self._meta = meta
self._name = name
@property
def name(self) -> str:
return self._name
@property
def path(self) -> Path:
return self._parent.path / self.name
@abc.abstractmethod
def write(self) -> "None":
raise NotImplementedError("write functions must be overwritten")
@abc.abstractmethod
def remove(self) -> "None":
raise NotImplementedError("remove function must be overwritten")
@property
def meta(self):
return self._meta
@property
def top_level_meta(self) -> Meta:
if isinstance(self, StructuredDataSet):
return self.meta
return self._parent.top_level_meta
class Branch(Node):
def __init__(self,
parent: Node,
name: str,
content: Dict[str, Node],
meta: Meta) -> None:
super().__init__(parent, meta, name)
self._content = content # type: Dict[str, Node]
self._kill = [] # type: List[Node]
@logger.logger
def write(self) -> None:
os.makedirs(self.path, exist_ok=True)
self._meta.write()
for node_name in self._content.keys():
self._content[node_name].write()
# empty the kill ring
for node_kill in self._kill:
node_kill.remove()
self._kill = []
def read(self) -> "Branch":
content = list(self.path.glob("./*"))
content = list(filter(lambda x: not x.stem.startswith("."), content))
branches = list(filter(lambda x: x.suffix != ".leaf", content))
data = list(filter(lambda x: x.suffix == ".leaf", content))
for branch in branches:
self._content[branch.name] = Branch(self,
branch.name,
{},
Meta.from_json(self.path / branch.name / ".meta.json"))
self._content[branch.name].read()
for data_node in data:
self._content[data_node.with_suffix("").name] = Leaf.initialize(self,
data_node.with_suffix("").name)
def keys(self) -> List[str]:
return list(self._content.keys())
def remove(self) -> None:
for key in self._content.keys():
self._content[key].remove()
self.meta.path.unlink()
self._clear_kill()
self.path.rmdir()
# protected functions
def _remove_item(self, key: str) -> Node:
self._kill += [self._content[key]]
return self._content.pop(key)
    def _clear_kill(self) -> None:
        # iterate over a copy because nodes are removed from the list while looping
        for node in list(self._kill):
            node.remove()
            self._kill.remove(node)
for branch in self.branches:
branch._clear_kill()
def __getitem__(self, name: str) -> Node:
try:
return self._content[name]
except KeyError:
self._content[name] = Branch.create_branch(self, name)
return self._content[name]
def __setitem__(self, key: str, item) -> None:
if not isinstance(key, str):
raise KeyError
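        # Three assignment cases: None removes the node, a plain value is wrapped
        # in a typed Leaf via data_formats, and an existing Node replaces the current
        # entry (the old node is queued for deletion on the next write)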
if item is None:
self._remove_item(key)
elif not isinstance(item, Node):
if key in self._content:
self._kill += [self._content[key]]
import data_formats
self._content[key] = data_formats.available_types[type(item)](self,
"{:s}.leaf".format(key),
Meta.create_meta(self.top_level_meta,
self.path / "{:s}.leaf/".format(key)))
self._content[key].data = item
else:
if key not in self._content:
self._content[key] = item
else:
self._kill += [self._content[key]]
self._content[key] = item
@property
def name(self) -> str:
return self._name
@property
def branches(self) -> List["Branch"]:
return list(filter(lambda content: isinstance(content, Branch),
self._content.values()))
@property
def leafs(self) -> List["Leaf"]:
return list(filter(lambda content: isinstance(content, Leaf),
self._content.values()))
@staticmethod
def create_branch(parent: "Branch",
name: str) -> "Branch":
return Branch(parent,
name,
{},
Meta.create_meta(parent.top_level_meta,
parent.path / name))
class StructuredDataSet(Branch):
def __init__(self,
path: Path,
name: str,
content: Dict[str, Node],
meta: Meta) -> None:
super().__init__(None,
"{:s}.struct".format(name),
content,
meta)
self._path = path
@property
def path(self):
return self._path / self._name
@staticmethod
def create_dataset(path: Path,
name: str,
author: Author,
description: str = "") -> "StructuredDataSet":
top_level_meta = Meta.create_top_level_meta(None, author, description=description)
path_tmp = path / "{:s}.struct".format(name)
path_meta = path_tmp / ".meta.json"
top_level_meta.path = path_meta
return StructuredDataSet(path,
name,
{},
top_level_meta)
class Leaf(Node):
def __init__(self,
parent: Node,
name: str,
meta: Meta) -> None:
super().__init__(parent,
meta,
name)
# public functions
def write(self) -> None:
if not self.path.exists():
self.path.mkdir()
self.meta.write()
self._write_child()
@property
def data(self):
return self._get_data()
@data.setter
def data(self, data):
self._set_data(data)
@abc.abstractmethod
def _get_data(self):
raise NotImplementedError("Must override the _get_data function")
@abc.abstractmethod
def _set_data(self, data):
raise NotImplementedError("Must override the _set_data function")
@staticmethod
def initialize(parent: Node,
name: str) -> "Leaf":
name = name.replace(".leaf", "")
leaf_path = (parent.path / name).with_suffix(".leaf")
meta = Meta.from_json(leaf_path / ".meta.json")
# read all the non-hidden files
content = list(leaf_path.with_suffix(".leaf").glob("./*"))
content = list(filter(lambda x: not x.stem.startswith("."), content))
        if len(content) == 1:
            import data_formats
            return data_formats.available_extensions[content[0].suffix](parent, name, meta)
        elif len(content) > 1:
            raise FileNotFoundError("Too many files in the leaf")
        else:
            raise FileNotFoundError("The leaf does not exist {:s} {:s}".format(str(leaf_path), name))
|
science-data-structure
|
/science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/structures.py
|
structures.py
|
| 0.752104 | 0.144601 |
import click
from science_data_structure.author import Author
from science_data_structure.meta import Meta
from science_data_structure.config import ConfigManager
from science_data_structure.tools import files as file_tools
from pathlib import Path
from science_data_structure.structures import StructuredDataSet
import os
@click.group()
def manage():
pass
@click.group()
def create():
pass
@click.group()
def edit():
pass
@click.group(name="global")
def _global():
pass
@click.group(name="create")
def global_create():
pass
@click.group(name="list")
def global_list():
pass
@click.group(name="list")
def _list():
pass
@click.command(name="dataset")
@click.argument("name")
@click.argument("description", required=False)
def create_dataset(name,
description):
    path = Path(os.getcwd())
    if (path / "{:s}.struct".format(name)).exists():
        raise FileExistsError("There is already a dataset in this folder with that name")
    author = ConfigManager().default_author
    dataset = StructuredDataSet.create_dataset(path, name, author)
if description is not None:
dataset.meta.description = description
click.echo(dataset.path)
dataset.write()
@click.command(name="meta")
def list_meta():
meta = Meta.from_json(Path(os.getcwd()) / ".meta.json")
click.echo(str(meta))
@click.command(name="author")
def list_author():
meta = Meta.from_json(Path(os.getcwd()) / ".meta.json")
authors = meta.authors
authors = list(map(lambda x: str(x), authors))
for author in authors:
click.echo(author)
@click.command(name="author")
@click.argument("name", required=False)
def create_global_author(name):
config_manager = ConfigManager()
if name is None:
name = click.prompt("What is the name of the new author?")
author = Author.create_author(name)
config_manager.default_author = author
click.echo(config_manager._path)
config_manager.write()
@click.command(name="author")
def list_global_author():
config_manager = ConfigManager()
click.echo("{:s}".format(str(config_manager.default_author)))
# globals
global_create.add_command(create_global_author)
global_list.add_command(list_global_author)
_global.add_command(global_create)
_global.add_command(global_list)
manage.add_command(_global)
# Create group
create.add_command(create_dataset)
manage.add_command(create)
# Delete group
# List group
_list.add_command(list_author)
_list.add_command(list_meta)
manage.add_command(_list)
# edit group
manage.add_command(edit)
if __name__ == "__main__":
manage()
|
science-data-structure
|
/science_data_structure-0.0.4.tar.gz/science_data_structure-0.0.4/science_data_structure/tools/manage.py
|
manage.py
|
| 0.336331 | 0.072374 |
import numpy as np
from typing import List
class Variable:
"""Class for optimization variables.
"""
# attributes
_x_min = None # lower bounds
_x_max = None # upper bounds
_x_type = None # variables' type
def __init__(self, x_min: np.ndarray, x_max: np.ndarray, x_type: List[str]=None):
"""Constructor of variables.
Args:
x_min : (np.ndarray) (n x 1)-array with lower bounds.
x_max : (np.ndarray) (n x 1)-array with upper bounds.
x_type: (List[str]) n-element list with variables' types ('c': continuous or 'd': discrete).
"""
# set bounds
self.x_min = x_min
self.x_max = x_max
self.x_type = x_type
# getters
@property
def x_min(self):
return self._x_min
@property
def x_max(self):
return self._x_max
@property
def x_type(self):
return self._x_type
# setters
@x_min.setter
def x_min(self, x_lb):
"""Setter of x_min.
Args:
x_lb: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_lb, np.ndarray):
raise ValueError("x_min must be a numpy array!")
# check dimension
if x_lb.ndim != 2 or x_lb.shape[1] != 1:
raise ValueError("x_min must be a (n x 1)-numpy array!")
# check consistency
if self._x_min is not None:
n = self._x_min.shape[0]
if n != x_lb.shape[0] and n > 0:
raise ValueError("x_min must be a ({} x 1)-numpy array!".format(n))
# set
self._x_min = x_lb
@x_max.setter
def x_max(self, x_ub):
"""Setter of x_max.
Args:
x_ub: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_ub, np.ndarray):
raise ValueError("x_max must be a numpy array!")
# check dimension
if x_ub.ndim != 2 or x_ub.shape[1] != 1:
raise ValueError("x_max must be a (n x 1)-numpy array!")
# check dimension consistency
n = self._x_min.shape[0]
if n != x_ub.shape[0] and n > 0:
raise ValueError("x_max must be a ({} x 1)-numpy array!".format(n))
# check range consistency
if np.any((x_ub - self._x_min) < 0):
raise ValueError("x_max must be greater than or equal x_min!")
# set
self._x_max = x_ub
@x_type.setter
def x_type(self, x_type):
"""Setter of x_min.
Args:
x_type: (n )-list
"""
if x_type is not None:
# check numpy
if not isinstance(x_type, list):
raise ValueError("x_type must be a list!")
# check consistency
n = self._x_min.shape[0]
if n != len(x_type) and n > 0:
raise ValueError("x_type must be a list of {} elements!".format(n))
# check values
if (x_type.count('c') + x_type.count('d')) != n:
raise ValueError("x_type must be either 'c' or 'd'.")
self._x_type = x_type
else:
self.x_type = ['c'] * self.x_min.shape[0]
def dimension(self):
"""Return variable dimension."""
return self.x_min.shape[0]
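A minimal construction sketch (illustrative, not part of the original module): bounds are passed as (n x 1) column vectors and types as a plain list.
import numpy as np
x_min = np.array([[0.0], [1.0]]) # lower bounds, shape (2, 1)
x_max = np.array([[5.0], [4.0]]) # upper bounds, shape (2, 1)
v = Variable(x_min=x_min, x_max=x_max, x_type=['c', 'd'])
print(v.dimension()) # -> 2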
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/builder/variable.py
|
variable.py
|
import numpy as np
from typing import List
class Variable:
"""Class for optimization variables.
"""
# attributes
_x_min = None # lower bounds
_x_max = None # upper bounds
_x_type = None # variables' type
def __init__(self, x_min: np.ndarray, x_max: np.ndarray, x_type: List[str]=None):
"""Constructor of variables.
Args:
x_min : (np.ndarray) (n x 1)-array with lower bounds.
x_max : (np.ndarray) (n x 1)-array with upper bounds.
x_type: (List[str]) n-element list with variables' types ('c': continuous or 'd': discrete).
"""
# set bounds
self.x_min = x_min
self.x_max = x_max
self.x_type = x_type
# getters
@property
def x_min(self):
return self._x_min
@property
def x_max(self):
return self._x_max
@property
def x_type(self):
return self._x_type
# setters
@x_min.setter
def x_min(self, x_lb):
"""Setter of x_min.
Args:
x_lb: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_lb, np.ndarray):
raise ValueError("x_min must be a numpy array!")
# check dimension
if x_lb.ndim != 2 or x_lb.shape[1] != 1:
raise ValueError("x_min must be a (n x 1)-numpy array!")
# check consistency
if self._x_min is not None:
n = self._x_min.shape[0]
if n != x_lb.shape[0] and n > 0:
raise ValueError("x_min must be a ({} x 1)-numpy array!".format(n))
# set
self._x_min = x_lb
@x_max.setter
def x_max(self, x_ub):
"""Setter of x_max.
Args:
x_ub: (n x 1)-numpy array
"""
# check numpy
if not isinstance(x_ub, np.ndarray):
raise ValueError("x_max must be a numpy array!")
# check dimension
if x_ub.ndim != 2 or x_ub.shape[1] != 1:
raise ValueError("x_max must be a (n x 1)-numpy array!")
# check dimension consistency
n = self._x_min.shape[0]
if n != x_ub.shape[0] and n > 0:
raise ValueError("x_max must be a ({} x 1)-numpy array!".format(n))
# check range consistency
if np.any((x_ub - self._x_min) < 0):
raise ValueError("x_max must be greater than or equal x_min!")
# set
self._x_max = x_ub
@x_type.setter
def x_type(self, x_type):
"""Setter of x_min.
Args:
x_type: (n )-list
"""
if x_type is not None:
# check numpy
if not isinstance(x_type, list):
raise ValueError("x_type must be a list!")
# check consistency
n = self._x_min.shape[0]
if n != len(x_type) and n > 0:
raise ValueError("x_type must be a list of {} elements!".format(n))
# check values
if (x_type.count('c') + x_type.count('d')) != n:
raise ValueError("x_type must be either 'c' or 'd'.")
self._x_type = x_type
else:
self.x_type = ['c'] * self.x_min.shape[0]
def dimension(self):
"""Return variable dimension."""
return self.x_min.shape[0]
| 0.912089 | 0.510802 |
from science_optimization.solvers.pareto_samplers import BaseParetoSamplers
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction, LinearFunction
from typing import Any
import numpy as np
from copy import deepcopy
class LambdaSampler(BaseParetoSamplers):
"""p-lambda Pareto front sampler."""
def __init__(self,
optimization_problem: OptimizationProblem,
algorithm: Any = None,
n_samples: int = None):
"""Constructor of optimizer class.
Args:
optimization_problem: (OptimizationProblem) optimization problem instance.
algorithm : (Any) an algorithm instance.
n_samples : (int) number of samples.
"""
# instantiate super class
super().__init__(optimization_problem, algorithm, n_samples)
def sample_aux(self) -> OptimizationResults:
""" p-lambda sampler.
Returns:
output: (OptimizationResults) optimization results.
"""
# cardinalities
n = self.optimization_problem.variables.dimension()
o = self.optimization_problem.objective.objectives.n_functions
# verify
if self.optimization_problem.objective.objectives.n_functions != 2:
raise ValueError("Sampler only implemented for bi-objective optimization problems.")
# generate lambda values from [0, 1]
l = np.linspace(0, 1, self.n_samples) # endpoints 0 and 1 recover the individual objective minima
# sample
x = np.zeros((n, 0))
fx = np.zeros((o, 0))
for k in range(self.n_samples):
# p-lambda optimization problem
op = self.op_lambda(l[k])
# optimize
res = self.algorithm.optimize(optimization_problem=op, debug=False)
x = np.hstack((x, res.x))
fx = np.hstack((fx, self.optimization_problem.objective.objectives.eval(res.x)))
# output
output = OptimizationResults()
output.x = x
output.fx = fx
return output
def op_lambda(self, l):
""" Builds a p-lambda optimization problem.
Args:
l : (float) weight in [0, 1] used in the weighted sum of the two objectives.
Returns:
op: optimization problem.
"""
# copy of optimization problem
op = deepcopy(self.optimization_problem)
obj = deepcopy(self.optimization_problem.objective)
# nonparametric functions
w = np.array([[1-l, l]])
if not obj.objectives.is_linear():
def fo(x):
return obj.objectives.eval(x, composition='series', weights=w)
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(GenericFunction(func=fo, n=op.variables.dimension()))
else:
# new objective parameters
c = w @ obj.C()
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(LinearFunction(c=c.T))
return op
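In effect, op_lambda replaces the pair (f1, f2) with the single weighted objective (1 - l)*f1(x) + l*f2(x), so sweeping l over [0, 1] traces candidate Pareto points between the two individual minima. A minimal self-contained sketch (it mirrors pareto_sampling_cs0.py elsewhere in this collection; the class above is assumed importable the same way):
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.problems import GenericProblem
Q = np.identity(2)
f = [QuadraticFunction(Q=Q, c=np.zeros((2, 1)), d=np.array([0])),
QuadraticFunction(Q=Q, c=np.array([[-2], [-2]]), d=np.array([2]))]
x_lim = np.hstack((np.full((2, 1), -5), np.full((2, 1), 5)))
problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=x_lim))
sampler = LambdaSampler(optimization_problem=problem, n_samples=11)
results = sampler.sample() # one column of results.x / results.fx per lambda value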
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/solvers/pareto_samplers/lambda_sampler.py
|
lambda_sampler.py
|
from science_optimization.solvers.pareto_samplers import BaseParetoSamplers
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction, LinearFunction
from typing import Any
import numpy as np
from copy import deepcopy
class LambdaSampler(BaseParetoSamplers):
"""p-lambda Pareto front sampler."""
def __init__(self,
optimization_problem: OptimizationProblem,
algorithm: Any = None,
n_samples: int = None):
"""Constructor of optimizer class.
Args:
optimization_problem: (OptimizationProblem) optimization problem instance.
algorithm : (Any) an algorithm instance.
n_samples : (int) number of samples.
"""
# instantiate super class
super().__init__(optimization_problem, algorithm, n_samples)
def sample_aux(self) -> OptimizationResults:
""" p-lambda sampler.
Returns:
output: (OptimizationResults) optimization results.
"""
# cardinalities
n = self.optimization_problem.variables.dimension()
o = self.optimization_problem.objective.objectives.n_functions
# verify
if self.optimization_problem.objective.objectives.n_functions != 2:
raise ValueError("Sampler only implemented for bi-objective optimization problems.")
# generate lambda values from [0, 1]
l = np.linspace(0, 1, self.n_samples) # endpoints 0 and 1 recover the individual objective minima
# sample
x = np.zeros((n, 0))
fx = np.zeros((o, 0))
for k in range(self.n_samples):
# p-lambda optimization problem
op = self.op_lambda(l[k])
# optimize
res = self.algorithm.optimize(optimization_problem=op, debug=False)
x = np.hstack((x, res.x))
fx = np.hstack((fx, self.optimization_problem.objective.objectives.eval(res.x)))
# output
output = OptimizationResults()
output.x = x
output.fx = fx
return output
def op_lambda(self, l):
""" Builds a p-lambda optimization problem.
Args:
l : (float) weight in [0, 1] used in the weighted sum of the two objectives.
Returns:
op: optimization problem.
"""
# copy of optimization problem
op = deepcopy(self.optimization_problem)
obj = deepcopy(self.optimization_problem.objective)
# nonparametric functions
w = np.array([[1-l, l]])
if not obj.objectives.is_linear():
def fo(x):
return obj.objectives.eval(x, composition='series', weights=w)
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(GenericFunction(func=fo, n=op.variables.dimension()))
else:
# new objective parameters
c = w @ obj.C()
# delete original objectives and evaluate
op.objective.objectives.clear() # delete functions
op.objective.objectives.add(LinearFunction(c=c.T))
return op
| 0.942593 | 0.587174 |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import SeparableResourceAllocation
from science_optimization.algorithms.decomposition import DualDecomposition
def decomposition_example():
"""Decomposition problem example.
Solve problem:
min f_1(x_1) + f_2(x_2), f_i(x_i) = e^(-2*x_i)
s.t. x_1 + x_2 - 10 <= 0
2 <= x_i <= 6
"""
# dimension
n = 2
# objective functions
def f_1(x):
return np.exp(-2*x[0, :]) + 0 * x[1, :]
def f_2(x):
return np.exp(-2*x[1, :]) + 0 * x[0, :]
# inequality constraints functions
def g_1(x):
return x[0, :] - 10
def g_2(x):
return x[1, :]
# input lists
f_i = [GenericFunction(func=f_1, n=n), GenericFunction(func=f_2, n=n)] # f_i list
g_i = [GenericFunction(func=g_1, n=n), GenericFunction(func=g_2, n=n)] # g_i list
# bounds
x_min = np.array([2, 2]).reshape(-1, 1) # lower
x_max = np.array([6, 6]).reshape(-1, 1) # upper
x_bounds = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=SeparableResourceAllocation(f_i=f_i,
coupling_eq_constraints=[],
coupling_ineq_constraints=g_i,
x_bounds=x_bounds
))
# starting point
x0 = np.array([0, 0]).reshape(-1, 1)
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=DualDecomposition(x0=x0))
results = optimizer.optimize()
# result
results.info()
if __name__ == "__main__":
# run example
decomposition_example()
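For this particular instance the optimum can be checked by hand: the objective decreases in each x_i, so the coupling constraint x_1 + x_2 <= 10 is active, and by symmetry and convexity the minimizer is x = (5, 5). A brute-force check (illustrative only, not part of the original example):
import numpy as np
x1, x2 = np.meshgrid(np.linspace(2, 6, 401), np.linspace(2, 6, 401))
obj = np.exp(-2 * x1) + np.exp(-2 * x2)
obj[x1 + x2 > 10] = np.inf # discard infeasible points
i, j = np.unravel_index(np.argmin(obj), obj.shape)
print(x1[i, j], x2[i, j]) # approximately 5.0, 5.0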
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/decomposition_example.py
|
decomposition_example.py
|
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import SeparableResourceAllocation
from science_optimization.algorithms.decomposition import DualDecomposition
def decomposition_example():
"""Decomposition problem example.
Solve problem:
min f_1(x_1) + f_2(x_2), f_i(x_i) = e^(-2*x_i)
s.t. x_1 + x_2 - 10 <= 0
2 <= x_i <= 6
"""
# dimension
n = 2
# objective functions
def f_1(x):
return np.exp(-2*x[0, :]) + 0 * x[1, :]
def f_2(x):
return np.exp(-2*x[1, :]) + 0 * x[0, :]
# inequality constraints functions
def g_1(x):
return x[0, :] - 10
def g_2(x):
return x[1, :]
# input lists
f_i = [GenericFunction(func=f_1, n=n), GenericFunction(func=f_2, n=n)] # f_i list
g_i = [GenericFunction(func=g_1, n=n), GenericFunction(func=g_2, n=n)] # g_i list
# bounds
x_min = np.array([2, 2]).reshape(-1, 1) # lower
x_max = np.array([6, 6]).reshape(-1, 1) # upper
x_bounds = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=SeparableResourceAllocation(f_i=f_i,
coupling_eq_constraints=[],
coupling_ineq_constraints=g_i,
x_bounds=x_bounds
))
# starting point
x0 = np.array([0, 0]).reshape(-1, 1)
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=DualDecomposition(x0=x0))
results = optimizer.optimize()
# result
results.info()
if __name__ == "__main__":
# run example
decomposition_example()
| 0.779196 | 0.557243 |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs0(s):
"""Multiobjective problem example.
Args:
s: nondominated_sampler.
"""
# parameters objective function 1
Q = np.array([[1, 0], [0, 1]])
c1 = np.array([[0], [0]])
d1 = np.array([0])
# parameters objective function 2
c2 = np.array([[-2], [-2]])
d2 = np.array([2])
# objectives
f1 = QuadraticFunction(Q=Q, c=c1, d=d1)
f2 = QuadraticFunction(Q=Q, c=c2, d=d2)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([5, 5]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder pareto sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic)
elif s == 2:
sampler = MuSampler(optimization_problem=generic)
else:
sampler = LambdaSampler(optimization_problem=generic)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 5, delta)
y = np.arange(-5, 5, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f1.eval(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
pareto_sampling_cs0(s=2)
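Assuming QuadraticFunction evaluates x^T Q x + c^T x + d (the convention suggested by how Q = 0.5*A is constructed in multiobjective_example.py elsewhere in this collection), the two objectives above are f1(x) = ||x||^2 and f2(x) = ||x - (1, 1)||^2, so the exact Pareto set is the segment between (0, 0) and (1, 1) and the sampled points should fall on it. A one-line check of the weighted-sum minimizer (illustrative only):
import numpy as np
w = 0.25 # any weight in [0, 1]
x_star = (1 - w) * np.ones(2) # minimizer of w*||x||^2 + (1 - w)*||x - (1, 1)||^2
print(x_star) # lies on the segment between (0, 0) and (1, 1)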
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/pareto_sampling_cs0.py
|
pareto_sampling_cs0.py
|
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs0(s):
"""Multiobjective problem example.
Args:
s: nondominated_sampler.
"""
# parameters objective function 1
Q = np.array([[1, 0], [0, 1]])
c1 = np.array([[0], [0]])
d1 = np.array([0])
# parameters objective function 2
c2 = np.array([[-2], [-2]])
d2 = np.array([2])
# objectives
f1 = QuadraticFunction(Q=Q, c=c1, d=d1)
f2 = QuadraticFunction(Q=Q, c=c2, d=d2)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([5, 5]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder pareto sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic)
elif s == 2:
sampler = MuSampler(optimization_problem=generic)
else:
sampler = LambdaSampler(optimization_problem=generic)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 5, delta)
y = np.arange(-5, 5, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f1.eval(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
pareto_sampling_cs0(s=2)
| 0.847858 | 0.60542 |
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generate_grid(x_min, x_max, n):
coords = []
for i in range(n):
coords.append(np.arange(x_min[i][0], x_max[i][0]+1, 5))
g = np.meshgrid(*coords)
for i in range(n):
coords[i] = g[i].reshape((np.prod(g[i].shape), )).reshape(-1, 1)
return np.hstack(coords)
def quadratic(Q, c, d):
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower bound
x_max = np.array([10, 10]).reshape(-1, 1) # upper bound
x_bounds = np.hstack((x_min, x_max))
# builder quadratic problem instance
quadratic = OptimizationProblem(builder=Quadratic(Q=Q, c=c, d=d, x_bounds=x_bounds))
# builder optimization
x0 = np.array([[5], [6]])
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
optimizer = Optimizer(
opt_problem=quadratic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
results = optimizer.optimize()
# result
results.info()
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 500
results = optimizer.optimize(debug=True)
results.info()
return results
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def get_bm_2_problem(n):
def obj_func(x):
s = 1
for i in range(n):
s *= x[i][0]
s *= -1 * np.power(np.sqrt(n), n)
return s
def c_1(x):
s = 0
for i in range(n):
s += x[i][0]
return s - 1
return obj_func, c_1
def get_bm_3_problem():
def obj_func(x):
s = np.sum(x[0:4, ])
s -= np.sum(np.power(x[0:4, ], 2))
s -= np.sum(x[4:13, ])
return s
def c_1(x):
return 2*x[0][0] + 2*x[1][0] + x[9][0] + x[10][0] - 10
def c_2(x):
return 2*x[0][0] + 2*x[2][0] + x[9][0] + x[11][0] - 10
def c_3(x):
return 2*x[0][0] + 2*x[2][0] + x[10][0] + x[11][0] - 10
def c_4(x):
return -8 * x[0][0] + x[9][0]
def c_5(x):
return -8 * x[1][0] + x[10][0]
def c_6(x):
return -8 * x[2][0] + x[11][0]
def c_7(x):
return -2 * x[3][0] - x[4][0] + x[9][0]
def c_8(x):
return -2 * x[5][0] - x[6][0] + x[10][0]
def c_9(x):
return -2 * x[7][0] - x[8][0] + x[11][0]
x_min = np.zeros((13, 1))
x_max = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 100, 100, 100, 1]).reshape(-1, 1)
x_bounds = np.hstack((x_min, x_max))
x0 = np.array([.5, .5, .5, .5, .5, .5, .5, .5, .5, 3, 3, 3, .5]).reshape(-1, 1)
f = [GenericFunction(obj_func, 13)]
ineq_cons = [
GenericFunction(func=c_1, n=13),
GenericFunction(func=c_2, n=13),
GenericFunction(func=c_3, n=13),
GenericFunction(func=c_4, n=13),
GenericFunction(func=c_5, n=13),
GenericFunction(func=c_6, n=13),
GenericFunction(func=c_7, n=13),
GenericFunction(func=c_8, n=13),
GenericFunction(func=c_9, n=13)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_4_problem():
def obj_func(x):
a = np.sum(np.power(np.cos(x), 4))
b = np.prod(np.power(np.cos(x), 2))
c = np.sqrt(np.sum(np.arange(1, 21).reshape(-1, 1) * np.power(x, 2)))
s = np.abs((a - 2*b)/c)
return s
def c_1(x):
return 0.75 - np.prod(x)
def c_2(x):
return np.sum(x) - 7.5 * x.shape[0]
x_min = np.zeros((20, 1))
x_max = np.full((20, 1), 10)
x_bounds = np.hstack((x_min, x_max))
x0 = np.full((20, 1), 5)
f = [GenericFunction(func=obj_func, n=20)]
ineq_cons = [
GenericFunction(func=c_1, n=20),
GenericFunction(func=c_2, n=20)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_5_problem():
def obj_func(x):
return np.abs(np.power(x[0][0], 2) + np.power(x[1][0], 2)) + np.abs(np.sin(x[0][0])) + np.abs(np.cos(x[1][0]))
def c_1(x):
c = 4
s = 0
for i in range(2):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(2):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(2):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=2)]
ineq_cons = [
GenericFunction(func=c_1, n=2),
GenericFunction(func=c_2, n=2),
GenericFunction(func=c_3, n=2)
]
eq_cons = []
x0 = np.array([[5.0], [1.0]])
return x0, x_lim, f, ineq_cons, eq_cons
def neldermead_example(problem=1):
"""
Args:
problem: (int) selector of the benchmark problem to run (1-11).
"""
np.set_printoptions(precision=9, suppress=True)
if problem == 1:
# Problem: (x[0]-1)^2 + 4.0*x[1]^2
Q = np.array([[1, 0], [0, 4]])
c = np.array([-2, 0]).reshape(-1, 1)
d = 1
quadratic(Q, c, d)
elif problem == 2:
# Problem: x[0]^2 + 3.0*x[1]^2
Q = np.array([[1, 0], [0, 3]])
c = np.array([0, 0]).reshape(-1, 1)
d = 0
quadratic(Q, c, d)
elif problem == 3:
def f_obj(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 4:
def f_obj(x): return x[0][0]*x[0][0] + x[1][0]*x[1][0] - x[0][0]*x[1][0]
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 5:
def f_obj(x): return 200 * x[0][0]*x[0][0] + x[1][0]*x[1][0]
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[10], [10]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 6:
def f_obj(x): return 100 * np.square((x[1][0] - np.square(x[0][0]))) + np.square(1 - x[0][0])
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[-2], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 7:
def f_obj(x):
return np.square(x[0][0] + 10 * x[1][0]) + 5 * np.square(x[2][0] - x[3][0]) + \
np.power((x[1][0] - 2 * x[2][0]), 4) + 10 * np.power(x[0][0] - x[3][0], 4)
# bounds
x_min = np.array([-5, -5, -5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=4)]
ineq_cons = []
eq_cons = []
x0 = np.array([[3], [-1], [0], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 8:
n = 5
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(n)
x0 = np.full((n, 1), 1.0)
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 9:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_3_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 10:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_4_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 11:
x0, x_bounds, f, ineq_cons, eq_cons = get_bm_5_problem()
generic_fun(f, x0, x_bounds, ineq_cons, eq_cons)
else:
raise ValueError("Undefined problem example.")
if __name__ == '__main__':
neldermead_example(problem=1)
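The five deltas passed to NelderMead above are the textbook simplex coefficients (reflection 1, expansion 2, inside/outside contraction 0.5, shrink 0.5). A sketch of how an additional unconstrained case could be wired in through the same helper (hypothetical objective, reusing generic_fun and the imports at the top of this file):
def f_sphere(x): return x[0][0] ** 2 + x[1][0] ** 2 # hypothetical test objective
x_lim = np.hstack((np.full((2, 1), -5), np.full((2, 1), 5)))
generic_fun([GenericFunction(func=f_sphere, n=2)], np.array([[3], [4]]), x_lim, [], [])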
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/neldermead_example.py
|
neldermead_example.py
|
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generate_grid(x_min, x_max, n):
coords = []
for i in range(n):
coords.append(np.arange(x_min[i][0], x_max[i][0]+1, 5))
g = np.meshgrid(*coords)
for i in range(n):
coords[i] = g[i].reshape((np.prod(g[i].shape), )).reshape(-1, 1)
return np.hstack(coords)
def quadratic(Q, c, d):
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower bound
x_max = np.array([10, 10]).reshape(-1, 1) # upper bound
x_bounds = np.hstack((x_min, x_max))
# builder quadratic problem instance
quadratic = OptimizationProblem(builder=Quadratic(Q=Q, c=c, d=d, x_bounds=x_bounds))
# builder optimization
x0 = np.array([[5], [6]])
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
optimizer = Optimizer(
opt_problem=quadratic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
results = optimizer.optimize()
# result
results.info()
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 500
results = optimizer.optimize(debug=True)
results.info()
return results
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def get_bm_2_problem(n):
def obj_func(x):
s = 1
for i in range(n):
s *= x[i][0]
s *= -1 * np.power(np.sqrt(n), n)
return s
def c_1(x):
s = 0
for i in range(n):
s += x[i][0]
return s - 1
return obj_func, c_1
def get_bm_3_problem():
def obj_func(x):
s = np.sum(x[0:4, ])
s -= np.sum(np.power(x[0:4, ], 2))
s -= np.sum(x[4:13, ])
return s
def c_1(x):
return 2*x[0][0] + 2*x[1][0] + x[9][0] + x[10][0] - 10
def c_2(x):
return 2*x[0][0] + 2*x[2][0] + x[9][0] + x[11][0] - 10
def c_3(x):
return 2*x[0][0] + 2*x[2][0] + x[10][0] + x[11][0] - 10
def c_4(x):
return -8 * x[0][0] + x[9][0]
def c_5(x):
return -8 * x[1][0] + x[10][0]
def c_6(x):
return -8 * x[2][0] + x[11][0]
def c_7(x):
return -2 * x[3][0] - x[4][0] + x[9][0]
def c_8(x):
return -2 * x[5][0] - x[6][0] + x[10][0]
def c_9(x):
return -2 * x[7][0] - x[8][0] + x[11][0]
x_min = np.zeros((13, 1))
x_max = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 100, 100, 100, 1]).reshape(-1, 1)
x_bounds = np.hstack((x_min, x_max))
x0 = np.array([.5, .5, .5, .5, .5, .5, .5, .5, .5, 3, 3, 3, .5]).reshape(-1, 1)
f = [GenericFunction(obj_func, 13)]
ineq_cons = [
GenericFunction(func=c_1, n=13),
GenericFunction(func=c_2, n=13),
GenericFunction(func=c_3, n=13),
GenericFunction(func=c_4, n=13),
GenericFunction(func=c_5, n=13),
GenericFunction(func=c_6, n=13),
GenericFunction(func=c_7, n=13),
GenericFunction(func=c_8, n=13),
GenericFunction(func=c_9, n=13)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_4_problem():
def obj_func(x):
a = np.sum(np.power(np.cos(x), 4))
b = np.prod(np.power(np.cos(x), 2))
c = np.sqrt(np.sum(np.arange(1, 21).reshape(-1, 1) * np.power(x, 2)))
s = np.abs((a - 2*b)/c)
return s
def c_1(x):
return 0.75 - np.prod(x)
def c_2(x):
return np.sum(x) - 7.5 * x.shape[0]
x_min = np.zeros((20, 1))
x_max = np.full((20, 1), 10)
x_bounds = np.hstack((x_min, x_max))
x0 = np.full((20, 1), 5)
f = [GenericFunction(func=obj_func, n=20)]
ineq_cons = [
GenericFunction(func=c_1, n=20),
GenericFunction(func=c_2, n=20)
]
eq_cons = []
return x0, x_bounds, f, ineq_cons, eq_cons
def get_bm_5_problem():
def obj_func(x):
return np.abs(np.power(x[0][0], 2) + np.power(x[1][0], 2)) + np.abs(np.sin(x[0][0])) + np.abs(np.cos(x[1][0]))
def c_1(x):
c = 4
s = 0
for i in range(2):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(2):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(2):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=2)]
ineq_cons = [
GenericFunction(func=c_1, n=2),
GenericFunction(func=c_2, n=2),
GenericFunction(func=c_3, n=2)
]
eq_cons = []
x0 = np.array([[5.0], [1.0]])
return x0, x_lim, f, ineq_cons, eq_cons
def neldermead_example(problem=1):
"""
Args:
problem: (int) selector of the benchmark problem to run (1-11).
"""
np.set_printoptions(precision=9, suppress=True)
if problem == 1:
# Problem: (x[0]-1)^2 + 4.0*x[1]^2
Q = np.array([[1, 0], [0, 4]])
c = np.array([-2, 0]).reshape(-1, 1)
d = 1
quadratic(Q, c, d)
elif problem == 2:
# Problem: x[0]^2 + 3.0*x[1]^2
Q = np.array([[1, 0], [0, 3]])
c = np.array([0, 0]).reshape(-1, 1)
d = 0
quadratic(Q, c, d)
elif problem == 3:
def f_obj(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 4:
def f_obj(x): return x[0][0]*x[0][0] + x[1][0]*x[1][0] - x[0][0]*x[1][0]
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[2], [2]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 5:
def f_obj(x): return 200 * x[0][0]*x[0][0] + x[1][0]*x[1][0]
# bounds
x_min = np.array([-10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[10], [10]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 6:
def f_obj(x): return 100 * np.square((x[1][0] - np.square(x[0][0]))) + np.square(1 - x[0][0])
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=2)]
ineq_cons = []
eq_cons = []
x0 = np.array([[-2], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 7:
def f_obj(x):
return np.square(x[0][0] + 10 * x[1][0]) + 5 * np.square(x[2][0] - x[3][0]) + \
np.power((x[1][0] - 2 * x[2][0]), 4) + 10 * np.power(x[0][0] - x[3][0], 4)
# bounds
x_min = np.array([-5, -5, -5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=f_obj, n=4)]
ineq_cons = []
eq_cons = []
x0 = np.array([[3], [-1], [0], [1]])
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 8:
n = 5
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(n)
x0 = np.full((n, 1), 1.0)
generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
elif problem == 9:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_3_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 10:
x0, x_lim, obj_func, ineq_cons, eq_cons = get_bm_4_problem()
generic_fun(obj_func, x0, x_lim, ineq_cons, eq_cons)
elif problem == 11:
x0, x_bounds, f, ineq_cons, eq_cons = get_bm_5_problem()
generic_fun(f, x0, x_bounds, ineq_cons, eq_cons)
else:
raise ValueError("Undefined problem example.")
if __name__ == '__main__':
neldermead_example(problem=1)
| 0.687945 | 0.544922 |
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 2000
results = optimizer.optimize(debug=False)
# results.info()
return results
def generate_points(x_min, x_max, dim, n=30):
points = []
for i in range(n):
p = x_min + np.random.random_sample((dim, 1)) * (x_max - x_min)
points.append(p)
return points
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def write_x0_result(dim, x0, fx, n_evals, stop_crit):
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(fx) + "\t" + str(n_evals) + stop_crit)
fp.write("\n")
def write_dim_result(dim, fx_min, fx_median, fx_std, fx_max, n_evals_mean):
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(fx_min) + "\t" +
str(fx_median) + "\t" +
str(fx_std) + "\t" +
str(fx_max) + "\t" +
str(n_evals_mean)
)
fp.write("\n")
def run_tests():
for dim in range(11, 16):
fx = []
n_evals = []
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(dim)
initial_points = generate_points(x_min, x_max, dim, n=30)
for p in range(len(initial_points)):
x0 = initial_points[p].reshape(-1, 1)
results = generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
n_evals.append(results.n_function_evaluations)
fx.append(results.fx)
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(results.fx) + "\t" + str(results.n_function_evaluations))
fp.write("\n")
# print(x0.T[0].tolist(), results.fx, results.n_function_evaluations)
fx = np.array(fx)
n_evals = np.array(n_evals)
n_data = [np.min(fx), np.median(fx), np.std(fx), np.max(fx), np.mean(n_evals)]
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(np.min(fx)) + "\t" +
str(np.median(fx)) + "\t" +
str(np.std(fx)) + "\t" +
str(np.max(fx)) + "\t" +
str(np.mean(n_evals))
)
fp.write("\n")
print(n_data)
if __name__ == "__main__":
run_tests()
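results.txt ends up as a plain tab-separated numeric table (dimension, best, median, std, worst, mean evaluations), so it can be read straight back; a small post-processing sketch, assuming run_tests() has produced the file:
import numpy as np
summary = np.loadtxt("results.txt")
print(summary.shape) # one row per tested dimension, six columns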
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/neldermead_article_example.py
|
neldermead_article_example.py
|
import numpy as np
from science_optimization.solvers import Optimizer
from science_optimization.builder import OptimizationProblem
from science_optimization.function import GenericFunction
from science_optimization.problems import Quadratic, GenericProblem
from science_optimization.algorithms.derivative_free import NelderMead
def generic_fun(f, x0, x_lim, ineq_cons, eq_cons):
delta_r = 1.0
delta_e = 2.0
delta_ic = 0.5
delta_oc = 0.5
delta_s = 0.5
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
optimizer = Optimizer(
opt_problem=generic,
algorithm=NelderMead(x0, delta_r, delta_e, delta_ic, delta_oc, delta_s)
)
optimizer.algorithm.n_max = 2000
results = optimizer.optimize(debug=False)
# results.info()
return results
def generate_points(x_min, x_max, dim, n=30):
points = []
for i in range(n):
p = x_min + np.random.random_sample((dim, 1)) * (x_max - x_min)
points.append(p)
return points
def get_bm_1_problem(n):
def obj_func(x):
a = [10 for i in range(n)]
b = [100 for i in range(n)]
s = 0
for i in range(n):
s += a[i] * np.abs(x[i][0] / b[i])
return s
def c_1(x):
c = 4
s = 0
for i in range(n):
s += np.power(x[i][0], 3)
s -= c
return s
def c_2(x):
d = 1 / np.pi
s = 0
for i in range(n):
s += np.power(-1 + x[i][0], 2)
s -= d
return s
def c_3(x):
d = 3
m = 100
s = 0
for i in range(n):
s += x[i][0]
s = d - m * np.sqrt(s)
return s
x_min = np.full((n, 1), -10) # lower
x_max = np.full((n, 1), 10) # upper
x_lim = np.hstack((x_min, x_max))
f = [GenericFunction(func=obj_func, n=n)]
ineq_cons = [
GenericFunction(func=c_1, n=n),
GenericFunction(func=c_2, n=n),
GenericFunction(func=c_3, n=n)
]
eq_cons = []
return f, ineq_cons, eq_cons, x_min, x_max, x_lim
def write_x0_result(dim, x0, fx, n_evals, stop_crit):
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(fx) + "\t" + str(n_evals) + stop_crit)
fp.write("\n")
def write_dim_result(dim, fx_min, fx_median, fx_std, fx_max, n_evals_mean):
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(fx_min) + "\t" +
str(fx_median) + "\t" +
str(fx_std) + "\t" +
str(fx_max) + "\t" +
str(n_evals_mean)
)
fp.write("\n")
def run_tests():
for dim in range(11, 16):
fx = []
n_evals = []
f, ineq_cons, eq_cons, x_min, x_max, x_lim = get_bm_1_problem(dim)
initial_points = generate_points(x_min, x_max, dim, n=30)
for p in range(len(initial_points)):
x0 = initial_points[p].reshape(-1, 1)
results = generic_fun(f, x0, x_lim, ineq_cons, eq_cons)
n_evals.append(results.n_function_evaluations)
fx.append(results.fx)
with open(str(dim) + "_dim_x0_results.txt", "a+") as fp:
fp.write(str(x0.T[0].tolist()) + "\t" + str(results.fx) + "\t" + str(results.n_function_evaluations))
fp.write("\n")
# print(x0.T[0].tolist(), results.fx, results.n_function_evaluations)
fx = np.array(fx)
n_evals = np.array(n_evals)
n_data = [np.min(fx), np.median(fx), np.std(fx), np.max(fx), np.mean(n_evals)]
with open("results.txt", "a+") as fp:
fp.write(
str(dim) + "\t" +
str(np.min(fx)) + "\t" +
str(np.median(fx)) + "\t" +
str(np.std(fx)) + "\t" +
str(np.max(fx)) + "\t" +
str(np.mean(n_evals))
)
fp.write("\n")
print(n_data)
if __name__ == "__main__":
run_tests()
| 0.486819 | 0.462473 |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.function import GenericFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs1(s):
"""Multiobjective problem example.
Args:
s: (int) sampler selector (0: epsilon, 1: non-dominated, 2: mu, otherwise: lambda).
"""
# objective function 1
def f_obj1(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# parameters objective function 2
Q = np.array([[10, 9], [9, 10]])
c = np.array([[-90], [-100]])
d = np.array([250])
# objectives
f1 = GenericFunction(func=f_obj1, n=2)
f2 = QuadraticFunction(Q=Q, c=c, d=d)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic, n_samples=13)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic, n_samples=13)
elif s == 2:
sampler = MuSampler(optimization_problem=generic, n_samples=13)
else:
sampler = LambdaSampler(optimization_problem=generic, n_samples=13)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 10, delta)
y = np.arange(-5, 10, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f_obj1(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
s = 1
pareto_sampling_cs1(s)
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/pareto_sampling_cs1.py
|
pareto_sampling_cs1.py
|
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.function import GenericFunction
from science_optimization.solvers.pareto_samplers import NonDominatedSampler, EpsilonSampler, LambdaSampler, MuSampler
from science_optimization.problems import GenericProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
def pareto_sampling_cs1(s):
"""Multiobjective problem example.
Args:
s: (int) sampler selector (0: epsilon, 1: non-dominated, 2: mu, otherwise: lambda).
"""
# objective function 1
def f_obj1(x): return np.max(np.abs(x * (.5 + 1e-2) - .5 * np.sin(x) * np.cos(x)), axis=0)
# parameters objective function 2
Q = np.array([[10, 9], [9, 10]])
c = np.array([[-90], [-100]])
d = np.array([250])
# objectives
f1 = GenericFunction(func=f_obj1, n=2)
f2 = QuadraticFunction(Q=Q, c=c, d=d)
f = [f1, f2]
# constraints
ineq_cons = []
eq_cons = []
# bounds
x_min = np.array([-5, -5]).reshape(-1, 1) # lower
x_max = np.array([10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# builder sampler
if s == 0:
sampler = EpsilonSampler(optimization_problem=generic, n_samples=13)
elif s == 1:
sampler = NonDominatedSampler(optimization_problem=generic, n_samples=13)
elif s == 2:
sampler = MuSampler(optimization_problem=generic, n_samples=13)
else:
sampler = LambdaSampler(optimization_problem=generic, n_samples=13)
results = sampler.sample()
# contour
delta = 0.02
x = np.arange(-5, 10, delta)
y = np.arange(-5, 10, delta)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.reshape(1, -1), Y.reshape(1, -1)))
f1eval = np.reshape(f_obj1(XY), X.shape)
f2eval = np.reshape(f2.eval(XY), X.shape)
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of individual functions
fig, ax = plt.subplots()
ax.contour(X, Y, f2eval, 17, colors='k', linewidths=.8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# contour plot of functions and solution
fig, ax = plt.subplots()
ax.contour(X, Y, f1eval, 17, colors='k', linewidths=.8)
ax.contour(X, Y, f2eval, 17, colors='r', linewidths=.8)
plt.scatter(results.x[0, :], results.x[1, :], s=8)
ax.xaxis.set_major_locator(ticker.MultipleLocator(5))
ax.yaxis.set_major_locator(ticker.MultipleLocator(5))
ax.set_xlabel(r'$x_1$')
ax.set_ylabel(r'$x_2$')
# pareto front plot
plt.figure()
plt.scatter(results.fx[0, :], results.fx[1, :], s=8)
plt.xlabel(r'$f_1$')
plt.ylabel(r'$f_2$')
plt.show()
if __name__ == "__main__":
# run example
s = 1
pareto_sampling_cs1(s)
| 0.832169 | 0.574335 |
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import GenericProblem
from science_optimization.algorithms.cutting_plane import EllipsoidMethod
def multiobjective_example():
"""Multiobjective problem example.
"""
# objective functions
xf = np.array([1, 1, 1]).reshape(-1, 1)
Af = 2 * np.identity(3)
bf = -np.matmul(Af, xf)
cf = .5 * np.matmul(np.transpose(xf), np.matmul(Af, xf))
xf2 = np.array([-1, -1, -1]).reshape(-1, 1)
Af2 = np.diag([1, 2, 4])
bf2 = -np.matmul(Af2, xf2)
cf2 = .5 * np.matmul(np.transpose(xf2), np.matmul(Af2, xf2))
f = [QuadraticFunction(Q=.5*Af, c=bf, d=cf), QuadraticFunction(Q=.5*Af2, c=bf2, d=cf2)]
# inequality constraints
Ag = 2 * np.identity(3)
bg = np.zeros((3, 1))
cg = -1
xg2 = np.array([1, 1, 1]).reshape(-1, 1)
Ag2 = 2 * np.identity(3)
bg2 = -np.matmul(Ag2, xg2)
cg2 = .5 * np.matmul(np.transpose(xg2), np.matmul(Ag2, xg2)) - 1
ineq_cons = [QuadraticFunction(Q=.5*Ag, c=bg, d=cg), QuadraticFunction(Q=.5*Ag2, c=bg2, d=cg2)]
# equality constraints
eq_cons = []
# bounds
x_min = np.array([-10, -10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# starting point
x0 = np.array([20, 20, 20]).reshape(-1, 1)
# cut option
shallow_cut = 0
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=EllipsoidMethod(x0=x0, shallow_cut=shallow_cut))
results = optimizer.optimize(debug=True, n_step=5)
# result
results.info()
if __name__ == "__main__":
# run example
multiobjective_example()
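The quadratic data above are a completed square: with Q = 0.5*A, c = -A*x_f and d = 0.5*x_f^T A x_f, the function x^T Q x + c^T x + d equals 0.5*(x - x_f)^T A (x - x_f), so f1 is minimized at (1, 1, 1), f2 at (-1, -1, -1), and the first inequality constraint is the unit ball ||x||^2 <= 1. A quick numerical check of the identity (illustrative only, assuming that evaluation convention for QuadraticFunction):
import numpy as np
xf = np.array([1, 1, 1]).reshape(-1, 1)
Af = 2 * np.identity(3)
bf, cf = -Af @ xf, .5 * xf.T @ Af @ xf
x = np.array([[0.3], [-0.7], [2.0]])
lhs = x.T @ (.5 * Af) @ x + bf.T @ x + cf
rhs = .5 * (x - xf).T @ Af @ (x - xf)
print(np.allclose(lhs, rhs)) # True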
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/examples/multiobjective_example.py
|
multiobjective_example.py
|
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.function import QuadraticFunction
from science_optimization.solvers import Optimizer
from science_optimization.problems import GenericProblem
from science_optimization.algorithms.cutting_plane import EllipsoidMethod
def multiobjective_example():
"""Multiobjective problem example.
"""
# objective functions
xf = np.array([1, 1, 1]).reshape(-1, 1)
Af = 2 * np.identity(3)
bf = -np.matmul(Af, xf)
cf = .5 * np.matmul(np.transpose(xf), np.matmul(Af, xf))
xf2 = np.array([-1, -1, -1]).reshape(-1, 1)
Af2 = np.diag([1, 2, 4])
bf2 = -np.matmul(Af2, xf2)
cf2 = .5 * np.matmul(np.transpose(xf2), np.matmul(Af2, xf2))
f = [QuadraticFunction(Q=.5*Af, c=bf, d=cf), QuadraticFunction(Q=.5*Af2, c=bf2, d=cf2)]
# inequality constraints
Ag = 2 * np.identity(3)
bg = np.zeros((3, 1))
cg = -1
xg2 = np.array([1, 1, 1]).reshape(-1, 1)
Ag2 = 2 * np.identity(3)
bg2 = -np.matmul(Ag2, xg2)
cg2 = .5 * np.matmul(np.transpose(xg2), np.matmul(Ag2, xg2)) - 1
ineq_cons = [QuadraticFunction(Q=.5*Ag, c=bg, d=cg), QuadraticFunction(Q=.5*Ag2, c=bg2, d=cg2)]
# equality constraints
eq_cons = []
# bounds
x_min = np.array([-10, -10, -10]).reshape(-1, 1) # lower
x_max = np.array([10, 10, 10]).reshape(-1, 1) # upper
x_lim = np.hstack((x_min, x_max))
# build generic problem instance
generic = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=eq_cons, ineq_cons=ineq_cons, x_bounds=x_lim))
# starting point
x0 = np.array([20, 20, 20]).reshape(-1, 1)
# cut option
shallow_cut = 0
# builder optimization
optimizer = Optimizer(opt_problem=generic, algorithm=EllipsoidMethod(x0=x0, shallow_cut=shallow_cut))
results = optimizer.optimize(debug=True, n_step=5)
# result
results.info()
if __name__ == "__main__":
# run example
multiobjective_example()
| 0.770206 | 0.634656 |
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import GenericFunction, FunctionsComposite
from science_optimization.solvers import OptimizationResults
from science_optimization.algorithms.unidimensional import GoldenSection
import copy
class DualDecomposition(BaseAlgorithms):
"""Dual decomposition method.
"""
# attributes
_x0 = None
def __init__(self,
x0: np.ndarray=np.array([[]]).reshape(-1, 1),
n_max: int=None,
eps: float=None):
"""Dual decomposition method constructor.
Args:
x0 : (np.ndarray) initial point
n_max: (int) maximum number of iterations for stop criterion
eps : (float) maximum uncertainty for stop criterion
"""
# parameters
self.x0 = 1.0 * x0
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# getters
@property
def x0(self):
return self._x0
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
def optimize(self, optimization_problem, debug=False, n_step=5):
"""Optimization core of Decomposition method.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization parameters
f = optimization_problem.objective.objectives
x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
# check whether the coupling constraints are equalities or inequalities
if not optimization_problem.constraints.inequality_constraints.functions:
g = optimization_problem.constraints.equality_constraints
constraint_type = 1
else:
g = optimization_problem.constraints.inequality_constraints
constraint_type = 0
# instantiate sub-problem and its solver
sp_solver = GoldenSection(eps=self.eps, n_max=int(self.n_max / 2))
sp = OptimizationProblem(builder=GenericProblem(f=[GenericFunction(func=lambda: 1, n=1)],
eq_cons=[],
ineq_cons=[],
x_bounds=np.zeros((0, 2))))
# solve master problem evaluator
def f_master(n):
return -self.master_eval(f=f, g=g, nu=n, x_bounds=x_bounds, op=sp, solver=sp_solver)[1]
# master problem bounds (nu bounds)
if constraint_type:
# equality constraint
x_bounds_master = np.array([[-self.eps**-1, self.eps**-1]])
else:
# inequality constraint
x_bounds_master = np.array([[0, self.eps**-1]])
# optimization parameters
nu = 1.
x = self.x0
k = 0
k_max = int(self.n_max / 10)
stop = False
# master problem and solver
mp = OptimizationProblem(builder=GenericProblem(f=[GenericFunction(func=f_master, n=1)],
eq_cons=[],
ineq_cons=[],
x_bounds=x_bounds_master))
# main loop
mp_solver = GoldenSection(eps=self.eps, n_max=self.n_max)
results = OptimizationResults()
while not stop and k < k_max:
# run algorithm
output = mp_solver.optimize(optimization_problem=mp, debug=False)
# new price (nu)
nu_new = output.x
nu_diff = np.abs(nu - nu_new)
nu = copy.copy(nu_new)
# evaluate master problem
x, fx, gx = self.master_eval(f=f, g=g, nu=nu, x_bounds=x_bounds, op=sp, solver=sp_solver)
# update nu: bounds of master problem
h = 2
x_lb = nu-h*np.abs(nu) if constraint_type else np.maximum(0, nu-h*np.abs(nu))
x_bounds_master = np.array([[x_lb, nu+h*np.abs(nu)]])
# update problem bounds
mp.variables.x_min = x_bounds_master[:, 0].reshape(-1, 1)
mp.variables.x_max = x_bounds_master[:, 1].reshape(-1, 1)
# stop criteria
stop = (np.abs(gx) < self.eps and constraint_type) or (np.abs(nu) < self.eps) or \
(np.diff(x_bounds_master) < self.eps) or (nu_diff < self.eps and k > 0)
# update counter
k += 1
# output
results.x = x
results.fx = f.eval(x)
results.parameter = {'nu': nu}
results.n_iterations = k
return results
@staticmethod
def master_eval(f: FunctionsComposite,
g: FunctionsComposite,
nu: float,
x_bounds: np.ndarray,
op: OptimizationProblem,
solver: GoldenSection):
""" Evaluates master problem.
Args:
f : (FunctionsComposite) objective functions.
g : (FunctionsComposite) constraints.
nu : (float) allocation factor.
x_bounds: (np.ndarray) bounds.
op : (OptimizationProblem) optimization problem.
solver : (GoldenSection) algorithm solver
Returns:
x : (np.ndarray) sub-problems' solution.
fx_master: (np.ndarray) objective evaluation at x.
gx : (np.ndarray) constraint evaluation at x.
"""
# build and solve sub-problems
n = x_bounds.shape[0] # number of variables
x_out = np.zeros((n, 1))
# build generic problem instance
for i in range(f.n_functions):
# i-th sub-problem: minimize f_i + nu * g_i over the i-th coordinate only (remaining coordinates held at zero)
def f_i(x):
y = np.zeros((n, 1))
y[i, :] = x
return f.functions[i].eval(y) + nu * g.functions[i].eval(y)
# update problem objective
op.objective.objectives.remove()
op.objective.objectives.add(GenericFunction(func=f_i, n=1))
# update problem bounds
op.variables.x_min = x_bounds[i, 0].reshape(-1, 1)
op.variables.x_max = x_bounds[i, 1].reshape(-1, 1)
output = solver.optimize(optimization_problem=op, debug=False)
x_out[i, 0] = output.x
# master eval
gx = g.eval(x_out, composition='series')
fx_master = f.eval(x_out, composition='series') + nu * gx
return x_out, fx_master, gx
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/decomposition/dual_decomposition.py
|
dual_decomposition.py
|
| 0.889042 | 0.45641 |
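A minimal usage sketch for the DualDecomposition class above, assuming the GenericProblem builder accepts one objective term and one constraint term per coordinate, which is the layout optimize expects (import paths are inferred from the wheel layout, and the single coupling constraint is supplied as per-coordinate terms whose 'series' sum is the actual constraint):

import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import GenericFunction
from science_optimization.algorithms.decomposition import DualDecomposition

# separable objective f(x) = (x1 - 2)^2 + (x2 - 1)^2
f = [GenericFunction(func=lambda x: (x[0, 0] - 2) ** 2, n=2),
     GenericFunction(func=lambda x: (x[1, 0] - 1) ** 2, n=2)]
# coupling constraint x1 + x2 <= 1, written as (x1 - 0.5) + (x2 - 0.5) <= 0
g = [GenericFunction(func=lambda x: x[0, 0] - 0.5, n=2),
     GenericFunction(func=lambda x: x[1, 0] - 0.5, n=2)]
x_bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])
problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=g, x_bounds=x_bounds))

solver = DualDecomposition(x0=np.zeros((2, 1)), n_max=200, eps=1e-6)
results = solver.optimize(optimization_problem=problem)
print(results.x, results.parameter['nu'])  # expected near x = (1, 0) with price nu = 2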
import numpy as np
from science_optimization.algorithms.derivative_free import NelderMead
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.algorithms.search_direction import QuasiNewton, GradientAlgorithm, NewtonAlgorithm
from science_optimization.builder import OptimizationProblem
from science_optimization.function.lagrange_function import AugmentedLagrangeFunction
from science_optimization.problems import GenericProblem
from science_optimization.solvers import OptimizationResults
from typing import Tuple, Any
class AugmentedLagrangian(BaseAlgorithms):
"""
Augmented Lagrangian algorithm
"""
def __init__(self,
x0: np.ndarray,
n_max: int = None,
eps: float = None,
randx: bool = False,
algorithm: Any = None,
c: float = 1.1):
"""Algorithm constructor.
Args:
x0 : (np.ndarray) initial point
n_max: (int) maximum number of iterations for stop criterion
eps : (float) maximum uncertainty for stop criterion
randx: (bool) True to use a different initial point in each Lagrangian iteration
algorithm: (Any) solver instance for the unconstrained subproblem
(QuasiNewton (BFGS, default), GradientAlgorithm, NewtonAlgorithm or NelderMead)
c: (float) parameter used to update the rho value
"""
# parameters
self.x0 = x0
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
self.randx = randx
if algorithm is not None:
self.algorithm = algorithm
else:
self.algorithm = QuasiNewton(x0=x0)
if c <= 1:
raise Exception('Invalid value, must be greater than one')
self.c = c
# getters
@property
def algorithm(self):
return self._algorithm
@algorithm.setter
def algorithm(self, algorithm):
# verify instances
if issubclass(type(algorithm), QuasiNewton) or issubclass(type(algorithm), GradientAlgorithm) \
or issubclass(type(algorithm), NewtonAlgorithm) or issubclass(type(algorithm), NelderMead):
self._algorithm = algorithm
else:
raise Warning("Invalid algorithm, must solve constrained problems")
def optimize(self, optimization_problem, debug=False, n_step=5):
"""Optimization core of Augmented Lagrangian method
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
f_obj = optimization_problem.objective.objectives
x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
n = len(optimization_problem.variables.x_type)
h = optimization_problem.constraints.equality_constraints
g = optimization_problem.constraints.inequality_constraints
x0 = self.x0
la_function = AugmentedLagrangeFunction(f_obj=f_obj, g=g, h=h, rho=1, c=self.c)
# only parameter that changes through the iterations is f
op_generic = OptimizationProblem(builder=GenericProblem(f=[la_function],
eq_cons=[], ineq_cons=[], x_bounds=x_bounds))
stop_criteria = False
k = 0
prev_x = x0
x_hist = np.array(x0)
f_hist = [f_obj.eval(x0)]
while k < self.n_max and not stop_criteria:
self.algorithm.x0 = x0
results = self.algorithm.optimize(optimization_problem=op_generic, debug=False)
x_new = results.x
if debug:
x_hist = np.hstack((x_hist, x_new))
f_hist.append(results.fx)
# update Lagrange multipliers
la_function.update_multipliers(x_new)
k += 1
if np.linalg.norm(x_new - prev_x) < self.eps:
optimization_results.message = 'Stop by unchanged x value.'
stop_criteria = True
prev_x = x_new
if self.randx:
x0 = np.random.uniform(x_bounds[:, 0], x_bounds[:, 1], (1, n)).transpose()
else:
x0 = x_new
if debug:
optimization_results.x = x_hist
optimization_results.fx = np.array(f_hist)
else:
optimization_results.x = prev_x
optimization_results.fx = f_obj.eval(prev_x)
optimization_results.n_iterations = k
optimization_results.parameter = {'lambda': la_function.lag_eq, 'mu': la_function.lag_ineq}
return optimization_results
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/lagrange/augmented_lagrangian.py
|
augmented_lagrangian.py
|
| 0.910468 | 0.551151 |
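A minimal usage sketch for the AugmentedLagrangian class above (import paths inferred from the wheel layout; GenericFunction is assumed to supply the gradients needed by the default QuasiNewton inner solver, otherwise pass algorithm=NelderMead(x0=x0)):

import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import GenericFunction
from science_optimization.algorithms.lagrange import AugmentedLagrangian

# minimize (x1 - 1)^2 + (x2 - 2)^2  subject to  x1 + x2 - 2 = 0
f = [GenericFunction(func=lambda x: (x[0, 0] - 1) ** 2 + (x[1, 0] - 2) ** 2, n=2)]
h = [GenericFunction(func=lambda x: x[0, 0] + x[1, 0] - 2, n=2)]
x_bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])
problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=h, ineq_cons=[], x_bounds=x_bounds))

x0 = np.zeros((2, 1))
solver = AugmentedLagrangian(x0=x0, n_max=50, eps=1e-6, c=1.5)
results = solver.optimize(optimization_problem=problem)
print(results.x, results.fx)  # expected minimizer near x = (0.5, 1.5)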
import copy
import numpy as np
from science_optimization.algorithms.utils import box_constraints
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
from science_optimization.function import BaseFunction
from science_optimization.algorithms import BaseAlgorithms
class NelderMead(BaseAlgorithms):
"""
Nelder-Mead simplex algorithm to minimize derivative-free non-linear functions.
"""
# starting point
_x0 = None
_x_min = None
_x_max = None
_x_bounds = None
_x_min_norm = None
_x_max_norm = None
# problem dimension
_dim = None
# function
_f = None
# constraint
_g = None
# function values
_fx = None
_gx = None
# algorithm constants
_delta_r = None
_delta_e = None
_delta_ic = None
_delta_oc = None
_delta_s = None
# simplex point lists
_simplex = None
def __init__(self, x0, delta_r=1.0, delta_e=2.0, delta_ic=0.5, delta_oc=0.5, delta_s=0.5):
"""
Args:
x0: (np.ndarray) initial point (column vector).
delta_r: (float) reflection coefficient.
delta_e: (float) expansion coefficient.
delta_ic: (float) inside contraction coefficient.
delta_oc: (float) outside contraction coefficient.
delta_s: (float) shrink coefficient.
"""
self.x0 = x0
self.dim = x0.shape[0]
self.x_min_norm = np.zeros((self.dim, 1))
self.x_max_norm = np.full((self.dim, 1), 100)
self.delta_r = delta_r
self.delta_e = delta_e
self.delta_ic = delta_ic
self.delta_oc = delta_oc
self.delta_s = delta_s
self.simplex = []
self.fx = None
self.gx = None
self.x_min = None
self.x_max = None
self.x_bounds = None
@property
def x0(self):
return self._x0
@property
def x_min(self):
return self._x_min
@property
def x_max(self):
return self._x_max
@property
def x_bounds(self):
return self._x_bounds
@property
def x_min_norm(self):
return self._x_min_norm
@property
def x_max_norm(self):
return self._x_max_norm
@property
def dim(self):
return self._dim
@property
def f(self):
return self._f
@property
def g(self):
return self._g
@property
def fx(self):
return self._fx
@property
def gx(self):
return self._gx
@property
def delta_r(self):
return self._delta_r
@property
def delta_e(self):
return self._delta_e
@property
def delta_ic(self):
return self._delta_ic
@property
def delta_oc(self):
return self._delta_oc
@property
def delta_s(self):
return self._delta_s
@property
def simplex(self):
return self._simplex
@x0.setter
def x0(self, value):
self._x0 = value
@x_min.setter
def x_min(self, value):
self._x_min = value
@x_max.setter
def x_max(self, value):
self._x_max = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
@x_min_norm.setter
def x_min_norm(self, value):
self._x_min_norm = value
@x_max_norm.setter
def x_max_norm(self, value):
self._x_max_norm = value
@dim.setter
def dim(self, value):
self._dim = value
@f.setter
def f(self, value):
if not isinstance(value, BaseFunction):
raise Exception("The function must be an instance of BaseFunction!")
self._f = value
@g.setter
def g(self, value):
if not isinstance(value, BaseFunction):
raise Exception("The function must be an instance of BaseFunction!")
self._g = value
@fx.setter
def fx(self, value):
self._fx = value
@gx.setter
def gx(self, value):
self._gx = value
@delta_r.setter
def delta_r(self, value):
self._delta_r = value
@delta_e.setter
def delta_e(self, value):
self._delta_e = value
@delta_ic.setter
def delta_ic(self, value):
self._delta_ic = value
@delta_oc.setter
def delta_oc(self, value):
self._delta_oc = value
@delta_s.setter
def delta_s(self, value):
self._delta_s = value
@simplex.setter
def simplex(self, value):
self._simplex = value
def initialize_fminsearch(self):
"""
Args:
dim:
Returns:
"""
simplex = [self.x0]
for i in range(self.dim):
e_i = np.eye(1, self.dim, i).reshape(self.dim, 1)
h_i = 0.05 if self.x0[i][0] != 0 else 0.00025
simplex.append(box_constraints(self.x0 + h_i * e_i, self.x_bounds))
self.simplex = simplex
def initialize_simplex_size(self, size):
"""
Args:
size:
Returns:
"""
dim = self.dim
simplex = [self.x0]
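# offsets that build a regular simplex of edge length 'size' around x0: p along the chosen axis, q along the remaining axes (signs chosen so the offsets point away from the nearer box limit)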
p = size / (dim * np.sqrt(2))
p = p * ((np.sqrt(dim+1)) + dim - 1)
q = size / (dim * np.sqrt(2))
q = q * ((np.sqrt(dim + 1)) - 1)
e = np.identity(dim)
for i in range(1, dim+1):
point_sum = np.zeros((dim, 1))
p_sign = 1
e[i - 1][i - 1] = 0
for j in range(dim):
if self.x0[j][0] > (self.x_min_norm[j][0] + self.x_max_norm[j][0]) / 2:
point_sum += -1 * q * e[:, j].reshape(dim, 1)
else:
point_sum += q * e[:, j].reshape(dim, 1)
e[i - 1][i - 1] = 1
if self.x0[i - 1][0] > (self.x_min_norm[i - 1][0] + self.x_max_norm[i - 1][0]) / 2:
p_sign = -1
new_point = self.x0 + p_sign * p * e[i - 1].reshape(dim, 1) + point_sum
simplex.append(new_point)
self.simplex = simplex
def centroid(self, xw_index):
"""
Args:
xw_index:
Returns:
"""
simplex = copy.deepcopy(self.simplex)
del(simplex[xw_index])
return np.mean(simplex, axis=0)
def reflect(self, x_centroid, xw_index):
"""
Args:
x_centroid:
Returns:
"""
return x_centroid + self.delta_r * (x_centroid - self.simplex[xw_index])
def expand(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid + self.delta_e * (x_reflect - x_centroid)
def inside_contraction(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid - self.delta_ic * (x_reflect - x_centroid)
def outside_contraction(self, x_centroid, x_reflect):
"""
Args:
x_centroid:
x_reflect:
Returns:
"""
return x_centroid + self.delta_oc * (x_reflect - x_centroid)
def shrink(self, x_best):
"""
Args:
x_best:
Returns:
"""
for j in range(1, len(self.simplex)):
x_new = x_best + self.delta_s * (self.simplex[j] - x_best)
fx_new, gx_new = self.eval_fg(self.norm2real(x_new))
self.replace_point(idx=j, x=x_new, fx=fx_new, gx=gx_new)
def box_feasible(self, x):
"""
Args:
x:
Returns:
"""
return not(any(np.less(x, self.x_min_norm)) or any(np.greater(x, self.x_max_norm)))
@staticmethod
def is_less_than(fx_1, gx_1, fx_2, gx_2):
"""
Args:
fx_1:
gx_1:
fx_2:
gx_2:
Returns:
"""
if gx_1 > 0 and gx_2 > 0:
return gx_1 < gx_2
elif gx_1 <= 0 and gx_2 <= 0:
return fx_1 < fx_2
else:
return gx_1 <= 0
def norm2real(self, x_norm):
"""
Args:
x_norm:
Returns:
"""
x = 0.01 * x_norm
x = (self.x_max - self.x_min) * x
x = x + self.x_min
return x
def real2norm(self, x):
"""
Args:
x:
Returns:
"""
x_norm = (x - self.x_min) / (self.x_max - self.x_min)
x_norm = x_norm * 100
return x_norm
def constraint_sum(self, x):
"""
Args:
x:
Returns:
"""
if self.g is not None:
gx_eval = self.g.eval(x)
return np.sum(gx_eval[np.where(gx_eval > self.eps)])
else:
return 0
def eval_fg(self, x):
"""
Args:
x:
Returns:
"""
fx = self.f.eval(x)
gx = self.constraint_sum(x=x)
return fx, gx
def replace_point(self, idx, x, fx, gx):
"""
Args:
idx:
x:
fx:
gx:
Returns:
"""
self.simplex[idx] = x
self.fx[idx] = fx
self.gx[idx] = gx
def min(self, x, y):
"""
Args:
x:
y:
Returns:
"""
x_real = self.norm2real(x)
y_real = self.norm2real(y)
fx, gx = self.eval_fg(x_real)
fy, gy = self.eval_fg(y_real)
if self.is_less_than(fx, gx, fy, gy):
return x
return y
def sort_simplex(self):
"""
Returns:
"""
index = [x for x in range(len(self.fx))]
gx_fx_idx = [(x, y, z) for x, y, z in zip(self.gx, self.fx, index)]
result = [t[2] for t in sorted(gx_fx_idx)]
return result
def optimize(self, optimization_problem, debug=False, n_step=10):
"""
Args:
optimization_problem:
debug:
n_step:
Returns:
"""
if not isinstance(optimization_problem, OptimizationProblem):
raise Exception("Optimize must have and OptimizationProblem instance as argument!")
if optimization_problem.objective.objectives.n_functions != 1:
raise Exception("Method able to optimize only one function.")
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
self.f = optimization_problem.objective.objectives.functions[0]
if optimization_problem.has_inequality_constraints():
self.g = optimization_problem.constraints.inequality_constraints
self.x_min = optimization_problem.variables.x_min
self.x_max = optimization_problem.variables.x_max
self.x_bounds = np.hstack((optimization_problem.variables.x_min, optimization_problem.variables.x_max))
self.x0 = box_constraints(self.x0, self.x_bounds)
self.x0 = self.real2norm(self.x0)
self.initialize_simplex_size(size=10)
self.fx = np.array([self.f.eval(self.norm2real(x)) for x in self.simplex])
optimization_results.n_function_evaluations += len(self.simplex)
if self.g is not None:
gx = []
for x in self.simplex:
gx.append(self.constraint_sum(x=self.norm2real(x)))
self.gx = np.array(gx)
else:
self.gx = np.zeros(len(self.simplex))
index = self.sort_simplex()
b = index[0]
s = index[-2]
w = index[-1]
stop = False
while optimization_results.n_iterations < self.n_max and not stop:
x_c = self.centroid(xw_index=w)
x_r = self.reflect(x_c, w)
x_b = self.simplex[b]
x_s = self.simplex[s]
x_w = self.simplex[w]
fx_b, gx_b = self.eval_fg(self.norm2real(x_b))
fx_s, gx_s = self.eval_fg(self.norm2real(x_s))
fx_w, gx_w = self.eval_fg(self.norm2real(x_w))
optimization_results.n_function_evaluations += 3
if self.box_feasible(x_r):
fx_r, gx_r = self.eval_fg(self.norm2real(x_r))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_r, gx_r, fx_b, gx_b):
x_e = self.expand(x_centroid=x_c, x_reflect=x_r)
use_reflection = True
if self.box_feasible(x_e):
fx_e, gx_e = self.eval_fg(self.norm2real(x_e))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_e, gx_e, fx_r, gx_r):
self.replace_point(idx=w, x=x_e, fx=fx_e, gx=gx_e)
use_reflection = False
if debug:
print("expansion")
if use_reflection:
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection e")
elif self.is_less_than(fx_r, gx_r, fx_s, gx_s):
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection r")
elif self.is_less_than(fx_r, gx_r, fx_w, gx_w):
x_oc = self.outside_contraction(x_centroid=x_c, x_reflect=x_r)
use_reflection = True
if self.box_feasible(x_oc):
fx_oc, gx_oc = self.eval_fg(self.norm2real(x_oc))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_oc, gx_oc, fx_r, gx_r):
self.replace_point(idx=w, x=x_oc, fx=fx_oc, gx=gx_oc)
use_reflection = False
if debug:
print("outside contract")
if use_reflection:
self.replace_point(idx=w, x=x_r, fx=fx_r, gx=gx_r)
if debug:
print("reflection oc")
else:
x_ic = self.inside_contraction(x_centroid=x_c, x_reflect=x_r)
use_shrink = True
if self.box_feasible(x_ic):
fx_ic, gx_ic = self.eval_fg(self.norm2real(x_ic))
optimization_results.n_function_evaluations += 1
if self.is_less_than(fx_ic, gx_ic, fx_r, gx_r):
self.replace_point(idx=w, x=x_ic, fx=fx_ic, gx=gx_ic)
use_shrink = False
if debug:
print("inside contract")
if use_shrink:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
if debug:
print("shrink")
else:
x_oc = self.outside_contraction(x_centroid=x_c, x_reflect=x_r)
x_ic = self.inside_contraction(x_centroid=x_c, x_reflect=x_r)
fx_ic, gx_ic = self.eval_fg(self.norm2real(x_ic))
if debug:
print("xr infeasible")
if self.box_feasible(x_oc):
x_new = self.min(x_oc, self.min(x_ic, x_w))
optimization_results.n_function_evaluations += 4
if not all(np.equal(x_new, x_w)):
fx_new, gx_new = self.eval_fg(self.norm2real(x_new))
optimization_results.n_function_evaluations += 1
self.replace_point(idx=w, x=x_new, fx=fx_new, gx=gx_new)
else:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
elif self.is_less_than(fx_ic, gx_ic, fx_w, gx_w):
self.replace_point(idx=w, x=x_ic, fx=fx_ic, gx=gx_ic)
else:
self.shrink(x_best=x_b)
optimization_results.n_function_evaluations += self.dim
index = self.sort_simplex()
b = index[0]
s = index[-2]
w = index[-1]
x_norms = [np.linalg.norm(x - self.simplex[b], ord=np.inf, axis=0) for x in self.simplex]
if max(x_norms) < self.eps:
optimization_results.message = "Stop by norm of the max edge of the simplex less than " + str(self.eps)
stop = True
fx_norms = [np.abs(self.f.eval(self.norm2real(x)) - self.f.eval(self.norm2real(self.simplex[b]))) for x in self.simplex]
if max(fx_norms) < self.eps:
optimization_results.message = "Stop by norm of the max image of the simplex points less than " +\
str(self.eps)
stop = True
optimization_results.n_iterations += 1
optimization_results.x = self.norm2real(self.simplex[b])
optimization_results.fx = self.fx[b]
return optimization_results
def print_simplex(self):
simplex = np.array(self.simplex)
print(simplex, '\n')
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/derivative_free/nelder_mead.py
|
nelder_mead.py
|
| 0.787646 | 0.533944 |
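A minimal usage sketch for the NelderMead class above on a box-constrained Rosenbrock function (import paths inferred from the wheel layout; eps and n_max come from the BaseAlgorithms defaults, since the constructor only takes the simplex coefficients):

import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import GenericFunction
from science_optimization.algorithms.derivative_free import NelderMead

def rosenbrock(x):
    return (1 - x[0, 0]) ** 2 + 100 * (x[1, 0] - x[0, 0] ** 2) ** 2

f = [GenericFunction(func=rosenbrock, n=2)]
x_bounds = np.array([[-2.0, 2.0], [-2.0, 2.0]])
problem = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=x_bounds))

solver = NelderMead(x0=np.array([[-1.0], [1.0]]))  # default reflection/expansion/contraction/shrink coefficients
results = solver.optimize(optimization_problem=problem)
print(results.x, results.fx)  # expected to approach x = (1, 1), f = 0
print(results.message)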
import nlpalg
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.builder import OptimizationProblem
class EllipsoidMethod(BaseAlgorithms):
"""Ellipsoid algorithm method.
"""
# attributes
_x0 = None
_Q0 = None
_max_cuts = None
_shallow_cut = None
_decomposition = None
_memory = None
def __init__(self,
x0: np.ndarray=np.array([[]]).reshape(-1, 1),
Q0: np.ndarray=np.array([[]]),
max_cuts: int=32,
shallow_cut: float=0,
decomposition: bool=True,
memory: bool=True,
n_max: int=None,
eps: float=None):
"""Ellipsoid algorithm constructor.
Args:
x0 : (np.ndarray) initial point.
Q0 : (np.ndarray) initial inverse ellipsoid matrix.
max_cuts : (int) maximum number of ellipsoid cuts per iteration.
shallow_cut : (float) shallow cut option [0, 1].
decomposition: (bool) is matrix decomposition indicator (True: sqrt decomposition).
memory : (bool) cut memory indicator.
n_max : (int) maximum number of iterations for stop criterion.
eps : (float) maximum uncertainty for stop criterion.
"""
# parameters
self.x0 = 1.0 * x0
self.Q0 = Q0
self.max_cuts = max_cuts
self.shallow_cut = shallow_cut
self.decomposition = decomposition
self.memory = memory
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# getters
@property
def x0(self):
return self._x0
@property
def Q0(self):
return self._Q0
@property
def max_cuts(self):
return self._max_cuts
@property
def shallow_cut(self):
return self._shallow_cut
@property
def decomposition(self):
return self._decomposition
@property
def memory(self):
return self._memory
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
@Q0.setter
def Q0(self, Q0):
# check if input is numpy
if not isinstance(Q0, np.ndarray):
raise Warning("x must be a numpy array!")
else:
self._Q0 = Q0
@max_cuts.setter
def max_cuts(self, k):
if k > 0:
self._max_cuts = k
else:
raise ValueError("Maximum number of cuts must be a positive number!")
@shallow_cut.setter
def shallow_cut(self, s):
if 0 <= s <= 1:
self._shallow_cut = s
else:
raise ValueError("Shallow cut must be in [0, 1).")
@decomposition.setter
def decomposition(self, d):
# check if input is boolean
if not isinstance(d, bool):
raise Warning("Decomposition must be a boolean!")
else:
self._decomposition = d
@memory.setter
def memory(self, m):
# check if input is boolean
if not isinstance(m, bool):
raise Warning("Memory must be a boolean!")
else:
self._memory = m
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool=True,
n_step: int=5) -> OptimizationResults:
"""Optimization core of Ellipsoid method.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# get input arguments
f, df, _, _, g, dg, A, b, Aeq, beq, x_min, x_max, _ = optimization_problem.op_arguments()
# optimization results
optimization_results = OptimizationResults()
# call method
if not debug:
# method output
xb, fxb, _, _, _, stop = nlpalg.ellipsoidmethod(f, df, g, dg, A, b, Aeq, beq, x_min, x_max, self.x0,
self.Q0, self.eps, self.n_max, self.max_cuts,
self.shallow_cut, self.decomposition, self.memory, debug)
# results
optimization_results.x = xb
optimization_results.fx = fxb
else:
# TODO (matheus): implement iterative run
_, _, x, fx, Qi, stop = nlpalg.ellipsoidmethod(f, df, g, dg, A, b, Aeq, beq, x_min, x_max, self.x0, self.Q0,
self.eps, self.n_max, self.max_cuts, self.shallow_cut,
self.decomposition, self.memory, debug)
# optimization results
optimization_results.n_iterations = x.shape[1] # number of iterations
optimization_results.x = x[:, 0::n_step]
optimization_results.fx = fx[:, 0::n_step]
optimization_results.parameter = {'Q': Qi[..., 0::n_step]}
# stop criteria
if stop == 0:
optimization_results.message = 'Stop by maximum number of iterations.'
elif stop == 1:
optimization_results.message = 'Stop by ellipsoid volume reduction.'
elif stop == 2:
optimization_results.message = 'Stop by empty localizing set.'
elif stop == 3:
optimization_results.message = 'Stop by degenerate ellipsoid.'
else:
optimization_results.message = 'Unknown termination cause.'
return optimization_results
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/cutting_plane/ellipsoid_method.py
|
ellipsoid_method.py
|
| 0.82151 | 0.428592 |
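The numerical work in EllipsoidMethod is delegated to the compiled nlpalg.ellipsoidmethod routine, so the update itself is not visible above. For orientation, a single textbook central-cut ellipsoid step has the form sketched below (an illustrative NumPy-only sketch of the standard update, not the nlpalg implementation; it ignores the deep/shallow cuts, constraint cuts and cut memory the class exposes):

import numpy as np

def ellipsoid_step(x, Q, grad):
    # one central-cut update of the ellipsoid {z : (z - x)^T Q^{-1} (z - x) <= 1}
    n = x.shape[0]
    g = grad / np.sqrt(float(grad.T @ Q @ grad))   # normalize the cut in the ellipsoid metric
    x_new = x - (1.0 / (n + 1)) * (Q @ g)          # move the centre against the (sub)gradient
    Q_new = (n ** 2 / (n ** 2 - 1.0)) * (Q - (2.0 / (n + 1)) * (Q @ g) @ (g.T @ Q))
    return x_new, Q_new

# toy run on f(x) = ||x - c||^2 with gradient 2 * (x - c)
c = np.array([[1.0], [2.0]])
x, Q = np.zeros((2, 1)), 100.0 * np.identity(2)    # start with a large ball of radius 10
for _ in range(50):
    x, Q = ellipsoid_step(x, Q, 2 * (x - c))
print(x.ravel())  # approaches (1, 2)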
import abc
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.algorithms.unidimensional import GoldenSection, MultimodalGoldenSection
from science_optimization.solvers import OptimizationResults
from science_optimization.algorithms.utils import hypercube_intersection
from science_optimization.algorithms.utils import box_constraints
from science_optimization.function import GenericFunction, BaseFunction
from science_optimization.problems import GenericProblem
from science_optimization.builder import OptimizationProblem
from typing import Tuple
class BaseSearchDirection(BaseAlgorithms):
"""Base class for search direction algorithms.
"""
# attributes
_x0 = None
_x_bounds = None
_uni_dimensional_opt_strategy = None
_fun = None
def __init__(self,
x0: np.ndarray,
n_max: int = None,
eps: float = None,
line_search_method: str='gs'):
"""Constructor of search direction algorithms.
Args:
x0 : (np.ndarray) initial point.
n_max : (int) maximum number of iterations.
eps : (float) maximum uncertainty for stop criterion.
line_search_method: (str) line search strategy ('gs': golden section or 'mgs' multimodal gs).
"""
self.x0 = 1.0 * x0
self.uni_dimensional_opt_strategy = line_search_method
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# attributes interface
@property
def x0(self):
return self._x0
@property
def x_bounds(self):
return self._x_bounds
@property
def uni_dimensional_opt_strategy(self):
return self._uni_dimensional_opt_strategy
@property
def fun(self):
return self._fun
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
@x_bounds.setter
def x_bounds(self, x_bounds):
if x_bounds.shape[1] == 2:
self._x_bounds = x_bounds
else:
raise ValueError("x_bounds must be a nx2-array.")
@fun.setter
def fun(self, fun):
self._fun = fun
@uni_dimensional_opt_strategy.setter
def uni_dimensional_opt_strategy(self, uni_d_strategy):
self._uni_dimensional_opt_strategy = uni_d_strategy
def correct_direction_by_box(self, d: np.ndarray, x: np.ndarray, alpha):
"""
Zero out direction components that would push the next iterate past (or within eps of) the box limits.
Args:
d: current direction
x: current x value
alpha: previous value of alpha (unidimensional optimization)
Returns:
"""
for i, d_each in enumerate(d):
if x[i] + d_each * alpha > self.x_bounds[i][1] - self.eps:  # too close to (or past) the upper bound
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
if x[i] + d_each * alpha < self.x_bounds[i][0] + self.eps:
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
# methods
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int=5):
"""Optimization core of Search direction methods.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# instantiate results
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
# define functions
self.fun = optimization_problem.objective.objectives
# initial point
x = self.x0
# bounds
self.x_bounds = np.hstack((optimization_problem.variables.x_min,
optimization_problem.variables.x_max))
# correct x to bounds
x = box_constraints(x, self.x_bounds)
# initial results
nf = optimization_problem.objective.objectives.n_functions # number of functions
fx = np.zeros((nf, 0))
optimization_results.x = np.zeros((x.shape[0], 0))
# store parameters in debug option
debug = False  # TODO(Matheus): debug-mode history storage is disabled here until it is implemented
if debug:
optimization_results.parameter = {'alpha': np.zeros((0,))}
alpha = 1
# main loop
stop = False
while optimization_results.n_iterations < self.n_max and not stop:
# compute search direction
d = self._search_direction(fun=self.fun, x=x)
self.correct_direction_by_box(d, x, alpha)
# compute search interval
interval = self._search_interval(x=x, d=d)
# uni-dimensional optimization
alpha, nfe = self._uni_dimensional_optimization(x=x, d=d, fun=self.fun, interval=interval,
strategy=self.uni_dimensional_opt_strategy, debug=debug)
if debug:
alpha = alpha[:, -1]
# update function evaluation count
optimization_results.n_function_evaluations += nfe
# step towards search direction
y = x + alpha*d
fx_x = self.fun.eval(x)
fx_y = self.fun.eval(y)
# stop criteria: stalled
if np.linalg.norm(x-y, 2) < self.eps:
optimization_results.message = 'Stop by stalled search.'
stop = True
# stop criteria: unchanged function value
if np.abs(fx_x - fx_y) < self.eps:
optimization_results.message = 'Stop by unchanged function value.'
stop = True
# stop criteria: null gradient
if np.linalg.norm(self.fun.gradient(y), 2) < self.eps:
optimization_results.message = 'Stop by null gradient.'
stop = True
# update x
x = y.copy()
fx_x = fx_y.copy()
# update results
if debug and not (optimization_results.n_iterations + 1) % n_step:
optimization_results.x = np.hstack((optimization_results.x, x))
fx = np.hstack((fx, fx_x))
optimization_results.fx = fx
optimization_results.parameter['alpha'] = np.hstack((optimization_results.parameter['alpha'],
np.array(alpha)))
if not debug:
optimization_results.x = x
optimization_results.fx = fx_x
# update count
optimization_results.n_iterations += 1
return optimization_results
@abc.abstractmethod
def _search_direction(self, **kwargs) -> np.ndarray:
"""Abstract search direction."""
pass
@staticmethod
def _uni_dimensional_optimization(x: np.ndarray,
d: np.ndarray,
fun: BaseFunction,
interval: list,
strategy: str,
debug: bool) -> Tuple[np.ndarray, int]:
"""Unidimensional optimization.
Args:
x : (np.ndarray) current point.
d : (np.ndarray) search direction.
fun : (BaseFunction) function object.
interval: (list) interval of search [a, b].
strategy: (str) which uni-dimensional strategy to use.
debug : (bool) debug option indicator.
Returns:
alpha: optimal step
nfe : number of function evaluations
"""
# objective function
def line_search_function(a):
return fun.eval(x + a*d)
# function encapsulation
f = [GenericFunction(func=line_search_function, n=1)]
interval = np.array(interval).reshape(1, -1)
# build problem
op = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=interval))
# instantiate uni-dimensional optimization class
if strategy == "gs":
op_result = GoldenSection()
elif strategy == 'mgs':
op_result = MultimodalGoldenSection(all_minima=False)
else:
raise Warning("Unknown unidimensional optimization strategy.")
# optimize
output = op_result.optimize(optimization_problem=op, debug=debug)
alpha = output.x
nfe = output.n_function_evaluations
return alpha, nfe
def _search_interval(self, x: np.ndarray, d: np.ndarray) -> list:
"""Determination of search interval.
Args:
x: (np.ndarray) current point.
d: (np.ndarray) search direction.
Returns:
interval: (list) [a, b] search interval.
"""
# interval
a = 0
if np.linalg.norm(d) < self.eps:
b = a
else:
b, _ = hypercube_intersection(x=x, d=d, x_bounds=self.x_bounds) # maximum step
interval = [a, b]
return interval
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/search_direction/base_search_direction.py
|
base_search_direction.py
|
import abc
import numpy as np
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.algorithms.unidimensional import GoldenSection, MultimodalGoldenSection
from science_optimization.solvers import OptimizationResults
from science_optimization.algorithms.utils import hypercube_intersection
from science_optimization.algorithms.utils import box_constraints
from science_optimization.function import GenericFunction, BaseFunction
from science_optimization.problems import GenericProblem
from science_optimization.builder import OptimizationProblem
from typing import Tuple
class BaseSearchDirection(BaseAlgorithms):
"""Base class for search direction algorithms.
"""
# attributes
_x0 = None
_x_bounds = None
_uni_dimensional_opt_strategy = None
_fun = None
def __init__(self,
x0: np.ndarray,
n_max: int = None,
eps: float = None,
line_search_method: str='gs'):
"""Constructor of search direction algorithms.
Args:
x0 : (np.ndarray) initial point.
n_max : (int) maximum number of iterations.
eps : (float) maximum uncertainty for stop criterion.
            line_search_method: (str) line search strategy ('gs': golden section or 'mgs': multimodal golden section).
"""
self.x0 = 1.0 * x0
self.uni_dimensional_opt_strategy = line_search_method
if n_max is not None:
self.n_max = n_max
if eps is not None:
self.eps = eps
# attributes interface
@property
def x0(self):
return self._x0
@property
def x_bounds(self):
return self._x_bounds
@property
def uni_dimensional_opt_strategy(self):
return self._uni_dimensional_opt_strategy
@property
def fun(self):
return self._fun
# setters
@x0.setter
def x0(self, x0):
if x0.shape[1] == 1:
self._x0 = x0
else:
raise ValueError("Initial point must be a column vector.")
@x_bounds.setter
def x_bounds(self, x_bounds):
if x_bounds.shape[1] == 2:
self._x_bounds = x_bounds
else:
raise ValueError("x_bounds must be a nx2-array.")
@fun.setter
def fun(self, fun):
self._fun = fun
@uni_dimensional_opt_strategy.setter
def uni_dimensional_opt_strategy(self, uni_d_strategy):
self._uni_dimensional_opt_strategy = uni_d_strategy
def correct_direction_by_box(self, d: np.ndarray, x: np.ndarray, alpha):
"""
        check for values too near the box limits and damp those components of the direction so the step stays inside the box
Args:
d: current direction
x: current x value
alpha: previous value of alpha (unidimensional optimization)
Returns:
"""
for i, d_each in enumerate(d):
if x[i] + d_each * alpha > self.x_bounds[i][1] + self.eps:
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
if x[i] + d_each * alpha < self.x_bounds[i][0] + self.eps:
d[i] = self.eps ** 2
d = d / np.linalg.norm(d, 2)
# methods
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int=5):
"""Optimization core of Search direction methods.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# instantiate results
optimization_results = OptimizationResults()
optimization_results.message = 'Stop by maximum number of iterations.'
# define functions
self.fun = optimization_problem.objective.objectives
# initial point
x = self.x0
# bounds
self.x_bounds = np.hstack((optimization_problem.variables.x_min,
optimization_problem.variables.x_max))
# correct x to bounds
x = box_constraints(x, self.x_bounds)
# initial results
nf = optimization_problem.objective.objectives.n_functions # number of functions
fx = np.zeros((nf, 0))
optimization_results.x = np.zeros((x.shape[0], 0))
# store parameters in debug option
        debug = False  # TODO(Matheus): debug storage is currently disabled; the flag is forced off here
if debug:
optimization_results.parameter = {'alpha': np.zeros((0,))}
alpha = 1
# main loop
stop = False
while optimization_results.n_iterations < self.n_max and not stop:
# compute search direction
d = self._search_direction(fun=self.fun, x=x)
self.correct_direction_by_box(d, x, alpha)
# compute search interval
interval = self._search_interval(x=x, d=d)
# uni-dimensional optimization
alpha, nfe = self._uni_dimensional_optimization(x=x, d=d, fun=self.fun, interval=interval,
strategy=self.uni_dimensional_opt_strategy, debug=debug)
if debug:
alpha = alpha[:, -1]
# update function evaluation count
optimization_results.n_function_evaluations += nfe
# step towards search direction
y = x + alpha*d
fx_x = self.fun.eval(x)
fx_y = self.fun.eval(y)
# stop criteria: stalled
if np.linalg.norm(x-y, 2) < self.eps:
optimization_results.message = 'Stop by stalled search.'
stop = True
# stop criteria: unchanged function value
if np.abs(fx_x - fx_y) < self.eps:
optimization_results.message = 'Stop by unchanged function value.'
stop = True
# stop criteria: null gradient
if np.linalg.norm(self.fun.gradient(y), 2) < self.eps:
optimization_results.message = 'Stop by null gradient.'
stop = True
# update x
x = y.copy()
fx_x = fx_y.copy()
# update results
if debug and not (optimization_results.n_iterations + 1) % n_step:
optimization_results.x = np.hstack((optimization_results.x, x))
fx = np.hstack((fx, fx_x))
optimization_results.fx = fx
optimization_results.parameter['alpha'] = np.hstack((optimization_results.parameter['alpha'],
np.array(alpha)))
if not debug:
optimization_results.x = x
optimization_results.fx = fx_x
# update count
optimization_results.n_iterations += 1
return optimization_results
@abc.abstractmethod
def _search_direction(self, **kwargs) -> np.ndarray:
"""Abstract search direction."""
pass
@staticmethod
def _uni_dimensional_optimization(x: np.ndarray,
d: np.ndarray,
fun: BaseFunction,
interval: list,
strategy: str,
debug: bool) -> Tuple[np.ndarray, int]:
"""Unidimensional optimization.
Args:
x : (np.ndarray) current point.
d : (np.ndarray) search direction.
fun : (BaseFunction) function object.
interval: (list) interval of search [a, b].
strategy: (str) which uni-dimensional strategy to use.
debug : (bool) debug option indicator.
Returns:
alpha: optimal step
nfe : number of function evaluations
"""
# objective function
def line_search_function(a):
return fun.eval(x + a*d)
# function encapsulation
f = [GenericFunction(func=line_search_function, n=1)]
interval = np.array(interval).reshape(1, -1)
# build problem
op = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=[], x_bounds=interval))
# instantiate uni-dimensional optimization class
if strategy == "gs":
op_result = GoldenSection()
elif strategy == 'mgs':
op_result = MultimodalGoldenSection(all_minima=False)
else:
raise Warning("Unknown unidimensional optimization strategy.")
# optimize
output = op_result.optimize(optimization_problem=op, debug=debug)
alpha = output.x
nfe = output.n_function_evaluations
return alpha, nfe
def _search_interval(self, x: np.ndarray, d: np.ndarray) -> list:
"""Determination of search interval.
Args:
x: (np.ndarray) current point.
d: (np.ndarray) search direction.
Returns:
interval: (list) [a, b] search interval.
"""
# interval
a = 0
if np.linalg.norm(d) < self.eps:
b = a
else:
b, _ = hypercube_intersection(x=x, d=d, x_bounds=self.x_bounds) # maximum step
interval = [a, b]
return interval
| 0.809803 | 0.443902 |
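# Usage sketch (not part of the package source): a minimal concrete subclass of
# BaseSearchDirection that supplies a steepest-descent direction. The import path
# and the assumption that fun.gradient(x) returns an n x 1 column (as the stop
# criteria in optimize() already expect) are assumptions, not the library's API.
import numpy as np
from science_optimization.algorithms.search_direction import BaseSearchDirection  # assumed re-export


class GradientDescentDirection(BaseSearchDirection):
    """Steepest-descent search direction d = -grad f(x)."""

    def _search_direction(self, fun, x):
        g = fun.gradient(x)
        norm = np.linalg.norm(g, 2)
        # normalize so the line-search interval stays well scaled; near a
        # stationary point just return the (near-zero) negative gradient
        return -g / norm if norm > self.eps else -g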
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from scipy.optimize import linprog
import numpy as np
class ScipyBaseLinear(BaseAlgorithms):
"""Base scipy linear method.
"""
# parameters
_method = None
def __init__(self, method=None, n_max=None):
"""Constructor.
Args:
method: 'simplex' or 'interior-point'.
n_max: maximum number of iterations.
"""
if n_max is not None:
self.n_max = n_max
if method is not None:
self.method = method
# get
@property
def method(self):
"""Gets method."""
return self._method
# sets
@method.setter
def method(self, method):
"""Sets method."""
if method == 'simplex' or method == 'interior-point':
self._method = method
else:
raise ValueError("method must be either 'simplex' or 'interior-point'!")
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, _ = optimization_problem.op_arguments()
# output
optimization_results = OptimizationResults()
output = linprog(c.ravel(), method=self.method, A_ub=A, b_ub=b, A_eq=Aeq, b_eq=beq,
bounds=np.hstack((x_min, x_max)), options={'maxiter': self.n_max})
optimization_results.x = output.x.reshape(-1, 1) if isinstance(output.x, np.ndarray) else output.x
optimization_results.fx = output.fun
optimization_results.message = output.message
optimization_results.n_iterations = output.nit
return optimization_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
raise ValueError('Not yet implemented multiobjective linear programming.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.')
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/linear_programming/scipy_base_linear.py
|
scipy_base_linear.py
|
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from scipy.optimize import linprog
import numpy as np
class ScipyBaseLinear(BaseAlgorithms):
"""Base scipy linear method.
"""
# parameters
_method = None
def __init__(self, method=None, n_max=None):
"""Constructor.
Args:
method: 'simplex' or 'interior-point'.
n_max: maximum number of iterations.
"""
if n_max is not None:
self.n_max = n_max
if method is not None:
self.method = method
# get
@property
def method(self):
"""Gets method."""
return self._method
# sets
@method.setter
def method(self, method):
"""Sets method."""
if method == 'simplex' or method == 'interior-point':
self._method = method
else:
raise ValueError("method must be either 'simplex' or 'interior-point'!")
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool,
n_step: int) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, _ = optimization_problem.op_arguments()
# output
optimization_results = OptimizationResults()
output = linprog(c.ravel(), method=self.method, A_ub=A, b_ub=b, A_eq=Aeq, b_eq=beq,
bounds=np.hstack((x_min, x_max)), options={'maxiter': self.n_max})
optimization_results.x = output.x.reshape(-1, 1) if isinstance(output.x, np.ndarray) else output.x
optimization_results.fx = output.fun
optimization_results.message = output.message
optimization_results.n_iterations = output.nit
return optimization_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
raise ValueError('Not yet implemented multiobjective linear programming.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.')
| 0.944382 | 0.388241 |
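# Usage sketch (hypothetical problem data; building linear constraints through
# GenericProblem is assumed from its use elsewhere in this corpus):
# minimize x1 + 2*x2 subject to x1 + x2 >= 1 and 0 <= xi <= 10.
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import LinearFunction
from science_optimization.algorithms.linear_programming import ScipyBaseLinear  # assumed re-export

f = [LinearFunction(c=np.array([[1.0], [2.0]]))]             # objective c'x
g = [LinearFunction(c=np.array([[-1.0], [-1.0]]), d=1.0)]    # -x1 - x2 + 1 <= 0
x_bounds = np.array([[0.0, 10.0], [0.0, 10.0]])
op = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=g, x_bounds=x_bounds))
results = ScipyBaseLinear(method='simplex').optimize(op, debug=False, n_step=0)
print(results.x, results.fx)                                 # expect x close to [1, 0]'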
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from ortools.linear_solver import pywraplp
import numpy as np
class Glop(BaseAlgorithms):
"""Interface to Google GLOP solver (https://developers.google.com/optimization/install/)."""
# parameters
_t_max = None
def __init__(self, t_max: float=5):
"""Constructor of glop optimization solver.
Args:
t_max: (float) time limit in seconds.
"""
self.t_max = t_max
# get
@property
def t_max(self):
"""Gets method."""
return self._t_max
# sets
@t_max.setter
def t_max(self, t_max):
"""Sets method."""
self._t_max = int(t_max/1e3)
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool = False,
n_step: int = 0) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, x_type = optimization_problem.op_arguments()
# instantiate solver object
if 'd' in x_type:
problem_type = 'MIP'
problem_solver = pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING
else:
problem_type = 'LP'
problem_solver = pywraplp.Solver.GLOP_LINEAR_PROGRAMMING
solver = pywraplp.Solver(problem_type, problem_solver)
# create variables
n = x_min.shape[0]
x = []
for i in range(n):
if x_type[i] == 'c':
x.append(solver.NumVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
elif x_type[i] == 'd':
x.append(solver.IntVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
else:
raise ValueError("Variable type must be either 'c' or 'd'.")
# create inequality constraints (A*x <= b)
mi = A.shape[0]
ic = [[]] * mi
for i in range(mi):
ic[i] = solver.Constraint(-solver.infinity(), float(b[i, 0]))
for j in range(n):
ic[i].SetCoefficient(x[j], float(A[i, j]))
# create equality constraints (Aeq*x = beq)
me = Aeq.shape[0] if Aeq is not None else 0
ec = [[]] * me
for i in range(me):
ec[i] = solver.Constraint(float(beq[i, 0]), float(beq[i, 0]))
for j in range(n):
ec[i].SetCoefficient(x[j], float(Aeq[i, j]))
# set objective function
objective = solver.Objective()
for i in range(n):
objective.SetCoefficient(x[i], float(c[0, i]))
objective.SetMinimization()
# set time limit
solver.SetTimeLimit(self.t_max)
# solver
solver.Solve()
# output
op_results = OptimizationResults()
xb = np.zeros((n, 1))
for i in range(n):
xb[i, 0] = x[i].solution_value()
op_results.x = xb
op_results.fx = np.array([solver.Objective().Value()])
return op_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
            op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
raise ValueError('Not yet implemented multiobjective linear programming.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.')
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/algorithms/linear_programming/glop.py
|
glop.py
|
from science_optimization.algorithms import BaseAlgorithms
from science_optimization.solvers import OptimizationResults
from science_optimization.function import LinearFunction
from science_optimization.builder import OptimizationProblem
from ortools.linear_solver import pywraplp
import numpy as np
class Glop(BaseAlgorithms):
"""Interface to Google GLOP solver (https://developers.google.com/optimization/install/)."""
# parameters
_t_max = None
def __init__(self, t_max: float=5):
"""Constructor of glop optimization solver.
Args:
t_max: (float) time limit in seconds.
"""
self.t_max = t_max
# get
@property
def t_max(self):
"""Gets method."""
return self._t_max
# sets
@t_max.setter
def t_max(self, t_max):
"""Sets method."""
self._t_max = int(t_max/1e3)
# optimize method
def optimize(self,
optimization_problem: OptimizationProblem,
debug: bool = False,
n_step: int = 0) -> OptimizationResults:
"""Optimization core.
Args:
optimization_problem: (OptimizationProblem) an optimization problem.
debug : (bool) debug option indicator.
n_step : (int) iterations steps to store optimization results.
Returns:
optimization_results: (OptimizationResults) optimization results.
"""
# optimization problem check
self.input(optimization_problem)
# get input arguments
_, _, c, d, _, _, A, b, Aeq, beq, x_min, x_max, x_type = optimization_problem.op_arguments()
# instantiate solver object
if 'd' in x_type:
problem_type = 'MIP'
problem_solver = pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING
else:
problem_type = 'LP'
problem_solver = pywraplp.Solver.GLOP_LINEAR_PROGRAMMING
solver = pywraplp.Solver(problem_type, problem_solver)
# create variables
n = x_min.shape[0]
x = []
for i in range(n):
if x_type[i] == 'c':
x.append(solver.NumVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
elif x_type[i] == 'd':
x.append(solver.IntVar(float(x_min[i, 0]), float(x_max[i, 0]), "x_"+str(i)))
else:
raise ValueError("Variable type must be either 'c' or 'd'.")
# create inequality constraints (A*x <= b)
mi = A.shape[0]
ic = [[]] * mi
for i in range(mi):
ic[i] = solver.Constraint(-solver.infinity(), float(b[i, 0]))
for j in range(n):
ic[i].SetCoefficient(x[j], float(A[i, j]))
# create equality constraints (Aeq*x = beq)
me = Aeq.shape[0] if Aeq is not None else 0
ec = [[]] * me
for i in range(me):
ec[i] = solver.Constraint(float(beq[i, 0]), float(beq[i, 0]))
for j in range(n):
ec[i].SetCoefficient(x[j], float(Aeq[i, j]))
# set objective function
objective = solver.Objective()
for i in range(n):
objective.SetCoefficient(x[i], float(c[0, i]))
objective.SetMinimization()
# set time limit
solver.SetTimeLimit(self.t_max)
# solver
solver.Solve()
# output
op_results = OptimizationResults()
xb = np.zeros((n, 1))
for i in range(n):
xb[i, 0] = x[i].solution_value()
op_results.x = xb
op_results.fx = np.array([solver.Objective().Value()])
return op_results
@staticmethod
def input(op: OptimizationProblem):
"""Optimization problem input.
Args:
            op: (OptimizationProblem) an optimization problem instance.
"""
# number of functions test
if op.objective.objectives.n_functions > 1:
raise ValueError('Not yet implemented multiobjective linear programming.')
# linear objective function test
if not isinstance(op.objective.objectives.functions[0], LinearFunction):
raise ValueError('Objective function must be linear!')
if op.nonlinear_functions_indices(op.constraints.inequality_constraints.functions) \
or op.nonlinear_functions_indices(op.constraints.equality_constraints.functions):
raise ValueError('Constraints must be linear.')
| 0.932176 | 0.500854 |
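# Usage sketch for the GLOP interface, reusing the same hypothetical LP and the
# same GenericProblem assumptions as the ScipyBaseLinear example above.
import numpy as np
from science_optimization.builder import OptimizationProblem
from science_optimization.problems import GenericProblem
from science_optimization.function import LinearFunction
from science_optimization.algorithms.linear_programming import Glop  # assumed re-export

f = [LinearFunction(c=np.array([[1.0], [2.0]]))]
g = [LinearFunction(c=np.array([[-1.0], [-1.0]]), d=1.0)]    # -x1 - x2 + 1 <= 0
x_bounds = np.array([[0.0, 10.0], [0.0, 10.0]])
op = OptimizationProblem(builder=GenericProblem(f=f, eq_cons=[], ineq_cons=g, x_bounds=x_bounds))
results = Glop(t_max=10).optimize(op)                        # 10-second time limit
print(results.x, results.fx)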
import numpy as np
from .base_function import BaseFunction
class PolynomialFunction(BaseFunction):
"""
Class that implements a polynomial function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, exponents, coefficients):
"""The constructor for the polynomial function instance.
Args:
exponents: A matrix with the exponents of the function in order of the variables
for each element of the function
coefficients: A vector with the coefficients of each element of the function
Example:
For the function ax² + bxy + cy²:
exponents : [[2,0],[1,1],[0,2]]
coefficients : [a, b, c]
"""
# parameters check
self.numpy_check(exponents, coefficients)
self.parameters = {'e': exponents,
'c': coefficients}
@staticmethod
def aux_eval(f, i, x):
return ((np.tile((x[:, i]).transpose(), (f.parameters['e'].shape[0], 1)) ** f.parameters['e']).prod(axis=1)
* f.parameters['c']).sum(axis=0)
    # computes the partial derivative df/dx_j at evaluation point i (column i of x) and stores it in dfdx[j, i]
def aux_grad_j(self, i, j, x, dfdx):
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
d = np.where(val > 0)
C[d, j] = C[d, j] - 1
dfdx[j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
    # vectorizes aux_grad_j over the variable index j for a fixed evaluation point i
def aux_grad_i(self, i, j, x, dfdx):
grad_j_vec = np.vectorize(PolynomialFunction.aux_grad_j, excluded=['self', 'i', 'x', 'dfdx'], otypes=[float])
grad_j_vec(self, i=i, j=j, x=x, dfdx=dfdx)
def dimension(self):
return len(self.parameters['e'][0])
def eval(self, x):
""" Polynomial function evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
aux: Returns a vector with the evaluation value in each point (the index of the value matches the index
of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
Returns: [a + 3b + 9c, 4a + 4b + 4c, 9a + 3b + c]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
Returns: [36]
"""
# input check
self.input_check(x)
# eval
num = x.shape[1]
fx = np.arange(start=0, stop=num, step=1)
eval_vec = np.vectorize(self.aux_eval, excluded=['f', 'x'])
fx = eval_vec(f=self, i=fx, x=x)
return fx
def gradient(self, x):
"""Polynomial gradient evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
dfdx: Returns a matrix with the gradient vector in each point (the index of the row where the gradient is
matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
Returns:[[2a + 3b, 6c + b],[4a + 2b, 4c + 2b],[6a + b, 2c + 3b]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
Returns: [3, 12, 27]
"""
# input check
self.input_check(x)
# gradient
rows, columns = x.shape
if self.parameters['c'].size <= 1:
dfdx = np.zeros((rows, columns))
else:
dfdx = np.zeros((rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
grad_i_vec = \
np.vectorize(PolynomialFunction.aux_grad_i, excluded=['self', 'j', 'x', 'dfdx'], otypes={object})
np.array(grad_i_vec(self, i=auxi, j=auxj, x=x, dfdx=dfdx))
return dfdx
def aux_hes_k(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
valj = np.copy(C[:, j])
d = np.where(valj > 0) # PolynomialFunction.indices(valj, lambda x: x > 0)
for a in d:
C[a, j] = C[a, j] - 1
valk = np.copy(C[:, k])
d = np.where(valk > 0) # PolynomialFunction.indices(valk, lambda x: x > 0)
for a in d:
C[a, k] = C[a, k] - 1
hfdx[j, k, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(
axis=1) * valj * valk * self.parameters['c']).sum(axis=0)
hfdx[k, j, i] = hfdx[j, k, i]
return hfdx
def aux_hes_j(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
val = val * (val - 1)
d = np.where(val > 1) # PolynomialFunction.indices(val, lambda x: x > 1)
for a in d:
C[a, j] = C[a, j] - 2
hfdx[j, j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
grad_hes_k = np.vectorize(PolynomialFunction.aux_hes_k, excluded=['i', 'j', 'x', 'hfdx'], otypes={object})
grad_hes_k(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def aux_hes_i(self, i, j, k, x, hfdx):
grad_hes_j = np.vectorize(PolynomialFunction.aux_hes_j, excluded=['i', 'k', 'x', 'hfdx'], otypes={object})
grad_hes_j(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def hessian(self, x):
"""Polynomial hessian evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
hfdx: Returns a vector of matrices with the hessian matrix in each point (the index of the row where
the hessian is matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
So the hessian should be : [[2a,b],[b,2c]]
Returns:[[[2a,b],[b,2c]],[[2a,b],[b,2c]],[[2a,b],[b,2c]]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
So the hessian should be : [[6x,0,0],[0,6y,0],[0,0,6z]]
Returns: [[6,0,0],[0,12,0],[0,0,18]]
"""
# input check
self.input_check(x)
# hessian
rows, columns = x.shape
if self.parameters['c'].size < rows:
hfdx = np.zeros((rows, rows, columns))
else:
hfdx = np.zeros((rows, rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
auxk = np.arange(start=0, stop=rows, step=1)
hes_i_vec = np.vectorize(PolynomialFunction.aux_hes_i, excluded=['self', 'j', 'k', 'x', 'hfdx'],
otypes={object})
np.array(hes_i_vec(self, i=auxi, j=auxj, k=auxk, x=x, hfdx=hfdx))
return hfdx.transpose()
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
            indicator: indicator if input is consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = len(self.parameters['e'][0])
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
if not all(len(e) == param_dim for e in self.parameters['e']):
raise Warning("List of exponents must have the same dimension!")
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/polynomial_function.py
|
polynomial_function.py
|
import numpy as np
from .base_function import BaseFunction
class PolynomialFunction(BaseFunction):
"""
Class that implements a polynomial function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, exponents, coefficients):
"""The constructor for the polynomial function instance.
Args:
exponents: A matrix with the exponents of the function in order of the variables
for each element of the function
coefficients: A vector with the coefficients of each element of the function
Example:
For the function ax² + bxy + cy²:
exponents : [[2,0],[1,1],[0,2]]
coefficients : [a, b, c]
"""
# parameters check
self.numpy_check(exponents, coefficients)
self.parameters = {'e': exponents,
'c': coefficients}
@staticmethod
def aux_eval(f, i, x):
return ((np.tile((x[:, i]).transpose(), (f.parameters['e'].shape[0], 1)) ** f.parameters['e']).prod(axis=1)
* f.parameters['c']).sum(axis=0)
    # computes the partial derivative df/dx_j at evaluation point i (column i of x) and stores it in dfdx[j, i]
def aux_grad_j(self, i, j, x, dfdx):
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
d = np.where(val > 0)
C[d, j] = C[d, j] - 1
dfdx[j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
    # vectorizes aux_grad_j over the variable index j for a fixed evaluation point i
def aux_grad_i(self, i, j, x, dfdx):
grad_j_vec = np.vectorize(PolynomialFunction.aux_grad_j, excluded=['self', 'i', 'x', 'dfdx'], otypes=[float])
grad_j_vec(self, i=i, j=j, x=x, dfdx=dfdx)
def dimension(self):
return len(self.parameters['e'][0])
def eval(self, x):
""" Polynomial function evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
aux: Returns a vector with the evaluation value in each point (the index of the value matches the index
of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
Returns: [a + 3b + 9c, 4a + 4b + 4c, 9a + 3b + c]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
Returns: [36]
"""
# input check
self.input_check(x)
# eval
num = x.shape[1]
fx = np.arange(start=0, stop=num, step=1)
eval_vec = np.vectorize(self.aux_eval, excluded=['f', 'x'])
fx = eval_vec(f=self, i=fx, x=x)
return fx
def gradient(self, x):
"""Polynomial gradient evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
dfdx: Returns a matrix with the gradient vector in each point (the index of the row where the gradient is
matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
Returns:[[2a + 3b, 6c + b],[4a + 2b, 4c + 2b],[6a + b, 2c + 3b]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
Returns: [3, 12, 27]
"""
# input check
self.input_check(x)
# gradient
rows, columns = x.shape
if self.parameters['c'].size <= 1:
dfdx = np.zeros((rows, columns))
else:
dfdx = np.zeros((rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
grad_i_vec = \
np.vectorize(PolynomialFunction.aux_grad_i, excluded=['self', 'j', 'x', 'dfdx'], otypes={object})
np.array(grad_i_vec(self, i=auxi, j=auxj, x=x, dfdx=dfdx))
return dfdx
def aux_hes_k(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
valj = np.copy(C[:, j])
d = np.where(valj > 0) # PolynomialFunction.indices(valj, lambda x: x > 0)
for a in d:
C[a, j] = C[a, j] - 1
valk = np.copy(C[:, k])
d = np.where(valk > 0) # PolynomialFunction.indices(valk, lambda x: x > 0)
for a in d:
C[a, k] = C[a, k] - 1
hfdx[j, k, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(
axis=1) * valj * valk * self.parameters['c']).sum(axis=0)
hfdx[k, j, i] = hfdx[j, k, i]
return hfdx
def aux_hes_j(self, i, j, k, x, hfdx):
C = np.copy(self.parameters['e'])
val = np.copy(C[:, j])
val = val * (val - 1)
d = np.where(val > 1) # PolynomialFunction.indices(val, lambda x: x > 1)
for a in d:
C[a, j] = C[a, j] - 2
hfdx[j, j, i] = ((np.tile((x[:, i]).transpose(), (self.parameters['e'].shape[0], 1)) ** C).prod(axis=1) * val *
self.parameters['c']).sum(axis=0)
grad_hes_k = np.vectorize(PolynomialFunction.aux_hes_k, excluded=['i', 'j', 'x', 'hfdx'], otypes={object})
grad_hes_k(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def aux_hes_i(self, i, j, k, x, hfdx):
grad_hes_j = np.vectorize(PolynomialFunction.aux_hes_j, excluded=['i', 'k', 'x', 'hfdx'], otypes={object})
grad_hes_j(self, i=i, j=j, k=k, x=x, hfdx=hfdx)
def hessian(self, x):
"""Polynomial hessian evaluation.
Args:
x: A matrix with the evaluation points, the structure of the matrix should have the tuples in the
columns, so each column is an evaluation point
Returns:
hfdx: Returns a vector of matrices with the hessian matrix in each point (the index of the row where
the hessian is matches the index of the column of the evaluation point)
Example:
For the function ax² + bxy + cy²:
With x = [[1,2,3],[3,2,1]]
The gradient should be : [2ax + by, 2cy + bx]
So the hessian should be : [[2a,b],[b,2c]]
Returns:[[[2a,b],[b,2c]],[[2a,b],[b,2c]],[[2a,b],[b,2c]]]
For the function x³ + y³ + z³
With x = [[1],[2],[3]]
The gradient should be : [3x²,3y²,3z²]
So the hessian should be : [[6x,0,0],[0,6y,0],[0,0,6z]]
Returns: [[6,0,0],[0,12,0],[0,0,18]]
"""
# input check
self.input_check(x)
# hessian
rows, columns = x.shape
if self.parameters['c'].size < rows:
hfdx = np.zeros((rows, rows, columns))
else:
hfdx = np.zeros((rows, rows, columns))
auxi = np.arange(start=0, stop=columns, step=1)
auxj = np.arange(start=0, stop=rows, step=1)
auxk = np.arange(start=0, stop=rows, step=1)
hes_i_vec = np.vectorize(PolynomialFunction.aux_hes_i, excluded=['self', 'j', 'k', 'x', 'hfdx'],
otypes={object})
np.array(hes_i_vec(self, i=auxi, j=auxj, k=auxk, x=x, hfdx=hfdx))
return hfdx.transpose()
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
            indicator: indicator if input is consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = len(self.parameters['e'][0])
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
if not all(len(e) == param_dim for e in self.parameters['e']):
raise Warning("List of exponents must have the same dimension!")
| 0.818845 | 0.755997 |
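# Quick numeric check of the docstring examples above (import path assumed to be
# science_optimization.function, matching the rest of this corpus).
import numpy as np
from science_optimization.function import PolynomialFunction

# f(x, y) = 1*x**2 + 2*x*y + 3*y**2
f = PolynomialFunction(exponents=np.array([[2, 0], [1, 1], [0, 2]]),
                       coefficients=np.array([1, 2, 3]))
x = np.array([[1, 2, 3],
              [3, 2, 1]])      # three evaluation points, one per column
print(f.eval(x))               # -> [34 24 18]
print(f.gradient(x))           # column i holds grad f at the i-th point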
from .base_function import BaseFunction
class GenericFunction(BaseFunction):
"""Class to convert a python function to a BaseFunction instance."""
def __init__(self, func, n, grad_func=None):
"""Constructor of a generic function.
Args:
func : (callable) instance of a python function for function evaluation
n : (int) number of function arguments
grad_func: (callable) instance of a python function for gradient evaluation
"""
# check if object is a function
if not callable(func):
raise Warning("func must be callable.")
if grad_func is not None and not callable(grad_func):
raise Warning("grad_func must be callable.")
if grad_func is not None:
self.flag_num_g = False
# set parameters
self.parameters = {'func': func,
'n': n,
'grad_func': grad_func}
def dimension(self):
return self.parameters['n']
def eval(self, x):
"""Evaluates generic function
Args:
x: (numpy array) evaluation point.
Returns:
fx: (numpy array) function evaluation at point x.
"""
# input check
self.input_check(x)
# function evaluation
f = self.parameters['func']
fx = f(x)
return fx
def gradient(self, x):
"""Gradient of generic function
Args:
x: (numpy array) evaluation point.
Returns:
            dfx: (numpy array) gradient evaluation at point x.
"""
# gradient evaluation
df = self.parameters['grad_func']
# input check
self.input_check(x)
if df is not None:
# evaluate
dfx = df(x)
# check dimension
if dfx.shape[0] != self.parameters['n']:
raise ValueError('Callable grad_func must return a {}xm array'.format(self.parameters['n']))
else:
dfx = self.numerical_gradient(x)
return dfx
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.parameters['n']:
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/generic_function.py
|
generic_function.py
|
from .base_function import BaseFunction
class GenericFunction(BaseFunction):
"""Class to convert a python function to a BaseFunction instance."""
def __init__(self, func, n, grad_func=None):
"""Constructor of a generic function.
Args:
func : (callable) instance of a python function for function evaluation
n : (int) number of function arguments
grad_func: (callable) instance of a python function for gradient evaluation
"""
# check if object is a function
if not callable(func):
raise Warning("func must be callable.")
if grad_func is not None and not callable(grad_func):
raise Warning("grad_func must be callable.")
if grad_func is not None:
self.flag_num_g = False
# set parameters
self.parameters = {'func': func,
'n': n,
'grad_func': grad_func}
def dimension(self):
return self.parameters['n']
def eval(self, x):
"""Evaluates generic function
Args:
x: (numpy array) evaluation point.
Returns:
fx: (numpy array) function evaluation at point x.
"""
# input check
self.input_check(x)
# function evaluation
f = self.parameters['func']
fx = f(x)
return fx
def gradient(self, x):
"""Gradient of generic function
Args:
x: (numpy array) evaluation point.
Returns:
            dfx: (numpy array) gradient evaluation at point x.
"""
# gradient evaluation
df = self.parameters['grad_func']
# input check
self.input_check(x)
if df is not None:
# evaluate
dfx = df(x)
# check dimension
if dfx.shape[0] != self.parameters['n']:
raise ValueError('Callable grad_func must return a {}xm array'.format(self.parameters['n']))
else:
dfx = self.numerical_gradient(x)
return dfx
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.parameters['n']:
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
| 0.91501 | 0.437944 |
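# Minimal wrap-and-evaluate sketch: the quadratic below is illustrative only, and
# the fallback gradient relies on BaseFunction.numerical_gradient as referenced above.
import numpy as np
from science_optimization.function import GenericFunction

func = GenericFunction(func=lambda x: np.sum(x ** 2, axis=0), n=2)
x = np.array([[1.0], [2.0]])   # one 2-dimensional point as a column vector
print(func.eval(x))            # -> [5.]
print(func.gradient(x))        # numerical gradient, approximately [[2.], [4.]]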
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class QuadraticFunction(BaseFunction):
"""
Class that implements a quadratic function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, Q, c, d=0):
""" Set parameters for x'Qx + c'x + d.
Args:
Q: quadratic coefficients of equations (n x n)-matrix
c: scaling n-vector coefficients of equations
d: constants of equations
"""
# parameters check
self.numpy_check(Q, c)
# set parameters
self.parameters = {'Q': Q,
'c': c,
'd': d}
def dimension(self):
return self.parameters['Q'].shape[0]
def eval(self, x):
""" Quadratic function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.sum(x*(np.dot(Q, x)), axis=0) + np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
# quadratic function gradient
dfdx = np.matlib.repmat(c, 1, x.shape[1])
dfdx = dfdx + np.dot((Q + Q.T), x)
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
# quadratic function hessian
hfdx = np.tile(Q + Q.T, (x.shape[1], 1, 1))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
            indicator: indicator if input is consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['Q'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/quadratic_function.py
|
quadratic_function.py
|
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class QuadraticFunction(BaseFunction):
"""
Class that implements a quadratic function
"""
_flag_num_g = False # this function uses analytical gradient
def __init__(self, Q, c, d=0):
""" Set parameters for x'Qx + c'x + d.
Args:
Q: quadratic coefficients of equations (n x n)-matrix
c: scaling n-vector coefficients of equations
d: constants of equations
"""
# parameters check
self.numpy_check(Q, c)
# set parameters
self.parameters = {'Q': Q,
'c': c,
'd': d}
def dimension(self):
return self.parameters['Q'].shape[0]
def eval(self, x):
""" Quadratic function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.sum(x*(np.dot(Q, x)), axis=0) + np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
c = self.parameters['c']
# quadratic function gradient
dfdx = np.matlib.repmat(c, 1, x.shape[1])
dfdx = dfdx + np.dot((Q + Q.T), x)
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
Q = self.parameters['Q']
# quadratic function hessian
hfdx = np.tile(Q + Q.T, (x.shape[1], 1, 1))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
            indicator: indicator if input is consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['Q'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
| 0.880964 | 0.632588 |
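# Worked example for f(x) = x'Qx + c'x + d (import path assumed to be
# science_optimization.function, as elsewhere in this corpus).
import numpy as np
from science_optimization.function import QuadraticFunction

f = QuadraticFunction(Q=np.eye(2), c=np.array([[1.0], [-1.0]]), d=2)
x = np.array([[1.0, 0.0],
              [2.0, 0.0]])     # two evaluation points as columns
print(f.eval(x))               # -> [[6. 2.]]
print(f.gradient(x))           # (Q + Q')x + c, one column per point
print(f.hessian(x).shape)      # -> (2, 2, 2): one 2x2 Hessian per point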
import numpy as np
from science_optimization.function import BaseFunction, LinearFunction, FunctionsComposite
class AugmentedLagrangeFunction(BaseFunction):
"""
Class that deals with the function used in the Augmented Lagrangian method
"""
eq_aux_func = None
ineq_aux_func = None
aux_rho = None
_flag_num_g = False # this function uses analytical gradient
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.dimension():
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
def eval(self, x):
"""
Args:
x:
Returns:
"""
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = 0.5 * self.rho * sum(aux_max ** 2)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = 0.5 * sum((self.aux_rho * (self.eq_aux_func * self.eq_aux_func)).eval(x=x))
else:
eq_part = 0
return self.f_obj.eval(x) + eq_part + ineq_part
def gradient(self, x):
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = self.rho * np.dot(self.g.gradient(x), aux_max)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = self.rho * np.dot(self.h.gradient(x), self.eq_aux_func.eval(x))
else:
eq_part = 0
return self.f_obj.gradient(x) + eq_part + ineq_part
def hessian(self, x):
if self.ineq_aux_func is not None:
aux_grad = self.g.gradient(x)
aux_hess = self.g.hessian(x)
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = np.zeros((self.dimension(), self.dimension()))
for i in range(self.g.n_functions):
if aux_max[i] > 0:
ineq_part += (
(aux_hess[i] * aux_max[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
ineq_part = self.rho * ineq_part
else:
ineq_part = 0
if self.eq_aux_func is not None:
aux_grad = self.h.gradient(x)
aux_hess = self.h.hessian(x)
eq_part = np.zeros((self.dimension(), self.dimension()))
# TODO (Feres) tirar o for
for i in range(self.h.n_functions):
eq_part += (
(aux_hess[i] * self.eq_aux_func.eval(x)[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
eq_part = self.rho * eq_part
else:
eq_part = 0
return self.f_obj.hessian(x) + eq_part + ineq_part
def dimension(self):
return self.f_obj.dimension()
def __init__(self, f_obj, g, h, rho, c):
"""
Initialize functions and multipliers properly
Args:
f_obj: (FunctionsComposite) objective function
g: (FunctionsComposite) inequality constraints
            h: (FunctionsComposite) equality constraints
rho: (float) initial rho value (penalty parameter)
c: (float) constant used to update rho value
"""
self.f_obj = f_obj
self.g = g
self.h = h
self.lag_eq = np.zeros((h.n_functions, 1)) # lagrangian multipliers (equality constraints)
        self.lag_ineq = np.zeros((g.n_functions, 1))  # lagrangian multipliers (inequality constraints)
self.rho = rho
self.c = c
self.update_aux_functions()
def update_aux_functions(self):
"""
        Uses the current multipliers and rho value to update the auxiliary functions used to evaluate the function
Returns:
"""
self.aux_rho = LinearFunction(c=np.zeros((self.dimension(), 1)), d=self.rho)
aux_lag_eq = FunctionsComposite()
for aux in self.lag_eq:
aux_lag_eq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
aux_lag_ineq = FunctionsComposite()
for aux in self.lag_ineq:
aux_lag_ineq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
if self.h.n_functions > 0:
self.eq_aux_func = (self.h + aux_lag_eq / self.aux_rho)
if self.g.n_functions > 0:
self.ineq_aux_func = (self.g + aux_lag_ineq / self.aux_rho)
def update_multipliers(self, x_new):
"""
Uses current point to update lagrange multipliers properly
Args:
x_new: (np array) new point found by the unconstrained optimization
Returns:
"""
h_val = self.h.eval(x_new)
self.lag_eq = self.lag_eq + self.rho * h_val
g_val = self.g.eval(x_new)
self.lag_ineq = self.lag_ineq + self.rho * g_val
self.lag_ineq[self.lag_ineq < 0] = 0
# TODO (Feres) adicionar condicional aqui
self.rho = self.c * self.rho
self.update_aux_functions()
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/lagrange_function.py
|
lagrange_function.py
|
import numpy as np
from science_optimization.function import BaseFunction, LinearFunction, FunctionsComposite
class AugmentedLagrangeFunction(BaseFunction):
"""
Class that deals with the function used in the Augmented Lagrangian method
"""
eq_aux_func = None
ineq_aux_func = None
aux_rho = None
_flag_num_g = False # this function uses analytical gradient
def input_check(self, x):
"""Check input dimension.
Args:
x: (numpy array) point to be evaluated.
"""
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.dimension():
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
def eval(self, x):
"""
Args:
x:
Returns:
"""
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = 0.5 * self.rho * sum(aux_max ** 2)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = 0.5 * sum((self.aux_rho * (self.eq_aux_func * self.eq_aux_func)).eval(x=x))
else:
eq_part = 0
return self.f_obj.eval(x) + eq_part + ineq_part
def gradient(self, x):
if self.ineq_aux_func is not None:
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = self.rho * np.dot(self.g.gradient(x), aux_max)
else:
ineq_part = 0
if self.eq_aux_func is not None:
eq_part = self.rho * np.dot(self.h.gradient(x), self.eq_aux_func.eval(x))
else:
eq_part = 0
return self.f_obj.gradient(x) + eq_part + ineq_part
def hessian(self, x):
if self.ineq_aux_func is not None:
aux_grad = self.g.gradient(x)
aux_hess = self.g.hessian(x)
aux_max = self.ineq_aux_func.eval(x=x)
aux_max[aux_max < 0] = 0
ineq_part = np.zeros((self.dimension(), self.dimension()))
for i in range(self.g.n_functions):
if aux_max[i] > 0:
ineq_part += (
(aux_hess[i] * aux_max[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
ineq_part = self.rho * ineq_part
else:
ineq_part = 0
if self.eq_aux_func is not None:
aux_grad = self.h.gradient(x)
aux_hess = self.h.hessian(x)
eq_part = np.zeros((self.dimension(), self.dimension()))
# TODO (Feres) tirar o for
for i in range(self.h.n_functions):
eq_part += (
(aux_hess[i] * self.eq_aux_func.eval(x)[i]) +
np.dot(aux_grad[0], aux_grad[0].transpose())
)
eq_part = self.rho * eq_part
else:
eq_part = 0
return self.f_obj.hessian(x) + eq_part + ineq_part
def dimension(self):
return self.f_obj.dimension()
def __init__(self, f_obj, g, h, rho, c):
"""
Initialize functions and multipliers properly
Args:
f_obj: (FunctionsComposite) objective function
g: (FunctionsComposite) inequality constraints
            h: (FunctionsComposite) equality constraints
rho: (float) initial rho value (penalty parameter)
c: (float) constant used to update rho value
"""
self.f_obj = f_obj
self.g = g
self.h = h
self.lag_eq = np.zeros((h.n_functions, 1)) # lagrangian multipliers (equality constraints)
        self.lag_ineq = np.zeros((g.n_functions, 1))  # lagrangian multipliers (inequality constraints)
self.rho = rho
self.c = c
self.update_aux_functions()
def update_aux_functions(self):
"""
        Uses the current multipliers and rho value to update the auxiliary functions used to evaluate the function
Returns:
"""
self.aux_rho = LinearFunction(c=np.zeros((self.dimension(), 1)), d=self.rho)
aux_lag_eq = FunctionsComposite()
for aux in self.lag_eq:
aux_lag_eq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
aux_lag_ineq = FunctionsComposite()
for aux in self.lag_ineq:
aux_lag_ineq.add(LinearFunction(
c=np.zeros((self.dimension(), 1)), d=aux
))
if self.h.n_functions > 0:
self.eq_aux_func = (self.h + aux_lag_eq / self.aux_rho)
if self.g.n_functions > 0:
self.ineq_aux_func = (self.g + aux_lag_ineq / self.aux_rho)
def update_multipliers(self, x_new):
"""
Uses current point to update lagrange multipliers properly
Args:
x_new: (np array) new point found by the unconstrained optimization
Returns:
"""
h_val = self.h.eval(x_new)
self.lag_eq = self.lag_eq + self.rho * h_val
g_val = self.g.eval(x_new)
self.lag_ineq = self.lag_ineq + self.rho * g_val
self.lag_ineq[self.lag_ineq < 0] = 0
# TODO (Feres) adicionar condicional aqui
self.rho = self.c * self.rho
self.update_aux_functions()
| 0.758421 | 0.443721 |
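# Sketch of one outer step of the augmented Lagrangian machinery (hypothetical
# problem data; the FunctionsComposite arithmetic used here is the same as in
# update_aux_functions above, and the import of AugmentedLagrangeFunction from
# science_optimization.function is an assumption):
# minimize x1^2 + x2^2 subject to x1 + x2 - 1 = 0 and x1 - 0.8 <= 0.
import numpy as np
from science_optimization.function import FunctionsComposite, QuadraticFunction, LinearFunction
from science_optimization.function import AugmentedLagrangeFunction  # assumed re-export

f_obj = FunctionsComposite()
f_obj.add(QuadraticFunction(Q=np.eye(2), c=np.zeros((2, 1))))    # x1^2 + x2^2
h = FunctionsComposite()
h.add(LinearFunction(c=np.ones((2, 1)), d=-1.0))                 # x1 + x2 - 1 = 0
g = FunctionsComposite()
g.add(LinearFunction(c=np.array([[1.0], [0.0]]), d=-0.8))        # x1 - 0.8 <= 0
L = AugmentedLagrangeFunction(f_obj=f_obj, g=g, h=h, rho=1.0, c=2.0)
x = np.array([[0.2], [0.3]])
print(L.eval(x))              # objective plus equality/inequality penalty terms
L.update_multipliers(x)       # multiplier update and rho <- c*rho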
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class LinearFunction(BaseFunction):
"""
Class that implements a linear function
"""
_flag_num_g = False # this function uses analytical gradient
def parameter_check(self, c: np.ndarray, d):
# checking c parameter
self.numpy_check(c)
if len(c.shape) != 2 or c.shape[1] != 1:
raise Exception("Invalid format for 'c' parameter")
# checking d parameter
try:
int(d)
except (ValueError, TypeError):
raise Exception("'d' parameter must be a valid number")
def __init__(self, c, d=0):
""" Linear Function constructor: c'x + d.
Args:
c: scaling n-vector coefficients of equations
d: constants of equations
"""
self.parameter_check(c, d)
# set parameters
self.parameters = {'c': c,
'd': d}
def dimension(self):
"""Linear problem dimension."""
return self.parameters['c'].shape[0]
def eval(self, x):
""" Linear function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
# linear function gradient
dim = x.shape
if len(dim) == 1:
dfdx = c
else:
dfdx = np.matlib.repmat(c, 1, dim[1])
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# linear function hessian
dim = x.shape
input_dimension = dim[0]
if len(dim) == 1:
input_number = 1
else:
input_number = dim[1]
hfdx = np.zeros((input_number, input_dimension, input_dimension))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
            indicator: indicator if input is consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['c'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/function/linear_function.py
|
linear_function.py
|
import numpy as np
import numpy.matlib
from .base_function import BaseFunction
class LinearFunction(BaseFunction):
"""
Class that implements a linear function
"""
_flag_num_g = False # this function uses analytical gradient
def parameter_check(self, c: np.ndarray, d):
# checking c parameter
self.numpy_check(c)
if len(c.shape) != 2 or c.shape[1] != 1:
raise Exception("Invalid format for 'c' parameter")
# checking d parameter
try:
int(d)
except (ValueError, TypeError):
raise Exception("'d' parameter must be a valid number")
def __init__(self, c, d=0):
""" Linear Function constructor: c'x + d.
Args:
c: scaling n-vector coefficients of equations
d: constants of equations
"""
self.parameter_check(c, d)
# set parameters
self.parameters = {'c': c,
'd': d}
def dimension(self):
"""Linear problem dimension."""
return self.parameters['c'].shape[0]
def eval(self, x):
""" Linear function evaluation.
Args:
x: evaluation point
Returns:
fx: evaluates the point value in the function
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
d = self.parameters['d']
# evaluates the point
fx = np.dot(c.T, x) + d
return fx
def gradient(self, x):
"""Derivative relative to input.
Args:
x: evaluation point
Returns:
dfdx: derivative at evaluation points
"""
# input check
self.input_check(x)
# define parameters
c = self.parameters['c']
# linear function gradient
dim = x.shape
if len(dim) == 1:
dfdx = c
else:
dfdx = np.matlib.repmat(c, 1, dim[1])
return dfdx
def hessian(self, x):
"""Second derivative relative to input.
Args:
x: evaluation point
Returns:
hfdx: second derivative at evaluation points
"""
# input check
self.input_check(x)
# linear function hessian
dim = x.shape
input_dimension = dim[0]
if len(dim) == 1:
input_number = 1
else:
input_number = dim[1]
hfdx = np.zeros((input_number, input_dimension, input_dimension))
return hfdx
def input_check(self, x):
"""Check input dimension.
Args:
x: point to be evaluated.
Returns:
indicator: indicator if input os consistent
"""
# check if input is numpy
self.numpy_check(x)
# check dimension
x_dim = x.shape
param_dim = self.parameters['c'].shape[0]
if len(x_dim) == 1:
raise Warning("x must be a {}xm (m>0) array!".format(param_dim))
if not x_dim[0] == param_dim:
raise Warning("x must be a {}xm array!".format(param_dim))
| 0.861188 | 0.573081 |
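# Tiny worked example for f(x) = c'x + d (import confirmed by its use elsewhere
# in this corpus).
import numpy as np
from science_optimization.function import LinearFunction

f = LinearFunction(c=np.array([[2.0], [3.0]]), d=1)
x = np.array([[1.0, 0.0],
              [1.0, 2.0]])     # two evaluation points as columns
print(f.eval(x))               # -> [[6. 7.]]
print(f.gradient(x))           # c repeated once per point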
import numpy as np
from science_optimization.builder import BuilderOptimizationProblem, Objective, Variable, Constraint
from science_optimization.function import BaseFunction, FunctionsComposite
class RosenSuzukiProblem(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds the Rosen-Suzuki problem.
"""
def build_objectives(self):
obj_fun = FunctionsComposite()
obj_fun.add(RosenSuzukiFunction(self.n, self.Q0, self.c))
objective = Objective(objective=obj_fun)
return objective
def build_variables(self):
variables = Variable(x_min=self.x_min, x_max=self.x_max)
return variables
def build_constraints(self):
constraints = Constraint(eq_cons=FunctionsComposite(), ineq_cons=RosenSuzukiConstraints(self.n, self.b))
return constraints
def __init__(self, n):
"""
Constructor of Rosen-Suzuki optimization problem.
Args:
n: desired dimension
"""
# Step 1
self.n = n
x_star = []
u_star = []
for i in range(1, self.n):
x_star.append((-1) ** i)
u_star.append((-1) ** i + 1)
x_star.append((-1) ** self.n)
self.x_star = np.array(x_star).reshape((-1, 1))
self.u_star = np.array(u_star).reshape((-1, 1))
self.x_min = np.ones((self.n, 1)) * (-5)
self.x_max = np.ones((self.n, 1)) * 5
# Step 2
mdg = []
b = []
for j in range(1, self.n):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
g_now = np.dot(
np.dot(self.x_star.transpose(), Q), self.x_star
) + np.dot(a.transpose(), self.x_star)
mdg.append(2*np.dot(Q, self.x_star) + a)
if self.u_star[j-1] > 0:
b.append(-g_now)
else:
b.append(-g_now - 1)
self.b = np.array(b).reshape((-1, 1))
mdg = np.array(mdg).transpose()[0]
# Step 3
v = []
for i in range(1, self.n + 1):
v.append(2 - (-1) ** i)
v = np.array(v).reshape((-1, 1))
self.Q0 = np.diag(v.transpose()[0])
df = 2 * np.dot(self.Q0, self.x_star)
self.c = -df - np.dot(mdg, self.u_star)
class RosenSuzukiFunction(BaseFunction):
"""
Rosen-Suzuki objective function
"""
n = None
def __init__(self, n, Q0, c):
# Step 1
self.n = n
self.Q0 = Q0
self.c = c
def dimension(self):
return self.n
def eval(self, x: np.ndarray):
self.input_check(x)
return np.dot(np.dot(x.transpose(), self.Q0), x) + np.dot(self.c.transpose(), x)
def gradient(self, x: np.ndarray):
self.input_check(x)
return 2 * np.dot(self.Q0, x) + self.c
def input_check(self, x):
# check if input is numpy
self.numpy_check(x)
if not x.shape[0] == self.dimension():
raise Warning("Point x must have {} dimensions.".format(self.parameters['n']))
class RosenSuzukiConstraints(FunctionsComposite):
"""
Rosen-Suzuki constraints
"""
def __init__(self, n, b):
super().__init__()
self.n = n
self.n_functions = n-1
self.b = b
def dimension(self):
return self.n
def eval(self, x, idx=None, composition="parallel", weights=None):
# input check
idx, composition, weights, n_functions = self.input_check(idx=idx,
composition=composition,
weights=weights)
g = []
# evaluate
for j in range(1, self.n_functions+1):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
g.append(
np.dot(np.dot(x.transpose(), Q), x)[0] + np.dot(a.transpose(), x)[0] + self.b[j-1]
)
g_return = np.array(g).reshape((-1, 1))
# series composition
if composition == "series":
g_return = np.dot(weights, g_return)
return g_return
def gradient(self, x, idx=None, composition="parallel", weights=None):
# input check
idx, composition, weights, n_functions = self.input_check(idx=idx,
composition=composition,
weights=weights)
mdg = []
# evaluate
for j in range(1, self.n_functions+1):
v = []
a = []
for i in range(1, self.n+1):
v.append(2 - (-1) ** (i + j))
a.append(1 + (-1) ** j + (-1) ** i)
a = np.array(a).reshape((-1, 1))
v = np.array(v).reshape((-1, 1))
Q = np.diag(v.transpose()[0])
mdg.append(2 * np.dot(Q, x) + a)
j_matrix = np.array(mdg).transpose()[0] # jacobian (gradient of each constraint)
# series composition
if composition == "series":
j_matrix = np.dot(weights, j_matrix)
return j_matrix
def hessian(self, x, idx=None, composition="parallel", weights=None):
        # TODO (Feres) implement the analytical Hessian
        raise NotImplementedError('Analytical Hessian not implemented for the Rosen-Suzuki constraints.')
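# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Build the n=4 Rosen-Suzuki problem and evaluate the quadratic objective at the
# known optimum x_star stored by the builder. Only classes defined in this file
# are used; how the builder is consumed by the rest of the package is not shown here.
if __name__ == "__main__":
    problem = RosenSuzukiProblem(n=4)
    f = RosenSuzukiFunction(problem.n, problem.Q0, problem.c)
    x = problem.x_star                     # (4, 1) column vector
    print("f(x*) =", f.eval(x))            # objective value at the optimum
    print("grad f(x*) =", f.gradient(x))   # (4, 1) gradient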
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/rosen_suzuki.py
|
rosen_suzuki.py
|
| 0.760517 | 0.523177 |
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite, LinearFunction
import numpy as np
from typing import List
class MIP(BuilderOptimizationProblem):
"""This class builds a mixed integer linear problem."""
# objective function(s)
_c = None
# inequality constraint matrix
_A = None
# inequality constraint vector
_b = None
# the variables' bounds
_x_bounds = None
# variables' type
_x_type = None
# equality constraint matrix
_Aeq = None
# equality constraint vector
_beq = None
def __init__(self,
c: np.ndarray,
A: np.ndarray,
b: np.ndarray,
x_bounds: np.ndarray=None,
x_type: List[str]=None,
Aeq: np.ndarray=None,
beq: np.ndarray=None):
"""Constructor of a generic mixed-integer linear problem.
min c' @ x
st. A @ x <= b
Aeq @ x == beq
x_min <= x <= x_max
Args:
            c : (np.ndarray) (n x 1)-objective function coefficients (or (m x n) for m linear objectives).
A : (np.ndarray) (m1 x n)-inequality linear constraints matrix.
b : (np.ndarray) (m1 x 1)-inequality linear constraints bounds.
x_bounds: (np.ndarray) (n x 2)-lower bound and upper bounds.
x_type : (List[str]) variables' types ('c' or 'd').
Aeq : (m2 x n)-equality linear constraints matrix.
beq : (m2 x 1)-equality linear constraints bounds.
"""
# set parameters
self.c = c
self.A = A
self.b = b
self.x_bounds = x_bounds
self.x_type = x_type
self.Aeq = Aeq
self.beq = beq
# getters
@property
def c(self):
return self._c
@property
def A(self):
return self._A
@property
def b(self):
return self._b
@property
def Aeq(self):
return self._Aeq
@property
def beq(self):
return self._beq
@property
def x_bounds(self):
return self._x_bounds
@property
def x_type(self):
return self._x_type
# setters
@c.setter
def c(self, value):
self._c = value
@A.setter
def A(self, value):
self._A = value
@b.setter
def b(self, value):
self._b = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
@x_type.setter
def x_type(self, value):
self._x_type = value
@Aeq.setter
def Aeq(self, value):
self._Aeq = value
@beq.setter
def beq(self, value):
self._beq = value
def build_objectives(self):
# cardinalities
m, n = self.c.shape
# composition
obj_fun = FunctionsComposite()
# mono-objective problem
if (m > 1 and n == 1) or (m == 1 and n > 1):
# add to function composition
obj_fun.add(LinearFunction(c=self.c.reshape(-1, 1)))
elif m >= 1 and n >= 1:
for i in range(m):
# add to function composition
obj_fun.add(LinearFunction(c=self.c[i, :].reshape(-1, 1)))
else:
raise ValueError("({}x{})-array not supported!".format(m, n))
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
# cardinalities
mi = self.A.shape[0]
me = self.Aeq.shape[0] if self.Aeq is not None else 0
# create object
ineq_cons = FunctionsComposite()
eq_cons = FunctionsComposite()
# add linear inequality functions
for i in range(mi):
ineq_cons.add(LinearFunction(c=self.A[i, :].reshape(-1, 1), d=-self.b[i, 0]))
# add linear equality functions
for i in range(me):
eq_cons.add(LinearFunction(c=self.Aeq[i, :].reshape(-1, 1), d=-self.beq[i, 0]))
# set constraints
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
# default unbounded variables
if self.x_bounds is None:
self.x_bounds = np.ones((self.c.shape[0], 2))
self.x_bounds[:, 0] = -np.inf
self.x_bounds[:, 1] = np.inf
# create variables
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1),
x_type=self.x_type)
return variables
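# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Build a small 2-variable problem
#     min  x1 + 2*x2
#     s.t. x1 + x2 <= 4,   0 <= x <= 10,   x2 integer ('d').
# Only the builder itself is exercised; passing it to a solver is not shown here.
if __name__ == "__main__":
    c = np.array([[1.0], [2.0]])
    A = np.array([[1.0, 1.0]])
    b = np.array([[4.0]])
    x_bounds = np.array([[0.0, 10.0], [0.0, 10.0]])
    builder = MIP(c=c, A=A, b=b, x_bounds=x_bounds, x_type=['c', 'd'])
    objective = builder.build_objectives()     # one LinearFunction wrapped in a composition
    constraints = builder.build_constraints()  # one linear inequality, no equalities
    variables = builder.build_variables()      # bounds and types wrapped in a Variable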
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/mip.py
|
mip.py
|
| 0.95202 | 0.64058 |
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite
class SeparableResourceAllocation(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds a dual decomposition optimization problem.
"""
# objective function(s)
_f_i = None
# equality constraint function(s)
_coupling_eq_constraints = None
# inequality constraint function(s)
_coupling_ineq_constraints = None
# the variables' bounds
_x_bounds = None
def __init__(self, f_i, coupling_eq_constraints, coupling_ineq_constraints, x_bounds):
"""Constructor of a Dual Decomposition problem builder.
Args:
f_i : Objective functions composition with i individual functions.
coupling_eq_constraints : Composition with functions in equality coupling.
coupling_ineq_constraints: Composition with functions in inequality coupling.
x_bounds : Lower bound and upper bounds.
"""
self.f_i = f_i
self.coupling_eq_constraints = coupling_eq_constraints
self.coupling_ineq_constraints = coupling_ineq_constraints
self.x_bounds = x_bounds
# gets
@property
def f_i(self):
return self._f_i
@property
def coupling_eq_constraints(self):
return self._coupling_eq_constraints
@property
def coupling_ineq_constraints(self):
return self._coupling_ineq_constraints
@property
def x_bounds(self):
return self._x_bounds
@f_i.setter
def f_i(self, value):
self._f_i = value
# sets
@coupling_eq_constraints.setter
def coupling_eq_constraints(self, value):
self._coupling_eq_constraints = value
@coupling_ineq_constraints.setter
def coupling_ineq_constraints(self, value):
self._coupling_ineq_constraints = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
# methods
def build_objectives(self):
# instantiate composition
obj_fun = FunctionsComposite()
for f in self.f_i:
obj_fun.add(f)
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
# instantiate composition
eq_cons = FunctionsComposite()
ineq_cons = FunctionsComposite()
for eq_g in self.coupling_eq_constraints:
eq_cons.add(eq_g)
for ineq_g in self.coupling_ineq_constraints:
ineq_cons.add(ineq_g)
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
# variables
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1))
return variables
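# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Two agents with individual linear costs share one coupling inequality
# x1 + x2 - 10 <= 0 and box bounds 0 <= x_i <= 8. LinearFunction is assumed to be
# importable as in the other builders of this package (c and d keyword arguments).
if __name__ == "__main__":
    import numpy as np
    from science_optimization.function import LinearFunction

    f_i = [LinearFunction(c=np.array([[2.0], [0.0]])),
           LinearFunction(c=np.array([[0.0], [3.0]]))]
    coupling_ineq = [LinearFunction(c=np.array([[1.0], [1.0]]), d=-10)]
    x_bounds = np.array([[0.0, 8.0], [0.0, 8.0]])

    builder = SeparableResourceAllocation(f_i=f_i,
                                          coupling_eq_constraints=[],
                                          coupling_ineq_constraints=coupling_ineq,
                                          x_bounds=x_bounds)
    constraints = builder.build_constraints()  # only the inequality composition is populated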
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/separable_resource_allocation.py
|
separable_resource_allocation.py
|
| 0.953416 | 0.49823 |
from science_optimization.builder import BuilderOptimizationProblem
from science_optimization.builder import Objective
from science_optimization.builder import Variable
from science_optimization.builder import Constraint
from science_optimization.function import FunctionsComposite
class GenericProblem(BuilderOptimizationProblem):
"""Concrete builder implementation.
This class builds a generic optimization problem.
"""
# objective function(s)
_f = None
# equality constraint function(s)
_eq_cons = None
# inequality constraint function(s)
_ineq_cons = None
# the variables' bounds
_x_bounds = None
def __init__(self, f, eq_cons, ineq_cons, x_bounds, x_type=None):
"""Constructor of a generic optimization problem.
Args:
f : Objective functions.
eq_cons : Equality constraint functions.
ineq_cons: Inequality constraint functions.
x_bounds : Lower bound and upper bounds.
x_type: (np.ndarray) (n x 1)-list with variables' type ('c': continuous or 'd': discrete).
"""
self.f = f
self.eq_cons = eq_cons
self.ineq_cons = ineq_cons
self.x_bounds = x_bounds
self.x_type = x_type
@property
def f(self):
return self._f
@property
def eq_cons(self):
return self._eq_cons
@property
def ineq_cons(self):
return self._ineq_cons
@property
def x_bounds(self):
return self._x_bounds
@f.setter
def f(self, value):
self._f = value
@eq_cons.setter
def eq_cons(self, value):
self._eq_cons = value
@ineq_cons.setter
def ineq_cons(self, value):
self._ineq_cons = value
@x_bounds.setter
def x_bounds(self, value):
self._x_bounds = value
def build_objectives(self):
obj_fun = FunctionsComposite()
for f in self.f:
obj_fun.add(f)
objective = Objective(objective=obj_fun)
return objective
def build_constraints(self):
eq_cons = FunctionsComposite()
ineq_cons = FunctionsComposite()
for eq_g in self.eq_cons:
eq_cons.add(eq_g)
for ineq_g in self.ineq_cons:
ineq_cons.add(ineq_g)
constraints = Constraint(eq_cons=eq_cons, ineq_cons=ineq_cons)
return constraints
def build_variables(self):
variables = Variable(x_min=self.x_bounds[:, 0].reshape(-1, 1),
x_max=self.x_bounds[:, 1].reshape(-1, 1),
x_type=self.x_type)
return variables
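# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Wrap a single linear objective min x1 + x2 with one inequality x1 - x2 <= 2 and
# box bounds. LinearFunction is assumed importable as in the other builders of this package.
if __name__ == "__main__":
    import numpy as np
    from science_optimization.function import LinearFunction

    f = [LinearFunction(c=np.array([[1.0], [1.0]]))]
    ineq = [LinearFunction(c=np.array([[1.0], [-1.0]]), d=-2)]
    x_bounds = np.array([[-5.0, 5.0], [-5.0, 5.0]])

    builder = GenericProblem(f=f, eq_cons=[], ineq_cons=ineq, x_bounds=x_bounds)
    objective = builder.build_objectives()
    constraints = builder.build_constraints()
    variables = builder.build_variables()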
|
science-optimization
|
/science_optimization-9.0.2-cp310-cp310-manylinux_2_35_x86_64.whl/science_optimization/problems/generic.py
|
generic.py
|
| 0.947076 | 0.479686 |
__all__ = ['parse_pdf', 'logger']
# Cell
import logging
from pathlib import Path
from typing import Optional, Dict, Any
import requests
logger = logging.getLogger(__name__)
def parse_pdf(server_address: str, file_path: Path, port: str = '', timeout: int = 60
) -> Optional[Dict[str, Any]]:
'''
    If successful, this function returns the JSON output of the
    Science Parse server as a dictionary. If a Timeout or any other
    exception occurs, it returns None and the exception is logged as an error.
1. **server_address**: Address of the server e.g. `http://127.0.0.1`
2. **file_path**: Path to the pdf file to be processed.
3. **port**: The port to the server e.g. 8080
4. **timeout**: The amount of time to allow the request to take.
**returns** A dictionary with the following keys:
```python
['abstractText', 'authors', 'id', 'references', 'sections', 'title', 'year']
```
**Note** not all of these dictionary keys will always exist if science parse
cannot detect the relevant information e.g. if it cannot find any references
then there will be no reference key.
**Note** See the example on the main page of the documentation for a
detailed example of this method.
'''
endpoint = "/v1"
if port:
url = f'{server_address}:{port}{endpoint}'
else:
url = f'{server_address}{endpoint}'
file_name = file_path.name
files = {'data-binary': (file_name, file_path.open('rb'), 'application/pdf',
{'Expires': '0'})}
try:
response = requests.post(url, files=files,
headers={'Accept': 'application/json'},
timeout=timeout)
status_code = response.status_code
if status_code != 200:
error_message = (f'URL: {url}. {file_name} failed with a '
f'status code: {status_code}')
logger.error(error_message)
return None
return response.json()
except requests.exceptions.Timeout:
error_message = (f'URL: {url}. {file_name} failed due to a timeout.')
logger.error(error_message)
except Exception as e:
error_message = f'URL: {url}. {file_name} failed due to the following error:'
logger.error(error_message, exc_info=True)
return None
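# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Assumes a Science Parse server is already running locally on port 8080 and that
# `paper.pdf` exists in the current working directory.
if __name__ == "__main__":
    output = parse_pdf('http://127.0.0.1', Path('paper.pdf'), port='8080')
    if output is not None:
        print(output.get('title'))
        print(len(output.get('references', [])))
    else:
        print('Parsing failed; see the log for details.')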
|
science-parse-api
|
/science_parse_api-1.0.1-py3-none-any.whl/science_parse_api/api.py
|
api.py
|
| 0.890205 | 0.575946 |
science_tools
==============================
[//]: # (Badges)
[](https://github.com/REPLACE_WITH_OWNER_ACCOUNT/science_tools/actions?query=workflow%3ACI)
[](https://codecov.io/gh/REPLACE_WITH_OWNER_ACCOUNT/science_tools/branch/main)
Beautiful tools for beautiful science.
### Copyright
Copyright (c) 2023, Max Gallant
#### Acknowledgements
Project based on the
[Computational Molecular Science Python Cookiecutter](https://github.com/molssi/cookiecutter-cms) version 1.1.
|
science-tools
|
/science_tools-0.0.2.tar.gz/science_tools-0.0.2/README.md
|
README.md
|
| 0.569613 | 0.349144 |
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age,
body size, disability, ethnicity, gender identity and expression, level of
experience, nationality, personal appearance, race, religion, or sexual
identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
Moreover, project maintainers will strive to offer feedback and advice to
ensure quality and consistency of contributions to the code. Contributions
from outside the group of project maintainers are strongly welcomed but the
final decision as to whether commits are merged into the codebase rests with
the team of project maintainers.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an
appointed representative at an online or offline event. Representation of a
project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at '[email protected]'. The project team will
review and investigate all complaints, and will respond in a way that it deems
appropriate to the circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident. Further details of
specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.4, available at
[http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
|
science-tools
|
/science_tools-0.0.2.tar.gz/science_tools-0.0.2/CODE_OF_CONDUCT.md
|
CODE_OF_CONDUCT.md
|
| 0.485844 | 0.674577 |
from science_utils_k.echarts.EChart import EChart
from science_utils_k.utils import princess
import os
class LineChart(EChart):
def __init__(self, global_args=None, options=None):
super(LineChart, self).__init__(global_args)
        if options is None:
self.options = {
'title': {
'text': 'Classification',
},
'tooltip': {},
'legend': {},
'xAxis': {
'name': "epoch",
'nameLocation': "end",
'data': None,
'axisLabel': {
'show': 'true'
}
},
'yAxis': {},
'series': None
}
else:
self.options = options
def get_options(self):
return self.options
def set_options(self, attrs, values):
for attr, value in zip(attrs, values):
# print(attr)
# print(value)
attr = attr.split(".")
princess.set_attr(self.options, attr, value)
    def output(self, time_stamp, type="html", log_root_path="log"):
        o_path = log_root_path + "/" + time_stamp
        if type == "html":
            # create the log directory tree if it does not exist yet (honours log_root_path)
            if not os.path.exists(log_root_path):
                os.makedirs(log_root_path)
            if time_stamp not in os.listdir(log_root_path):
                os.makedirs(o_path)
            if "html" not in os.listdir(o_path):
                os.makedirs(o_path + "/html")
            if "js" not in os.listdir(o_path + "/html"):
                os.makedirs(o_path + "/html/js")
# os.makedirs(o_path+"/html/js/")
            # If main.html already exists, load the HTML from that file; otherwise use the default HTML template.
if "main.html" in os.listdir(o_path+"/html"):
echart_html = princess.echart2html(self,log_path=o_path)
else:
echart_html = princess.echart2html(self)
# write html file
fd = open(o_path+"/html/main.html", "w")
fd.write(str(echart_html))
fd.close()
# write js file
fd = open(o_path+"/html/js/charts_cfg.js","a")
fd.write(princess.js_init_echart(self))
fd.write(princess.js_init_chartOption(self))
fd.close()
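# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# EChart's constructor is not shown in this file; the sketch assumes it simply stores
# the global_args dict (with an element 'id' and an 'init' mapping), which is what
# science_utils_k.utils.princess later reads back when the chart is rendered.
if __name__ == "__main__":
    import time

    global_args = {'id': 'loss_chart',
                   'init': {'dom': "document.getElementById('loss_chart')"}}
    chart = LineChart(global_args=global_args)
    # string values must carry their own quotes because set_attr builds an exec'd assignment
    chart.set_options(attrs=['title.text', 'xAxis.data', 'series'],
                      values=["'Training loss'",
                              list(range(10)),
                              [{'name': 'loss', 'type': 'line',
                                'data': [round(1.0 / (i + 1), 3) for i in range(10)]}]])
    chart.output(time_stamp=time.strftime('%Y%m%d-%H%M%S'))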
|
science-utils-k
|
/science_utils_k-0.0.6.tar.gz/science_utils_k-0.0.6/science_utils_k/echarts/LineChart.py
|
LineChart.py
|
| 0.086828 | 0.0745 |
from bs4 import BeautifulSoup
def set_attr(options: dict, attr: list, value: any) -> str:
i = 0
for x in attr:
if i == 0:
exec_str = "options['%s']" % x
i += 1
else:
exec_str += "['%s']" % x
exec_str += " = %s" % value
print(exec_str)
exec(exec_str)
# print(options)
return exec_str
def load_html(file_path=None):
html_temp = '<!DOCTYPE html>\
<html>\
\
<head>\
<meta charset="utf-8" />\
<meta http-equiv="X-UA-Compatible" content="IE=edge">\
<meta name="viewport" content="width=device-width, initial-scale=1">\
<title></title>\
\
<!-- ZUI Javascript 依赖 jQuery -->\
<script src="https://cdn.staticfile.org/echarts/4.3.0/echarts.min.js"></script>\
</head>\
\
<body>\
<div class="container"></div>\
</body>\
<script src="js/charts_cfg.js"></script>\
\
</html>'
    if file_path is None:
return BeautifulSoup(html_temp, features="lxml")
else:
return BeautifulSoup(open(file_path),features="lxml")
def load_data(data) -> str:
return str(data)
def load_data_from_file(path: str, args_name: list, split=","):
pass
def js_init_echart(chart):
global_args = chart.get_global_args()
options = chart.get_options()
i = 0
for arg in global_args["init"]:
if i == 0:
init_js = "var %s = echarts.init(\n\
%s=%s, \n \
" % (
global_args["id"],
arg,
global_args["init"][arg] ,
)
i = 1
else:
init_js += "%s = %s,\n" % (
arg,
                global_args["init"][arg] if global_args["init"][arg] is not None else "null",
)
init_js += ");\n"
print(init_js)
return init_js
def js_init_chartOption(chart):
global_args = chart.get_global_args()
options = chart.get_options()
options_js = "%s.setOption(%s)" % (
global_args["id"],
options
)
options_js +=";\n"
return options_js
def echart2html(chart,log_path=None):
# echart_container = '<div id="%s" style="float: left;"></div>' % chart.get_global_args()["id"]
    if log_path is None:
soup = load_html()
else:
soup = load_html(file_path=log_path+"/html/main.html")
echart_container = soup.new_tag("div", id=chart.get_global_args()["id"],style="float: left;")
soup.div.append(echart_container)
return soup.prettify()
# soup = load_html().prettify
# print(soup)
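# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Exercises the helpers above with a minimal stand-in object instead of a real EChart
# subclass; only the get_global_args()/get_options() methods the helpers rely on are provided.
if __name__ == "__main__":
    class _FakeChart:
        def get_global_args(self):
            return {'id': 'chart0', 'init': {'dom': "document.getElementById('chart0')"}}

        def get_options(self):
            return {'title': {'text': 'demo'}, 'series': []}

    opts = {'title': {'text': None}, 'xAxis': {'data': None}}
    set_attr(opts, ['title', 'text'], "'Loss'")  # string values need their own quotes
    set_attr(opts, ['xAxis', 'data'], [1, 2, 3])

    chart = _FakeChart()
    print(js_init_echart(chart))       # -> var chart0 = echarts.init( ... );
    print(js_init_chartOption(chart))  # -> chart0.setOption({...});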
|
science-utils-k
|
/science_utils_k-0.0.6.tar.gz/science_utils_k-0.0.6/science_utils_k/utils/princess.py
|
princess.py
|
| 0.069494 | 0.088702 |
# [<img src="https://pbs.twimg.com/profile_images/1396102254487384065/ZjD8GvMw_400x400.png" alt="drawing" width="50"/>ViewsOnDrugsBot<img src="https://pbs.twimg.com/media/E1_0586WQAYCNym?format=png&name=small" alt="drawing" width="50"/>](https://twitter.com/ViewsOnDrugsBot/)
A bot application sharing scientific publications and educated opinions on psychedelic research, harm reduction and drug policy issues. A collaboration with: [#mybrainmychoice](https://mybrainmychoice.de/twitter-bot/)
[<img src="https://mybrainmychoice.de/wp-content/uploads/mybrainmychoice_Logo-500x500_GIF.gif" alt="drawing" width="50"/>](https://mybrainmychoice.de/)
## What it does:
* Reads and parses a list of RSS feeds (e.g. PubMed) and tweets each article's title, link, abstract, and authors as a 5/5 thread :thread:.
* Retweets older RSS post after a given time.
* Retweets the most retweeted and up-voted post from:
- a global search for specified keywords or hashtags defined on `Settings.add_hashtag`.
- a search result from a given distribution Twitter list defined on `Settings.mylist_id`.
* Interacts with users by faving posts from the above.
* Schedule jobs for any of the above.
* Send automated debug reports via Telegram.
All functions can be used independently.
## Install
0. Download or git clone Twitterbot:
- `git clone https://github.com/franasal/science_bot.git`
1. Run:
- `cd scibot`
- `pip install . --user`
2. Create a [Twitter application](https://apps.twitter.com/), and generate keys, tokens etc.
3. Create a [Telegram bot](https://python-telegram-bot.readthedocs.io/en/stable/) for post and debugging notifications.
4. Modify the settings in the source code.
- Modify `feed_urls` list to add the RSS feeds of your choice. [Here](https://github.com/roblanf/phypapers) you can find a description on how to set an RSS search.
- Modify the variables in the `example.env` file and add keys, tokens etc. for connecting to your Twitter app and save it as `.env` in your home directory.
- Modify `retweet_include_words` for keywords you want to search and retweet, and `retweet_exclude_words` for keywords you would like to exclude from retweeting. For example `retweet_include_words = ["foo"]` and `retweet_exclude_words = ["bar"]` will include any tweet with the word "foo", as long as the word "bar" is absent. This list can also be left empty, i.e. `retweet_exclude_words = []`.
- Modify or add jobs to the `scheduled_job()` function.
## Requirements
* Python 3+
* Twitter account
* Telegram account
## Usage
Read the RSS feeds and post a thread to Twitter account:
```bash
$ scibot rss
```
Search globally for tweets and retweet them:
```bash
$ scibot rtg
```
Search for tweets within a Twitter list and retweet them:
```bash
$ scibot rtl
```
Retweet last own tweet:
```bash
$ scibot rto
```
### Deploy:
[Here](https://schedule.readthedocs.io/en/stable/) you can learn how to set up tasks for the `scheduled_job()` function.
There are some good free cloud solutions, such as [pythonanywhere](https://www.pythonanywhere.com/), where you can deploy the bot. To do that, just run:
```bash
$ scibot sch
```
:hibiscus:
|
scienceBot
|
/scienceBot-0.1.1.1.tar.gz/scienceBot-0.1.1.1/README.md
|
README.md
|
| 0.370339 | 0.844024 |
import tweepy
from scibot.telebot import telegram_bot_sendtext
from dotenv import load_dotenv
from os.path import expanduser
import os
env_path = expanduser("~/.env")
load_dotenv(dotenv_path=env_path, override=True)
# Setup API:
def twitter_setup():
"""
Setup Twitter connection for a developer account
Returns: tweepy.API object
"""
# Authenticate and access using keys:
auth = tweepy.OAuthHandler(os.getenv("CONSUMER_KEY"), os.getenv("CONSUMER_SECRET"))
auth.set_access_token(os.getenv("ACCESS_TOKEN"), os.getenv("ACCESS_SECRET"))
# Return API access:
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
return api
api = twitter_setup()
banned_profiles = ['nydancesafe']
class MyStreamListener(tweepy.StreamListener):
def on_status(self, status):
if hasattr(status, "retweeted_status"): # Check if Retweet
# try:
# print('rt',status['retweeted_status']['extended_tweet']["full_text"])
# except AttributeError:
telegram_bot_sendtext(f" check if retweet:, {status.retweeted_status.text}")
if "constellation" not in status.retweeted_status.text.lower():
pass
else:
try:
## catch nesting
                    # skip banned profiles and replies (with `pass` this check had no effect)
                    if status.user.screen_name in banned_profiles or status.in_reply_to_screen_name:
                        return
replied_to=status.in_reply_to_screen_name
answer_user=status.user.screen_name
answer_id=status.id
answer_user_id = status.user.id
## ignore replies that by default contain mention
in_reply_to_status_id=status.in_reply_to_status_id
in_reply_to_user_id=status.in_reply_to_user_id
telegram_bot_sendtext(f"{replied_to}, 'nesting', {in_reply_to_user_id}, 'replied to', {replied_to}, 'message', {status.text}")
except AttributeError:
replied_to=status.in_reply_to_screen_name
answer_user=status.user.screen_name
answer_user_id = status.user.id
answer_id=status.id
in_reply_to_status_id=status.in_reply_to_status_id
in_reply_to_user_id=status.in_reply_to_user_id
telegram_bot_sendtext(f"ATRIB ERROR: {replied_to}, 'nesting', {in_reply_to_user_id}, 'replied to', {replied_to}, 'message', {status.text}")
update_status = f""" #ConstellationsFest live RT. From 16-24 NOV:
https://twitter.com/{answer_user}/status/{answer_id}
"""
# don't reply to yourself!!
self_ids=[1319577341056733184, 1118874276961116162]
if status.user.id not in self_ids:
api.update_status(update_status,
auto_populate_reply_metadata=True)
def on_error(self, status):
telegram_bot_sendtext(f"ERROR with: {status}")
def listen_stream_and_rt(keywords_list):
api = twitter_setup()
myStreamListener = MyStreamListener()
try:
myStream = tweepy.Stream(auth=api.auth, listener=myStreamListener)
myStream.filter(track=keywords_list, is_async=True)
except Exception as ex:
telegram_bot_sendtext(f"ERROR with: {ex}")
pass
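# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Starts the asynchronous stream listener for a couple of festival-related keywords.
# Requires valid Twitter credentials in ~/.env (and a tweepy version that provides StreamListener).
if __name__ == "__main__":
    listen_stream_and_rt(["#ConstellationsFest", "constellations festival"])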
|
scienceBot
|
/scienceBot-0.1.1.1.tar.gz/scienceBot-0.1.1.1/scibot/streamer.py
|
streamer.py
|
| 0.214362 | 0.058346 |